zip (string, lengths 19-109) | filename (string, lengths 4-185) | contents (string, lengths 0-30.1M) | type_annotations (sequence, lengths 0-1.97k) | type_annotation_starts (sequence, lengths 0-1.97k) | type_annotation_ends (sequence, lengths 0-1.97k)
---|---|---|---|---|---
archives/zyks_django-react-docker.zip | backend/config/settings/production.py | import environ # usage of the 12-factor settings pattern
from os.path import join, exists
from .base import *
env = environ.Env(
DEBUG=(bool, False),
)
env_file = join(BASE_DIR, 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
else:
print(env_file, "does not exist", BASE_DIR)
DEBUG = env('DEBUG')
SECRET_KEY = env('SECRET_KEY')
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db(),
}
ALLOWED_HOSTS = [
'localhost',
'0.0.0.0',
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
STATIC_ROOT = '/var/app/public/'
COMPRESS_ROOT = '/var/app/public/'
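# The 12-factor pattern above reads settings from the environment (or from a
# local.env file). An illustrative local.env (values are placeholders, not
# part of this archive):
#   DEBUG=off
#   SECRET_KEY=change-me
#   DATABASE_URL=postgres://postgres:postgres@database:5432/web_app_database
# env.db() parses DATABASE_URL into the DATABASES['default'] dict.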
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/config/settings/test.py | from datetime import datetime
from .base import *
# override database settings for tests etc.
# it is important to run tests on PostgreSQL because
# SQLite does not enforce char length constraints
DATABASES = {
'default': default_psql,
}
DATABASES['default']['HOST'] = 'localhost'
DATABASES['default']['NAME'] += f'_{datetime.now()}'
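# Note: str(datetime.now()) contains spaces and colons, which PostgreSQL only
# accepts in a database name when quoted. A timestamp-safe variant (a
# suggestion, not part of the original project) would be:
#   DATABASES['default']['NAME'] += datetime.now().strftime('_%Y%m%d%H%M%S')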
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/config/urls.py | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
import api.urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(api.urls)),
]
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/config/wsgi.py | """
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
application = get_wsgi_application()
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/__init__.py | # main or core
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/admin.py | # configure Django Admin panel
# add models etc.
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/api/serializers.py | from rest_framework import serializers
from ..models import Article, Comment
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ('author', 'content', 'created_at', 'article')
class ArticleSerializer(serializers.ModelSerializer):
comments = CommentSerializer(many=True, read_only=True)
class Meta:
model = Article
fields = ('id', 'author', 'title', 'content', 'created_at', 'comments')
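# comments is declared read_only above, so nested comment payloads in an
# article POST are ignored; the test
# ArticleAPICreateTest.test_article_post_does_not_create_comment relies on this.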
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/api/urls.py | from django.conf.urls import include, url
from rest_framework import routers
from .views import ArticleViewSet, CommentViewSet
router = routers.DefaultRouter()
router.register(r'article', ArticleViewSet)
router.register(r'comment', CommentViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/api/views.py | from rest_framework.viewsets import ModelViewSet
from ..models import Article, Comment
from .serializers import ArticleSerializer, CommentSerializer
class ArticleViewSet(ModelViewSet):
queryset = Article.objects.all()
serializer_class = ArticleSerializer
class CommentViewSet(ModelViewSet):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
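# Each ModelViewSet provides list/retrieve/create/update/destroy actions out
# of the box; registered with the DefaultRouter in api/urls.py, they are
# served under /api/main/article/ and /api/main/comment/ (the URLs the tests hit).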
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/migrations/0001_initial.py | # Generated by Django 2.0.7 on 2018-12-27 09:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=50)),
('title', models.CharField(max_length=100)),
('content', models.CharField(max_length=1000)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=50)),
('content', models.CharField(max_length=200)),
('created_at', models.DateTimeField(auto_now_add=True)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Article')),
],
),
]
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/migrations/__init__.py | [] | [] | [] |
|
archives/zyks_django-react-docker.zip | backend/main/models/__init__.py | from .models import *
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/models/models.py | from django.db import models
class Article(models.Model):
author = models.CharField(max_length=50)
title = models.CharField(max_length=100)
content = models.CharField(max_length=1000)
created_at = models.DateTimeField(auto_now_add=True)
class Comment(models.Model):
author = models.CharField(max_length=50)
content = models.CharField(max_length=200)
article = models.ForeignKey(Article, null=False, related_name='comments', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
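# related_name='comments' exposes the reverse accessor used by the
# ArticleSerializer, e.g. article.comments.all(); on_delete=models.CASCADE
# deletes an article's comments together with the article.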
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tasks/__init__.py | from .model import create_article, create_comment, delete_all_articles
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tasks/model.py | from celery import shared_task
from faker import Faker
from ..models import Article, Comment
fake = Faker('en_US')
@shared_task()
def create_article():
Article.objects.create(
author=fake.name()[:50],
title=fake.sentence()[:100],
content=fake.text(max_nb_chars=1000),
)
@shared_task()
def create_comment():
article = Article.objects.order_by("?").first()
Comment.objects.create(
author=fake.name()[:50],
content=fake.sentence()[:200],
article=article,
)
@shared_task
def delete_all_articles():
Article.objects.all().delete()
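# These tasks are driven by the beat schedule in services/celery/config/beat.py:
# create_article and create_comment run every 30 s, delete_all_articles every 300 s.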
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tests/__init__.py | [] | [] | [] |
|
archives/zyks_django-react-docker.zip | backend/main/tests/article/__init__.py | [] | [] | [] |
|
archives/zyks_django-react-docker.zip | backend/main/tests/article/test_api.py | from rest_framework import status
from rest_framework.test import APITestCase
from main.models import Article, Comment
from ..utils import get_comment_data, get_values_set, create_article, get_article_data
class ArticleAPITestCase(APITestCase):
url = '/api/main/article/'
class ArticleAPIFormatTest(ArticleAPITestCase):
def test_api_returns_200(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_returns_json_list(self):
response = self.client.get(self.url)
self.assertIsInstance(response.data, list)
def test_api_returns_empty_list(self):
response = self.client.get(self.url)
self.assertEqual(len(response.data), 0)
class ArticleAPIContentTest(ArticleAPITestCase):
def setUp(self):
self.article1 = create_article(title="test 1")
self.article2 = create_article(title="test 2")
def test_api_returns_2_articles(self):
response = self.client.get(self.url)
self.assertEqual(len(response.data), 2)
def test_api_returns_correct_articles(self):
response = self.client.get(self.url)
self.assertSetEqual(
get_values_set(response, 'title'),
{'test 1', 'test 2'},
)
def test_api_returns_newly_created_article(self):
create_article(title="test 3")
response = self.client.get(self.url)
self.assertSetEqual(
get_values_set(response, 'title'),
{'test 1', 'test 2', 'test 3'},
)
class ArticleAPICreateTest(ArticleAPITestCase):
def test_valid_post_creates_db_record(self):
data = get_article_data()
self.assertEqual(Article.objects.count(), 0)
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Article.objects.count(), 1)
def test_article_post_does_not_create_comment(self):
data = get_article_data()
comment = get_comment_data()
comment.pop('article')
data['comments'] = [comment]
self.assertEqual(Article.objects.count(), 0)
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Article.objects.count(), 1)
self.assertEqual(Comment.objects.count(), 0)
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tests/article/test_model.py | from django.test import TestCase
from django.db.utils import DataError
from main.models import Article, Comment
from ..utils import create_article, create_comment
# TODO: use model_mommy's mommy.make
class ArticleModelTest(TestCase):
def test_objects_create_adds_table_row(self):
create_article()
self.assertEqual(Article.objects.count(), 1)
def test_author_constraint_valid(self):
create_article(author='a' * 50)
self.assertEqual(Article.objects.count(), 1)
def test_author_constraint_invalid(self):
with self.assertRaises(DataError):
create_article(author='a' * 51)
def test_title_constraint_valid(self):
create_article(title='t' * 100)
self.assertEqual(Article.objects.count(), 1)
def test_title_constraint_invalid(self):
with self.assertRaises(DataError):
create_article(title='t' * 101)
def test_content_constraint_valid(self):
create_article(content='c' * 1000)
self.assertEqual(Article.objects.count(), 1)
def test_content_constraint_invalid(self):
with self.assertRaises(DataError):
create_article(content='c' * 1001)
def test_deleting_article_deletes_comments(self):
article = create_article()
create_comment(article=article)
self.assertEqual(Article.objects.count(), 1)
self.assertEqual(Comment.objects.count(), 1)
article.delete()
self.assertEqual(Article.objects.count(), 0)
self.assertEqual(Comment.objects.count(), 0)
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tests/comment/__init__.py | [] | [] | [] |
|
archives/zyks_django-react-docker.zip | backend/main/tests/comment/test_api.py | from rest_framework import status
from rest_framework.test import APITestCase
from main.models import Article, Comment
from ..utils import create_comment, get_comment_data, get_values_set
class CommentAPITestCase(APITestCase):
url = '/api/main/comment/'
class CommentAPIFormatTest(CommentAPITestCase):
def test_api_returns_200(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_returns_json_list(self):
response = self.client.get(self.url)
self.assertIsInstance(response.data, list)
def test_api_returns_empty_list(self):
response = self.client.get(self.url)
self.assertEqual(len(response.data), 0)
class CommentAPIContentTest(CommentAPITestCase):
def setUp(self):
self.comment1 = create_comment(content="test 1")
self.comment2 = create_comment(content="test 2")
def test_api_returns_2_comments(self):
response = self.client.get(self.url)
self.assertEqual(len(response.data), 2)
def test_api_returns_correct_comments(self):
response = self.client.get(self.url)
self.assertSetEqual(
get_values_set(response, 'content'),
{'test 1', 'test 2'},
)
def test_api_returns_newly_created_comment(self):
create_comment(content="test 3")
response = self.client.get(self.url)
self.assertSetEqual(
get_values_set(response, 'content'),
{'test 1', 'test 2', 'test 3'},
)
class CommentAPICreateTest(CommentAPITestCase):
# id (pk) should not be sent in the response
# TODO: add unique public value
def test_valid_post_creates_db_record(self):
data = get_comment_data()
article_url = '/api/main/article/'
response = self.client.post(article_url, data=data.pop('article'))
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data['article'] = response.data.get('id')
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Article.objects.count(), 1)
self.assertEqual(Comment.objects.count(), 1)
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tests/comment/test_model.py | from django.test import TestCase
from django.db.utils import DataError
from main.models import Comment, Article
from ..utils import create_comment
# TODO: use model_mommy's mommy.make
class CommentModelTest(TestCase):
def test_objects_create_adds_table_row(self):
create_comment()
self.assertEqual(Comment.objects.count(), 1)
def test_author_constraint_valid(self):
create_comment(author='a' * 50)
self.assertEqual(Comment.objects.count(), 1)
def test_author_constraint_invalid(self):
with self.assertRaises(DataError):
create_comment(author='a' * 51)
def test_content_constraint_valid(self):
create_comment(content='c' * 200)
self.assertEqual(Comment.objects.count(), 1)
def test_content_constraint_invalid(self):
with self.assertRaises(DataError):
create_comment(content='c' * 201)
def test_deleting_comment_does_not_delete_article(self):
comment = create_comment()
self.assertEqual(Article.objects.count(), 1)
self.assertEqual(Comment.objects.count(), 1)
comment.delete()
self.assertEqual(Article.objects.count(), 1)
self.assertEqual(Comment.objects.count(), 0)
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/main/tests/utils.py | from main.models import Article, Comment
def get_article_data(author='author', title='title', content='content'):
return {
'author': author,
'title': title,
'content': content,
}
def get_comment_data(author='author', content='content', **kwargs):
return {
'author': author,
'content': content,
'article': get_article_data(**kwargs),
}
def create_article(**kwargs):
return Article.objects.create(**get_article_data(**kwargs))
def create_comment(article=None, **kwargs):
data = get_comment_data(**kwargs)
article_data = data.pop('article')
if article is None:
article = create_article(**article_data)
return Comment.objects.create(**data, article=article)
def get_values_set(response, key):
return set(map(lambda a: a.get(key, None), response.data))
| [] | [] | [] |
archives/zyks_django-react-docker.zip | backend/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.development")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] |
archives/zyks_django-react-docker.zip | services/celery/config/__init__.py | from .app import celery_app
__all__ = ['celery_app']
| [] | [] | [] |
archives/zyks_django-react-docker.zip | services/celery/config/app.py | import os
from celery import Celery
from .beat import CELERYBEAT_SCHEDULE
APP_NAME = "backend_app_celery"
BROKER_HOST = "rabbitmq"
BROKER_PORT = 5672
BROKER_URL = f'amqp://{BROKER_HOST}:{BROKER_PORT}'
config = {
'CELERY_BROKER_URL': BROKER_URL,
'CELERY_RESULT_BACKEND': BROKER_URL,
'CELERY_IMPORTS': ("main.tasks", ),
'CELERY_TASK_RESULT_EXPIRES': 300,
'CELERY_AMQP_TASK_RESULT_EXPIRES': 10,
}
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
celery_app = Celery(APP_NAME, broker=BROKER_URL)
celery_app.conf.update(config)
celery_app.conf.CELERYBEAT_SCHEDULE = CELERYBEAT_SCHEDULE
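# A worker consuming the celerybeat_periodic queue alongside the beat
# scheduler could be started with something like (an assumption; the
# service's Docker entrypoint is not part of this archive):
#   celery -A config worker -B -Q celerybeat_periodic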
| [] | [] | [] |
archives/zyks_django-react-docker.zip | services/celery/config/beat.py | """
Additional place for scheduling Celery tasks using crontab.
"""
from celery.schedules import crontab
CELERYBEAT_SCHEDULE = {
'create_article': {
'task': 'main.tasks.model.create_article',
'schedule': 30.0,
'options': {'queue': 'celerybeat_periodic'},
},
'create_comment': {
'task': 'main.tasks.model.create_comment',
'schedule': 30.0,
'options': {'queue': 'celerybeat_periodic'},
},
'delete_all_articles': {
'task': 'main.tasks.model.delete_all_articles',
'schedule': 300.0,
'options': {'queue': 'celerybeat_periodic'},
}
}
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/api/urls.py | import main.api.urls
from django.conf.urls import include, url
urlpatterns = [
url(r'main/', include(main.api.urls)),
]
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/config/settings/base.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%9ag=!=^h0*)tu=v2oiuqklarb$6lq_sz-wghu$r%111wf%+04'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'main'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
default_sqlite3 = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
default_psql = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'web_app_database',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'database',
'PORT': '5432',
}
DATABASES = {
'default': default_psql
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
CORS_ORIGIN_ALLOW_ALL = True
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/config/settings/development.py | from .base import *
DEBUG = True
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/config/settings/production.py | import environ # usage of the 12-factor settings pattern
from os.path import join, exists
from .base import *
env = environ.Env(
DEBUG=(bool, False),
)
env_file = join(BASE_DIR, 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
else:
print(env_file, "does not exist", BASE_DIR)
DEBUG = env('DEBUG')
SECRET_KEY = env('SECRET_KEY')
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db(),
}
ALLOWED_HOSTS = [
'localhost',
'0.0.0.0',
]
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
STATIC_ROOT = '/var/app/public/'
COMPRESS_ROOT = '/var/app/public/'
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/config/settings/test.py | from datetime import datetime
from .base import *
# override database settings for tests etc.
# it is important to run tests on PostgreSQL because
# SQLite does not enforce char length constraints
DATABASES = {
'default': default_psql,
}
DATABASES['default']['HOST'] = 'localhost'
DATABASES['default']['NAME'] += f'_{datetime.now()}'
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/config/urls.py | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from rest_framework_jwt.views import obtain_jwt_token
import api.urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(api.urls)),
url(r'^api-token-auth/', obtain_jwt_token),
]
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/config/wsgi.py | """
WSGI config for app project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
application = get_wsgi_application()
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/__init__.py | # main or core
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/admin.py | # configure Django Admin panel
# add models etc.
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/api/serializers.py | from rest_framework import serializers
# from ..models import *
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/api/urls.py | from django.conf.urls import include, url
from rest_framework import routers
# from .views import *
router = routers.DefaultRouter()
urlpatterns = [
url(r'^', include(router.urls)),
]
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/api/views.py | from rest_framework.viewsets import ModelViewSet
# from ..models import *
# from .serializers import *
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/management/commands/create_user.py | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
class Command(BaseCommand):
USERNAME = "testuser"
PASSWORD = "user1234"
def handle(self, *args, **kwargs):
if not User.objects.filter(username=self.USERNAME).exists():
user = User(username=self.USERNAME)
user.set_password(self.PASSWORD)
user.save()
print("User has been created!")
else:
print("User already exists.")
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/migrations/__init__.py | [] | [] | [] |
|
archives/zyks_movie-db.zip | backend/main/models/__init__.py | from .models import *
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/models/models.py | from django.db import models
| [] | [] | [] |
archives/zyks_movie-db.zip | backend/main/tests/__init__.py | [] | [] | [] |
|
archives/zyks_movie-db.zip | backend/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.development")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:CNN 句子分类(2014)/code/cnn-text-classification-tf-master/data_helpers.py | import numpy as np
import re
import itertools
from collections import Counter
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
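# Worked example: clean_str("Don't stop!") -> "do n't stop !"
# ("n't" is split off, "!" is padded with spaces, and the result is lowercased)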
def load_data_and_labels(positive_data_file, negative_data_file):
"""
Loads MR polarity data from files, splits the data into words and generates labels.
Returns split sentences and labels.
"""
# Load data from files
positive_examples = list(open(positive_data_file, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(negative_data_file, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
# Generate labels
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
return [x_text, y]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
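# Example: with 5 data points, batch_size=2 and num_epochs=2, each epoch yields
# batches of sizes 2, 2, 1 (num_batches_per_epoch = int((5-1)/2) + 1 = 3),
# reshuffled at the start of every epoch when shuffle=True.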
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:CNN 句子分类(2014)/code/cnn-text-classification-tf-master/eval.py | #! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
import csv
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# CHANGE THIS: Load data. Load your own data here
if FLAGS.eval_train:
x_raw, y_test = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
y_test = np.argmax(y_test, axis=1)
else:
x_raw = ["a masterpiece four years in the making", "everything is off."]
y_test = [1, 0]
# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
# Generate batches for one epoch
batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
for x_test_batch in batches:
batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate([all_predictions, batch_predictions])
# Print accuracy if y_test is defined
if y_test is not None:
correct_predictions = float(sum(all_predictions == y_test))
print("Total number of test examples: {}".format(len(y_test)))
print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
# Save the evaluation to a csv
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:CNN 句子分类(2014)/code/cnn-text-classification-tf-master/text_cnn.py | import tensorflow as tf
import numpy as np
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
def __init__(
self, sequence_length, num_classes, vocab_size,
embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
# running example: sequence_length = 30 and filter_size = 3 (used in the shape comments below)
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
# filter_shape: [3,128,1,128]
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
# the size of conv is [1*28*1*128]
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
# the pool window is [1, sequence_length - filter_size + 1, 1, 1], i.e. [1,28,1,1], so the result after max_pool is [1,1,1,128]
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features: 128 * 3 = 384
num_filters_total = num_filters * len(filter_sizes)
# each pooled tensor is [1*1*1*128], so pooled_outputs holds [1*1*1*128]*3 and self.h_pool is [1*1*1*384]
self.h_pool = tf.concat(pooled_outputs, 3)
# [1*384]
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
if __name__ == "__main__":
    # smoke-test with train.py's defaults (vocab_size is an arbitrary placeholder)
    TextCNN(sequence_length=30, num_classes=2, vocab_size=10000,
            embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128) | [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:CNN 句子分类(2014)/code/cnn-text-classification-tf-master/train.py | #! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ==================================================
# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
# ==================================================
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/test set
# TODO: This is very crude, should use cross-validation
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
# Training
# ==================================================
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
l2_reg_lambda=FLAGS.l2_reg_lambda)
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("Writing to {}\n".format(out_dir))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar("loss", cnn.loss)
acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
# Train Summaries
train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
# Write vocabulary
vocab_processor.save(os.path.join(out_dir, "vocab"))
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
}
_, step, summaries, loss, accuracy = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on a dev set
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy = sess.run(
[global_step, dev_summary_op, cnn.loss, cnn.accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
# Generate batches
batches = data_helpers.batch_iter(
list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
# Training loop. For each batch...
for batch in batches:
x_batch, y_batch = zip(*batch)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
dev_step(x_dev, y_dev, writer=dev_summary_writer)
print("")
if current_step % FLAGS.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
if __name__ == "__main__":
pass | [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/contrib/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License. | [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/contrib/sacrebleu/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from contrib.sacrebleu.sacrebleu import raw_corpus_bleu, compute_bleu
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/contrib/sacrebleu/sacrebleu.py | #!/usr/bin/env python3
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SacréBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
Why use this version of BLEU?
- It automatically downloads common WMT test sets and processes them to plain text
- It produces a short version string that facilitates cross-paper comparisons
- It properly computes scores on detokenized outputs, using WMT ([Conference on Machine Translation](http://statmt.org/wmt17)) standard tokenization
- It produces the same values as official script (`mteval-v13a.pl`) used by WMT
- It outputs the BLEU score without the comma, so you don't have to remove it with `sed` (Looking at you, `multi-bleu.perl`)
# QUICK START
Install the Python module (Python 3 only)
pip3 install sacrebleu
This installs a shell script, `sacrebleu`.
(You can also directly run the shell script `sacrebleu.py` in the source repository).
Get a list of available test sets:
sacrebleu
Download the source for one of the pre-defined test sets:
sacrebleu -t wmt14 -l de-en --echo src > wmt14-de-en.src
(you can also use long parameter names for readability):
sacrebleu --test-set wmt14 --langpair de-en --echo src > wmt14-de-en.src
After tokenizing, translating, and detokenizing it, you can score your decoder output easily:
cat output.detok.txt | sacrebleu -t wmt14 -l de-en
SacréBLEU knows about common WMT test sets, but you can also use it to score system outputs with arbitrary references.
It also works in a backwards-compatible mode where you manually specify the reference(s), similar to the format of `multi-bleu.txt`:
cat output.detok.txt | sacrebleu REF1 [REF2 ...]
Note that the system output and references will all be tokenized internally.
SacréBLEU generates version strings like the following.
Put them in a footnote in your paper!
Use `--short` for a shorter hash if you like.
BLEU+case.mixed+lang.de-en+test.wmt17 = 32.97 66.1/40.2/26.6/18.1 (BP = 0.980 ratio = 0.980 hyp_len = 63134 ref_len = 64399)
# MOTIVATION
Comparing BLEU scores is harder than it should be.
Every decoder has its own implementation, often borrowed from Moses, but maybe with subtle changes.
Moses itself has a number of implementations as standalone scripts, with little indication of how they differ (note: they mostly don't, but `multi-bleu.pl` expects tokenized input).
Different flags passed to each of these scripts can produce wide swings in the final score.
All of these may handle tokenization in different ways.
On top of this, downloading and managing test sets is a moderate annoyance.
Sacré bleu!
What a mess.
SacréBLEU aims to solve these problems by wrapping the original Papineni reference implementation together with other useful features.
The defaults are set the way that BLEU should be computed, and furthermore, the script outputs a short version string that allows others to know exactly what you did.
As an added bonus, it automatically downloads and manages test sets for you, so that you can simply tell it to score against 'wmt14', without having to hunt down a path on your local file system.
It is all designed to take BLEU a little more seriously.
After all, even with all its problems, BLEU is the default and---admit it---well-loved metric of our entire research community.
Sacré BLEU.
# VERSION HISTORY
- 1.1.7 (27 November 2017)
- corpus_bleu() now raises an exception if input streams are different lengths
- thanks to Martin Popel for:
- small bugfix in tokenization_13a (not affecting WMT references)
- adding `--tok intl` (international tokenization)
- added wmt16/dev and wmt17/dev sets (for languages intro'd those years)
- 1.1.6 (15 November 2017)
- bugfix for tokenization warning
- 1.1.5 (12 November 2017)
- added -b option (only output the BLEU score)
- removed fi-en from list of WMT16/17 systems with more than one reference
- added WMT16/tworefs and WMT17/tworefs for scoring with both en-fi references
- 1.1.4 (10 November 2017)
- added effective order for sentence-level BLEU computation
- added unit tests from sockeye
- 1.1.3 (8 November 2017).
- Factored code a bit to facilitate API:
- compute_bleu: works from raw stats
- corpus_bleu for use from the command line
- raw_corpus_bleu: turns off tokenization, command-line sanity checks, floor smoothing
- Smoothing (type 'exp', now the default) fixed to produce mteval-v13a.pl results
- Added 'floor' smoothing (adds 0.01 to 0 counts, more versatile via API), 'none' smoothing (via API)
- Small bugfixes, windows compatibility (H/T Christian Federmann)
- 1.0.3 (4 November 2017).
- Contributions from Christian Federmann:
- Added explicit support for encoding
- Fixed Windows support
- Bugfix in handling reference length with multiple refs
- version 1.0.1 (1 November 2017).
- Small bugfix affecting some versions of Python.
- Code reformatting due to Ozan Çağlayan.
- version 1.0 (23 October 2017).
- Support for WMT 2008--2017.
- Single tokenization (v13a) with lowercase fix (proper lower() instead of just A-Z).
- Chinese tokenization.
- Tested to match all WMT17 scores on all arcs.
# LICENSE
SacréBLEU is licensed under the Apache 2.0 License.
# CREDITS
This was all Rico Sennrich's idea.
Originally written by Matt Post.
The official version can be found at github.com/awslabs/sockeye, under `contrib/sacrebleu`.
"""
import re
import os
import sys
import math
import gzip
import tarfile
import logging
import urllib.request
import urllib.parse
import argparse
import unicodedata
from collections import Counter, namedtuple
from itertools import zip_longest
from typing import List
VERSION = '1.1.7'
try:
# SIGPIPE is not available on Windows machines, throwing an exception.
from signal import SIGPIPE
# If SIGPIPE is available, change behaviour to default instead of ignore.
from signal import signal, SIG_DFL
signal(SIGPIPE, SIG_DFL)
except ImportError:
logging.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')
# Where to store downloaded test sets.
# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.
#
# Querying for a HOME environment variable can result in None (e.g., on Windows)
# in which case the os.path.join() throws a TypeError. Using expanduser() is
# a safe way to get the user's home folder.
USERHOME = os.path.expanduser("~")
SACREBLEU = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))
# n-gram order. Don't change this.
NGRAM_ORDER = 4
# This defines data locations.
# At the top level are test sets.
# Beneath each test set, we define the location to download the test data.
# The other keys are each language pair contained in the tarball, and the respective locations of the source and reference data within each.
# Many of these are *.sgm files, which are processed to produced plain text that can be used by this script.
# The canonical location of unpacked, processed data is $SACREBLEU/$TEST/$SOURCE-$TARGET.{$SOURCE,$TARGET}
DATASETS = {
'wmt17': {
'data': 'http://data.statmt.org/wmt17/translation-task/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2017-csen-src.cs.sgm', 'test/newstest2017-csen-ref.en.sgm'],
'de-en': ['test/newstest2017-deen-src.de.sgm', 'test/newstest2017-deen-ref.en.sgm'],
'en-cs': ['test/newstest2017-encs-src.en.sgm', 'test/newstest2017-encs-ref.cs.sgm'],
'en-de': ['test/newstest2017-ende-src.en.sgm', 'test/newstest2017-ende-ref.de.sgm'],
'en-fi': ['test/newstest2017-enfi-src.en.sgm', 'test/newstest2017-enfi-ref.fi.sgm'],
'en-lv': ['test/newstest2017-enlv-src.en.sgm', 'test/newstest2017-enlv-ref.lv.sgm'],
'en-ru': ['test/newstest2017-enru-src.en.sgm', 'test/newstest2017-enru-ref.ru.sgm'],
'en-tr': ['test/newstest2017-entr-src.en.sgm', 'test/newstest2017-entr-ref.tr.sgm'],
'en-zh': ['test/newstest2017-enzh-src.en.sgm', 'test/newstest2017-enzh-ref.zh.sgm'],
'fi-en': ['test/newstest2017-fien-src.fi.sgm', 'test/newstest2017-fien-ref.en.sgm'],
'lv-en': ['test/newstest2017-lven-src.lv.sgm', 'test/newstest2017-lven-ref.en.sgm'],
'ru-en': ['test/newstest2017-ruen-src.ru.sgm', 'test/newstest2017-ruen-ref.en.sgm'],
'tr-en': ['test/newstest2017-tren-src.tr.sgm', 'test/newstest2017-tren-ref.en.sgm'],
'zh-en': ['test/newstest2017-zhen-src.zh.sgm', 'test/newstest2017-zhen-ref.en.sgm'],
},
'wmt17/B': {
'data': 'http://data.statmt.org/wmt17/translation-task/test.tgz',
'description': 'Additional reference for EN-FI and FI-EN.',
'en-fi': ['test/newstestB2017-enfi-src.en.sgm', 'test/newstestB2017-enfi-ref.fi.sgm'],
},
'wmt17/tworefs': {
'data': 'http://data.statmt.org/wmt17/translation-task/test.tgz',
'description': 'Systems with two references.',
'en-fi': ['test/newstest2017-enfi-src.en.sgm', 'test/newstest2017-enfi-ref.fi.sgm', 'test/newstestB2017-enfi-ref.fi.sgm'],
},
'wmt17/improved': {
'data': 'http://data.statmt.org/wmt17/translation-task/test-update-1.tgz',
'description': 'Improved zh-en and en-zh translations.',
'en-zh': ['newstest2017-enzh-src.en.sgm', 'newstest2017-enzh-ref.zh.sgm'],
'zh-en': ['newstest2017-zhen-src.zh.sgm', 'newstest2017-zhen-ref.en.sgm'],
},
'wmt17/dev': {
'data': 'http://data.statmt.org/wmt17/translation-task/dev.tgz',
'description': 'Development sets released for new languages in 2017.',
'en-lv': ['dev/newsdev2017-enlv-src.en.sgm', 'dev/newsdev2017-enlv-ref.lv.sgm'],
'en-zh': ['dev/newsdev2017-enzh-src.en.sgm', 'dev/newsdev2017-enzh-ref.zh.sgm'],
'lv-en': ['dev/newsdev2017-lven-src.lv.sgm', 'dev/newsdev2017-lven-ref.en.sgm'],
'zh-en': ['dev/newsdev2017-zhen-src.zh.sgm', 'dev/newsdev2017-zhen-ref.en.sgm'],
},
'wmt16': {
'data': 'http://data.statmt.org/wmt16/translation-task/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2016-csen-src.cs.sgm', 'test/newstest2016-csen-ref.en.sgm'],
'de-en': ['test/newstest2016-deen-src.de.sgm', 'test/newstest2016-deen-ref.en.sgm'],
'en-cs': ['test/newstest2016-encs-src.en.sgm', 'test/newstest2016-encs-ref.cs.sgm'],
'en-de': ['test/newstest2016-ende-src.en.sgm', 'test/newstest2016-ende-ref.de.sgm'],
'en-fi': ['test/newstest2016-enfi-src.en.sgm', 'test/newstest2016-enfi-ref.fi.sgm'],
'en-ro': ['test/newstest2016-enro-src.en.sgm', 'test/newstest2016-enro-ref.ro.sgm'],
'en-ru': ['test/newstest2016-enru-src.en.sgm', 'test/newstest2016-enru-ref.ru.sgm'],
'en-tr': ['test/newstest2016-entr-src.en.sgm', 'test/newstest2016-entr-ref.tr.sgm'],
'fi-en': ['test/newstest2016-fien-src.fi.sgm', 'test/newstest2016-fien-ref.en.sgm'],
'ro-en': ['test/newstest2016-roen-src.ro.sgm', 'test/newstest2016-roen-ref.en.sgm'],
'ru-en': ['test/newstest2016-ruen-src.ru.sgm', 'test/newstest2016-ruen-ref.en.sgm'],
'tr-en': ['test/newstest2016-tren-src.tr.sgm', 'test/newstest2016-tren-ref.en.sgm'],
},
'wmt16/B': {
'data': 'http://data.statmt.org/wmt16/translation-task/test.tgz',
'description': 'Additional reference for EN-FI.',
'en-fi': ['test/newstest2016-enfi-src.en.sgm', 'test/newstestB2016-enfi-ref.fi.sgm'],
},
'wmt16/tworefs': {
'data': 'http://data.statmt.org/wmt16/translation-task/test.tgz',
'description': 'EN-FI with two references.',
'en-fi': ['test/newstest2016-enfi-src.en.sgm', 'test/newstest2016-enfi-ref.fi.sgm', 'test/newstestB2016-enfi-ref.fi.sgm'],
},
'wmt16/dev': {
'data': 'http://data.statmt.org/wmt16/translation-task/dev.tgz',
'description': 'Development sets released for new languages in 2016.',
'en-ro': ['dev/newsdev2016-enro-src.en.sgm', 'dev/newsdev2016-enro-ref.ro.sgm'],
'en-tr': ['dev/newsdev2016-entr-src.en.sgm', 'dev/newsdev2016-entr-ref.tr.sgm'],
'ro-en': ['dev/newsdev2016-roen-src.ro.sgm', 'dev/newsdev2016-roen-ref.en.sgm'],
'tr-en': ['dev/newsdev2016-tren-src.tr.sgm', 'dev/newsdev2016-tren-ref.en.sgm']
},
'wmt15': {
'data': 'http://statmt.org/wmt15/test.tgz',
'description': 'Official evaluation data.',
'en-fr': ['test/newsdiscusstest2015-enfr-src.en.sgm', 'test/newsdiscusstest2015-enfr-ref.fr.sgm'],
'fr-en': ['test/newsdiscusstest2015-fren-src.fr.sgm', 'test/newsdiscusstest2015-fren-ref.en.sgm'],
'cs-en': ['test/newstest2015-csen-src.cs.sgm', 'test/newstest2015-csen-ref.en.sgm'],
'de-en': ['test/newstest2015-deen-src.de.sgm', 'test/newstest2015-deen-ref.en.sgm'],
'en-cs': ['test/newstest2015-encs-src.en.sgm', 'test/newstest2015-encs-ref.cs.sgm'],
'en-de': ['test/newstest2015-ende-src.en.sgm', 'test/newstest2015-ende-ref.de.sgm'],
'en-fi': ['test/newstest2015-enfi-src.en.sgm', 'test/newstest2015-enfi-ref.fi.sgm'],
'en-ru': ['test/newstest2015-enru-src.en.sgm', 'test/newstest2015-enru-ref.ru.sgm'],
'fi-en': ['test/newstest2015-fien-src.fi.sgm', 'test/newstest2015-fien-ref.en.sgm'],
'ru-en': ['test/newstest2015-ruen-src.ru.sgm', 'test/newstest2015-ruen-ref.en.sgm']
},
'wmt14': {
'data': 'http://statmt.org/wmt14/test-filtered.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2014-csen-src.cs.sgm', 'test/newstest2014-csen-ref.en.sgm'],
'en-cs': ['test/newstest2014-csen-src.en.sgm', 'test/newstest2014-csen-ref.cs.sgm'],
'de-en': ['test/newstest2014-deen-src.de.sgm', 'test/newstest2014-deen-ref.en.sgm'],
'en-de': ['test/newstest2014-deen-src.en.sgm', 'test/newstest2014-deen-ref.de.sgm'],
'en-fr': ['test/newstest2014-fren-src.en.sgm', 'test/newstest2014-fren-ref.fr.sgm'],
'fr-en': ['test/newstest2014-fren-src.fr.sgm', 'test/newstest2014-fren-ref.en.sgm'],
'en-hi': ['test/newstest2014-hien-src.en.sgm', 'test/newstest2014-hien-ref.hi.sgm'],
'hi-en': ['test/newstest2014-hien-src.hi.sgm', 'test/newstest2014-hien-ref.en.sgm'],
'en-ru': ['test/newstest2014-ruen-src.en.sgm', 'test/newstest2014-ruen-ref.ru.sgm'],
'ru-en': ['test/newstest2014-ruen-src.ru.sgm', 'test/newstest2014-ruen-ref.en.sgm']
},
'wmt14/full': {
'data': 'http://statmt.org/wmt14/test-full.tgz',
'description': 'Evaluation data released after official evaluation for further research.',
'cs-en': ['test-full/newstest2014-csen-src.cs.sgm', 'test-full/newstest2014-csen-ref.en.sgm'],
'en-cs': ['test-full/newstest2014-csen-src.en.sgm', 'test-full/newstest2014-csen-ref.cs.sgm'],
'de-en': ['test-full/newstest2014-deen-src.de.sgm', 'test-full/newstest2014-deen-ref.en.sgm'],
'en-de': ['test-full/newstest2014-deen-src.en.sgm', 'test-full/newstest2014-deen-ref.de.sgm'],
'en-fr': ['test-full/newstest2014-fren-src.en.sgm', 'test-full/newstest2014-fren-ref.fr.sgm'],
'fr-en': ['test-full/newstest2014-fren-src.fr.sgm', 'test-full/newstest2014-fren-ref.en.sgm'],
'en-hi': ['test-full/newstest2014-hien-src.en.sgm', 'test-full/newstest2014-hien-ref.hi.sgm'],
'hi-en': ['test-full/newstest2014-hien-src.hi.sgm', 'test-full/newstest2014-hien-ref.en.sgm'],
'en-ru': ['test-full/newstest2014-ruen-src.en.sgm', 'test-full/newstest2014-ruen-ref.ru.sgm'],
'ru-en': ['test-full/newstest2014-ruen-src.ru.sgm', 'test-full/newstest2014-ruen-ref.en.sgm']
},
'wmt13': {
'data': 'http://statmt.org/wmt13/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2013-src.cs.sgm', 'test/newstest2013-src.en.sgm'],
'en-cs': ['test/newstest2013-src.en.sgm', 'test/newstest2013-src.cs.sgm'],
'de-en': ['test/newstest2013-src.de.sgm', 'test/newstest2013-src.en.sgm'],
'en-de': ['test/newstest2013-src.en.sgm', 'test/newstest2013-src.de.sgm'],
'es-en': ['test/newstest2013-src.es.sgm', 'test/newstest2013-src.en.sgm'],
'en-es': ['test/newstest2013-src.en.sgm', 'test/newstest2013-src.es.sgm'],
'fr-en': ['test/newstest2013-src.fr.sgm', 'test/newstest2013-src.en.sgm'],
'en-fr': ['test/newstest2013-src.en.sgm', 'test/newstest2013-src.fr.sgm'],
'ru-en': ['test/newstest2013-src.ru.sgm', 'test/newstest2013-src.en.sgm'],
'en-ru': ['test/newstest2013-src.en.sgm', 'test/newstest2013-src.ru.sgm']
},
'wmt12': {
'data': 'http://statmt.org/wmt12/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2012-src.cs.sgm', 'test/newstest2012-src.en.sgm'],
'en-cs': ['test/newstest2012-src.en.sgm', 'test/newstest2012-src.cs.sgm'],
'de-en': ['test/newstest2012-src.de.sgm', 'test/newstest2012-src.en.sgm'],
'en-de': ['test/newstest2012-src.en.sgm', 'test/newstest2012-src.de.sgm'],
'es-en': ['test/newstest2012-src.es.sgm', 'test/newstest2012-src.en.sgm'],
'en-es': ['test/newstest2012-src.en.sgm', 'test/newstest2012-src.es.sgm'],
'fr-en': ['test/newstest2012-src.fr.sgm', 'test/newstest2012-src.en.sgm'],
'en-fr': ['test/newstest2012-src.en.sgm', 'test/newstest2012-src.fr.sgm']
},
'wmt11': {
'data': 'http://statmt.org/wmt11/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['newstest2011-src.cs.sgm', 'newstest2011-src.en.sgm'],
'en-cs': ['newstest2011-src.en.sgm', 'newstest2011-src.cs.sgm'],
'de-en': ['newstest2011-src.de.sgm', 'newstest2011-src.en.sgm'],
'en-de': ['newstest2011-src.en.sgm', 'newstest2011-src.de.sgm'],
'fr-en': ['newstest2011-src.fr.sgm', 'newstest2011-src.en.sgm'],
'en-fr': ['newstest2011-src.en.sgm', 'newstest2011-src.fr.sgm'],
'es-en': ['newstest2011-src.es.sgm', 'newstest2011-src.en.sgm'],
'en-es': ['newstest2011-src.en.sgm', 'newstest2011-src.es.sgm']
},
'wmt10': {
'data': 'http://statmt.org/wmt10/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2010-src.cz.sgm', 'test/newstest2010-src.en.sgm'],
'en-cs': ['test/newstest2010-src.en.sgm', 'test/newstest2010-src.cz.sgm'],
'de-en': ['test/newstest2010-src.de.sgm', 'test/newstest2010-src.en.sgm'],
'en-de': ['test/newstest2010-src.en.sgm', 'test/newstest2010-src.de.sgm'],
'es-en': ['test/newstest2010-src.es.sgm', 'test/newstest2010-src.en.sgm'],
'en-es': ['test/newstest2010-src.en.sgm', 'test/newstest2010-src.es.sgm'],
'fr-en': ['test/newstest2010-src.fr.sgm', 'test/newstest2010-src.en.sgm'],
'en-fr': ['test/newstest2010-src.en.sgm', 'test/newstest2010-src.fr.sgm']
},
'wmt09': {
'data': 'http://statmt.org/wmt09/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2009-src.cz.sgm', 'test/newstest2009-src.en.sgm'],
'en-cs': ['test/newstest2009-src.en.sgm', 'test/newstest2009-src.cz.sgm'],
'de-en': ['test/newstest2009-src.de.sgm', 'test/newstest2009-src.en.sgm'],
'en-de': ['test/newstest2009-src.en.sgm', 'test/newstest2009-src.de.sgm'],
'es-en': ['test/newstest2009-src.es.sgm', 'test/newstest2009-src.en.sgm'],
'en-es': ['test/newstest2009-src.en.sgm', 'test/newstest2009-src.es.sgm'],
'fr-en': ['test/newstest2009-src.fr.sgm', 'test/newstest2009-src.en.sgm'],
'en-fr': ['test/newstest2009-src.en.sgm', 'test/newstest2009-src.fr.sgm'],
'hu-en': ['test/newstest2009-src.hu.sgm', 'test/newstest2009-src.en.sgm'],
'en-hu': ['test/newstest2009-src.en.sgm', 'test/newstest2009-src.hu.sgm'],
'it-en': ['test/newstest2009-src.it.sgm', 'test/newstest2009-src.en.sgm'],
'en-it': ['test/newstest2009-src.en.sgm', 'test/newstest2009-src.it.sgm']
},
'wmt08': {
'data': 'http://statmt.org/wmt08/test.tgz',
'description': 'Official evaluation data.',
'cs-en': ['test/newstest2008-src.cz.sgm', 'test/newstest2008-src.en.sgm'],
'en-cs': ['test/newstest2008-src.en.sgm', 'test/newstest2008-src.cz.sgm'],
'de-en': ['test/newstest2008-src.de.sgm', 'test/newstest2008-src.en.sgm'],
'en-de': ['test/newstest2008-src.en.sgm', 'test/newstest2008-src.de.sgm'],
'es-en': ['test/newstest2008-src.es.sgm', 'test/newstest2008-src.en.sgm'],
'en-es': ['test/newstest2008-src.en.sgm', 'test/newstest2008-src.es.sgm'],
'fr-en': ['test/newstest2008-src.fr.sgm', 'test/newstest2008-src.en.sgm'],
'en-fr': ['test/newstest2008-src.en.sgm', 'test/newstest2008-src.fr.sgm'],
'hu-en': ['test/newstest2008-src.hu.sgm', 'test/newstest2008-src.en.sgm'],
'en-hu': ['test/newstest2008-src.en.sgm', 'test/newstest2008-src.hu.sgm']
},
'wmt08/nc': {
'data': 'http://statmt.org/wmt08/test.tgz',
'description': 'Official evaluation data (news commentary).',
'cs-en': ['test/nc-test2008-src.cz.sgm', 'test/nc-test2008-src.en.sgm'],
'en-cs': ['test/nc-test2008-src.en.sgm', 'test/nc-test2008-src.cz.sgm']
},
'wmt08/europarl': {
'data': 'http://statmt.org/wmt08/test.tgz',
'description': 'Official evaluation data (Europarl).',
'de-en': ['test/test2008-src.de.sgm', 'test/test2008-src.en.sgm'],
'en-de': ['test/test2008-src.en.sgm', 'test/test2008-src.de.sgm'],
'es-en': ['test/test2008-src.es.sgm', 'test/test2008-src.en.sgm'],
'en-es': ['test/test2008-src.en.sgm', 'test/test2008-src.es.sgm'],
'fr-en': ['test/test2008-src.fr.sgm', 'test/test2008-src.en.sgm'],
'en-fr': ['test/test2008-src.en.sgm', 'test/test2008-src.fr.sgm']
},
}
def tokenize_13a(line):
"""
    Tokenizes an input line using a relatively minimal tokenization that is equivalent to the one in mteval-v13a, the scorer used by WMT.
:param line: a segment to tokenize
:return: the tokenized line
"""
norm = line
# language-independent part:
norm = norm.replace('<skipped>', '')
norm = norm.replace('-\n', '')
norm = norm.replace('\n', ' ')
norm = norm.replace('"', '"')
norm = norm.replace('&', '&')
norm = norm.replace('<', '<')
norm = norm.replace('>', '>')
# language-dependent part (assuming Western languages):
norm = " {} ".format(norm)
norm = re.sub(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', ' \\1 ', norm)
norm = re.sub(r'([^0-9])([\.,])', '\\1 \\2 ', norm) # tokenize period and comma unless preceded by a digit
norm = re.sub(r'([\.,])([^0-9])', ' \\1 \\2', norm) # tokenize period and comma unless followed by a digit
norm = re.sub(r'([0-9])(-)', '\\1 \\2 ', norm) # tokenize dash when preceded by a digit
norm = re.sub(r'\s+', ' ', norm) # one space only between words
norm = re.sub(r'^\s+', '', norm) # no leading space
norm = re.sub(r'\s+$', '', norm) # no trailing space
return norm
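# Illustrative behaviour of tokenize_13a (expected outputs are worked out from
# the regexes above, not taken from the original source):
#
#   tokenize_13a('Hello, world.')    -> 'Hello , world .'
#   tokenize_13a('It costs 1,000.')  -> 'It costs 1,000 .'
#
# The comma inside '1,000' survives because it is both preceded and followed
# by digits.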
class UnicodeRegex:
"""Ad-hoc hack to recognize all punctuation and symbols.
without dependening on https://pypi.python.org/pypi/regex/."""
def _property_chars(prefix):
return ''.join(chr(x) for x in range(sys.maxunicode)
if unicodedata.category(chr(x)).startswith(prefix))
punctuation = _property_chars('P')
nondigit_punct_re = re.compile(r'([^\d])([' + punctuation + r'])')
punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\d])')
symbol_re = re.compile('([' + _property_chars('S') + '])')
def tokenize_v14_international(string):
r"""Tokenize a string following the official BLEU implementation.
See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
In our case, the input string is expected to be just one line
and no HTML entities de-escaping is needed.
So we just tokenize on punctuation and symbols,
except when a punctuation is preceded and followed by a digit
(e.g. a comma/dot as a thousand/decimal separator).
Note that a number (e.g., a year) followed by a dot at the end of sentence is NOT tokenized,
i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g`
does not match this case (unless we add a space after each sentence).
However, this error is already in the original mteval-v14.pl
and we want to be consistent with it.
The error is not present in the non-international version,
which uses `$norm_text = " $norm_text "` (or `norm = " {} ".format(norm)` in Python).
:param string: the input string
:return: a list of tokens
"""
string = UnicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', string)
string = UnicodeRegex.punct_nondigit_re.sub(r' \1 \2', string)
string = UnicodeRegex.symbol_re.sub(r' \1 ', string)
return string.strip()
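# A small illustration of the end-of-sentence quirk described above (outputs
# worked out from the regexes, not taken from the original source):
#
#   tokenize_v14_international('in 2019.')  -> 'in 2019.'   # dot stays attached
#   tokenize_13a('in 2019.')                -> 'in 2019 .'  # 13a pads the line, so the dot is split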
def tokenize_zh(sentence):
"""MIT License
Copyright (c) 2017 - Shujian Huang <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
    The tokenization of Chinese text in this script consists of two steps: separating each Chinese
    character (by utf-8 encoding) and tokenizing the non-Chinese part (following the mteval script).
Author: Shujian Huang [email protected]
:param sentence: input sentence
:return: tokenized sentence
"""
def is_chinese_char(uchar):
"""
:param uchar: input char in unicode
:return: whether the input char is a Chinese character.
"""
if uchar >= u'\u3400' and uchar <= u'\u4db5': # CJK Unified Ideographs Extension A, release 3.0
return True
elif uchar >= u'\u4e00' and uchar <= u'\u9fa5': # CJK Unified Ideographs, release 1.1
return True
elif uchar >= u'\u9fa6' and uchar <= u'\u9fbb': # CJK Unified Ideographs, release 4.1
return True
elif uchar >= u'\uf900' and uchar <= u'\ufa2d': # CJK Compatibility Ideographs, release 1.1
return True
elif uchar >= u'\ufa30' and uchar <= u'\ufa6a': # CJK Compatibility Ideographs, release 3.2
return True
elif uchar >= u'\ufa70' and uchar <= u'\ufad9': # CJK Compatibility Ideographs, release 4.1
return True
        elif uchar >= u'\U00020000' and uchar <= u'\U0002A6D6':  # CJK Unified Ideographs Extension B, release 3.1
            return True
        elif uchar >= u'\U0002F800' and uchar <= u'\U0002FA1D':  # CJK Compatibility Supplement, release 3.1
            return True
        elif uchar >= u'\uff00' and uchar <= u'\uffef':  # Halfwidth and Fullwidth Forms: fullwidth ASCII and punctuation, halfwidth Katakana, halfwidth Hangul
return True
elif uchar >= u'\u2e80' and uchar <= u'\u2eff': # CJK Radicals Supplement
return True
elif uchar >= u'\u3000' and uchar <= u'\u303f': # CJK punctuation mark
return True
elif uchar >= u'\u31c0' and uchar <= u'\u31ef': # CJK stroke
return True
elif uchar >= u'\u2f00' and uchar <= u'\u2fdf': # Kangxi Radicals
return True
elif uchar >= u'\u2ff0' and uchar <= u'\u2fff': # Chinese character structure
return True
elif uchar >= u'\u3100' and uchar <= u'\u312f': # Phonetic symbols
return True
elif uchar >= u'\u31a0' and uchar <= u'\u31bf': # Phonetic symbols (Taiwanese and Hakka expansion)
return True
        elif uchar >= u'\ufe10' and uchar <= u'\ufe1f':  # Vertical Forms
            return True
        elif uchar >= u'\ufe30' and uchar <= u'\ufe4f':  # CJK Compatibility Forms
            return True
        elif uchar >= u'\u2600' and uchar <= u'\u26ff':  # Miscellaneous Symbols
            return True
        elif uchar >= u'\u2700' and uchar <= u'\u27bf':  # Dingbats
            return True
        elif uchar >= u'\u3200' and uchar <= u'\u32ff':  # Enclosed CJK Letters and Months
            return True
        elif uchar >= u'\u3300' and uchar <= u'\u33ff':  # CJK Compatibility
            return True
return False
sentence = sentence.strip()
sentence_in_chars = ""
for char in sentence:
if is_chinese_char(char):
sentence_in_chars += " "
sentence_in_chars += char
sentence_in_chars += " "
else:
sentence_in_chars += char
sentence = sentence_in_chars
# tokenize punctuation
sentence = re.sub(r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', r' \1 ', sentence)
# tokenize period and comma unless preceded by a digit
sentence = re.sub(r'([^0-9])([\.,])', r'\1 \2 ', sentence)
# tokenize period and comma unless followed by a digit
sentence = re.sub(r'([\.,])([^0-9])', r' \1 \2', sentence)
# tokenize dash when preceded by a digit
sentence = re.sub(r'([0-9])(-)', r'\1 \2 ', sentence)
# one space only between words
sentence = re.sub(r'\s+', r' ', sentence)
# no leading space
sentence = re.sub(r'^\s+', r'', sentence)
# no trailing space
sentence = re.sub(r'\s+$', r'', sentence)
return sentence
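# Example (assumed output, following the character-splitting logic above):
#
#   tokenize_zh('我喜欢NLP.')  -> '我 喜 欢 NLP .'
#
# Chinese characters become individual tokens, while the Latin tail is handled
# by the mteval-style punctuation rules.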
TOKENIZERS = {
'13a': tokenize_13a,
'intl': tokenize_v14_international,
'zh': tokenize_zh,
'none': lambda x: x,
}
DEFAULT_TOKENIZER = '13a'
def _read(file, encoding='utf-8'):
"""Convenience function for reading compressed or plain text files.
:param file: The file to read.
:param encoding: The file encoding.
"""
if file.endswith('.gz'):
return gzip.open(file, 'rt', encoding=encoding)
return open(file, 'rt', encoding=encoding)
def my_log(num):
"""
Floors the log function
:param num: the number
:return: log(num) floored to a very low number
"""
if num == 0.0:
return -9999999999
return math.log(num)
def build_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
    :param args: the arguments passed into the script
    :param numrefs: the number of references
    :return: the signature
"""
# Abbreviations for the signature
abbr = {
'test': 't',
'lang': 'l',
'smooth': 's',
'case': 'c',
'tok': 'tok',
'numrefs': '#',
'version': 'v'
}
signature = {'tok': args.tokenize,
'version': VERSION,
'smooth': args.smooth,
'numrefs': numrefs,
'case': 'lc' if args.lc else 'mixed'}
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])
return sigstr
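# For example, scoring wmt17 en-de with one reference and default settings
# would produce a signature like the following (the version value is
# illustrative):
#
#   'case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt17+tok.13a+version.1.2.3'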
def extract_ngrams(line, max_order=NGRAM_ORDER):
"""Extracts all the ngrams (1 <= n <= NGRAM_ORDER) from a sequence of tokens.
:param line: a segment containing a sequence of words
:param max_order: collect n-grams from 1<=n<=max
:return: a dictionary containing ngrams and counts
"""
ngrams = Counter()
tokens = line.split()
for n in range(1, max_order + 1):
for i in range(0, len(tokens) - n + 1):
ngram = ' '.join(tokens[i: i + n])
ngrams[ngram] += 1
return ngrams
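# Example (worked out from the loops above, assuming NGRAM_ORDER >= 3):
#
#   extract_ngrams('a b a')
#   -> Counter({'a': 2, 'b': 1, 'a b': 1, 'b a': 1, 'a b a': 1})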
def ref_stats(output, refs):
ngrams = Counter()
closest_diff = None
closest_len = None
for ref in refs:
tokens = ref.split()
reflen = len(tokens)
diff = abs(len(output.split()) - reflen)
if closest_diff is None or diff < closest_diff:
closest_diff = diff
closest_len = reflen
elif diff == closest_diff:
if reflen < closest_len:
closest_len = reflen
ngrams_ref = extract_ngrams(ref)
for ngram in ngrams_ref.keys():
ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])
return ngrams, closest_diff, closest_len
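# Note on closest_len: among multiple references, the length closest to the
# output length is used for the brevity penalty, with ties going to the
# shorter reference. E.g. for a 3-token output and references of lengths 2
# and 4 (both at distance 1), closest_len is 2.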
def process_to_text(rawfile, txtfile):
"""Processes raw files to plain text files.
:param rawfile: the input file (possibly SGML)
:param txtfile: the plaintext file
"""
if not os.path.exists(txtfile):
if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):
logging.info("Processing %s to %s", rawfile, txtfile)
with _read(rawfile) as fin, open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
fout.write(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line))
def print_test_set(test_set, langpair, side):
"""Prints to STDOUT the specified side of the specified test set
:param test_set: the test set to print
:param langpair: the language pair
:param side: 'src' for source, 'ref' for reference
"""
where = download_test_set(test_set, langpair)
infile = where[0] if side == 'src' else where[1]
with open(infile) as fin:
for line in fin:
print(line.rstrip())
def download_test_set(test_set, langpair=None):
"""Downloads the specified test to the system location specified by the SACREBLEU environment variable.
:param test_set: the test set to download
:param langpair: the language pair (needed for some datasets)
:return: the set of processed files
"""
# if not data.has_key(test_set):
# return None
dataset = DATASETS[test_set]['data']
outdir = os.path.join(SACREBLEU, test_set)
if not os.path.exists(outdir):
logging.info('Creating %s', outdir)
os.makedirs(outdir)
tarball = os.path.join(outdir, os.path.basename(dataset))
rawdir = os.path.join(outdir, 'raw')
if not os.path.exists(tarball):
# TODO: check MD5sum
logging.info("Downloading %s to %s", dataset, tarball)
with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:
out.write(f.read())
# Extract the tarball
logging.info('Extracting %s', tarball)
tar = tarfile.open(tarball)
tar.extractall(path=rawdir)
found = []
# Process the files into plain text
languages = DATASETS[test_set].keys() if langpair is None else [langpair]
for pair in languages:
if '-' not in pair:
continue
src, tgt = pair.split('-')
rawfile = os.path.join(rawdir, DATASETS[test_set][pair][0])
outfile = os.path.join(outdir, '{}.{}'.format(pair, src))
process_to_text(rawfile, outfile)
found.append(outfile)
for i, ref in enumerate(DATASETS[test_set][pair][1:]):
rawfile = os.path.join(rawdir, ref)
if len(DATASETS[test_set][pair][1:]) >= 2:
outfile = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))
else:
outfile = os.path.join(outdir, '{}.{}'.format(pair, tgt))
process_to_text(rawfile, outfile)
found.append(outfile)
return found
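# For example (the path prefix is illustrative; files live under the SACREBLEU
# cache directory):
#
#   download_test_set('wmt17', 'en-de')
#   -> ['<SACREBLEU>/wmt17/en-de.en', '<SACREBLEU>/wmt17/en-de.de']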
BLEU = namedtuple('BLEU', 'score, counts, totals, precisions, bp, sys_len, ref_len')
def compute_bleu(correct: List[int], total: List[int], sys_len: int, ref_len: int, smooth = 'none', smooth_floor = 0.01,
use_effective_order = False) -> BLEU:
"""Computes BLEU score from its sufficient statistics. Adds smoothing.
:param correct: List of counts of correct ngrams, 1 <= n <= NGRAM_ORDER
:param total: List of counts of total ngrams, 1 <= n <= NGRAM_ORDER
:param sys_len: The cumulative system length
:param ref_len: The cumulative reference length
:param smooth: The smoothing method to use
:param smooth_floor: The smoothing value added, if smooth method 'floor' is used
:param use_effective_order: Use effective order.
:return: A BLEU object with the score (100-based) and other statistics.
"""
precisions = [0 for x in range(NGRAM_ORDER)]
smooth_mteval = 1.
effective_order = NGRAM_ORDER
for n in range(NGRAM_ORDER):
if total[n] == 0:
break
if use_effective_order:
effective_order = n + 1
if correct[n] == 0:
if smooth == 'exp':
smooth_mteval *= 2
precisions[n] = 100. / (smooth_mteval * total[n])
elif smooth == 'floor':
precisions[n] = 100. * smooth_floor / total[n]
else:
precisions[n] = 100. * correct[n] / total[n]
# If the system guesses no i-grams, 1 <= i <= NGRAM_ORDER, the BLEU score is 0 (technically undefined).
# This is a problem for sentence-level BLEU or a corpus of short sentences, where systems will get no credit
# if sentence lengths fall under the NGRAM_ORDER threshold. This fix scales NGRAM_ORDER to the observed
# maximum order. It is only available through the API and off by default
brevity_penalty = 1.0
if sys_len < ref_len:
brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
bleu = brevity_penalty * math.exp(sum(map(my_log, precisions[:effective_order])) / effective_order)
return BLEU._make([bleu, correct, total, precisions, brevity_penalty, sys_len, ref_len])
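# Illustrative call with made-up sufficient statistics (the numbers are
# assumptions for demonstration only):
#
#   stats = compute_bleu([9, 7, 5, 3], [10, 9, 8, 7], sys_len=10, ref_len=11)
#   stats.bp  # exp(1 - 11/10) ~= 0.905: the system output is shorter than the reference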
def corpus_bleu(sys_stream, ref_streams, smooth='exp', smooth_floor=0.0, force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER, use_effective_order=False) -> BLEU:
"""Produces BLEU scores along with its sufficient statistics from a source against one or more references.
:param sys_stream: The system stream (a sequence of segments)
:param ref_streams: A list of one or more reference streams (each a sequence of segments)
:param smooth: The smoothing method to use
:param smooth_floor: For 'floor' smoothing, the floor to use
:param force: Ignore data that looks already tokenized
:param lowercase: Lowercase the data
    :param tokenize: The tokenizer to use
    :param use_effective_order: Scale NGRAM_ORDER down to the largest observed n-gram order
    :return: a BLEU object containing everything you'd want
"""
# Add some robustness to the input arguments
if isinstance(sys_stream, str):
sys_stream = [sys_stream]
if isinstance(ref_streams, str):
ref_streams = [[ref_streams]]
sys_len = 0
ref_len = 0
correct = [0 for n in range(NGRAM_ORDER)]
total = [0 for n in range(NGRAM_ORDER)]
# look for already-tokenized sentences
tokenized_count = 0
fhs = [sys_stream] + ref_streams
for lines in zip_longest(*fhs):
if None in lines:
raise EOFError("Source and reference streams have different lengths!")
if lowercase:
lines = [x.lower() for x in lines]
        if not force and tokenize != 'none' and lines[0].rstrip().endswith(' .'):
tokenized_count += 1
if tokenized_count == 100:
logging.warning('That\'s > 100 lines that end in a tokenized period (\'.\')')
logging.warning('It looks like you forgot to detokenize your test data, which may hurt your score.')
logging.warning('If you insist your data is tokenized, you can suppress this message with \'--force\'.')
output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]
ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)
sys_len += len(output.split())
ref_len += closest_len
sys_ngrams = extract_ngrams(output)
for ngram in sys_ngrams.keys():
n = len(ngram.split())
correct[n-1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
total[n-1] += sys_ngrams[ngram]
return compute_bleu(correct, total, sys_len, ref_len, smooth, smooth_floor, use_effective_order)
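# Minimal library-usage sketch (hypothetical segments):
#
#   bleu = corpus_bleu(['the cat sat on the mat'], [['the cat sat on the mat']])
#   bleu.score  # 100.0 for an exact match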
def raw_corpus_bleu(sys_stream, ref_streams, smooth_floor=0.01) -> BLEU:
"""Convenience function that wraps corpus_bleu().
This is convenient if you're using sacrebleu as a library, say for scoring on dev.
    It uses no tokenization and 'floor' smoothing, with the floor defaulting to 0.01.
:param sys_stream: the system stream (a sequence of segments)
:param ref_streams: a list of one or more reference streams (each a sequence of segments)
"""
return corpus_bleu(sys_stream, ref_streams, smooth='floor', smooth_floor=smooth_floor, force=True, tokenize='none', use_effective_order=True)
def main():
    arg_parser = argparse.ArgumentParser(description='sacréBLEU: Hassle-free computation of shareable BLEU scores. '
                                                     'Quick usage: score your detokenized output against WMT\'14 EN-DE: '
                                                     'cat output.detok.de | ./sacreBLEU -t wmt14 -l en-de')
arg_parser.add_argument('--test-set', '-t', type=str, default=None,
choices=DATASETS.keys(),
help='the test set to use')
arg_parser.add_argument('-lc', action='store_true', default=False,
help='use case-insensitive BLEU (default: actual case)')
arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor', 'none'], default='exp',
help='smoothing method: exponential decay (default), floor (0 count -> 0.01), or none')
arg_parser.add_argument('--tokenize', '-tok', choices=[x for x in TOKENIZERS.keys() if x != 'none'], default='13a',
help='tokenization method to use')
arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
help='source-target language pair (2-char ISO639-1 codes)')
arg_parser.add_argument('--download', type=str, default=None,
help='download a test set and quit')
arg_parser.add_argument('--echo', choices=['src', 'ref'], type=str, default=None,
help='output the source or reference to STDOUT and quit')
arg_parser.add_argument('refs', nargs='*', default=[],
help='optional list of references (for backwards-compatibility with older scripts)')
arg_parser.add_argument('--short', default=False, action='store_true',
help='produce a shorter (less human readable) signature')
arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',
help='output only the BLEU score')
arg_parser.add_argument('--force', default=False, action='store_true',
help='insist that your tokenized input is actually detokenized')
arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',
help='suppress informative output')
arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
help='open text files with specified encoding (default: %(default)s)')
arg_parser.add_argument('-V', '--version', action='version',
version='%(prog)s {}'.format(VERSION))
args = arg_parser.parse_args()
if not args.quiet:
logging.basicConfig(level=logging.INFO, format='sacréBLEU: %(message)s')
if args.download:
download_test_set(args.download, args.langpair)
sys.exit(0)
if args.test_set is not None and args.test_set not in DATASETS:
logging.error('The available test sets are: ')
for testset in sorted(DATASETS.keys(), reverse=True):
logging.error(' %s: %s', testset, DATASETS[testset].get('description', ''))
sys.exit(1)
if args.test_set and (args.langpair is None or args.langpair not in DATASETS[args.test_set]):
if args.langpair is None:
logging.error('I need a language pair (-l).')
elif args.langpair not in DATASETS[args.test_set]:
logging.error('No such language pair "%s"', args.langpair)
logging.error('Available language pairs for test set "%s": %s', args.test_set, ', '.join(filter(lambda x: '-' in x, DATASETS[args.test_set].keys())))
sys.exit(1)
if args.echo:
if args.langpair is None or args.test_set is None:
logging.warning("--echo requires a test set (--t) and a language pair (-l)")
sys.exit(1)
print_test_set(args.test_set, args.langpair, args.echo)
sys.exit(0)
if args.test_set is None and len(args.refs) == 0:
logging.error('I need either a predefined test set (-t) or a list of references')
logging.error('The available test sets are: ')
for testset in sorted(DATASETS.keys(), reverse=True):
logging.error(' %s: %s', testset, DATASETS[testset].get('description', ''))
sys.exit(1)
elif args.test_set is not None and len(args.refs) > 0:
logging.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')
sys.exit(1)
if args.test_set:
_, *refs = download_test_set(args.test_set, args.langpair)
else:
refs = args.refs
# Read references
refs = [_read(x, args.encoding) for x in refs]
if args.langpair is not None:
_, target = args.langpair.split('-')
if target == 'zh' and args.tokenize != 'zh':
logging.warning('You should also pass "--tok zh" when scoring Chinese...')
try:
bleu = corpus_bleu(sys.stdin, refs, smooth=args.smooth, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
except EOFError:
logging.error('The input and reference stream(s) were of different lengths.\n'
'This could be a problem with your system output, or with sacreBLEU\'s reference database.\n'
'If the latter, you can clean out the references cache by typing:\n'
'\n'
' rm -r %s/%s\n'
'\n'
'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU, args.test_set)
sys.exit(1)
version_str = build_signature(args, len(refs))
if args.score_only:
print('{:.2f}'.format(bleu.score))
else:
print('BLEU+{} = {:.2f} {:.1f}/{:.1f}/{:.1f}/{:.1f} (BP = {:.3f} ratio = {:.3f} hyp_len = {:d} ref_len = {:d})'.format(version_str, bleu.score, bleu.precisions[0], bleu.precisions[1], bleu.precisions[2], bleu.precisions[3], bleu.bp, bleu.sys_len / bleu.ref_len, bleu.sys_len, bleu.ref_len))
if __name__ == '__main__':
main()
| [
"List[int]",
"List[int]",
"int",
"int"
] | [
36841,
36859,
36879,
36893
] | [
36850,
36868,
36882,
36896
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/contrib/sacrebleu/setup.py | #!/usr/bin/env python3
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
A setuptools based setup module.
See:
- https://packaging.python.org/en/latest/distributing.html
- https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup
import re
import os
def get_version():
VERSION_RE = re.compile(r'''VERSION\s+=\s+['"]([0-9.]+)['"]''')
init = open(os.path.join(os.path.dirname(__file__), 'sacrebleu.py')).read()
return VERSION_RE.search(init).group(1)
setup(
name = 'sacrebleu',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version = get_version(),
description = 'Hassle-free computation of shareable, comparable, and reproducible BLEU scores',
long_description = 'SacréBLEU is a standard BLEU implementation that downloads and manages WMT datasets, produces scores on detokenized outputs, and reports a string encapsulating BLEU parameters, facilitating the production of shareable, comparable BLEU scores.',
# The project's main homepage.
url = 'https://github.com/awslabs/sockeye',
author = 'Amazon',
author_email='[email protected]',
maintainer_email='[email protected]',
license = 'Apache License 2.0',
python_requires = '>=3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Text Processing',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3 :: Only',
],
# What does your project relate to?
    keywords = ['machine translation', 'evaluation', 'NLP', 'natural language processing', 'computational linguistics'],
    # This project is distributed as a single module:
py_modules = ["sacrebleu"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires = ['typing'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require = {},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sacrebleu = sacrebleu:main',
],
},
)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sockeye documentation build configuration file, created by
# sphinx-quickstart on Wed May 17 15:38:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import sys
from unittest.mock import MagicMock
sys.path.insert(0, os.path.abspath('..'))
class MockClass(MagicMock):
def __init__(self, name="", *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = name
def __repr__(self):
return ":class:`~%s`" % self.name
class MockModule(MagicMock):
@classmethod
def __getattr__(cls, name):
return MockClass(name)
MOCK_MODULES = ['mxnet', 'mxnet.metric', 'numpy']
sys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)
ROOT = os.path.dirname(__file__)
def get_version():
VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
init = open(os.path.join(ROOT, '../sockeye', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx_autodoc_typehints',
'sphinx.ext.imgmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sockeye'
copyright = 'Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.'
author = 'Amazon'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'navigation_depth': 2,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sockeye_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sockeye.tex', 'Sockeye Documentation',
'amazon', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sockeye', 'Sockeye Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sockeye', 'Sockeye Documentation',
author, 'sockeye', 'Sequence-to-Sequence modeling with MXNet',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/setup.py | import sys
import os
import re
import logging
import argparse
import subprocess
from setuptools import setup, find_packages
from contextlib import contextmanager
ROOT = os.path.dirname(__file__)
def get_long_description():
with open(os.path.join(ROOT, 'README.md'), encoding='utf-8') as f:
markdown_txt = f.read()
try:
import pypandoc
long_description = pypandoc.convert(markdown_txt, 'rst', format='md')
except(IOError, ImportError):
logging.warning("Could not import package 'pypandoc'. Will not convert markdown readme to rst for PyPI.")
long_description = markdown_txt
return long_description
def get_version():
VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
init = open(os.path.join(ROOT, 'sockeye', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
def get_git_hash():
try:
sp = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_str = sp.communicate()[0].decode("utf-8").strip()
return out_str
    except Exception:
        return "unknown"
@contextmanager
def temporarily_write_git_hash(git_hash, filename=os.path.join('sockeye', 'git_version.py')):
"""Temporarily create a module git_version in sockeye so that it will be included when installing and packaging."""
content = """
# This file is automatically generated in setup.py
git_hash = "%s"
""" % git_hash
if os.path.exists(filename):
raise RuntimeError("%s already exists, will not overwrite" % filename)
with open(filename, "w") as out:
out.write(content)
try:
yield
except:
raise
finally:
os.remove(filename)
def get_requirements(filename):
with open(os.path.join(ROOT, filename)) as f:
return [line.rstrip() for line in f]
try:
from sphinx.setup_command import BuildDoc
cmdclass = {'build_sphinx': BuildDoc}
except ImportError:
logging.warning("Package 'sphinx' not found. You will not be able to build docs.")
cmdclass = {}
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-r', '--requirement', help='Optionally specify a different requirements.txt file.', required=False)
args, unparsed_args = parser.parse_known_args()
sys.argv[1:] = unparsed_args
if args.requirement is None:
install_requires = get_requirements('requirements.txt')
else:
install_requires = get_requirements(args.requirement)
args = dict(
name='sockeye',
version=get_version(),
description='Sequence-to-Sequence framework for Neural Machine Translation',
long_description=get_long_description(),
url='https://github.com/awslabs/sockeye',
author='Amazon',
author_email='[email protected]',
maintainer_email='[email protected]',
license='Apache License 2.0',
python_requires='>=3',
packages=find_packages(exclude=("test",)),
setup_requires=['pytest-runner'],
tests_require=['pytest', 'pytest-cov'],
extras_require={
'optional': ['tensorboard', 'matplotlib'],
'dev': get_requirements('requirements.dev.txt')
},
install_requires=install_requires,
entry_points={
'console_scripts': [
'sockeye-average = sockeye.average:main',
'sockeye-embeddings = sockeye.embeddings:main',
'sockeye-evaluate = sockeye.evaluate:main',
'sockeye-extract-parameters = sockeye.extract_parameters:main',
'sockeye-lexicon = sockeye.lexicon:main',
'sockeye-init-embed = sockeye.init_embedding:main',
'sockeye-prepare-data = sockeye.prepare_data:main',
'sockeye-train = sockeye.train:main',
'sockeye-translate = sockeye.translate:main',
'sockeye-vocab = sockeye.vocab:main'
],
},
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
],
cmdclass=cmdclass,
)
with temporarily_write_git_hash(get_git_hash()):
setup(**args)
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = '1.16.2'
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/arguments.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines commandline arguments for the main CLIs with reasonable defaults.
"""
import argparse
import sys
import os
from typing import Callable, Optional
from sockeye.lr_scheduler import LearningRateSchedulerFixedStep
from . import constants as C
from . import data_io
def regular_file() -> Callable:
"""
Returns a method that can be used in argument parsing to check the argument is a regular file or a symbolic link,
but not, e.g., a process substitution.
:return: A method that can be used as a type in argparse.
"""
def check_regular_file(value_to_check):
value_to_check = str(value_to_check)
if not os.path.isfile(value_to_check):
raise argparse.ArgumentTypeError("must exist and be a regular file.")
return value_to_check
return check_regular_file
def regular_folder() -> Callable:
"""
Returns a method that can be used in argument parsing to check the argument is a directory.
:return: A method that can be used as a type in argparse.
"""
def check_regular_directory(value_to_check):
value_to_check = str(value_to_check)
if not os.path.isdir(value_to_check):
raise argparse.ArgumentTypeError("must be a directory.")
return value_to_check
return check_regular_directory
def int_greater_or_equal(threshold: int) -> Callable:
"""
Returns a method that can be used in argument parsing to check that the argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse.
"""
def check_greater_equal(value_to_check):
value_to_check = int(value_to_check)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %d." % threshold)
return value_to_check
return check_greater_equal
def learning_schedule() -> Callable:
"""
Returns a method that can be used in argument parsing to check that the argument is a valid learning rate schedule
string.
:return: A method that can be used as a type in argparse.
"""
def parse(schedule_str):
try:
schedule = LearningRateSchedulerFixedStep.parse_schedule_str(schedule_str)
except ValueError:
raise argparse.ArgumentTypeError(
"Learning rate schedule string should have form rate1:num_updates1[,rate2:num_updates2,...]")
return schedule
return parse
def simple_dict() -> Callable:
"""
A simple dictionary format that does not require spaces or quoting.
Supported types: bool, int, float
:return: A method that can be used as a type in argparse.
"""
def parse(dict_str: str):
def _parse(value: str):
if value == "True":
return True
if value == "False":
return False
if "." in value:
return float(value)
return int(value)
_dict = dict()
try:
for entry in dict_str.split(","):
key, value = entry.split(":")
_dict[key] = _parse(value)
except ValueError:
raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..."
" Supported types: bool, int, float.")
return _dict
return parse
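# Example (worked out from the parser above):
#
#   simple_dict()('lr:0.0003,clip:True,layers:2')
#   -> {'lr': 0.0003, 'clip': True, 'layers': 2}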
def multiple_values(num_values: int = 0,
greater_or_equal: Optional[float] = None,
data_type: Callable = int) -> Callable:
"""
Returns a method to be used in argument parsing to parse a string of the form "<val>:<val>[:<val>...]" into
a tuple of values of type data_type.
:param num_values: Optional number of ints required.
:param greater_or_equal: Optional constraint that all values should be greater or equal to this value.
:param data_type: Type of values. Default: int.
:return: Method for parsing.
"""
def parse(value_to_check):
if ':' in value_to_check:
expected_num_separators = num_values - 1 if num_values else 0
if expected_num_separators > 0 and (value_to_check.count(':') != expected_num_separators):
raise argparse.ArgumentTypeError("Expected either a single value or %d values separated by %s" %
(num_values, C.ARG_SEPARATOR))
values = tuple(map(data_type, value_to_check.split(C.ARG_SEPARATOR, num_values - 1)))
else:
values = tuple([data_type(value_to_check)] * num_values)
if greater_or_equal is not None:
if any((value < greater_or_equal for value in values)):
raise argparse.ArgumentTypeError("Must provide value greater or equal to %d" % greater_or_equal)
return values
return parse
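# Example (assuming C.ARG_SEPARATOR is ':'):
#
#   parse = multiple_values(num_values=2, greater_or_equal=1)
#   parse('100:80')  -> (100, 80)
#   parse('100')     -> (100, 100)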
def file_or_stdin() -> Callable:
"""
Returns a file descriptor from stdin or opening a file from a given path.
"""
def parse(path):
if path is None or path == "-":
return sys.stdin
else:
return data_io.smart_open(path)
return parse
def add_average_args(params):
average_params = params.add_argument_group("Averaging")
average_params.add_argument(
"inputs",
metavar="INPUT",
type=str,
nargs="+",
help="either a single model directory (automatic checkpoint selection) "
"or multiple .params files (manual checkpoint selection)")
average_params.add_argument(
"--metric",
help="Name of the metric to choose n-best checkpoints from. Default: %(default)s.",
default=C.PERPLEXITY,
choices=C.METRICS)
average_params.add_argument(
"-n",
type=int,
default=4,
help="number of checkpoints to find. Default: %(default)s.")
average_params.add_argument(
"--output", "-o", required=True, type=str, help="File to write averaged parameters to.")
average_params.add_argument(
"--strategy",
choices=["best", "last", "lifespan"],
default="best",
help="selection method. Default: %(default)s.")
def add_extract_args(params):
extract_params = params.add_argument_group("Extracting")
extract_params.add_argument("input",
metavar="INPUT",
type=str,
help="Either a model directory (using params.best) or a specific params.x file.")
extract_params.add_argument('--names', '-n',
nargs='*',
default=[],
help='Names of parameters to be extracted.')
extract_params.add_argument('--list-all', '-l',
action='store_true',
help='List names of all available parameters.')
extract_params.add_argument('--output', '-o',
type=str,
help="File to write extracted parameters to (in .npz format).")
def add_lexicon_args(params):
lexicon_params = params.add_argument_group("Lexicon")
lexicon_params.add_argument(
"--input",
"-i",
required=True,
type=str,
help="Probabilistic lexicon (fast_align format) to use for building top-k lexicon.")
lexicon_params.add_argument(
"--output",
"-o",
required=True,
type=str,
help="JSON file to write top-k lexicon to.")
lexicon_params.add_argument(
"--model",
"-m",
required=True,
type=str,
help="Trained model directory for source and target vocab.")
lexicon_params.add_argument(
"-k",
type=int,
default=20,
help="Number of target translations to keep per source. Default: %(default)s.")
def add_logging_args(params):
logging_params = params.add_argument_group("Logging")
logging_params.add_argument('--quiet', '-q',
default=False,
action="store_true",
help='Suppress console logging.')
def add_training_data_args(params, required=False):
params.add_argument(C.TRAINING_ARG_SOURCE, '-s',
required=required,
type=regular_file(),
help='Source side of parallel training data.')
params.add_argument(C.TRAINING_ARG_TARGET, '-t',
required=required,
type=regular_file(),
help='Target side of parallel training data.')
def add_validation_data_params(params):
params.add_argument('--validation-source', '-vs',
required=True,
type=regular_file(),
help='Source side of validation data.')
params.add_argument('--validation-target', '-vt',
required=True,
type=regular_file(),
help='Target side of validation data.')
def add_prepared_data_args(params):
params.add_argument(C.TRAINING_ARG_PREPARED_DATA, '-d',
type=regular_folder(),
help='Prepared training data directory created through python -m sockeye.prepare_data.')
def add_monitoring_args(params):
params.add_argument('--use-tensorboard',
action='store_true',
help='Track metrics through tensorboard. Requires installed tensorboard.')
params.add_argument('--monitor-pattern',
default=None,
type=str,
help="Pattern to match outputs/weights/gradients to monitor. '.*' monitors everything. "
"Default: %(default)s.")
params.add_argument('--monitor-stat-func',
default=C.STAT_FUNC_DEFAULT,
choices=list(C.MONITOR_STAT_FUNCS.keys()),
help="Statistics function to run on monitored outputs/weights/gradients. "
"Default: %(default)s.")
def add_training_output_args(params):
params.add_argument('--output', '-o',
required=True,
help='Folder where model & training results are written to.')
params.add_argument('--overwrite-output',
action='store_true',
help='Delete all contents of the model directory if it already exists.')
def add_training_io_args(params):
params = params.add_argument_group("Data & I/O")
    # Unfortunately we must set --source/--target to not required as we either accept these parameters
    # or --prepared-data, which cannot easily be encoded in argparse.
add_training_data_args(params, required=False)
add_prepared_data_args(params)
add_validation_data_params(params)
add_bucketing_args(params)
add_vocab_args(params)
add_training_output_args(params)
add_monitoring_args(params)
def add_bucketing_args(params):
params.add_argument('--no-bucketing',
action='store_true',
help='Disable bucketing: always unroll the graph to --max-seq-len. Default: %(default)s.')
params.add_argument('--bucket-width',
type=int_greater_or_equal(1),
default=10,
help='Width of buckets in tokens. Default: %(default)s.')
params.add_argument('--max-seq-len',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(100, 100),
help='Maximum sequence length in tokens. Note that the target side will be extended by '
'the <BOS> (beginning of sentence) token, increasing the effective target length. '
'Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
def add_prepare_data_cli_args(params):
params = params.add_argument_group("Data preparation.")
add_training_data_args(params, required=True)
add_vocab_args(params)
add_bucketing_args(params)
params.add_argument('--num-samples-per-shard',
type=int_greater_or_equal(1),
default=1000000,
help='The approximate number of samples per shard. Default: %(default)s.')
params.add_argument('--min-num-shards',
default=1,
type=int_greater_or_equal(1),
help='The minimum number of shards to use, even if they would not '
'reach the desired number of samples per shard. Default: %(default)s.')
params.add_argument('--seed',
type=int,
default=13,
help='Random seed used that makes shard assignments deterministic. Default: %(default)s.')
params.add_argument('--output', '-o',
required=True,
help='Folder where the prepared and possibly sharded data is written to.')
def add_device_args(params):
device_params = params.add_argument_group("Device parameters")
device_params.add_argument('--device-ids', default=[-1],
                               help='List of GPU ids to use, or a number of GPUs to acquire. Default: %(default)s. '
'Use negative numbers to automatically acquire a certain number of GPUs, e.g. -5 '
'will find 5 free GPUs. '
'Use positive numbers to acquire a specific GPU id on this host. '
'(Note that automatic acquisition of GPUs assumes that all GPU processes on '
'this host are using automatic sockeye GPU acquisition).',
nargs='+', type=int)
device_params.add_argument('--use-cpu',
action='store_true',
help='Use CPU device instead of GPU.')
device_params.add_argument('--disable-device-locking',
action='store_true',
help='Just use the specified device ids without locking.')
device_params.add_argument('--lock-dir',
default="/tmp",
                               help='When acquiring a GPU we do file based locking so that only one Sockeye process '
                                    'can run on a given GPU. This is the folder in which we store the file '
                                    'locks. For locking to work correctly it is assumed all processes use the same '
                                    'lock directory. The only requirement for the directory is file '
                                    'write permissions.')
def add_vocab_args(params):
params.add_argument('--source-vocab',
required=False,
default=None,
help='Existing source vocabulary (JSON).')
params.add_argument('--target-vocab',
required=False,
default=None,
help='Existing target vocabulary (JSON).')
params.add_argument(C.VOCAB_ARG_SHARED_VOCAB,
action='store_true',
default=False,
help='Share source and target vocabulary. '
'Will be automatically turned on when using weight tying. Default: %(default)s.')
params.add_argument('--num-words',
type=multiple_values(num_values=2, greater_or_equal=0),
default=(50000, 50000),
help='Maximum vocabulary size. Use "x:x" to specify separate values for src&tgt. '
'Default: %(default)s.')
params.add_argument('--word-min-count',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(1, 1),
help='Minimum frequency of words to be included in vocabularies. Default: %(default)s.')
def add_model_parameters(params):
model_params = params.add_argument_group("ModelConfig")
model_params.add_argument('--params', '-p',
type=str,
default=None,
help='Initialize model parameters from file. Overrides random initializations.')
model_params.add_argument('--allow-missing-params',
action="store_true",
default=False,
help="Allow misssing parameters when initializing model parameters from file. "
"Default: %(default)s.")
model_params.add_argument('--encoder',
choices=C.ENCODERS,
default=C.RNN_NAME,
help="Type of encoder. Default: %(default)s.")
model_params.add_argument('--decoder',
choices=C.DECODERS,
default=C.RNN_NAME,
help="Type of encoder. Default: %(default)s.")
model_params.add_argument('--num-layers',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(1, 1),
help='Number of layers for encoder & decoder. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--conv-embed-output-dim',
type=int_greater_or_equal(1),
default=None,
help="Project segment embeddings to this size for ConvolutionalEmbeddingEncoder. Omit to"
" avoid projection, leaving segment embeddings total size of all filters. Default:"
" %(default)s.")
model_params.add_argument('--conv-embed-max-filter-width',
type=int_greater_or_equal(1),
default=8,
help="Maximum filter width for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
model_params.add_argument('--conv-embed-num-filters',
type=multiple_values(greater_or_equal=1),
default=(200, 200, 250, 250, 300, 300, 300, 300),
help="List of number of filters of each width 1..max for ConvolutionalEmbeddingEncoder. "
"Default: %(default)s.")
model_params.add_argument('--conv-embed-pool-stride',
type=int_greater_or_equal(1),
default=5,
help="Pooling stride for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
model_params.add_argument('--conv-embed-num-highway-layers',
type=int_greater_or_equal(0),
default=4,
help="Number of highway layers for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
model_params.add_argument('--conv-embed-add-positional-encodings',
action='store_true',
default=False,
help="Add positional encodings to final segment embeddings for"
" ConvolutionalEmbeddingEncoder. Default: %(default)s.")
    # convolutional encoder/decoder arguments
model_params.add_argument('--cnn-kernel-width',
type=multiple_values(num_values=2, greater_or_equal=1, data_type=int),
default=(3, 5),
help='Kernel width of the convolutional encoder and decoder. Default: %(default)s.')
model_params.add_argument('--cnn-num-hidden',
type=int_greater_or_equal(1),
default=512,
help='Number of hidden units for the convolutional encoder and decoder. '
'Default: %(default)s.')
model_params.add_argument('--cnn-activation-type',
choices=C.CNN_ACTIVATION_TYPES,
default=C.GLU,
help="Type activation to use for each convolutional layer. Default: %(default)s.")
model_params.add_argument('--cnn-positional-embedding-type',
choices=C.POSITIONAL_EMBEDDING_TYPES,
default=C.LEARNED_POSITIONAL_EMBEDDING,
help='The type of positional embedding. Default: %(default)s.')
model_params.add_argument('--cnn-project-qkv',
action='store_true',
default=False,
help="Optionally apply query, key and value projections to the source and target hidden "
"vectors before applying the attention mechanism.")
# rnn arguments
model_params.add_argument('--rnn-cell-type',
choices=C.CELL_TYPES,
default=C.LSTM_TYPE,
help='RNN cell type for encoder and decoder. Default: %(default)s.')
model_params.add_argument('--rnn-num-hidden',
type=int_greater_or_equal(1),
default=1024,
help='Number of RNN hidden units for encoder and decoder. Default: %(default)s.')
model_params.add_argument('--rnn-encoder-reverse-input',
action='store_true',
help='Reverse input sequence for RNN encoder. Default: %(default)s.')
model_params.add_argument('--rnn-decoder-state-init',
default=C.RNN_DEC_INIT_LAST,
choices=C.RNN_DEC_INIT_CHOICES,
help='How to initialize RNN decoder states. Default: %(default)s.')
model_params.add_argument('--rnn-residual-connections',
action="store_true",
default=False,
help="Add residual connections to stacked RNNs. (see Wu ETAL'16). Default: %(default)s.")
model_params.add_argument('--rnn-first-residual-layer',
type=int_greater_or_equal(2),
default=2,
help='First RNN layer to have a residual connection. Default: %(default)s.')
model_params.add_argument('--rnn-context-gating', action="store_true",
help="Enables a context gate which adaptively weighs the RNN decoder input against the "
"source context vector before each update of the decoder hidden state.")
# transformer arguments
model_params.add_argument('--transformer-model-size',
type=int_greater_or_equal(1),
default=512,
help='Size of all layers and embeddings when using transformer. Default: %(default)s.')
model_params.add_argument('--transformer-attention-heads',
type=int_greater_or_equal(1),
default=8,
help='Number of heads for all self-attention when using transformer layers. '
'Default: %(default)s.')
model_params.add_argument('--transformer-feed-forward-num-hidden',
type=int_greater_or_equal(1),
default=2048,
help='Number of hidden units in feed forward layers when using transformer. '
'Default: %(default)s.')
model_params.add_argument('--transformer-activation-type',
choices=C.TRANSFORMER_ACTIVATION_TYPES,
default=C.RELU,
help="Type activation to use for each feed forward layer. Default: %(default)s.")
model_params.add_argument('--transformer-positional-embedding-type',
choices=C.POSITIONAL_EMBEDDING_TYPES,
default=C.FIXED_POSITIONAL_EMBEDDING,
help='The type of positional embedding. Default: %(default)s.')
model_params.add_argument('--transformer-preprocess',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=('', ''),
help='Transformer preprocess sequence for encoder and decoder. Supports three types of '
'operations: d=dropout, r=residual connection, n=layer normalization. You can '
'combine in any order, for example: "ndr". '
'Leave empty to not use any of these operations. '
'You can specify separate sequences for encoder and decoder by separating with ":" '
'For example: n:drn '
'Default: %(default)s.')
model_params.add_argument('--transformer-postprocess',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=('drn', 'drn'),
help='Transformer postprocess sequence for encoder and decoder. Supports three types of '
'operations: d=dropout, r=residual connection, n=layer normalization. You can '
'combine in any order, for example: "ndr". '
'Leave empty to not use any of these operations. '
'You can specify separate sequences for encoder and decoder by separating with ":" '
'For example: n:drn '
'Default: %(default)s.')
# embedding arguments
model_params.add_argument('--num-embed',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(512, 512),
help='Embedding size for source and target tokens. '
'Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
# attention arguments
model_params.add_argument('--rnn-attention-type',
choices=C.ATT_TYPES,
default=C.ATT_MLP,
help='Attention model for RNN decoders. Choices: {%(choices)s}. '
'Default: %(default)s.')
model_params.add_argument('--rnn-attention-num-hidden',
default=None,
type=int,
help='Number of hidden units for attention layers. Default: equal to --rnn-num-hidden.')
model_params.add_argument('--rnn-attention-use-prev-word', action="store_true",
help="Feed the previous target embedding into the attention mechanism.")
model_params.add_argument('--rnn-attention-coverage-type',
choices=["tanh", "sigmoid", "relu", "softrelu", "gru", "count"],
default="count",
help="Type of model for updating coverage vectors. 'count' refers to an update method"
"that accumulates attention scores. 'tanh', 'sigmoid', 'relu', 'softrelu' "
"use non-linear layers with the respective activation type, and 'gru' uses a"
"GRU to update the coverage vectors. Default: %(default)s.")
model_params.add_argument('--rnn-attention-coverage-num-hidden',
type=int,
default=1,
help="Number of hidden units for coverage vectors. Default: %(default)s.")
model_params.add_argument('--rnn-attention-in-upper-layers',
action="store_true",
help="Pass the attention to the upper layers of the RNN decoder, similar "
"to GNMT paper. Only applicable if more than one layer is used.")
model_params.add_argument('--rnn-attention-mhdot-heads',
type=int, default=None,
help='Number of heads for Multi-head dot attention. Default: %(default)s.')
model_params.add_argument('--weight-tying',
action='store_true',
help='Turn on weight tying (see arxiv.org/abs/1608.05859). '
'The type of weight sharing is determined through '
'--weight-tying-type. Default: %(default)s.')
model_params.add_argument('--weight-tying-type',
default=C.WEIGHT_TYING_TRG_SOFTMAX,
choices=[C.WEIGHT_TYING_SRC_TRG_SOFTMAX,
C.WEIGHT_TYING_SRC_TRG,
C.WEIGHT_TYING_TRG_SOFTMAX],
                              help='The type of weight tying: source embeddings=src, target embeddings=trg, '
'target softmax weight matrix=softmax. Default: %(default)s.')
model_params.add_argument('--layer-normalization', action="store_true",
help="Adds layer normalization before non-linear activations. "
"This includes MLP attention, RNN decoder state initialization, "
"RNN decoder hidden state, and cnn layers."
"It does not normalize RNN cell activations "
"(this can be done using the '%s' or '%s' rnn-cell-type." % (C.LNLSTM_TYPE,
C.LNGLSTM_TYPE))
model_params.add_argument('--weight-normalization', action="store_true",
help="Adds weight normalization to decoder output layers "
"(and all convolutional weight matrices for CNN decoders). Default: %(default)s.")
def add_training_args(params):
train_params = params.add_argument_group("Training parameters")
train_params.add_argument('--batch-size', '-b',
type=int_greater_or_equal(1),
default=64,
help='Mini-batch size. Default: %(default)s.')
train_params.add_argument("--batch-type",
type=str,
default=C.BATCH_TYPE_SENTENCE,
choices=[C.BATCH_TYPE_SENTENCE, C.BATCH_TYPE_WORD],
help="Sentence: each batch contains X sentences, number of words varies. Word: each batch"
" contains (approximately) X words, number of sentences varies. Default: %(default)s.")
train_params.add_argument('--fill-up',
type=str,
default='replicate',
help=argparse.SUPPRESS)
train_params.add_argument('--loss',
default=C.CROSS_ENTROPY,
choices=[C.CROSS_ENTROPY],
help='Loss to optimize. Default: %(default)s.')
train_params.add_argument('--label-smoothing',
default=0.0,
type=float,
help='Smoothing constant for label smoothing. Default: %(default)s.')
train_params.add_argument('--loss-normalization-type',
default=C.LOSS_NORM_VALID,
choices=[C.LOSS_NORM_VALID, C.LOSS_NORM_BATCH],
help='How to normalize the loss. By default we normalize by the number '
'of valid/non-PAD tokens (%s)' % C.LOSS_NORM_VALID)
train_params.add_argument('--metrics',
nargs='+',
default=[C.PERPLEXITY],
choices=[C.PERPLEXITY, C.ACCURACY],
help='Names of metrics to track on training and validation data. Default: %(default)s.')
train_params.add_argument('--optimized-metric',
default=C.PERPLEXITY,
choices=C.METRICS,
help='Metric to optimize with early stopping {%(choices)s}. '
'Default: %(default)s.')
train_params.add_argument('--max-updates',
type=int,
default=None,
help='Maximum number of updates/batches to process. Default: %(default)s.')
train_params.add_argument(C.TRAIN_ARGS_CHECKPOINT_FREQUENCY,
type=int_greater_or_equal(1),
default=1000,
help='Checkpoint and evaluate every x updates/batches. Default: %(default)s.')
train_params.add_argument('--max-num-checkpoint-not-improved',
type=int,
default=8,
help='Maximum number of checkpoints the model is allowed to not improve in '
'<optimized-metric> on validation data before training is stopped. '
'Default: %(default)s')
train_params.add_argument('--min-num-epochs',
type=int,
default=None,
help='Minimum number of epochs (passes through the training data) '
'before fitting is stopped. Default: %(default)s.')
train_params.add_argument('--max-num-epochs',
type=int,
default=None,
help='Maximum number of epochs (passes through the training data) '
'before fitting is stopped. Default: %(default)s.')
train_params.add_argument('--embed-dropout',
type=multiple_values(2, data_type=float),
default=(.0, .0),
help='Dropout probability for source & target embeddings. Use "x:x" to specify '
'separate values. Default: %(default)s.')
train_params.add_argument('--rnn-dropout-inputs',
type=multiple_values(2, data_type=float),
default=(.0, .0),
                              help='RNN variational dropout probability for encoder & decoder RNN inputs (Gal, 2015). '
'Use "x:x" to specify separate values. Default: %(default)s.')
train_params.add_argument('--rnn-dropout-states',
type=multiple_values(2, data_type=float),
default=(.0, .0),
                              help='RNN variational dropout probability for encoder & decoder RNN states (Gal, 2015). '
'Use "x:x" to specify separate values. Default: %(default)s.')
train_params.add_argument('--rnn-dropout-recurrent',
type=multiple_values(2, data_type=float),
default=(.0, .0),
help='Recurrent dropout without memory loss (Semeniuta, 2016) for encoder & decoder '
'LSTMs. Use "x:x" to specify separate values. Default: %(default)s.')
train_params.add_argument('--rnn-decoder-hidden-dropout',
type=float,
default=.0,
help='Dropout probability for hidden state that combines the context with the '
'RNN hidden state in the decoder. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-attention',
type=float,
default=0.,
help='Dropout probability for multi-head attention. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-act',
type=float,
default=0.,
help='Dropout probability before activation in feed-forward block. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-prepost',
type=float,
default=0.,
help='Dropout probability for pre/postprocessing blocks. Default: %(default)s.')
train_params.add_argument('--conv-embed-dropout',
type=float,
default=.0,
help="Dropout probability for ConvolutionalEmbeddingEncoder. Default: %(default)s.")
train_params.add_argument('--cnn-hidden-dropout',
type=float,
default=.0,
help="Dropout probability for dropout between convolutional layers. Default: %(default)s.")
train_params.add_argument('--optimizer',
default=C.OPTIMIZER_ADAM,
choices=C.OPTIMIZERS,
help='SGD update rule. Default: %(default)s.')
train_params.add_argument('--optimizer-params',
type=simple_dict(),
default=None,
help='Additional optimizer params as dictionary. Format: key1:value1,key2:value2,...')
train_params.add_argument("--kvstore",
type=str,
default=C.KVSTORE_DEVICE,
choices=C.KVSTORE_TYPES,
help="The MXNet kvstore to use. 'device' is recommended for single process training. "
"Use any of 'dist_sync', 'dist_device_sync' and 'dist_async' for distributed "
"training. Default: %(default)s.")
train_params.add_argument("--gradient-compression-type",
type=str,
default=C.GRADIENT_COMPRESSION_NONE,
choices=C.GRADIENT_COMPRESSION_TYPES,
help='Type of gradient compression to use. Default: %(default)s.')
train_params.add_argument("--gradient-compression-threshold",
type=float,
default=0.5,
help="Threshold for gradient compression if --gctype is '2bit'. Default: %(default)s.")
train_params.add_argument('--weight-init',
type=str,
default=C.INIT_XAVIER,
choices=C.INIT_TYPES,
help='Type of base weight initialization. Default: %(default)s.')
train_params.add_argument('--weight-init-scale',
type=float,
default=2.34,
help='Weight initialization scale. Applies to uniform (scale) and xavier (magnitude). '
'Default: %(default)s.')
train_params.add_argument('--weight-init-xavier-factor-type',
type=str,
default='in',
choices=['in', 'out', 'avg'],
help='Xavier factor type. Default: %(default)s.')
train_params.add_argument('--weight-init-xavier-rand-type',
type=str,
default=C.RAND_TYPE_UNIFORM,
choices=[C.RAND_TYPE_UNIFORM, C.RAND_TYPE_GAUSSIAN],
help='Xavier random number generator type. Default: %(default)s.')
train_params.add_argument('--embed-weight-init',
type=str,
default=C.EMBED_INIT_DEFAULT,
choices=C.EMBED_INIT_TYPES,
help='Type of embedding matrix weight initialization. If normal, initializes embedding '
                                   'weights using a normal distribution with std=1/sqrt(vocab_size). '
'Default: %(default)s.')
train_params.add_argument('--initial-learning-rate',
type=float,
default=0.0003,
help='Initial learning rate. Default: %(default)s.')
train_params.add_argument('--weight-decay',
type=float,
default=0.0,
help='Weight decay constant. Default: %(default)s.')
train_params.add_argument('--momentum',
type=float,
default=None,
help='Momentum constant. Default: %(default)s.')
train_params.add_argument('--gradient-clipping-threshold',
type=float,
default=1.0,
help='Clip absolute gradients values greater than this value. '
'Set to negative to disable. Default: %(default)s.')
train_params.add_argument('--gradient-clipping-type',
choices=C.GRADIENT_CLIPPING_TYPES,
default=C.GRADIENT_CLIPPING_TYPE_ABS,
help='The type of gradient clipping. Default: %(default)s.')
train_params.add_argument('--learning-rate-scheduler-type',
default=C.LR_SCHEDULER_PLATEAU_REDUCE,
choices=C.LR_SCHEDULERS,
help='Learning rate scheduler type. Default: %(default)s.')
train_params.add_argument('--learning-rate-reduce-factor',
type=float,
default=0.5,
help="Factor to multiply learning rate with "
"(for 'plateau-reduce' learning rate scheduler). Default: %(default)s.")
train_params.add_argument('--learning-rate-reduce-num-not-improved',
type=int,
default=3,
help="For 'plateau-reduce' learning rate scheduler. Adjust learning rate "
"if <optimized-metric> did not improve for x checkpoints. Default: %(default)s.")
train_params.add_argument('--learning-rate-schedule',
type=learning_schedule(),
default=None,
help="For 'fixed-step' scheduler. Fully specified learning schedule in the form"
" \"rate1:num_updates1[,rate2:num_updates2,...]\". Overrides all other args related"
" to learning rate and stopping conditions. Default: %(default)s.")
train_params.add_argument('--learning-rate-half-life',
type=float,
default=10,
help="Half-life of learning rate in checkpoints. For 'fixed-rate-*' "
"learning rate schedulers. Default: %(default)s.")
train_params.add_argument('--learning-rate-warmup',
type=int,
default=0,
help="Number of warmup steps. If set to x, linearly increases learning rate from 10%% "
"to 100%% of the initial learning rate. Default: %(default)s.")
train_params.add_argument('--learning-rate-decay-param-reset',
action='store_true',
help='Resets model parameters to current best when learning rate is reduced due to the '
'value of --learning-rate-reduce-num-not-improved. Default: %(default)s.')
train_params.add_argument('--learning-rate-decay-optimizer-states-reset',
choices=C.LR_DECAY_OPT_STATES_RESET_CHOICES,
default=C.LR_DECAY_OPT_STATES_RESET_OFF,
help="Action to take on optimizer states (e.g. Adam states) when learning rate is "
"reduced due to the value of --learning-rate-reduce-num-not-improved. "
"Default: %(default)s.")
train_params.add_argument('--rnn-forget-bias',
default=0.0,
type=float,
help='Initial value of RNN forget biases.')
train_params.add_argument('--rnn-h2h-init', type=str, default=C.RNN_INIT_ORTHOGONAL,
choices=[C.RNN_INIT_ORTHOGONAL, C.RNN_INIT_ORTHOGONAL_STACKED, C.RNN_INIT_DEFAULT],
help="Initialization method for RNN parameters. Default: %(default)s.")
train_params.add_argument(C.TRAIN_ARGS_MONITOR_BLEU,
default=0,
type=int,
help='x>0: decode x sampled sentences from validation data and '
'compute evaluation metrics. x==-1: use full validation data. Default: %(default)s.')
train_params.add_argument('--decode-and-evaluate-use-cpu',
action='store_true',
help='Use CPU for decoding validation data. Overrides --decode-and-evaluate-device-id. '
'Default: %(default)s.')
train_params.add_argument('--decode-and-evaluate-device-id',
default=None,
type=int,
help='Separate device for decoding validation data. '
'Use a negative number to automatically acquire a GPU. '
'Use a positive number to acquire a specific GPU. Default: %(default)s.')
train_params.add_argument('--seed',
type=int,
default=13,
help='Random seed. Default: %(default)s.')
train_params.add_argument('--keep-last-params',
type=int,
default=-1,
help='Keep only the last n params files, use -1 to keep all files. Default: %(default)s')
def add_train_cli_args(params):
add_training_io_args(params)
add_model_parameters(params)
add_training_args(params)
add_device_args(params)
add_logging_args(params)
def add_translate_cli_args(params):
add_inference_args(params)
add_device_args(params)
add_logging_args(params)
def add_inference_args(params):
decode_params = params.add_argument_group("Inference parameters")
decode_params.add_argument(C.INFERENCE_ARG_INPUT_LONG, C.INFERENCE_ARG_INPUT_SHORT,
default=None,
help='Input file to translate. One sentence per line. '
'If not given, will read from stdin.')
decode_params.add_argument(C.INFERENCE_ARG_OUTPUT_LONG, C.INFERENCE_ARG_OUTPUT_SHORT,
default=None,
help='Output file to write translations to. '
'If not given, will write to stdout.')
decode_params.add_argument('--models', '-m',
required=True,
nargs='+',
help='Model folder(s). Use multiple for ensemble decoding. '
'Model determines config, best parameters and vocab files.')
decode_params.add_argument('--checkpoints', '-c',
default=None,
type=int,
nargs='+',
help='If not given, chooses best checkpoints for model(s). '
                                   'If specified, must have the same length as --models and consist of integers.')
decode_params.add_argument('--beam-size', '-b',
type=int_greater_or_equal(1),
default=5,
help='Size of the beam. Default: %(default)s.')
decode_params.add_argument('--batch-size',
type=int_greater_or_equal(1),
default=1,
help='Batch size during decoding. Determines how many sentences are translated '
'simultaneously. Default: %(default)s.')
decode_params.add_argument('--chunk-size',
type=int_greater_or_equal(1),
default=None,
help='Size of the chunks to be read from input at once. The chunks are sorted and then '
'split into batches. Therefore the larger the chunk size the better the grouping '
'of segments of similar length and therefore the higher the increase in throughput.'
' Default: %d without batching '
'and %d * batch_size with batching.' % (C.CHUNK_SIZE_NO_BATCHING,
C.CHUNK_SIZE_PER_BATCH_SEGMENT))
decode_params.add_argument('--ensemble-mode',
type=str,
default='linear',
choices=['linear', 'log_linear'],
help='Ensemble mode. Default: %(default)s.')
decode_params.add_argument('--bucket-width',
type=int_greater_or_equal(0),
default=10,
help='Bucket width for encoder steps. 0 means no bucketing. Default: %(default)s.')
decode_params.add_argument('--max-input-len', '-n',
type=int,
default=None,
help='Maximum input sequence length. Default: value from model(s).')
decode_params.add_argument('--softmax-temperature',
type=float,
default=None,
help='Controls peakiness of model predictions. Values < 1.0 produce '
'peaked predictions, values > 1.0 produce smoothed distributions.')
decode_params.add_argument('--max-output-length-num-stds',
type=int,
default=C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
help='Number of target-to-source length ratio standard deviations from training to add '
'to calculate maximum output length for beam search for each sentence. '
'Default: %(default)s.')
decode_params.add_argument('--restrict-lexicon',
type=str,
default=None,
help="Specify top-k lexicon to restrict output vocabulary based on source. See lexicon "
"module. Default: %(default)s.")
decode_params.add_argument('--output-type',
default='translation',
choices=C.OUTPUT_HANDLERS,
help='Output type. Default: %(default)s.')
decode_params.add_argument('--sure-align-threshold',
default=0.9,
type=float,
help='Threshold to consider a soft alignment a sure alignment. Default: %(default)s')
decode_params.add_argument('--length-penalty-alpha',
default=1.0,
type=float,
help='Alpha factor for the length penalty used in beam search: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
'length normalization. Default: %(default)s')
decode_params.add_argument('--length-penalty-beta',
default=0.0,
type=float,
help='Beta factor for the length penalty used in beam search: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')
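# Worked example of the length penalty above (hypothetical values): with alpha=1.0,
# beta=0.0 and len(Y)=10 the penalty is (0 + 10)**1.0 / (0 + 1)**1.0 = 10.0, so
# hypothesis scores are divided by their length, i.e. plain length normalization;
# alpha=0.0 makes the penalty 1.0 for every length and disables normalization.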
def add_evaluate_args(params):
eval_params = params.add_argument_group("Evaluate parameters")
eval_params.add_argument('--references', '-r',
required=True,
type=str,
help="File with references.")
eval_params.add_argument('--hypotheses', '-i',
type=file_or_stdin(),
default=sys.stdin,
help="File with hypotheses. If none will read from stdin. Default: %(default)s.")
eval_params.add_argument('--metrics',
nargs='+',
default=[C.BLEU, C.CHRF],
help='List of metrics to compute. Default: %(default)s.')
eval_params.add_argument('--sentence', '-s',
action="store_true",
help="Show sentence-level metrics. Default: %(default)s.")
eval_params.add_argument('--offset',
type=float,
default=0.01,
help="Numerical value of the offset of zero n-gram counts. Default: %(default)s.")
eval_params.add_argument('--not-strict', '-n',
action="store_true",
help="Do not fail if number of hypotheses does not match number of references. "
"Default: %(default)s.")
def add_build_vocab_args(params):
params.add_argument('-i', '--inputs', required=True, nargs='+', help='List of text files to build vocabulary from.')
params.add_argument('-o', '--output', required=True, type=str, help="Output filename to write vocabulary to.")
add_vocab_args(params)
def add_init_embedding_args(params):
params.add_argument('--embeddings', '-e', required=True, nargs='+',
help='List of input embedding weights in .npy format.')
params.add_argument('--vocabularies-in', '-i', required=True, nargs='+',
help='List of input vocabularies as token-index dictionaries in .json format.')
params.add_argument('--vocabularies-out', '-o', required=True, nargs='+',
help='List of output vocabularies as token-index dictionaries in .json format.')
params.add_argument('--names', '-n', required=True, nargs='+',
help='List of Sockeye parameter names for embedding weights.')
params.add_argument('--file', '-f', required=True,
help='File to write initialized parameters to.')
params.add_argument('--encoding', '-c', type=str, default=C.VOCAB_ENCODING,
help='Open input vocabularies with specified encoding. Default: %(default)s.')
| [
"int",
"str",
"str"
] | [
1918,
3370,
3403
] | [
1921,
3373,
3406
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/average.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Average parameters from multiple model checkpoints. Checkpoints can be either
specified manually or automatically chosen according to one of several
strategies. The default strategy of simply selecting the top-scoring N points
works well in practice.
"""
import argparse
import itertools
import os
from typing import Dict, Iterable, List
import mxnet as mx
from sockeye.log import setup_main_logger, log_sockeye_version
from . import arguments
from . import constants as C
from . import utils
logger = setup_main_logger(__name__, console=True, file_logging=False)
def average(param_paths: Iterable[str]) -> Dict[str, mx.nd.NDArray]:
"""
Averages parameters from a list of .params file paths.
:param param_paths: List of paths to parameter files.
:return: Averaged parameter dictionary.
"""
all_arg_params = []
all_aux_params = []
for path in param_paths:
logger.info("Loading parameters from '%s'", path)
arg_params, aux_params = utils.load_params(path)
all_arg_params.append(arg_params)
all_aux_params.append(aux_params)
logger.info("%d models loaded", len(all_arg_params))
utils.check_condition(all(all_arg_params[0].keys() == p.keys() for p in all_arg_params),
"arg_param names do not match across models")
utils.check_condition(all(all_aux_params[0].keys() == p.keys() for p in all_aux_params),
"aux_param names do not match across models")
avg_params = {}
# average arg_params
for k in all_arg_params[0]:
arrays = [p[k] for p in all_arg_params]
avg_params["arg:" + k] = utils.average_arrays(arrays)
# average aux_params
for k in all_aux_params[0]:
arrays = [p[k] for p in all_aux_params]
avg_params["aux:" + k] = utils.average_arrays(arrays)
return avg_params
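# A minimal sketch of calling average() directly (paths are hypothetical; os, mx
# and C are the module-level imports above):
#
#     paths = [os.path.join(model_dir, C.PARAMS_NAME % c) for c in (8, 9, 10)]
#     avg_params = average(paths)
#     mx.nd.save("params.averaged", avg_params)
#
# The resulting dict maps "arg:<name>"/"aux:<name>" to elementwise-averaged
# NDArrays, the same layout mx.nd.save writes out in main() below.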
def find_checkpoints(model_path: str, size=4, strategy="best", metric: str = C.PERPLEXITY) -> List[str]:
"""
Finds N best points from .metrics file according to strategy.
:param model_path: Path to model.
:param size: Number of checkpoints to combine.
:param strategy: Combination strategy.
:param metric: Metric according to which checkpoints are selected. Corresponds to columns in model/metrics file.
:return: List of paths corresponding to chosen checkpoints.
"""
maximize = C.METRIC_MAXIMIZE[metric]
points = utils.get_validation_metric_points(model_path=model_path, metric=metric)
# keep only points for which .param files exist
param_path = os.path.join(model_path, C.PARAMS_NAME)
points = [(value, checkpoint) for value, checkpoint in points if os.path.exists(param_path % checkpoint)]
if strategy == "best":
# N best scoring points
top_n = _strategy_best(points, size, maximize)
elif strategy == "last":
# N sequential points ending with overall best
top_n = _strategy_last(points, size, maximize)
elif strategy == "lifespan":
# Track lifespan of every "new best" point
# Points dominated by a previous better point have lifespan 0
top_n = _strategy_lifespan(points, size, maximize)
else:
raise RuntimeError("Unknown strategy, options: best last lifespan")
# Assemble paths for params files corresponding to chosen checkpoints
# Last element in point is always the checkpoint id
params_paths = [
os.path.join(model_path, C.PARAMS_NAME % point[-1]) for point in top_n
]
# Report
logger.info("Found: " + ", ".join(str(point) for point in top_n))
return params_paths
def _strategy_best(points, size, maximize):
top_n = sorted(points, reverse=maximize)[:size]
return top_n
def _strategy_last(points, size, maximize):
best = max if maximize else min
after_top = points.index(best(points)) + 1
top_n = points[max(0, after_top - size):after_top]
return top_n
def _strategy_lifespan(points, size, maximize):
top_n = []
cur_best = points[0]
cur_lifespan = 0
for point in points[1:]:
better = point > cur_best if maximize else point < cur_best
if better:
top_n.append(list(itertools.chain([cur_lifespan], cur_best)))
cur_best = point
cur_lifespan = 0
else:
top_n.append(list(itertools.chain([0], point)))
cur_lifespan += 1
top_n.append(list(itertools.chain([cur_lifespan], cur_best)))
# Sort by lifespan, then by val
top_n = sorted(
top_n,
key=lambda point: [point[0], point[1] if maximize else -point[1]],
reverse=True)[:size]
return top_n
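# Illustrative example of the selection strategies (hypothetical points, given as
# (metric value, checkpoint) pairs with maximize=False, i.e. lower is better, size=2):
#
#     points = [(10.0, 1), (9.0, 2), (9.5, 3), (8.0, 4)]
#     _strategy_best(points, 2, False)  # -> [(8.0, 4), (9.0, 2)]
#     _strategy_last(points, 2, False)  # -> [(9.5, 3), (8.0, 4)]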
def main():
"""
Commandline interface to average parameters.
"""
log_sockeye_version(logger)
params = argparse.ArgumentParser(description="Averages parameters from multiple models.")
arguments.add_average_args(params)
args = params.parse_args()
if len(args.inputs) > 1:
avg_params = average(args.inputs)
else:
param_paths = find_checkpoints(model_path=args.inputs[0],
size=args.n,
strategy=args.strategy,
metric=args.metric)
avg_params = average(param_paths)
mx.nd.save(args.output, avg_params)
logger.info("Averaged parameters written to '%s'", args.output)
if __name__ == "__main__":
main()
| [
"Iterable[str]",
"str"
] | [
1165,
2464
] | [
1178,
2467
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/callback.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Provides functionality to track metrics on training and validation data during training and controls
early-stopping.
"""
import logging
import multiprocessing as mp
import os
import pickle
import shutil
import time
from typing import List, Optional, Tuple, Dict
import mxnet as mx
from . import checkpoint_decoder
from . import constants as C
from . import utils
logger = logging.getLogger(__name__)
class TrainingMonitor(object):
"""
TrainingMonitor logs metrics on training and validation data, submits decoding processes to compute BLEU scores,
and writes metrics to the model output folder.
    It further controls early stopping: based on the specified metric to optimize, it decides whether the model
    has improved w.r.t. the last checkpoint.
    Technically, TrainingMonitor exposes a couple of callback functions that are called in the fit() method of
    TrainingModel.
:param batch_size: Batch size during training.
:param output_folder: Folder where model files are written to.
:param optimized_metric: Name of the metric that controls early stopping.
:param use_tensorboard: Whether to use Tensorboard logging of metrics.
:param cp_decoder: Optional CheckpointDecoder instance for BLEU monitoring.
"""
def __init__(self,
batch_size: int,
output_folder: str,
optimized_metric: str = C.PERPLEXITY,
use_tensorboard: bool = False,
cp_decoder: Optional[checkpoint_decoder.CheckpointDecoder] = None) -> None:
self.output_folder = output_folder
# stores dicts of metric names & values for each checkpoint
self.metrics = [] # type: List[Dict]
self.metrics_filename = os.path.join(output_folder, C.METRICS_NAME)
self.best_checkpoint = 0
self.start_tic = time.time()
self.summary_writer = None
if use_tensorboard:
import tensorboard # pylint: disable=import-error
log_dir = os.path.join(output_folder, C.TENSORBOARD_NAME)
if os.path.exists(log_dir):
logger.info("Deleting existing tensorboard log dir %s", log_dir)
shutil.rmtree(log_dir)
logger.info("Logging training events for Tensorboard at '%s'", log_dir)
self.summary_writer = tensorboard.FileWriter(log_dir)
self.cp_decoder = cp_decoder
self.ctx = mp.get_context('spawn') # type: ignore
self.decoder_metric_queue = self.ctx.Queue()
self.decoder_process = None # type: Optional[mp.Process]
utils.check_condition(optimized_metric in C.METRICS, "Unsupported metric: %s" % optimized_metric)
if optimized_metric == C.BLEU:
utils.check_condition(self.cp_decoder is not None, "%s requires CheckpointDecoder" % C.BLEU)
self.optimized_metric = optimized_metric
self.validation_best = C.METRIC_WORST[self.optimized_metric]
logger.info("Early stopping by optimizing '%s'", self.optimized_metric)
self.tic = 0
def get_best_checkpoint(self) -> int:
"""
Returns current best checkpoint.
"""
return self.best_checkpoint
def get_best_validation_score(self) -> float:
"""
Returns current best validation result for optimized metric.
"""
return self.validation_best
def _is_better(self, value: float) -> bool:
if C.METRIC_MAXIMIZE[self.optimized_metric]:
return value > self.validation_best
else:
return value < self.validation_best
def checkpoint_callback(self,
checkpoint: int,
train_metric: Dict[str, float],
memory_data: Optional[Dict[int, Tuple[int, int]]] = None):
"""
Callback function when a model checkpoint is performed.
If TrainingMonitor uses Tensorboard, training metrics are written to the Tensorboard event file.
:param checkpoint: Current checkpoint.
:param train_metric: A dictionary of training metrics.
:param memory_data: Optional data about memory usage.
"""
metrics = {}
for name, value in train_metric.items():
metrics[name + "-train"] = value
if memory_data is not None:
utils.log_gpu_memory_usage(memory_data)
used_gpu_mem = sum(v[0] for v in memory_data.values())
metrics['used-gpu-memory'] = used_gpu_mem # total gpu memory used in MB
self.metrics.append(metrics)
if self.summary_writer:
write_tensorboard(self.summary_writer, metrics, checkpoint)
def eval_end_callback(self, checkpoint: int, val_metric: mx.metric.EvalMetric) -> Tuple[bool, int]:
"""
Callback function when processing of held-out validation data is complete.
Counts time elapsed since the start of training.
If TrainingMonitor uses Tensorboard, validation metrics are written to the Tensorboard event file.
        If BLEU is monitored with subprocesses, this function collects results from finished decoder processes
and starts a new one for the current checkpoint.
:param checkpoint: Current checkpoint.
:param val_metric: Evaluation metric for validation data.
        :return: Tuple of a boolean indicating if the model improved on validation data according to the
                 optimized metric, and the (updated) best checkpoint.
"""
metrics = {}
for name, value in val_metric.get_name_value():
metrics[name + "-val"] = value
metrics['time-elapsed'] = time.time() - self.start_tic
if self.summary_writer:
write_tensorboard(self.summary_writer, metrics, checkpoint)
if self.cp_decoder:
self._wait_for_decoder_to_finish()
self._empty_decoder_metric_queue()
self._start_decode_process(checkpoint)
self.metrics[-1].update(metrics)
utils.write_metrics_file(self.metrics, self.metrics_filename)
has_improved, best_checkpoint = self._find_best_checkpoint()
return has_improved, best_checkpoint
def _find_best_checkpoint(self):
"""
Returns True if optimized_metric has improved since the last call of
this function, together with the best checkpoint
"""
has_improved = False
for checkpoint, metric_dict in enumerate(self.metrics, 1):
value = metric_dict.get(self.optimized_metric + "-val",
self.validation_best)
if self._is_better(value):
self.validation_best = value
self.best_checkpoint = checkpoint
has_improved = True
if has_improved:
logger.info("Validation-%s improved to %f.", self.optimized_metric,
self.validation_best)
else:
logger.info("Validation-%s has not improved, best so far: %f",
self.optimized_metric, self.validation_best)
return has_improved, self.best_checkpoint
def _start_decode_process(self, checkpoint):
assert self.decoder_process is None
output_name = os.path.join(self.output_folder, C.DECODE_OUT_NAME % checkpoint)
process = self.ctx.Process(
target=_decode_and_evaluate,
args=(self.cp_decoder,
checkpoint,
output_name,
self.decoder_metric_queue))
process.name = 'Decoder-%d' % checkpoint
logger.info("Starting process: %s", process.name)
process.start()
self.decoder_process = process
def _empty_decoder_metric_queue(self):
"""
Get metric results from decoder_process queue and optionally write to tensorboard logs
"""
while not self.decoder_metric_queue.empty():
decoded_checkpoint, decoder_metrics = self.decoder_metric_queue.get()
logger.info("Checkpoint [%d]: Decoder finished (%s)",
decoded_checkpoint, decoder_metrics)
self.metrics[decoded_checkpoint - 1].update(decoder_metrics)
if self.summary_writer:
write_tensorboard(self.summary_writer, decoder_metrics,
decoded_checkpoint)
def _wait_for_decoder_to_finish(self, check_interval: int = 5):
if self.decoder_process is None:
return
if not self.decoder_process.is_alive():
self.decoder_process = None
return
# Wait for the decoder to finish
wait_start = time.time()
while self.decoder_process.is_alive():
time.sleep(check_interval)
self.decoder_process = None
wait_time = int(time.time() - wait_start)
logger.warning("Had to wait %d seconds for the checkpoint decoder to finish. Consider increasing the "
"checkpoint frequency (updates between checkpoints, see %s) or reducing the size of the "
"validation samples that are decoded (see %s)." % (wait_time,
C.TRAIN_ARGS_CHECKPOINT_FREQUENCY,
C.TRAIN_ARGS_MONITOR_BLEU))
def stop_fit_callback(self):
"""
Callback function when fitting is stopped. Collects results from decoder processes and writes their results.
"""
if self.decoder_process is not None and self.decoder_process.is_alive():
logger.info("Waiting for %s process to finish." % self.decoder_process.name)
self.decoder_process.join()
self._empty_decoder_metric_queue()
utils.write_metrics_file(self.metrics, self.metrics_filename)
def save_state(self, fname: str):
"""
Saves the state: current metrics and best checkpoint.
:param fname: Name of the file to save the state to.
"""
with open(fname, "wb") as fp:
pickle.dump(self.metrics, fp)
pickle.dump(self.best_checkpoint, fp)
def load_state(self, fname: str):
"""
Loads the state: current metrics and best checkpoint.
:param fname: Name of the file to load the state from.
"""
with open(fname, "rb") as fp:
self.metrics = pickle.load(fp)
self.best_checkpoint = pickle.load(fp)
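# A minimal usage sketch (hypothetical values; the real call sites live in
# TrainingModel.fit, and val_metric is assumed to be an mx.metric.EvalMetric):
#
#     monitor = TrainingMonitor(batch_size=64, output_folder="model_dir")
#     monitor.checkpoint_callback(checkpoint=1, train_metric={'perplexity': 12.3})
#     has_improved, best = monitor.eval_end_callback(checkpoint=1, val_metric=val_metric)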
def _decode_and_evaluate(checkpoint_decoder: checkpoint_decoder.CheckpointDecoder,
checkpoint: int,
output_name: str,
queue: mp.Queue):
"""
Decodes and evaluates using given checkpoint_decoder and puts result in the queue,
indexed by the checkpoint.
"""
metrics = checkpoint_decoder.decode_and_evaluate(checkpoint, output_name)
queue.put((checkpoint, metrics))
def write_tensorboard(summary_writer,
metrics: Dict[str, float],
checkpoint: int):
"""
Writes a Tensorboard scalar event to the given SummaryWriter.
:param summary_writer: A Tensorboard SummaryWriter instance.
:param metrics: Mapping of metric names to their values.
:param checkpoint: Current checkpoint.
"""
from tensorboard.summary import scalar # pylint: disable=import-error
for name, value in metrics.items():
summary_writer.add_summary(
scalar(
name=name, scalar=value), global_step=checkpoint)
| [
"int",
"str",
"float",
"int",
"Dict[str, float]",
"int",
"mx.metric.EvalMetric",
"str",
"str",
"checkpoint_decoder.CheckpointDecoder",
"int",
"str",
"mp.Queue",
"Dict[str, float]",
"int"
] | [
1885,
1922,
3973,
4227,
4274,
5287,
5304,
10470,
10787,
11122,
11197,
11240,
11277,
11608,
11660
] | [
1888,
1925,
3978,
4230,
4290,
5290,
5324,
10473,
10790,
11158,
11200,
11243,
11285,
11624,
11663
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/checkpoint_decoder.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implements a thin wrapper around Translator to compute BLEU scores on (a sample of) validation data during training.
"""
import logging
import os
import random
import time
from typing import Dict, Optional
import mxnet as mx
import sockeye.output_handler
from . import evaluate
from . import chrf
from . import constants as C
from . import data_io
from . import inference
from . import utils
logger = logging.getLogger(__name__)
class CheckpointDecoder:
"""
Decodes a (random sample of a) dataset using parameters at given checkpoint and computes BLEU against references.
:param context: MXNet context to bind the model to.
:param inputs: Path to file containing input sentences.
:param references: Path to file containing references.
:param model: Model to load.
:param max_input_len: Maximum input length.
:param beam_size: Size of the beam.
:param bucket_width_source: Source bucket width.
:param length_penalty_alpha: Alpha factor for the length penalty
:param length_penalty_beta: Beta factor for the length penalty
:param softmax_temperature: Optional parameter to control steepness of softmax distribution.
:param max_output_length_num_stds: Number of standard deviations as safety margin for maximum output length.
:param ensemble_mode: Ensemble mode: linear or log_linear combination.
:param sample_size: Maximum number of sentences to sample and decode. If <=0, all sentences are used.
:param random_seed: Random seed for sampling. Default: 42.
"""
def __init__(self,
context: mx.context.Context,
inputs: str,
references: str,
model: str,
max_input_len: Optional[int] = None,
beam_size: int = C.DEFAULT_BEAM_SIZE,
bucket_width_source: int = 10,
length_penalty_alpha: float = 1.0,
length_penalty_beta: float = 0.0,
softmax_temperature: Optional[float] = None,
max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
ensemble_mode: str = 'linear',
sample_size: int = -1,
random_seed: int = 42) -> None:
self.context = context
self.max_input_len = max_input_len
self.max_output_length_num_stds = max_output_length_num_stds
self.ensemble_mode = ensemble_mode
self.beam_size = beam_size
self.batch_size = 16
self.bucket_width_source = bucket_width_source
self.length_penalty_alpha = length_penalty_alpha
self.length_penalty_beta = length_penalty_beta
self.softmax_temperature = softmax_temperature
self.model = model
with data_io.smart_open(inputs) as inputs_fin, data_io.smart_open(references) as references_fin:
input_sentences = inputs_fin.readlines()
target_sentences = references_fin.readlines()
        utils.check_condition(len(input_sentences) == len(target_sentences), "Number of sentence pairs does not match")
if sample_size <= 0:
sample_size = len(input_sentences)
if sample_size < len(input_sentences):
# custom random number generator to guarantee the same samples across runs in order to be able to
# compare metrics across independent runs
random_gen = random.Random(random_seed)
self.input_sentences, self.target_sentences = zip(
*random_gen.sample(list(zip(input_sentences, target_sentences)),
sample_size))
else:
self.input_sentences, self.target_sentences = input_sentences, target_sentences
logger.info("Created CheckpointDecoder(max_input_len=%d, beam_size=%d, model=%s, num_sentences=%d)",
max_input_len if max_input_len is not None else -1,
beam_size, model, len(self.input_sentences))
with data_io.smart_open(os.path.join(self.model, C.DECODE_REF_NAME), 'w') as trg_out, \
data_io.smart_open(os.path.join(self.model, C.DECODE_IN_NAME), 'w') as src_out:
[trg_out.write(s) for s in self.target_sentences]
[src_out.write(s) for s in self.input_sentences]
def decode_and_evaluate(self,
checkpoint: Optional[int] = None,
output_name: str = os.devnull) -> Dict[str, float]:
"""
Decodes data set and evaluates given a checkpoint.
:param checkpoint: Checkpoint to load parameters from.
:param output_name: Filename to write translations to. Defaults to /dev/null.
:return: Mapping of metric names to scores.
"""
models, vocab_source, vocab_target = inference.load_models(self.context,
self.max_input_len,
self.beam_size,
self.batch_size,
[self.model],
[checkpoint],
softmax_temperature=self.softmax_temperature,
max_output_length_num_stds=self.max_output_length_num_stds)
translator = inference.Translator(self.context,
self.ensemble_mode,
self.bucket_width_source,
inference.LengthPenalty(self.length_penalty_alpha, self.length_penalty_beta),
models,
vocab_source,
vocab_target)
trans_wall_time = 0.0
translations = []
with data_io.smart_open(output_name, 'w') as output:
handler = sockeye.output_handler.StringOutputHandler(output)
tic = time.time()
trans_inputs = [translator.make_input(i, line) for i, line in enumerate(self.input_sentences)]
trans_outputs = translator.translate(trans_inputs)
trans_wall_time = time.time() - tic
for trans_input, trans_output in zip(trans_inputs, trans_outputs):
handler.handle(trans_input, trans_output)
translations.append(trans_output.translation)
avg_time = trans_wall_time / len(self.input_sentences)
# TODO(fhieber): eventually add more metrics (METEOR etc.)
return {C.BLEU_VAL: evaluate.raw_corpus_bleu(hypotheses=translations,
references=self.target_sentences,
offset=0.01),
C.CHRF_VAL: chrf.corpus_chrf(hypotheses=translations,
references=self.target_sentences,
trim_whitespaces=True),
C.AVG_TIME: avg_time}
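# A minimal usage sketch (paths and sample size are hypothetical):
#
#     cpd = CheckpointDecoder(context=mx.cpu(),
#                             inputs="dev.sources",
#                             references="dev.targets",
#                             model="model_dir",
#                             sample_size=500)
#     metrics = cpd.decode_and_evaluate(checkpoint=10)
#
# The returned dict maps C.BLEU_VAL, C.CHRF_VAL and C.AVG_TIME to their scores,
# as assembled at the end of decode_and_evaluate above.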
| [
"mx.context.Context",
"str",
"str",
"str"
] | [
2206,
2251,
2285,
2314
] | [
2224,
2254,
2288,
2317
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/chrf.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Computes chrF scores as described in
'CHRF: character n-gram F-score for automatic MT evaluation' by Maja Popovic.
[http://www.statmt.org/wmt15/pdf/WMT49.pdf]
"""
import re
from collections import Counter
from typing import Iterable, Tuple
import numpy as np
ORDER = 6
BETA = 3.0
TRIM_WS = True
def extract_ngrams(s: str, n: int) -> Counter:
"""
Yields counts of character n-grams from string s of order n.
"""
return Counter([s[i:i + n] for i in range(len(s) - n + 1)])
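# For example, extract_ngrams("abab", 2) == Counter({"ab": 2, "ba": 1}).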
def delete_whitespace(text: str) -> str:
"""
Removes whitespaces from text.
"""
    return re.sub(r"\s+", "", text)
def get_sentence_statistics(hypothesis: str,
reference: str,
order: int = ORDER,
trim_whitespaces: bool = TRIM_WS) -> np.array:
hypothesis = delete_whitespace(hypothesis) if trim_whitespaces else hypothesis
reference = delete_whitespace(reference) if trim_whitespaces else reference
statistics = np.zeros((order * 3))
for i in range(order):
n = i + 1
hypothesis_ngrams = extract_ngrams(hypothesis, n)
reference_ngrams = extract_ngrams(reference, n)
common_ngrams = hypothesis_ngrams & reference_ngrams
statistics[3 * i + 0] = sum(hypothesis_ngrams.values())
statistics[3 * i + 1] = sum(reference_ngrams.values())
statistics[3 * i + 2] = sum(common_ngrams.values())
return statistics
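# The returned vector is laid out as [hyp_1, ref_1, common_1, ..., hyp_n, ref_n,
# common_n] for n-gram orders 1..order; _avg_precision_and_recall below relies on
# exactly this three-slots-per-order layout.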
def get_corpus_statistics(hypotheses: Iterable[str],
references: Iterable[str],
order: int = ORDER,
trim_whitespaces: bool = TRIM_WS) -> np.array:
corpus_statistics = np.zeros((order * 3))
for hypothesis, reference in zip(hypotheses, references):
statistics = get_sentence_statistics(hypothesis, reference, order=order, trim_whitespaces=trim_whitespaces)
corpus_statistics += statistics
return corpus_statistics
def _avg_precision_and_recall(statistics: np.array, order: int) -> Tuple[float, float]:
avg_precision = 0.0
avg_recall = 0.0
effective_order = 0
for i in range(order):
hypotheses_ngrams = statistics[3 * i + 0]
references_ngrams = statistics[3 * i + 1]
common_ngrams = statistics[3 * i + 2]
if hypotheses_ngrams > 0 and references_ngrams > 0:
avg_precision += common_ngrams / hypotheses_ngrams
avg_recall += common_ngrams / references_ngrams
effective_order += 1
if effective_order == 0:
return 0.0, 0.0
avg_precision /= effective_order
avg_recall /= effective_order
return avg_precision, avg_recall
def _chrf(avg_precision, avg_recall, beta: float = BETA) -> float:
if avg_precision + avg_recall == 0:
return 0.0
beta_square = beta ** 2
return (1 + beta_square) * (avg_precision * avg_recall) / ((beta_square * avg_precision) + avg_recall)
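# Worked example (hypothetical values): with avg_precision=0.5, avg_recall=0.25
# and beta=3.0, beta_square is 9.0 and
# chrF = (1 + 9) * (0.5 * 0.25) / ((9 * 0.5) + 0.25) = 1.25 / 4.75 ~= 0.263,
# showing how beta > 1 weights recall more heavily than precision.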
def corpus_chrf(hypotheses: Iterable[str],
references: Iterable[str],
order: int = ORDER,
trim_whitespaces: bool = TRIM_WS,
beta: float = BETA) -> float:
"""
Computes Chrf on a corpus.
:param hypotheses: Stream of hypotheses.
:param references: Stream of references
:param order: Maximum n-gram order.
:param trim_whitespaces: Whether to trim whitespaces from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
corpus_statistics = get_corpus_statistics(hypotheses, references, order=order, trim_whitespaces=trim_whitespaces)
avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics, order)
return _chrf(avg_precision, avg_recall, beta=beta)
def sentence_chrf(hypothesis: str,
reference: str,
order: int = ORDER,
trim_whitespaces: bool = TRIM_WS,
beta: float = BETA) -> float:
"""
Computes Chrf on a single sentence pair.
:param hypothesis: Hypothesis string.
:param reference: Reference string.
:param order: Maximum n-gram order.
:param trim_whitespaces: Whether to trim whitespaces from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
statistics = get_sentence_statistics(hypothesis, reference, order=order, trim_whitespaces=trim_whitespaces)
avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)
return _chrf(avg_precision, avg_recall, beta=beta)
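# A minimal usage sketch (example strings are hypothetical):
#
#     sentence_chrf("the cat sat on the mat", "the cat sat on a mat")
#     corpus_chrf(["hypothesis one", "hypothesis two"],
#                 ["reference one", "reference two"])
#
# Note that corpus_chrf pools n-gram statistics over all pairs before computing
# precision and recall, so it is not the mean of per-sentence chrF scores.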
| [
"str",
"int",
"str",
"str",
"str",
"Iterable[str]",
"Iterable[str]",
"np.array",
"int",
"Iterable[str]",
"Iterable[str]",
"str",
"str"
] | [
892,
900,
1092,
1233,
1277,
2076,
2129,
2600,
2617,
3558,
3601,
4426,
4460
] | [
895,
903,
1095,
1236,
1280,
2089,
2142,
2608,
2620,
3571,
3614,
4429,
4463
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/config.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import inspect
import yaml
class TaggedYamlObjectMetaclass(yaml.YAMLObjectMetaclass):
def __init__(cls, name, bases, kwds):
cls.yaml_tag = "!" + name
new_kwds = {}
new_kwds.update(kwds)
new_kwds['yaml_tag'] = "!" + name
super().__init__(name, bases, new_kwds)
class Config(yaml.YAMLObject, metaclass=TaggedYamlObjectMetaclass):
"""
Base configuration object that supports freezing of members and YAML (de-)serialization.
Actual Configuration should subclass this object.
"""
def __init__(self):
self.__add_frozen()
def __setattr__(self, key, value):
if hasattr(self, '_frozen') and getattr(self, '_frozen'):
raise AttributeError("Cannot set '%s' in frozen config" % key)
if value == self:
raise AttributeError("Cannot set self as attribute")
object.__setattr__(self, key, value)
def __setstate__(self, state):
"""Pickle protocol implementation."""
# We first take the serialized state:
self.__dict__.update(state)
# Then we take the constructors default values for missing arguments in order to stay backwards compatible
# This way we can add parameters to Config objects and still load old models.
init_signature = inspect.signature(self.__init__)
for param_name, param in init_signature.parameters.items():
if param.default is not param.empty:
if not hasattr(self, param_name):
object.__setattr__(self, param_name, param.default)
def freeze(self):
"""
Freezes this Config object, disallowing modification or addition of any parameters.
"""
if getattr(self, '_frozen'):
return
object.__setattr__(self, "_frozen", True)
for k, v in self.__dict__.items():
if isinstance(v, Config) and k != "self":
v.freeze() # pylint: disable= no-member
def __repr__(self):
return "Config[%s]" % ", ".join("%s=%s" % (str(k), str(v)) for k, v in sorted(self.__dict__.items()))
def __eq__(self, other):
if type(other) is not type(self):
return False
for k, v in self.__dict__.items():
if k != "self":
if k not in other.__dict__:
return False
if self.__dict__[k] != other.__dict__[k]:
return False
return True
def __del_frozen(self):
"""
Removes _frozen attribute from this instance and all its child configurations.
"""
self.__delattr__('_frozen')
for attr, val in self.__dict__.items():
if isinstance(val, Config) and hasattr(val, '_frozen'):
val.__del_frozen() # pylint: disable= no-member
def __add_frozen(self):
"""
Adds _frozen attribute to this instance and all its child configurations.
"""
setattr(self, "_frozen", False)
for attr, val in self.__dict__.items():
if isinstance(val, Config):
val.__add_frozen() # pylint: disable= no-member
def save(self, fname: str):
"""
Saves this Config (without the frozen state) to a file called fname.
:param fname: Name of file to store this Config in.
"""
obj = copy.deepcopy(self)
obj.__del_frozen()
with open(fname, 'w') as out:
yaml.dump(obj, out, default_flow_style=False)
@staticmethod
def load(fname: str) -> 'Config':
"""
Returns a Config object loaded from a file. The loaded object is not frozen.
:param fname: Name of file to load the Config from.
:return: Configuration.
"""
with open(fname) as inp:
obj = yaml.load(inp, Loader=yaml.Loader)  # explicit Loader: required by newer PyYAML, needed to construct tagged Config objects
obj.__add_frozen()
return obj
def copy(self, **kwargs):
"""
Create a copy of the config object, optionally modifying some of the attributes.
For example `nn_config.copy(num_hidden=512)` will create a copy of `nn_config` where the attribute `num_hidden`
will be set to the new value of num_hidden.
:param kwargs:
:return: A deep copy of the config object.
"""
copy_obj = copy.deepcopy(self)
for name, value in kwargs.items():
object.__setattr__(copy_obj, name, value)
return copy_obj
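# A minimal usage sketch (illustrative only: ModelConfig and its field are
# hypothetical, while the Config API above is real):
#
#   class ModelConfig(Config):
#       def __init__(self, num_hidden: int = 512) -> None:
#           super().__init__()
#           self.num_hidden = num_hidden
#
#   cfg = ModelConfig(num_hidden=1024)
#   cfg.freeze()                         # cfg.num_hidden = 2048 would now raise
#   bigger = cfg.copy(num_hidden=2048)   # deep copy with one attribute overridden
#   cfg.save("config.yaml")              # YAML round-trip via save()/load()
#   restored = Config.load("config.yaml")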
| [
"str",
"str"
] | [
3751,
4115
] | [
3754,
4118
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/constants.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines various constants used throughout the project.
"""
import mxnet as mx
import numpy as np
BOS_SYMBOL = "<s>"
EOS_SYMBOL = "</s>"
UNK_SYMBOL = "<unk>"
PAD_SYMBOL = "<pad>"
PAD_ID = 0
TOKEN_SEPARATOR = " "
VOCAB_SYMBOLS = [PAD_SYMBOL, UNK_SYMBOL, BOS_SYMBOL, EOS_SYMBOL]
ARG_SEPARATOR = ":"
ENCODER_PREFIX = "encoder_"
DECODER_PREFIX = "decoder_"
EMBEDDING_PREFIX = "embed_"
ATTENTION_PREFIX = "att_"
COVERAGE_PREFIX = "cov_"
BIDIRECTIONALRNN_PREFIX = ENCODER_PREFIX + "birnn_"
STACKEDRNN_PREFIX = ENCODER_PREFIX + "rnn_"
FORWARD_PREFIX = "forward_"
REVERSE_PREFIX = "reverse_"
TRANSFORMER_ENCODER_PREFIX = ENCODER_PREFIX + "transformer_"
CNN_ENCODER_PREFIX = ENCODER_PREFIX + "cnn_"
CHAR_SEQ_ENCODER_PREFIX = ENCODER_PREFIX + "char_"
DEFAULT_OUTPUT_LAYER_PREFIX = "target_output_"
# embedding prefixes
SOURCE_EMBEDDING_PREFIX = "source_embed_"
SOURCE_POSITIONAL_EMBEDDING_PREFIX = "source_pos_embed_"
TARGET_EMBEDDING_PREFIX = "target_embed_"
TARGET_POSITIONAL_EMBEDDING_PREFIX = "target_pos_embed_"
SHARED_EMBEDDING_PREFIX = "source_target_embed_"
# encoder names (arguments)
RNN_NAME = "rnn"
RNN_WITH_CONV_EMBED_NAME = "rnn-with-conv-embed"
TRANSFORMER_TYPE = "transformer"
CONVOLUTION_TYPE = "cnn"
TRANSFORMER_WITH_CONV_EMBED_TYPE = "transformer-with-conv-embed"
# available encoders
ENCODERS = [RNN_NAME, RNN_WITH_CONV_EMBED_NAME, TRANSFORMER_TYPE, TRANSFORMER_WITH_CONV_EMBED_TYPE, CONVOLUTION_TYPE]
# available decoders
DECODERS = [RNN_NAME, TRANSFORMER_TYPE, CONVOLUTION_TYPE]
# rnn types
LSTM_TYPE = 'lstm'
LNLSTM_TYPE = 'lnlstm'
LNGLSTM_TYPE = 'lnglstm'
GRU_TYPE = 'gru'
LNGRU_TYPE = 'lngru'
LNGGRU_TYPE = 'lnggru'
CELL_TYPES = [LSTM_TYPE, LNLSTM_TYPE, LNGLSTM_TYPE, GRU_TYPE, LNGRU_TYPE, LNGGRU_TYPE]
# positional embeddings
NO_POSITIONAL_EMBEDDING = "none"
FIXED_POSITIONAL_EMBEDDING = "fixed"
LEARNED_POSITIONAL_EMBEDDING = "learned"
POSITIONAL_EMBEDDING_TYPES = [NO_POSITIONAL_EMBEDDING, FIXED_POSITIONAL_EMBEDDING, LEARNED_POSITIONAL_EMBEDDING]
DEFAULT_INIT_PATTERN = ".*"
# init types
INIT_XAVIER = 'xavier'
INIT_UNIFORM = 'uniform'
INIT_TYPES = [INIT_XAVIER, INIT_UNIFORM]
RAND_TYPE_UNIFORM = 'uniform'
RAND_TYPE_GAUSSIAN = 'gaussian'
# Embedding init types
EMBED_INIT_PATTERN = '(%s|%s|%s)weight' % (SOURCE_EMBEDDING_PREFIX, TARGET_EMBEDDING_PREFIX, SHARED_EMBEDDING_PREFIX)
EMBED_INIT_DEFAULT = 'default'
EMBED_INIT_NORMAL = 'normal'
EMBED_INIT_TYPES = [EMBED_INIT_DEFAULT, EMBED_INIT_NORMAL]
# RNN init types
RNN_INIT_PATTERN = ".*h2h.*"
RNN_INIT_ORTHOGONAL = 'orthogonal'
RNN_INIT_ORTHOGONAL_STACKED = 'orthogonal_stacked'
# use the default initializer used also for all other weights
RNN_INIT_DEFAULT = 'default'
# RNN decoder state init types
RNN_DEC_INIT_ZERO = "zero"
RNN_DEC_INIT_LAST = "last"
RNN_DEC_INIT_AVG = "avg"
RNN_DEC_INIT_CHOICES = [RNN_DEC_INIT_ZERO, RNN_DEC_INIT_LAST, RNN_DEC_INIT_AVG]
# attention types
ATT_BILINEAR = 'bilinear'
ATT_DOT = 'dot'
ATT_DOT_SCALED = 'dot_scaled'
ATT_MH_DOT = 'mhdot'
ATT_FIXED = 'fixed'
ATT_LOC = 'location'
ATT_MLP = 'mlp'
ATT_COV = "coverage"
ATT_TYPES = [ATT_BILINEAR, ATT_DOT, ATT_DOT_SCALED, ATT_MH_DOT, ATT_FIXED, ATT_LOC, ATT_MLP, ATT_COV]
# weight tying components
WEIGHT_TYING_SRC = 'src'
WEIGHT_TYING_TRG = 'trg'
WEIGHT_TYING_SOFTMAX = 'softmax'
# weight tying types (combinations of above components):
WEIGHT_TYING_TRG_SOFTMAX = 'trg_softmax'
WEIGHT_TYING_SRC_TRG = 'src_trg'
WEIGHT_TYING_SRC_TRG_SOFTMAX = 'src_trg_softmax'
# default decoder prefixes
RNN_DECODER_PREFIX = DECODER_PREFIX + "rnn_"
TRANSFORMER_DECODER_PREFIX = DECODER_PREFIX + "transformer_"
CNN_DECODER_PREFIX = DECODER_PREFIX + "cnn_"
# Activation types
# Gaussian Error Linear Unit (https://arxiv.org/pdf/1606.08415.pdf)
GELU = "gelu"
# Gated Linear Unit (https://arxiv.org/pdf/1705.03122.pdf)
GLU = "glu"
RELU = "relu"
SIGMOID = "sigmoid"
SOFT_RELU = "softrelu"
# Swish-1/SiLU (https://arxiv.org/pdf/1710.05941.pdf, https://arxiv.org/pdf/1702.03118.pdf)
SWISH1 = "swish1"
TANH = "tanh"
TRANSFORMER_ACTIVATION_TYPES = [GELU, RELU, SWISH1]
CNN_ACTIVATION_TYPES = [GLU, RELU, SIGMOID, SOFT_RELU, TANH]
# Convolutional block pad types:
CNN_PAD_LEFT = "left"
CNN_PAD_CENTERED = "centered"
# default I/O variable names
SOURCE_NAME = "source"
SOURCE_LENGTH_NAME = "source_length"
TARGET_NAME = "target"
TARGET_LABEL_NAME = "target_label"
LEXICON_NAME = "lexicon"
SOURCE_ENCODED_NAME = "encoded_source"
TARGET_PREVIOUS_NAME = "prev_target_word_id"
HIDDEN_PREVIOUS_NAME = "prev_hidden"
SOURCE_DYNAMIC_PREVIOUS_NAME = "prev_dynamic_source"
LOGIT_INPUTS_NAME = "logit_inputs"
LOGITS_NAME = "logits"
SOFTMAX_NAME = "softmax"
SOFTMAX_OUTPUT_NAME = SOFTMAX_NAME + "_output"
MEASURE_SPEED_EVERY = 50 # measure speed and metrics every X batches
# Monitor constants
STAT_FUNC_DEFAULT = "mx_default" # default MXNet monitor stat func: mx.nd.norm(x)/mx.nd.sqrt(x.size)
STAT_FUNC_MAX = 'max'
STAT_FUNC_MIN = 'min'
STAT_FUNC_MEAN = 'mean'
MONITOR_STAT_FUNCS = {STAT_FUNC_DEFAULT: None,
STAT_FUNC_MAX: lambda x: mx.nd.max(x),
STAT_FUNC_MEAN: lambda x: mx.nd.mean(x)}
# Inference constants
DEFAULT_BEAM_SIZE = 5
CHUNK_SIZE_NO_BATCHING = 1
CHUNK_SIZE_PER_BATCH_SEGMENT = 500
VERSION_NAME = "version"
CONFIG_NAME = "config"
LOG_NAME = "log"
JSON_SUFFIX = ".json"
VOCAB_SRC_NAME = "vocab.src"
VOCAB_TRG_NAME = "vocab.trg"
VOCAB_ENCODING = "utf-8"
PARAMS_PREFIX = "params."
PARAMS_NAME = PARAMS_PREFIX + "%05d"
PARAMS_BEST_NAME = "params.best"
DECODE_OUT_NAME = "decode.output.%05d"
DECODE_IN_NAME = "decode.source"
DECODE_REF_NAME = "decode.target"
SYMBOL_NAME = "symbol" + JSON_SUFFIX
METRICS_NAME = "metrics"
TENSORBOARD_NAME = "tensorboard"
# training resumption constants
TRAINING_STATE_DIRNAME = "training_state"
TRAINING_STATE_TEMP_DIRNAME = "tmp.training_state"
TRAINING_STATE_TEMP_DELETENAME = "delete.training_state"
OPT_STATES_LAST = "mx_optimizer_last.pkl"
OPT_STATES_BEST = "mx_optimizer_best.pkl"
OPT_STATES_INITIAL = "mx_optimizer_initial.pkl"
BUCKET_ITER_STATE_NAME = "bucket.pkl"
RNG_STATE_NAME = "rng.pkl"
MONITOR_STATE_NAME = "monitor.pkl"
TRAINING_STATE_NAME = "training.pkl"
SCHEDULER_STATE_NAME = "scheduler.pkl"
TRAINING_STATE_PARAMS_NAME = "params"
ARGS_STATE_NAME = "args.json"
# Arguments that may differ and still resume training
ARGS_MAY_DIFFER = ["overwrite_output", "use-tensorboard", "quiet",
"align_plot_prefix", "sure_align_threshold",
"keep_last_params"]
# Other argument constants
TRAINING_ARG_SOURCE = "--source"
TRAINING_ARG_TARGET = "--target"
TRAINING_ARG_PREPARED_DATA = "--prepared-data"
VOCAB_ARG_SHARED_VOCAB = "--shared-vocab"
INFERENCE_ARG_INPUT_LONG = "--input"
INFERENCE_ARG_INPUT_SHORT = "-i"
INFERENCE_ARG_OUTPUT_LONG = "--output"
INFERENCE_ARG_OUTPUT_SHORT = "-o"
TRAIN_ARGS_MONITOR_BLEU = "--decode-and-evaluate"
TRAIN_ARGS_CHECKPOINT_FREQUENCY = "--checkpoint-frequency"
# data layout strings
BATCH_MAJOR = "NTC"
TIME_MAJOR = "TNC"
BATCH_TYPE_SENTENCE = "sentence"
BATCH_TYPE_WORD = "word"
KVSTORE_DEVICE = "device"
KVSTORE_LOCAL = "local"
KVSTORE_SYNC = "dist_sync"
KVSTORE_DIST_DEVICE_SYNC = "dist_device_sync"
KVSTORE_DIST_ASYNC = "dist_async"
KVSTORE_NCCL = 'nccl'
KVSTORE_TYPES = [KVSTORE_DEVICE, KVSTORE_LOCAL, KVSTORE_SYNC,
KVSTORE_DIST_DEVICE_SYNC, KVSTORE_DIST_ASYNC,
KVSTORE_NCCL]
# Training constants
OPTIMIZER_ADAM = "adam"
OPTIMIZER_EVE = "eve"
OPTIMIZER_NADAM = "nadam"
OPTIMIZER_RMSPROP = "rmsprop"
OPTIMIZER_SGD = "sgd"
OPTIMIZER_NAG = "nag"
OPTIMIZER_ADAGRAD = "adagrad"
OPTIMIZER_ADADELTA = "adadelta"
OPTIMIZERS = [OPTIMIZER_ADAM, OPTIMIZER_EVE, OPTIMIZER_NADAM, OPTIMIZER_RMSPROP, OPTIMIZER_SGD, OPTIMIZER_NAG,
OPTIMIZER_ADAGRAD, OPTIMIZER_ADADELTA]
LR_SCHEDULER_FIXED_RATE_INV_SQRT_T = "fixed-rate-inv-sqrt-t"
LR_SCHEDULER_FIXED_RATE_INV_T = "fixed-rate-inv-t"
LR_SCHEDULER_FIXED_STEP = "fixed-step"
LR_SCHEDULER_PLATEAU_REDUCE = "plateau-reduce"
LR_SCHEDULERS = [LR_SCHEDULER_FIXED_RATE_INV_SQRT_T,
LR_SCHEDULER_FIXED_RATE_INV_T,
LR_SCHEDULER_FIXED_STEP,
LR_SCHEDULER_PLATEAU_REDUCE]
LR_DECAY_OPT_STATES_RESET_OFF = 'off'
LR_DECAY_OPT_STATES_RESET_INITIAL = 'initial'
LR_DECAY_OPT_STATES_RESET_BEST = 'best'
LR_DECAY_OPT_STATES_RESET_CHOICES = [LR_DECAY_OPT_STATES_RESET_OFF,
LR_DECAY_OPT_STATES_RESET_INITIAL,
LR_DECAY_OPT_STATES_RESET_BEST]
GRADIENT_CLIPPING_TYPE_ABS = 'abs'
GRADIENT_CLIPPING_TYPE_NORM = 'norm'
GRADIENT_CLIPPING_TYPE_NONE = 'none'
GRADIENT_CLIPPING_TYPES = [GRADIENT_CLIPPING_TYPE_ABS, GRADIENT_CLIPPING_TYPE_NORM, GRADIENT_CLIPPING_TYPE_NONE]
GRADIENT_COMPRESSION_NONE = None
GRADIENT_COMPRESSION_2BIT = "2bit"
GRADIENT_COMPRESSION_TYPES = [GRADIENT_CLIPPING_TYPE_NONE, GRADIENT_COMPRESSION_2BIT]
# output handler
OUTPUT_HANDLER_TRANSLATION = "translation"
OUTPUT_HANDLER_TRANSLATION_WITH_SCORE = "translation_with_score"
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENTS = "translation_with_alignments"
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENT_MATRIX = "translation_with_alignment_matrix"
OUTPUT_HANDLER_BENCHMARK = "benchmark"
OUTPUT_HANDLER_ALIGN_PLOT = "align_plot"
OUTPUT_HANDLER_ALIGN_TEXT = "align_text"
OUTPUT_HANDLERS = [OUTPUT_HANDLER_TRANSLATION,
OUTPUT_HANDLER_TRANSLATION_WITH_SCORE,
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENTS,
OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENT_MATRIX,
OUTPUT_HANDLER_BENCHMARK,
OUTPUT_HANDLER_ALIGN_PLOT,
OUTPUT_HANDLER_ALIGN_TEXT]
# metrics
ACCURACY = 'accuracy'
PERPLEXITY = 'perplexity'
BLEU = 'bleu'
CHRF = 'chrf'
BLEU_VAL = BLEU + "-val"
CHRF_VAL = CHRF + "-val"
AVG_TIME = "avg-sec-per-sent-val"
METRICS = [PERPLEXITY, ACCURACY, BLEU]
METRIC_MAXIMIZE = {ACCURACY: True, BLEU: True, PERPLEXITY: False}
METRIC_WORST = {ACCURACY: 0.0, BLEU: 0.0, PERPLEXITY: np.inf}
# loss
CROSS_ENTROPY = 'cross-entropy'
LOSS_NORM_BATCH = 'batch'
LOSS_NORM_VALID = "valid"
TARGET_MAX_LENGTH_FACTOR = 2
DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH = 2
LARGE_POSITIVE_VALUE = 99999999.
LARGE_NEGATIVE_VALUE = -LARGE_POSITIVE_VALUE
# data sharding
SHARD_NAME = "shard.%05d"
SHARD_SOURCE = SHARD_NAME + ".source"
SHARD_TARGET = SHARD_NAME + ".target"
DATA_CONFIG = "data.config"
PREPARED_DATA_VERSION_FILE = "data.version"
PREPARED_DATA_VERSION = 1
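# A small illustrative sketch (not part of the module) of how the template
# constants above are typically expanded:
#
#   import re
#   PARAMS_NAME % 13                                       # 'params.00013'
#   SHARD_SOURCE % 2                                       # 'shard.00002.source'
#   re.match(EMBED_INIT_PATTERN, "source_embed_weight")    # matches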
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/convolution.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Convolutional layers.
"""
from sockeye.config import Config
from . import utils
from . import constants as C
from . import layers
import mxnet as mx
class ConvolutionConfig(Config):
"""
Configuration for a stack of convolutions with Gated Linear Units between layers, similar to Gehring et al. 2017.
:param kernel_width: Kernel size for 1D convolution.
:param num_hidden: Size of hidden representation after convolution.
:param act_type: The type of activation to use.
"""
def __init__(self,
kernel_width: int,
num_hidden: int,
act_type: str = C.GLU,
weight_normalization: bool = False) -> None:
super().__init__()
self.kernel_width = kernel_width
self.num_hidden = num_hidden
utils.check_condition(act_type in C.CNN_ACTIVATION_TYPES, "Unknown activation %s." % act_type)
self.act_type = act_type
self.weight_normalization = weight_normalization
class ConvolutionBlock:
"""
A Convolution-GLU block consists of the following sublayers:
1. Dropout (optional)
2. A convolution (padded either both to the left and to the right or just to the left).
3. An activation: either a Gated Linear Unit or any other activation supported by MXNet.
:param config: Configuration for Convolution block.
:param pad_type: 'left' or 'centered'. 'left' only pads to the left (for decoding
the target sequence). 'centered' pads on both sides (for encoding the source sequence).
:param prefix: Name prefix for symbols of this block.
"""
def __init__(self,
config: ConvolutionConfig,
pad_type: str,
prefix: str) -> None:
self.prefix = prefix
self.pad_type = pad_type
self.config = config
self.conv_weight = mx.sym.Variable("%sconv_weight" % prefix,
shape=(
self._pre_activation_num_hidden(),
self.config.num_hidden,
self.config.kernel_width)
)
if self.config.weight_normalization:
self.weight_norm = layers.WeightNormalization(self.conv_weight,
self._pre_activation_num_hidden(),
ndim=3,
prefix="%sconv_" % prefix)
self.conv_weight = self.weight_norm()
else:
self.weight_norm = None
self.conv_bias = mx.sym.Variable("%sconv_bias" % prefix)
def _pre_activation_num_hidden(self):
if self.config.act_type == C.GLU:
return 2 * self.config.num_hidden
else:
return self.config.num_hidden
def __call__(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> mx.sym.Symbol:
"""
Run the convolutional block.
:param data: Input data. Shape: (batch_size, seq_len, num_hidden).
:param data_length: Vector with sequence lengths. Shape: (batch_size,).
:param seq_len: Maximum sequence length.
:return: Shape: (batch_size, seq_len, num_hidden).
"""
if self.pad_type == C.CNN_PAD_LEFT:
# we pad enough on both sides and later slice the extra padding from the right
padding = (self.config.kernel_width - 1,)
elif self.pad_type == C.CNN_PAD_CENTERED:
# we pad enough so that the output size is equal to the input size and we don't need to slice
utils.check_condition(self.config.kernel_width % 2 == 1,
"Only odd kernel widths supported, but got %d" % self.config.kernel_width)
padding = (int((self.config.kernel_width - 1) / 2),)
else:
raise ValueError("Unknown pad type %s" % self.pad_type)
num_hidden = self._pre_activation_num_hidden()
# Apply masking (so that we properly have zero padding for variable sequence length batches)
# Note: SequenceMask expects time-major data
# (seq_len, batch_size, num_hidden)
data = mx.sym.swapaxes(data, dim1=0, dim2=1)
data = mx.sym.SequenceMask(data=data, sequence_length=data_length, use_sequence_length=True, value=0)
# (batch_size, num_hidden, seq_len)
data = mx.sym.transpose(data, axes=(1, 2, 0))
data_conv = mx.sym.Convolution(data=data,
weight=self.conv_weight,
bias=self.conv_bias,
pad=padding,
kernel=(self.config.kernel_width,),
num_filter=num_hidden,
layout="NCW")
# (batch_size, 2 * num_hidden, seq_len)
if self.pad_type == C.CNN_PAD_LEFT:
data_conv = mx.sym.slice_axis(data=data_conv, axis=2, begin=0, end=seq_len)
return self._post_convolution(data_conv)
def step(self, data):
"""
Run convolution over a single position. The data must be exactly as wide as the convolution filters.
:param data: Shape: (batch_size, kernel_width, num_hidden).
:return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
"""
# As we only run convolution over a single window that is exactly the size of the convolutional filter
# we can use FullyConnected instead of Convolution for efficiency reasons. Additionally we do not need to
# perform any masking.
num_hidden = self._pre_activation_num_hidden()
# (batch_size, num_hidden, kernel_width)
data = mx.sym.swapaxes(data, dim1=1, dim2=2)
# (batch_size, num_hidden * kernel_width)
data = mx.sym.reshape(data, shape=(0, -3))
# (preact_num_hidden, num_hidden * kernel_width)
weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
data_conv = mx.sym.FullyConnected(data=data,
weight=weight,
bias=self.conv_bias,
num_hidden=num_hidden)
# (batch_size, num_hidden, 1)
data_conv = mx.sym.expand_dims(data_conv, axis=2)
return self._post_convolution(data_conv)
def _post_convolution(self, data_conv):
# data_conv: (batch_size, pre_activation_num_hidden, seq_len)
# TODO: add layer norm (can we do this without reshaping?!)
if self.config.act_type == C.GLU:
# GLU
# two times: (batch_size, num_hidden, seq_len)
# pylint: disable=unbalanced-tuple-unpacking
gate_a, gate_b = mx.sym.split(data_conv, num_outputs=2, axis=1)
# (batch_size, num_hidden, seq_len)
block_output = mx.sym.broadcast_mul(gate_a,
mx.sym.Activation(data=gate_b, act_type="sigmoid"))
else:
# (batch_size, num_hidden, seq_len)
block_output = mx.sym.Activation(data_conv, act_type=self.config.act_type)
# (batch_size, seq_len, num_hidden)
block_output = mx.sym.swapaxes(block_output, dim1=1, dim2=2)
return block_output
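# A minimal symbolic-graph sketch (illustrative; the variable names are made
# up, the classes above are real). Builds a centered Convolution-GLU block
# over a (batch_size, seq_len, num_hidden) input whose hidden size matches
# the config (8 here):
#
#   config = ConvolutionConfig(kernel_width=3, num_hidden=8, act_type=C.GLU)
#   block = ConvolutionBlock(config, pad_type=C.CNN_PAD_CENTERED, prefix="enc_cnn_")
#   data = mx.sym.Variable("source_encoded")     # (batch_size, seq_len, 8)
#   lengths = mx.sym.Variable("source_length")   # (batch_size,)
#   out = block(data, lengths, seq_len=30)       # (batch_size, 30, 8)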
| [
"int",
"int",
"ConvolutionConfig",
"str",
"str",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int"
] | [
1126,
1160,
2234,
2280,
2310,
3570,
3615,
3656
] | [
1129,
1163,
2251,
2283,
2313,
3583,
3628,
3659
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/coverage.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines the dynamic source encodings ('coverage' mechanisms) for encoder/decoder networks as used in Tu et al. (2016).
"""
import logging
from typing import Callable
import mxnet as mx
from . import config
from . import constants as C
from . import layers
from . import rnn
from . import utils
logger = logging.getLogger(__name__)
class CoverageConfig(config.Config):
"""
Coverage configuration.
:param type: Coverage name.
:param num_hidden: Number of hidden units for coverage networks.
:param layer_normalization: Apply layer normalization to coverage networks.
"""
def __init__(self,
type: str,
num_hidden: int,
layer_normalization: bool) -> None:
super().__init__()
self.type = type
self.num_hidden = num_hidden
self.layer_normalization = layer_normalization
def get_coverage(config: CoverageConfig) -> 'Coverage':
"""
Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage.
"""
if config.type == 'count':
utils.check_condition(config.num_hidden == 1, "Count coverage requires coverage_num_hidden==1")
if config.type == "gru":
return GRUCoverage(config.num_hidden, config.layer_normalization)
elif config.type in {"tanh", "sigmoid", "relu", "softrelu"}:
return ActivationCoverage(config.num_hidden, config.type, config.layer_normalization)
elif config.type == "count":
return CountCoverage()
else:
raise ValueError("Unknown coverage type %s" % config.type)
class Coverage:
"""
Generic coverage class. Similar to Attention classes, a coverage instance returns a callable, update_coverage(),
function when self.on() is called.
"""
def __init__(self, prefix=C.COVERAGE_PREFIX):
self.prefix = prefix
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for updating coverage vectors in a sequence decoder.
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Coverage callable.
"""
def update_coverage(prev_hidden: mx.sym.Symbol,
attention_prob_scores: mx.sym.Symbol,
prev_coverage: mx.sym.Symbol):
"""
:param prev_hidden: Previous hidden decoder state. Shape: (batch_size, decoder_num_hidden).
:param attention_prob_scores: Current attention scores. Shape: (batch_size, source_seq_len).
:param prev_coverage: Shape: (batch_size, source_seq_len, coverage_num_hidden).
:return: Updated coverage matrix. Shape: (batch_size, source_seq_len, coverage_num_hidden).
"""
raise NotImplementedError()
return update_coverage
class CountCoverage(Coverage):
"""
Coverage class that accumulates the attention weights for each source word.
"""
def __init__(self) -> None:
super().__init__()
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for updating coverage vectors in a sequence decoder.
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Coverage callable.
"""
def update_coverage(prev_hidden: mx.sym.Symbol,
attention_prob_scores: mx.sym.Symbol,
prev_coverage: mx.sym.Symbol):
"""
:param prev_hidden: Previous hidden decoder state. Shape: (batch_size, decoder_num_hidden).
:param attention_prob_scores: Current attention scores. Shape: (batch_size, source_seq_len).
:param prev_coverage: Shape: (batch_size, source_seq_len, coverage_num_hidden).
:return: Updated coverage matrix. Shape: (batch_size, source_seq_len, coverage_num_hidden).
"""
return prev_coverage + mx.sym.expand_dims(attention_prob_scores, axis=2)
return update_coverage
class GRUCoverage(Coverage):
"""
Implements a GRU whose state is the coverage vector.
TODO: This implementation is slightly inefficient since the source is fed in at every step.
It would be better to pre-compute the mapping of the source but this will likely mean opening up the GRU.
:param coverage_num_hidden: Number of hidden units for coverage vectors.
:param layer_normalization: If true, applies layer normalization for each gate in the GRU cell.
"""
def __init__(self, coverage_num_hidden: int, layer_normalization: bool) -> None:
super().__init__()
self.num_hidden = coverage_num_hidden
gru_prefix = "%sgru" % self.prefix
if layer_normalization:
self.gru = rnn.LayerNormPerGateGRUCell(self.num_hidden, prefix=gru_prefix)
else:
self.gru = mx.rnn.GRUCell(self.num_hidden, prefix=gru_prefix)
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for updating coverage vectors in a sequence decoder.
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Coverage callable.
"""
def update_coverage(prev_hidden: mx.sym.Symbol,
attention_prob_scores: mx.sym.Symbol,
prev_coverage: mx.sym.Symbol):
"""
:param prev_hidden: Previous hidden decoder state. Shape: (batch_size, decoder_num_hidden).
:param attention_prob_scores: Current attention scores. Shape: (batch_size, source_seq_len).
:param prev_coverage: Shape: (batch_size, source_seq_len, coverage_num_hidden).
:return: Updated coverage matrix. Shape: (batch_size, source_seq_len, coverage_num_hidden).
"""
# (batch_size, source_seq_len, decoder_num_hidden)
expanded_decoder = mx.sym.broadcast_axis(
data=mx.sym.expand_dims(data=prev_hidden, axis=1, name="%sexpand_decoder" % self.prefix),
axis=1, size=source_seq_len, name="%sbroadcast_decoder" % self.prefix)
expanded_att_scores = mx.sym.expand_dims(data=attention_prob_scores,
axis=2,
name="%sexpand_attention_scores" % self.prefix)
# (batch_size, source_seq_len, encoder_num_hidden + decoder_num_hidden + 1)
# +1 for the attention_prob_score for the source word
concat_input = mx.sym.concat(source, expanded_decoder, expanded_att_scores, dim=2,
name="%sconcat_inputs" % self.prefix)
# (batch_size * source_seq_len, encoder_num_hidden + decoder_num_hidden + 1)
flat_input = mx.sym.reshape(concat_input, shape=(-3, -1), name="%sflatten_inputs" % self.prefix)
# coverage: (batch_size * seq_len, coverage_num_hidden)
coverage = mx.sym.reshape(data=prev_coverage, shape=(-3, -1))
updated_coverage, _ = self.gru(flat_input, states=[coverage])
# coverage: (batch_size, seq_len, coverage_num_hidden)
coverage = mx.sym.reshape(updated_coverage, shape=(-1, source_seq_len, self.num_hidden))
return mask_coverage(coverage, source_length)
return update_coverage
class ActivationCoverage(Coverage):
"""
Implements a coverage mechanism whose updates are performed by a Perceptron with
configurable activation function.
:param coverage_num_hidden: Number of hidden units for coverage vectors.
:param activation: Type of activation for Perceptron.
:param layer_normalization: If true, applies layer normalization before non-linear activation.
"""
def __init__(self,
coverage_num_hidden: int,
activation: str,
layer_normalization: bool) -> None:
super().__init__()
self.activation = activation
self.num_hidden = coverage_num_hidden
# input (encoder) to hidden
self.cov_e2h_weight = mx.sym.Variable("%se2h_weight" % self.prefix)
# decoder to hidden
self.cov_dec2h_weight = mx.sym.Variable("%si2h_weight" % self.prefix)
# previous coverage to hidden
self.cov_prev2h_weight = mx.sym.Variable("%sprev2h_weight" % self.prefix)
# attention scores to hidden
self.cov_a2h_weight = mx.sym.Variable("%sa2h_weight" % self.prefix)
# optional layer normalization
self.layer_norm = None
if layer_normalization and self.num_hidden != 1:  # layer norm is meaningless for a scalar coverage vector
self.layer_norm = layers.LayerNormalization(self.num_hidden,
prefix="%snorm" % self.prefix)
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for updating coverage vectors in a sequence decoder.
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Coverage callable.
"""
# (batch_size, seq_len, coverage_hidden_num)
source_hidden = mx.sym.FullyConnected(data=source,
weight=self.cov_e2h_weight,
no_bias=True,
num_hidden=self.num_hidden,
flatten=False,
name="%ssource_hidden_fc" % self.prefix)
def update_coverage(prev_hidden: mx.sym.Symbol,
attention_prob_scores: mx.sym.Symbol,
prev_coverage: mx.sym.Symbol):
"""
:param prev_hidden: Previous hidden decoder state. Shape: (batch_size, decoder_num_hidden).
:param attention_prob_scores: Current attention scores. Shape: (batch_size, source_seq_len).
:param prev_coverage: Shape: (batch_size, source_seq_len, coverage_num_hidden).
:return: Updated coverage matrix. Shape: (batch_size, source_seq_len, coverage_num_hidden).
"""
# (batch_size, seq_len, coverage_hidden_num)
coverage_hidden = mx.sym.FullyConnected(data=prev_coverage,
weight=self.cov_prev2h_weight,
no_bias=True,
num_hidden=self.num_hidden,
flatten=False,
name="%sprevious_hidden_fc" % self.prefix)
# (batch_size, source_seq_len, 1)
attention_prob_scores = mx.sym.expand_dims(attention_prob_scores, axis=2)
# (batch_size, source_seq_len, coverage_num_hidden)
attention_hidden = mx.sym.FullyConnected(data=attention_prob_scores,
weight=self.cov_a2h_weight,
no_bias=True,
num_hidden=self.num_hidden,
flatten=False,
name="%sattention_fc" % self.prefix)
# (batch_size, coverage_num_hidden)
prev_hidden = mx.sym.FullyConnected(data=prev_hidden, weight=self.cov_dec2h_weight, no_bias=True,
num_hidden=self.num_hidden, name="%sdecoder_hidden" % self.prefix)
# (batch_size, 1, coverage_num_hidden)
prev_hidden = mx.sym.expand_dims(data=prev_hidden, axis=1,
name="%sinput_decoder_hidden_expanded" % self.prefix)
# (batch_size, source_seq_len, coverage_num_hidden)
intermediate = mx.sym.broadcast_add(lhs=source_hidden, rhs=prev_hidden,
name="%ssource_plus_hidden" % self.prefix)
# (batch_size, source_seq_len, coverage_num_hidden)
updated_coverage = intermediate + attention_hidden + coverage_hidden
if self.layer_norm is not None:
updated_coverage = self.layer_norm.normalize(updated_coverage)
# (batch_size, seq_len, coverage_num_hidden)
coverage = mx.sym.Activation(data=updated_coverage,
act_type=self.activation,
name="%sactivation" % self.prefix)
return mask_coverage(coverage, source_length)
return update_coverage
def mask_coverage(coverage: mx.sym.Symbol, source_length: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Masks all coverage scores that are outside the actual sequence.
:param coverage: Input coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
:param source_length: Source length. Shape: (batch_size,).
:return: Masked coverage vector. Shape: (batch_size, seq_len, coverage_num_hidden).
"""
coverage = mx.sym.SwapAxis(data=coverage, dim1=0, dim2=1)
coverage = mx.sym.SequenceMask(data=coverage, use_sequence_length=True, sequence_length=source_length)
coverage = mx.sym.SwapAxis(data=coverage, dim1=0, dim2=1)
return coverage
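# A minimal usage sketch (illustrative; the symbols passed in are made up,
# the API above is real). A coverage vector is updated once per decoder step:
#
#   cov_config = CoverageConfig(type="tanh", num_hidden=4, layer_normalization=False)
#   coverage = get_coverage(cov_config)
#   update_coverage = coverage.on(source, source_length, source_seq_len=50)
#   # inside each decoder step:
#   new_coverage = update_coverage(prev_hidden, attention_prob_scores, prev_coverage)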
| [
"str",
"int",
"bool",
"CoverageConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"bool",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"str",
"bool",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol"
] | [
1215,
1249,
1292,
1478,
2470,
2500,
2531,
2933,
2999,
3057,
3798,
3828,
3859,
4261,
4327,
4385,
5490,
5516,
5879,
5909,
5940,
6342,
6408,
6466,
8940,
8974,
9017,
9943,
9973,
10004,
10875,
10941,
10999,
14018,
14048
] | [
1218,
1252,
1296,
1492,
2483,
2513,
2534,
2946,
3012,
3070,
3811,
3841,
3862,
4274,
4340,
4398,
5493,
5520,
5892,
5922,
5943,
6355,
6421,
6479,
8943,
8977,
9021,
9956,
9986,
10007,
10888,
10954,
11012,
14031,
14061
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/data_io.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implements data iterators and I/O related functions for sequence-to-sequence models.
"""
import bisect
import logging
import os
import pickle
import random
from abc import ABC, abstractmethod
from collections import OrderedDict
from contextlib import ExitStack
from typing import Any, cast, Dict, Iterator, Iterable, List, Optional, Sized, Tuple
import math
import mxnet as mx
import numpy as np
from . import config
from . import constants as C
from . import vocab
from .utils import check_condition, smart_open, get_tokens, OnlineMeanAndVariance
logger = logging.getLogger(__name__)
def define_buckets(max_seq_len: int, step=10) -> List[int]:
"""
Returns a list of integers defining bucket boundaries.
Bucket boundaries are created according to the following policy:
We generate buckets with a step size of step until the final bucket fits max_seq_len.
We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step).
:param max_seq_len: Maximum bucket size.
:param step: Distance between buckets.
:return: List of bucket sizes.
"""
buckets = [bucket_len for bucket_len in range(step, max_seq_len + step, step)]
buckets[-1] = max_seq_len
return buckets
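# A worked example (follows directly from the policy above, not part of the
# original module):
#   define_buckets(24, step=10)  ->  [10, 20, 24]
#   define_buckets(30, step=10)  ->  [10, 20, 30]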
def define_parallel_buckets(max_seq_len_source: int,
max_seq_len_target: int,
bucket_width: int = 10,
length_ratio: float = 1.0) -> List[Tuple[int, int]]:
"""
Returns (source, target) buckets up to (max_seq_len_source, max_seq_len_target). The longer side of the data uses
steps of bucket_width while the shorter side uses steps scaled down by the average target/source length ratio. If
one side reaches its max_seq_len before the other, width of extra buckets on that side is fixed to that max_seq_len.
:param max_seq_len_source: Maximum source bucket size.
:param max_seq_len_target: Maximum target bucket size.
:param bucket_width: Width of buckets on longer side.
:param length_ratio: Length ratio of data (target/source).
"""
source_step_size = bucket_width
target_step_size = bucket_width
if length_ratio >= 1.0:
# target side is longer -> scale source
source_step_size = max(1, int(round(bucket_width / length_ratio)))
else:
# source side is longer -> scale target
target_step_size = max(1, int(round(bucket_width * length_ratio)))
source_buckets = define_buckets(max_seq_len_source, step=source_step_size)
target_buckets = define_buckets(max_seq_len_target, step=target_step_size)
# Extra buckets
if len(source_buckets) < len(target_buckets):
source_buckets += [source_buckets[-1] for _ in range(len(target_buckets) - len(source_buckets))]
elif len(target_buckets) < len(source_buckets):
target_buckets += [target_buckets[-1] for _ in range(len(source_buckets) - len(target_buckets))]
# minimum bucket size is 2 (as we add BOS symbol to target side)
source_buckets = [max(2, b) for b in source_buckets]
target_buckets = [max(2, b) for b in target_buckets]
parallel_buckets = list(zip(source_buckets, target_buckets))
# deduplicate for return
buckets = list(OrderedDict.fromkeys(parallel_buckets))
buckets.sort()
return buckets
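# A worked example (computed from the code above): with a target/source length
# ratio of 2.0 the source side uses half the bucket width:
#   define_parallel_buckets(24, 24, bucket_width=10, length_ratio=2.0)
#   ->  [(5, 10), (10, 20), (15, 24), (20, 24), (24, 24)]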
def get_bucket(seq_len: int, buckets: List[int]) -> Optional[int]:
"""
Given a sequence length and a list of buckets, returns the corresponding bucket.
:param seq_len: Sequence length.
:param buckets: List of buckets.
:return: Chosen bucket.
"""
bucket_idx = bisect.bisect_left(buckets, seq_len)
if bucket_idx == len(buckets):
return None
return buckets[bucket_idx]
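# A worked example: bisect_left picks the smallest bucket that fits.
#   get_bucket(13, [10, 20, 24])  ->  20
#   get_bucket(25, [10, 20, 24])  ->  None  (too long for every bucket)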
class BucketBatchSize:
"""
:param bucket: The corresponding bucket.
:param batch_size: Number of sequences in each batch.
:param average_words_per_batch: Approximate number of non-padding tokens in each batch.
"""
def __init__(self, bucket: Tuple[int, int], batch_size: int, average_words_per_batch: float) -> None:
self.bucket = bucket
self.batch_size = batch_size
self.average_words_per_batch = average_words_per_batch
def define_bucket_batch_sizes(buckets: List[Tuple[int, int]],
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
data_target_average_len: List[Optional[float]]) -> List[BucketBatchSize]:
"""
Computes bucket-specific batch sizes (sentences, average_words).
If sentence-based batching: number of sentences is the same for each batch, determines the
number of words. Hence all batch sizes for each bucket are equal.
If word-based batching: number of sentences for each batch is set to the multiple of number
of devices that produces the number of words closest to the target batch size. Average
target sentence length (non-padding symbols) is used for word number calculations.
:param buckets: Bucket list.
:param batch_size: Batch size.
:param batch_by_words: Batch by words.
:param batch_num_devices: Number of devices.
:param data_target_average_len: Optional average target length for each bucket.
"""
check_condition(len(data_target_average_len) == len(buckets),
"Must provide None or average target length for each bucket")
data_target_average_len = list(data_target_average_len)
bucket_batch_sizes = [] # type: List[BucketBatchSize]
largest_total_num_words = 0
for buck_idx, bucket in enumerate(buckets):
# Target/label length with padding
padded_seq_len = bucket[1]
# Average target/label length excluding padding
if data_target_average_len[buck_idx] is None:
data_target_average_len[buck_idx] = padded_seq_len
average_seq_len = data_target_average_len[buck_idx]
# Word-based: num words determines num sentences
# Sentence-based: num sentences determines num words
if batch_by_words:
check_condition(padded_seq_len <= batch_size, "Word batch size must cover sequence lengths for all"
" buckets: (%d > %d)" % (padded_seq_len, batch_size))
# Multiple of number of devices (int) closest to target number of words, assuming each sentence is of
# average length
batch_size_seq = batch_num_devices * round((batch_size / average_seq_len) / batch_num_devices)
batch_size_word = batch_size_seq * average_seq_len
else:
batch_size_seq = batch_size
batch_size_word = batch_size_seq * average_seq_len
bucket_batch_sizes.append(BucketBatchSize(bucket, batch_size_seq, batch_size_word))
# Track largest number of word samples in a batch
largest_total_num_words = max(largest_total_num_words, batch_size_seq * max(*bucket))
# Final step: guarantee that largest bucket by sequence length also has largest total batch size.
# When batching by sentences, this will already be the case.
if batch_by_words:
padded_seq_len = max(*buckets[-1])
average_seq_len = data_target_average_len[-1]
while bucket_batch_sizes[-1].batch_size * padded_seq_len < largest_total_num_words:
bucket_batch_sizes[-1] = BucketBatchSize(
bucket_batch_sizes[-1].bucket,
bucket_batch_sizes[-1].batch_size + batch_num_devices,
bucket_batch_sizes[-1].average_words_per_batch + batch_num_devices * average_seq_len)
return bucket_batch_sizes
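# A worked example of word-based batching (numbers follow from the code above,
# assuming a single device): with buckets [(10, 10), (20, 20)], batch_size=400
# words and average target lengths [8.0, 16.0], the sentence-level batch sizes
# become round(400 / 8) = 50 and round(400 / 16) = 25, i.e. roughly 400 target
# words per batch in either bucket.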
def calculate_length_statistics(source_sentences: Iterable[List[Any]],
target_sentences: Iterable[List[Any]],
max_seq_len_source: int,
max_seq_len_target: int) -> 'LengthStatistics':
"""
Returns mean and standard deviation of target-to-source length ratios of parallel corpus.
:param source_sentences: Source sentences.
:param target_sentences: Target sentences.
:param max_seq_len_source: Maximum source sequence length.
:param max_seq_len_target: Maximum target sequence length.
:return: The number of sentences as well as the mean and standard deviation of target to source length ratios.
"""
mean_and_variance = OnlineMeanAndVariance()
for target, source in zip(target_sentences, source_sentences):
source_len = len(source)
target_len = len(target)
if source_len > max_seq_len_source or target_len > max_seq_len_target:
continue
length_ratio = target_len / source_len
mean_and_variance.update(length_ratio)
num_sents = mean_and_variance.count
mean = mean_and_variance.mean
std = math.sqrt(mean_and_variance.variance)
return LengthStatistics(num_sents, mean, std)
def analyze_sequence_lengths(source: str,
target: str,
vocab_source: Dict[str, int],
vocab_target: Dict[str, int],
max_seq_len_source: int,
max_seq_len_target: int) -> 'LengthStatistics':
train_source_sentences = SequenceReader(source, vocab_source, add_bos=False)
# Length statistics are calculated on the raw sentences without special tokens, such as the BOS, as these can
# have a large impact on the length ratios, especially with lots of short sequences.
train_target_sentences = SequenceReader(target, vocab_target, add_bos=False)
length_statistics = calculate_length_statistics(train_source_sentences, train_target_sentences,
max_seq_len_source,
# Take into account the BOS symbol that is added later
max_seq_len_target - 1)
check_condition(train_source_sentences.is_done() and train_target_sentences.is_done(),
"Different number of lines in source and target data.")
logger.info("%d sequences of maximum length (%d, %d) in '%s' and '%s'.",
length_statistics.num_sents, max_seq_len_source, max_seq_len_target, source, target)
logger.info("Mean training target/source length ratio: %.2f (+-%.2f)",
length_statistics.length_ratio_mean,
length_statistics.length_ratio_std)
return length_statistics
class DataStatisticsAccumulator:
def __init__(self,
buckets: List[Tuple[int, int]],
vocab_source: Dict[str, int],
vocab_target: Dict[str, int],
length_ratio_mean: float,
length_ratio_std: float) -> None:
self.buckets = buckets
num_buckets = len(buckets)
self.length_ratio_mean = length_ratio_mean
self.length_ratio_std = length_ratio_std
self.unk_id_source = vocab_source[C.UNK_SYMBOL]
self.unk_id_target = vocab_target[C.UNK_SYMBOL]
self.size_vocab_source = len(vocab_source)
self.size_vocab_target = len(vocab_target)
self.num_sents = 0
self.num_discarded = 0
self.num_tokens_source = 0
self.num_tokens_target = 0
self.num_unks_source = 0
self.num_unks_target = 0
self.max_observed_len_source = 0
self.max_observed_len_target = 0
self._mean_len_target_per_bucket = [OnlineMeanAndVariance() for _ in range(num_buckets)]
def sequence_pair(self,
source: List[int],
target: List[int],
bucket_idx: Optional[int]):
if bucket_idx is None:
self.num_discarded += 1
return
source_len = len(source)
target_len = len(target)
self._mean_len_target_per_bucket[bucket_idx].update(target_len)
self.num_sents += 1
self.num_tokens_source += source_len
self.num_tokens_target += target_len
self.max_observed_len_source = max(source_len, self.max_observed_len_source)
self.max_observed_len_target = max(target_len, self.max_observed_len_target)
self.num_unks_source += source.count(self.unk_id_source)
self.num_unks_target += target.count(self.unk_id_target)
@property
def mean_len_target_per_bucket(self) -> List[Optional[float]]:
return [mean_and_variance.mean if mean_and_variance.count > 0 else None
for mean_and_variance in self._mean_len_target_per_bucket]
@property
def statistics(self):
num_sents_per_bucket = [mean_and_variance.count for mean_and_variance in self._mean_len_target_per_bucket]
return DataStatistics(num_sents=self.num_sents,
num_discarded=self.num_discarded,
num_tokens_source=self.num_tokens_source,
num_tokens_target=self.num_tokens_target,
num_unks_source=self.num_unks_source,
num_unks_target=self.num_unks_target,
max_observed_len_source=self.max_observed_len_source,
max_observed_len_target=self.max_observed_len_target,
size_vocab_source=self.size_vocab_source,
size_vocab_target=self.size_vocab_target,
length_ratio_mean=self.length_ratio_mean,
length_ratio_std=self.length_ratio_std,
buckets=self.buckets,
num_sents_per_bucket=num_sents_per_bucket,
mean_len_target_per_bucket=self.mean_len_target_per_bucket)
def shard_data(source_fname: str,
target_fname: str,
vocab_source: Dict[str, int],
vocab_target: Dict[str, int],
num_shards: int,
buckets: List[Tuple[int, int]],
length_ratio_mean: float,
length_ratio_std: float,
output_prefix: str) -> Tuple[List[Tuple[str, str, 'DataStatistics']], 'DataStatistics']:
"""
Assign int-coded source/target sentence pairs to shards at random.
:param source_fname: The file name of the source file.
:param target_fname: The file name of the target file.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:param num_shards: The total number of shards.
:param buckets: Bucket list.
:param length_ratio_mean: Mean length ratio.
:param length_ratio_std: Standard deviation of length ratios.
:param output_prefix: The prefix under which the shard files will be created.
:return: Tuple of source, target file names and statistics for each shard as well as global statistics.
"""
os.makedirs(output_prefix, exist_ok=True)
source_shard_fnames = [os.path.join(output_prefix, C.SHARD_SOURCE % i)
for i in range(num_shards)] # type: List[str]
target_shard_fnames = [os.path.join(output_prefix, C.SHARD_TARGET % i)
for i in range(num_shards)] # type: List[str]
data_stats_accumulator = DataStatisticsAccumulator(buckets, vocab_source, vocab_target,
length_ratio_mean, length_ratio_std)
per_shard_stat_accumulators = [DataStatisticsAccumulator(buckets, vocab_source, vocab_target, length_ratio_mean,
length_ratio_std) for shard_idx in range(num_shards)]
with ExitStack() as exit_stack:
source_shards = []
target_shards = []
# create shard files:
for fname in source_shard_fnames:
source_shards.append(exit_stack.enter_context(smart_open(fname, mode="wt")))
for fname in target_shard_fnames:
target_shards.append(exit_stack.enter_context(smart_open(fname, mode="wt")))
shards = list(zip(source_shards, target_shards, per_shard_stat_accumulators))
source_iter = SequenceReader(source_fname, vocab_source, add_bos=False)
target_iter = SequenceReader(target_fname, vocab_target, add_bos=True)
random_shard_iter = iter(lambda: random.choice(shards), None)
for source, target, (source_shard, target_shard, shard_stats) in zip(source_iter, target_iter,
random_shard_iter):
source_len = len(source)
target_len = len(target)
buck_idx, buck = get_parallel_bucket(buckets, source_len, target_len)
data_stats_accumulator.sequence_pair(source, target, buck_idx)
shard_stats.sequence_pair(source, target, buck_idx)
if buck is None:
continue
source_shard.write(ids2strids(source) + "\n")
target_shard.write(ids2strids(target) + "\n")
per_shard_stats = [shard_stat_accumulator.statistics for shard_stat_accumulator in per_shard_stat_accumulators]
return list(zip(source_shard_fnames, target_shard_fnames, per_shard_stats)), data_stats_accumulator.statistics
class RawParallelDatasetLoader:
"""
Loads a data set of variable-length parallel source/target sequences into buckets of NDArrays.
:param buckets: Bucket list.
:param eos_id: End-of-sentence id.
:param pad_id: Padding id.
:param dtype: Data type.
"""
def __init__(self,
buckets: List[Tuple[int, int]],
eos_id: int,
pad_id: int,
dtype: str = 'float32') -> None:
self.buckets = buckets
self.eos_id = eos_id
self.pad_id = pad_id
self.dtype = dtype
def load(self,
source_sentences: Iterable[List[Any]],
target_sentences: Iterable[List[Any]],
num_samples_per_bucket: List[int]) -> 'ParallelDataSet':
assert len(num_samples_per_bucket) == len(self.buckets)
data_source = [np.full((num_samples, source_len), self.pad_id, dtype=self.dtype)
for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]
data_target = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype)
for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]
data_label = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype)
for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]
bucket_sample_index = [0 for buck in self.buckets]
# track amount of padding introduced through bucketing
num_tokens_source = 0
num_tokens_target = 0
num_pad_source = 0
num_pad_target = 0
# Bucket sentences as padded np arrays
for source, target in zip(source_sentences, target_sentences):
source_len = len(source)
target_len = len(target)
buck_index, buck = get_parallel_bucket(self.buckets, source_len, target_len)
if buck is None:
continue # skip this sentence pair
num_tokens_source += buck[0]
num_tokens_target += buck[1]
num_pad_source += buck[0] - source_len
num_pad_target += buck[1] - target_len
sample_index = bucket_sample_index[buck_index]
data_source[buck_index][sample_index, :source_len] = source
data_target[buck_index][sample_index, :target_len] = target
# NOTE(fhieber): while this is wasteful w.r.t memory, we need to explicitly create the label sequence
# with the EOS symbol here sentence-wise and not per-batch due to variable sequence length within a batch.
# Once MXNet allows item assignments given a list of indices (probably MXNet 1.0): e.g a[[0,1,5,2]] = x,
# we can try again to compute the label sequence on the fly in next().
data_label[buck_index][sample_index, :target_len] = target[1:] + [self.eos_id]
bucket_sample_index[buck_index] += 1
for i in range(len(data_source)):
data_source[i] = mx.nd.array(data_source[i], dtype=self.dtype)
data_target[i] = mx.nd.array(data_target[i], dtype=self.dtype)
data_label[i] = mx.nd.array(data_label[i], dtype=self.dtype)
if num_tokens_source > 0 and num_tokens_target > 0:
logger.info("Created bucketed parallel data set. Introduced padding: source=%.1f%% target=%.1f%%)",
num_pad_source / num_tokens_source * 100,
num_pad_target / num_tokens_target * 100)
return ParallelDataSet(data_source, data_target, data_label)
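# A small worked example of the label construction in load() above (token ids
# are made up):
#   target = [<bos>, 7, 9]  ->  label = [7, 9, <eos>]
# i.e. the label sequence is the target shifted left by one with EOS appended.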
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int:
"""
Returns the number of shards.
:param num_samples: Number of training data samples.
:param samples_per_shard: Samples per shard.
:param min_num_shards: Minimum number of shards.
:return: Number of shards.
"""
return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)
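# A worked example:
#   get_num_shards(1000000, samples_per_shard=250000, min_num_shards=1)  ->  4
#   get_num_shards(100, samples_per_shard=250000, min_num_shards=2)      ->  2
# (a small corpus is still split into the requested minimum number of shards)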
def prepare_data(source: str, target: str,
vocab_source: Dict[str, int], vocab_target: Dict[str, int],
vocab_source_path: Optional[str], vocab_target_path: Optional[str],
shared_vocab: bool,
max_seq_len_source: int,
max_seq_len_target: int,
bucketing: bool,
bucket_width: int,
samples_per_shard: int,
min_num_shards: int,
output_prefix: str,
keep_tmp_shard_files: bool = False):
logger.info("Preparing data.")
# write vocabularies
vocab.vocab_to_json(vocab_source, os.path.join(output_prefix, C.VOCAB_SRC_NAME) + C.JSON_SUFFIX)
vocab.vocab_to_json(vocab_target, os.path.join(output_prefix, C.VOCAB_TRG_NAME) + C.JSON_SUFFIX)
# Pass 1: get target/source length ratios.
length_statistics = analyze_sequence_lengths(source, target, vocab_source, vocab_target,
max_seq_len_source, max_seq_len_target)
# define buckets
buckets = define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width,
length_statistics.length_ratio_mean) if bucketing else [
(max_seq_len_source, max_seq_len_target)]
logger.info("Buckets: %s", buckets)
# Pass 2: Randomly assign data to data shards
# no pre-processing yet, just write the sentences to different files
num_shards = get_num_shards(length_statistics.num_sents, samples_per_shard, min_num_shards)
logger.info("%d samples will be split into %d shard(s) (requested samples/shard=%d, min_num_shards=%d)."
% (length_statistics.num_sents, num_shards, samples_per_shard, min_num_shards))
shards, data_statistics = shard_data(source_fname=source,
target_fname=target,
vocab_source=vocab_source,
vocab_target=vocab_target,
num_shards=num_shards,
buckets=buckets,
length_ratio_mean=length_statistics.length_ratio_mean,
length_ratio_std=length_statistics.length_ratio_std,
output_prefix=output_prefix)
data_statistics.log()
data_loader = RawParallelDatasetLoader(buckets=buckets,
eos_id=vocab_target[C.EOS_SYMBOL],
pad_id=C.PAD_ID)
# Pass 3: convert each shard to serialized ndarrays
for shard_idx, (shard_source, shard_target, shard_stats) in enumerate(shards):
source_sentences = SequenceReader(shard_source, vocab=None)
target_sentences = SequenceReader(shard_target, vocab=None)
dataset = data_loader.load(source_sentences, target_sentences, shard_stats.num_sents_per_bucket)
shard_fname = os.path.join(output_prefix, C.SHARD_NAME % shard_idx)
shard_stats.log()
logger.info("Writing '%s'", shard_fname)
dataset.save(shard_fname)
if not keep_tmp_shard_files:
os.remove(shard_source)
os.remove(shard_target)
config_data = DataConfig(source=os.path.abspath(source),
target=os.path.abspath(target),
vocab_source=vocab_source_path,
vocab_target=vocab_target_path,
shared_vocab=shared_vocab,
num_shards=num_shards,
data_statistics=data_statistics,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target)
data_config_fname = os.path.join(output_prefix, C.DATA_CONFIG)
logger.info("Writing data config to '%s'", data_config_fname)
config_data.save(data_config_fname)
version_file = os.path.join(output_prefix, C.PREPARED_DATA_VERSION_FILE)
with open(version_file, "w") as version_out:
version_out.write(str(C.PREPARED_DATA_VERSION))
def get_data_statistics(source_sentences: Iterable[List[int]],
target_sentences: Iterable[List[int]],
buckets: List[Tuple[int, int]],
length_ratio_mean: float,
length_ratio_std: float,
vocab_source: vocab.Vocab,
vocab_target: vocab.Vocab) -> 'DataStatistics':
data_stats_accumulator = DataStatisticsAccumulator(buckets, vocab_source, vocab_target,
length_ratio_mean, length_ratio_std)
for source, target in zip(source_sentences, target_sentences):
buck_idx, buck = get_parallel_bucket(buckets, len(source), len(target))
data_stats_accumulator.sequence_pair(source, target, buck_idx)
return data_stats_accumulator.statistics
def get_validation_data_iter(data_loader: RawParallelDatasetLoader,
validation_source: str,
validation_target: str,
buckets: List[Tuple[int, int]],
bucket_batch_sizes: List[BucketBatchSize],
vocab_source: vocab.Vocab,
vocab_target: vocab.Vocab,
max_seq_len_source: int,
max_seq_len_target: int,
batch_size: int,
fill_up: str) -> 'ParallelSampleIter':
"""
Returns a ParallelSampleIter for the validation data.
"""
logger.info("=================================")
logger.info("Creating validation data iterator")
logger.info("=================================")
validation_length_statistics = analyze_sequence_lengths(validation_source, validation_target,
vocab_source, vocab_target,
max_seq_len_source, max_seq_len_target)
validation_source_sentences = SequenceReader(validation_source, vocab_source, add_bos=False, limit=None)
validation_target_sentences = SequenceReader(validation_target, vocab_target, add_bos=True, limit=None)
validation_data_statistics = get_data_statistics(validation_source_sentences,
validation_target_sentences,
buckets,
validation_length_statistics.length_ratio_mean,
validation_length_statistics.length_ratio_std,
vocab_source, vocab_target)
validation_data_statistics.log(bucket_batch_sizes)
validation_data = data_loader.load(validation_source_sentences,
validation_target_sentences,
validation_data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes,
fill_up)
return ParallelSampleIter(data=validation_data,
buckets=buckets,
batch_size=batch_size,
bucket_batch_sizes=bucket_batch_sizes)
def get_prepared_data_iters(prepared_data_dir: str,
validation_source: str, validation_target: str,
shared_vocab: bool,
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
fill_up: str) -> Tuple['BaseParallelSampleIter',
'BaseParallelSampleIter',
'DataConfig', vocab.Vocab, vocab.Vocab]:
logger.info("===============================")
logger.info("Creating training data iterator")
logger.info("===============================")
version_file = os.path.join(prepared_data_dir, C.PREPARED_DATA_VERSION_FILE)
with open(version_file) as version_in:
version = int(version_in.read())
check_condition(version == C.PREPARED_DATA_VERSION,
"The dataset %s was written in an old and incompatible format. Please rerun data "
"preparation with a current version of Sockeye." % prepared_data_dir)
config_file = os.path.join(prepared_data_dir, C.DATA_CONFIG)
check_condition(os.path.exists(config_file),
"Could not find data config %s. Are you sure %s is a directory created with "
"python -m sockeye.prepare_data?" % (config_file, prepared_data_dir))
data_config = cast(DataConfig, DataConfig.load(config_file))
shard_fnames = [os.path.join(prepared_data_dir,
C.SHARD_NAME % shard_idx) for shard_idx in range(data_config.num_shards)]
for shard_fname in shard_fnames:
check_condition(os.path.exists(shard_fname), "Shard %s does not exist." % shard_fname)
source_vocab_fname = os.path.join(prepared_data_dir, C.VOCAB_SRC_NAME) + C.JSON_SUFFIX
target_vocab_fname = os.path.join(prepared_data_dir, C.VOCAB_TRG_NAME) + C.JSON_SUFFIX
check_condition(os.path.exists(source_vocab_fname), "Source vocabulary %s does not exist." % source_vocab_fname)
check_condition(os.path.exists(target_vocab_fname), "Target vocabulary %s does not exist." % target_vocab_fname)
check_condition(shared_vocab == data_config.shared_vocab, "Shared config needed (e.g. for weight tying), but "
"data was prepared without a shared vocab. Use %s when "
"preparing the data." % C.VOCAB_ARG_SHARED_VOCAB)
vocab_source = vocab.vocab_from_json(source_vocab_fname)
vocab_target = vocab.vocab_from_json(target_vocab_fname)
buckets = data_config.data_statistics.buckets
max_seq_len_source = data_config.max_seq_len_source
max_seq_len_target = data_config.max_seq_len_target
bucket_batch_sizes = define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words,
batch_num_devices,
data_config.data_statistics.average_len_target_per_bucket)
data_config.data_statistics.log(bucket_batch_sizes)
train_iter = ShardedParallelSampleIter(shard_fnames,
buckets,
batch_size,
bucket_batch_sizes,
fill_up)
data_loader = RawParallelDatasetLoader(buckets=buckets,
eos_id=vocab_target[C.EOS_SYMBOL],
pad_id=C.PAD_ID)
validation_iter = get_validation_data_iter(data_loader=data_loader,
validation_source=validation_source,
validation_target=validation_target,
buckets=buckets,
bucket_batch_sizes=bucket_batch_sizes,
vocab_source=vocab_source,
vocab_target=vocab_target,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
batch_size=batch_size,
fill_up=fill_up)
return train_iter, validation_iter, data_config, vocab_source, vocab_target
def get_training_data_iters(source: str, target: str,
validation_source: str, validation_target: str,
vocab_source: vocab.Vocab, vocab_target: vocab.Vocab,
vocab_source_path: Optional[str], vocab_target_path: Optional[str],
shared_vocab: bool,
batch_size: int,
batch_by_words: bool,
batch_num_devices: int,
fill_up: str,
max_seq_len_source: int,
max_seq_len_target: int,
bucketing: bool,
bucket_width: int) -> Tuple['BaseParallelSampleIter',
'BaseParallelSampleIter',
'DataConfig']:
"""
Returns data iterators for training and validation data.
:param source: Path to source training data.
:param target: Path to target training data.
:param validation_source: Path to source validation data.
:param validation_target: Path to target validation data.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:param vocab_source_path: Path to source vocabulary.
:param vocab_target_path: Path to target vocabulary.
:param shared_vocab: Whether the vocabularies are shared.
:param batch_size: Batch size.
:param batch_by_words: Size batches by words rather than sentences.
:param batch_num_devices: Number of devices batches will be parallelized across.
:param fill_up: Fill-up strategy for buckets.
:param max_seq_len_source: Maximum source sequence length.
:param max_seq_len_target: Maximum target sequence length.
:param bucketing: Whether to use bucketing.
:param bucket_width: Size of buckets.
:return: Tuple of (training data iterator, validation data iterator, data config).
"""
logger.info("===============================")
logger.info("Creating training data iterator")
logger.info("===============================")
# Pass 1: get target/source length ratios.
length_statistics = analyze_sequence_lengths(source, target, vocab_source, vocab_target,
max_seq_len_source, max_seq_len_target)
# define buckets
buckets = define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width,
length_statistics.length_ratio_mean) if bucketing else [
(max_seq_len_source, max_seq_len_target)]
source_sentences = SequenceReader(source, vocab_source, add_bos=False)
target_sentences = SequenceReader(target, vocab_target, add_bos=True)
# 2. pass: Get data statistics
data_statistics = get_data_statistics(source_sentences, target_sentences, buckets,
length_statistics.length_ratio_mean, length_statistics.length_ratio_std,
vocab_source, vocab_target)
bucket_batch_sizes = define_bucket_batch_sizes(buckets,
batch_size,
batch_by_words,
batch_num_devices,
data_statistics.average_len_target_per_bucket)
data_statistics.log(bucket_batch_sizes)
data_loader = RawParallelDatasetLoader(buckets=buckets,
eos_id=vocab_target[C.EOS_SYMBOL],
pad_id=C.PAD_ID)
training_data = data_loader.load(source_sentences, target_sentences,
data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes, fill_up)
config_data = DataConfig(source=source,
target=target,
vocab_source=vocab_source_path,
vocab_target=vocab_target_path,
shared_vocab=shared_vocab,
num_shards=1,
data_statistics=data_statistics,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target)
train_iter = ParallelSampleIter(training_data,
buckets,
batch_size,
bucket_batch_sizes)
validation_iter = get_validation_data_iter(data_loader=data_loader,
validation_source=validation_source,
validation_target=validation_target,
buckets=buckets,
bucket_batch_sizes=bucket_batch_sizes,
vocab_source=vocab_source,
vocab_target=vocab_target,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
batch_size=batch_size,
fill_up=fill_up)
return train_iter, validation_iter, config_data
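# Typical call (sketch; paths and the vocab_src/vocab_trg vocabularies are
# hypothetical placeholders, not names from this module):
#     train_iter, val_iter, config_data = get_training_data_iters(
#         source="train.src", target="train.trg",
#         validation_source="dev.src", validation_target="dev.trg",
#         vocab_source=vocab_src, vocab_target=vocab_trg,
#         vocab_source_path=None, vocab_target_path=None,
#         shared_vocab=False, batch_size=64, batch_by_words=False,
#         batch_num_devices=1, fill_up='replicate',
#         max_seq_len_source=100, max_seq_len_target=100,
#         bucketing=True, bucket_width=10)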
class LengthStatistics(config.Config):
def __init__(self,
num_sents: int,
length_ratio_mean: float,
length_ratio_std: float) -> None:
super().__init__()
self.num_sents = num_sents
self.length_ratio_mean = length_ratio_mean
self.length_ratio_std = length_ratio_std
class DataStatistics(config.Config):
def __init__(self,
num_sents: int,
num_discarded,
num_tokens_source,
num_tokens_target,
num_unks_source,
num_unks_target,
max_observed_len_source,
max_observed_len_target,
size_vocab_source,
size_vocab_target,
length_ratio_mean,
length_ratio_std,
buckets: List[Tuple[int, int]],
num_sents_per_bucket: List[int],
mean_len_target_per_bucket: List[Optional[float]]) -> None:
super().__init__()
self.num_sents = num_sents
self.num_discarded = num_discarded
self.num_tokens_source = num_tokens_source
self.num_tokens_target = num_tokens_target
self.num_unks_source = num_unks_source
self.num_unks_target = num_unks_target
self.max_observed_len_source = max_observed_len_source
self.max_observed_len_target = max_observed_len_target
self.size_vocab_source = size_vocab_source
self.size_vocab_target = size_vocab_target
self.length_ratio_mean = length_ratio_mean
self.length_ratio_std = length_ratio_std
self.buckets = buckets
self.num_sents_per_bucket = num_sents_per_bucket
self.average_len_target_per_bucket = mean_len_target_per_bucket
def log(self, bucket_batch_sizes: Optional[List[BucketBatchSize]] = None):
logger.info("Tokens: source %d target %d", self.num_tokens_source, self.num_tokens_target)
if self.num_tokens_source > 0 and self.num_tokens_target > 0:
logger.info("Vocabulary coverage: source %.0f%% target %.0f%%",
(1 - self.num_unks_source / self.num_tokens_source) * 100,
(1 - self.num_unks_target / self.num_tokens_target) * 100)
logger.info("%d sequences across %d buckets", self.num_sents, len(self.num_sents_per_bucket))
logger.info("%d sequences did not fit into buckets and were discarded", self.num_discarded)
if bucket_batch_sizes is not None:
describe_data_and_buckets(self, bucket_batch_sizes)
def describe_data_and_buckets(data_statistics: DataStatistics, bucket_batch_sizes: List[BucketBatchSize]):
"""
Describes statistics across buckets
"""
check_condition(len(bucket_batch_sizes) == len(data_statistics.buckets),
"Number of bucket batch sizes (%d) does not match number of buckets in statistics (%d)."
% (len(bucket_batch_sizes), len(data_statistics.buckets)))
for bucket_batch_size, num_seq in zip(bucket_batch_sizes, data_statistics.num_sents_per_bucket):
if num_seq > 0:
logger.info("Bucket %s: %d samples in %d batches of %d, ~%.1f tokens/batch.",
bucket_batch_size.bucket,
num_seq,
math.ceil(num_seq / bucket_batch_size.batch_size),
bucket_batch_size.batch_size,
bucket_batch_size.average_words_per_batch)
class DataConfig(config.Config):
"""
Stores data paths from training.
"""
def __init__(self,
source: str,
target: str,
vocab_source: Optional[str],
vocab_target: Optional[str],
shared_vocab: bool,
num_shards: int,
data_statistics: DataStatistics,
max_seq_len_source: int,
max_seq_len_target: int) -> None:
super().__init__()
self.source = source
self.target = target
self.vocab_source = vocab_source
self.vocab_target = vocab_target
self.shared_vocab = shared_vocab
self.num_shards = num_shards
self.data_statistics = data_statistics
self.max_seq_len_source = max_seq_len_source
self.max_seq_len_target = max_seq_len_target
def read_content(path: str, limit: Optional[int] = None) -> Iterator[List[str]]:
"""
Returns a list of tokens for each line in path up to a limit.
    :param path: Path to a file containing sentences.
:param limit: How many lines to read from path.
:return: Iterator over lists of words.
"""
with smart_open(path) as indata:
for i, line in enumerate(indata):
if limit is not None and i == limit:
break
yield list(get_tokens(line))
def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]:
"""
Returns sequence of integer ids given a sequence of tokens and vocab.
:param tokens: List of string tokens.
:param vocab: Vocabulary (containing UNK symbol).
:return: List of word ids.
"""
return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens]
def strids2ids(tokens: Iterable[str]) -> List[int]:
"""
Returns sequence of integer ids given a sequence of string ids.
:param tokens: List of integer tokens.
:return: List of word ids.
"""
return list(map(int, tokens))
def ids2strids(ids: Iterable[int]) -> str:
"""
Returns a string representation of a sequence of integers.
:param ids: Sequence of integers.
:return: String sequence
"""
return " ".join(map(str, ids))
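# Illustrative sketch (hypothetical helper, not part of the original module):
# shows the three id/string conversions above on a toy vocabulary; OOV tokens
# map to the UNK id.
def _example_id_conversions():  # pragma: no cover - illustrative sketch
    toy_vocab = {C.PAD_SYMBOL: C.PAD_ID, C.UNK_SYMBOL: 1, "a": 2, "b": 3}
    assert tokens2ids(["a", "b", "oov"], toy_vocab) == [2, 3, 1]
    assert strids2ids(["2", "3", "1"]) == [2, 3, 1]
    assert ids2strids([2, 3, 1]) == "2 3 1"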
class SequenceReader(Iterator):
"""
Reads sequence samples from path and creates integer id sequences.
Streams from disk, instead of loading all samples into memory.
If vocab is None, the sequences in path are assumed to be integers coded as strings.
:param path: Path to read data from.
:param vocab: Optional mapping from strings to integer ids.
:param add_bos: Whether to add Beginning-Of-Sentence (BOS) symbol.
:param limit: Read limit.
"""
def __init__(self,
path: str,
vocab: Optional[Dict[str, int]],
add_bos: bool = False,
limit: Optional[int] = None) -> None:
self.path = path
self.vocab = vocab
self.bos_id = None
if vocab is not None:
assert C.UNK_SYMBOL in vocab
assert vocab[C.PAD_SYMBOL] == C.PAD_ID
assert C.BOS_SYMBOL in vocab
assert C.EOS_SYMBOL in vocab
self.bos_id = vocab[C.BOS_SYMBOL]
else:
check_condition(not add_bos, "Adding a BOS symbol requires a vocabulary")
self.add_bos = add_bos
self.limit = limit
self._iter = None # type: Optional[Iterator]
self._iterated_once = False
self.count = 0
self._next = None
def __iter__(self):
check_condition(self._next is None, "Can not iterate multiple times simultaneously.")
self._iter = read_content(self.path, self.limit)
self._next = next(self._iter, None)
return self
def __next__(self):
if self._next is None:
raise StopIteration
tokens = self._next
if self.vocab is not None:
sequence = tokens2ids(tokens, self.vocab)
else:
sequence = strids2ids(tokens)
check_condition(bool(sequence), "Empty sequence in file %s" % self.path)
        if self.vocab is not None and self.add_bos:
sequence.insert(0, self.vocab[C.BOS_SYMBOL])
if not self._iterated_once:
self.count += 1
# fetch next element
self._next = next(self._iter, None)
if self._next is None:
self._iter = None
if not self._iterated_once:
self._iterated_once = True
return sequence
def is_done(self):
return self._iterated_once and self._next is None
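# Illustrative sketch (hypothetical helper and path, not part of the original
# module): with vocab=None the file must contain whitespace-separated integer
# ids, one sentence per line; each yielded sequence is a List[int].
def _example_sequence_reader():  # pragma: no cover - illustrative sketch
    for sequence in SequenceReader("corpus.ids", vocab=None):
        assert all(isinstance(token_id, int) for token_id in sequence)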
def get_default_bucket_key(buckets: List[Tuple[int, int]]) -> Tuple[int, int]:
"""
Returns the default bucket from a list of buckets, i.e. the largest bucket.
:param buckets: List of buckets.
:return: The largest bucket in the list.
"""
return max(buckets)
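# Illustrative sketch (hypothetical helper, not part of the original module):
# the default bucket key is simply the largest bucket under tuple comparison.
def _example_get_default_bucket_key():  # pragma: no cover - illustrative sketch
    assert get_default_bucket_key([(10, 10), (20, 20)]) == (20, 20)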
def get_parallel_bucket(buckets: List[Tuple[int, int]],
length_source: int,
length_target: int) -> Optional[Tuple[int, Tuple[int, int]]]:
"""
Returns bucket index and bucket from a list of buckets, given source and target length.
Returns (None, None) if no bucket fits.
:param buckets: List of buckets.
:param length_source: Length of source sequence.
:param length_target: Length of target sequence.
:return: Tuple of (bucket index, bucket), or (None, None) if not fitting.
"""
    bucket = None, None  # type: Tuple[Optional[int], Optional[Tuple[int, int]]]
for j, (source_bkt, target_bkt) in enumerate(buckets):
if source_bkt >= length_source and target_bkt >= length_target:
bucket = j, (source_bkt, target_bkt)
break
return bucket
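# Illustrative sketch (hypothetical helper and buckets, not part of the
# original module): a (12, 8) pair falls into the first bucket that fits both
# sides; a pair exceeding all buckets yields (None, None).
def _example_get_parallel_bucket():  # pragma: no cover - illustrative sketch
    buckets = [(10, 10), (20, 20)]
    assert get_parallel_bucket(buckets, 12, 8) == (1, (20, 20))
    assert get_parallel_bucket(buckets, 25, 5) == (None, None)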
class ParallelDataSet(Sized):
"""
Bucketed parallel data set with labels
"""
def __init__(self,
source: List[mx.nd.array],
target: List[mx.nd.array],
label: List[mx.nd.array]) -> None:
check_condition(len(source) == len(target) == len(label),
"Number of buckets for source/target/label must match.")
self.source = source
self.target = target
self.label = label
def __len__(self) -> int:
return len(self.source)
def get_bucket_counts(self):
return [len(self.source[buck_idx]) for buck_idx in range(len(self))]
def save(self, fname: str):
"""
Saves the dataset to a binary .npy file.
"""
mx.nd.save(fname, self.source + self.target + self.label)
@staticmethod
def load(fname: str) -> 'ParallelDataSet':
"""
Loads a dataset from a binary .npy file.
"""
data = mx.nd.load(fname)
n = len(data) // 3
source = data[:n]
target = data[n:2 * n]
label = data[2 * n:]
assert len(source) == len(target) == len(label)
return ParallelDataSet(source, target, label)
def fill_up(self,
bucket_batch_sizes: List[BucketBatchSize],
fill_up: str,
seed: int = 42) -> 'ParallelDataSet':
"""
Returns a new dataset with buckets filled up using the specified fill-up strategy.
:param bucket_batch_sizes: Bucket batch sizes.
:param fill_up: Fill-up strategy.
:param seed: The random seed used for sampling sentences to fill up.
:return: New dataset with buckets filled up to the next multiple of batch size
"""
source = list(self.source)
target = list(self.target)
label = list(self.label)
rs = np.random.RandomState(seed)
for bucket_idx in range(len(self)):
bucket = bucket_batch_sizes[bucket_idx].bucket
bucket_batch_size = bucket_batch_sizes[bucket_idx].batch_size
bucket_source = self.source[bucket_idx]
bucket_target = self.target[bucket_idx]
bucket_label = self.label[bucket_idx]
num_samples = bucket_source.shape[0]
if num_samples % bucket_batch_size != 0:
if fill_up == 'replicate':
rest = bucket_batch_size - num_samples % bucket_batch_size
logger.info("Replicating %d random samples from %d samples in bucket %s "
"to size it to multiple of %d",
rest, num_samples, bucket, bucket_batch_size)
random_indices = mx.nd.array(rs.randint(num_samples, size=rest))
source[bucket_idx] = mx.nd.concat(bucket_source, bucket_source.take(random_indices), dim=0)
target[bucket_idx] = mx.nd.concat(bucket_target, bucket_target.take(random_indices), dim=0)
label[bucket_idx] = mx.nd.concat(bucket_label, bucket_label.take(random_indices), dim=0)
else:
                    raise NotImplementedError('Unknown fill-up strategy: %s' % fill_up)
return ParallelDataSet(source, target, label)
def permute(self, permutations: List[mx.nd.NDArray]) -> 'ParallelDataSet':
assert len(self) == len(permutations)
source = []
target = []
label = []
for buck_idx in range(len(self)):
num_samples = self.source[buck_idx].shape[0]
if num_samples: # not empty bucket
permutation = permutations[buck_idx]
source.append(self.source[buck_idx].take(permutation))
target.append(self.target[buck_idx].take(permutation))
label.append(self.label[buck_idx].take(permutation))
else:
source.append(self.source[buck_idx])
target.append(self.target[buck_idx])
label.append(self.label[buck_idx])
return ParallelDataSet(source, target, label)
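# Illustrative sketch (hypothetical helper and toy shapes, not part of the
# original module): a single (4, 4) bucket holding 3 samples with batch size 2;
# 'replicate' appends 1 randomly duplicated sample so the bucket holds 4
# samples, i.e. exactly 2 full batches.
def _example_fill_up():  # pragma: no cover - illustrative sketch
    data = ParallelDataSet(source=[mx.nd.zeros((3, 4))],
                           target=[mx.nd.zeros((3, 4))],
                           label=[mx.nd.zeros((3, 4))])
    sizes = [BucketBatchSize(bucket=(4, 4), batch_size=2, average_words_per_batch=8.0)]
    assert data.fill_up(sizes, 'replicate').source[0].shape[0] == 4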
def get_permutations(bucket_counts: List[int]) -> Tuple[List[mx.nd.NDArray], List[mx.nd.NDArray]]:
"""
Returns the indices of a random permutation for each bucket and the corresponding inverse permutations that can
restore the original order of the data if applied to the permuted data.
:param bucket_counts: The number of elements per bucket.
:return: For each bucket a permutation and inverse permutation is returned.
"""
data_permutations = [] # type: List[mx.nd.NDArray]
inverse_data_permutations = [] # type: List[mx.nd.NDArray]
for num_samples in bucket_counts:
if num_samples == 0:
num_samples = 1
# new random order:
data_permutation = np.random.permutation(num_samples)
inverse_data_permutation = np.empty(num_samples, np.int32)
inverse_data_permutation[data_permutation] = np.arange(num_samples)
inverse_data_permutation = mx.nd.array(inverse_data_permutation)
data_permutation = mx.nd.array(data_permutation)
data_permutations.append(data_permutation)
inverse_data_permutations.append(inverse_data_permutation)
return data_permutations, inverse_data_permutations
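# Illustrative sketch (hypothetical helper, not part of the original module):
# applying a bucket's permutation and then its inverse restores the original
# order, which is exactly how ParallelSampleIter.reset() un-shuffles the data.
def _example_get_permutations():  # pragma: no cover - illustrative sketch
    perms, inv_perms = get_permutations([4])
    x = mx.nd.arange(0, 4)
    restored = x.take(perms[0]).take(inv_perms[0])
    assert (restored.asnumpy() == x.asnumpy()).all()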
def get_batch_indices(data: ParallelDataSet,
bucket_batch_sizes: List[BucketBatchSize]) -> List[Tuple[int, int]]:
"""
    Returns a list of (bucket index, start-of-batch index) tuples, one per batch, given the batch
    size for each bucket. These indices are valid for the given dataset.
:param data: Data to create indices for.
:param bucket_batch_sizes: Bucket batch sizes.
:return: List of 2d indices.
"""
# create index tuples (i,j) into buckets: i := bucket index ; j := row index of bucket array
idxs = [] # type: List[Tuple[int, int]]
for buck_idx, buck in enumerate(data.source):
bucket = bucket_batch_sizes[buck_idx].bucket
batch_size = bucket_batch_sizes[buck_idx].batch_size
num_samples = data.source[buck_idx].shape[0]
rest = num_samples % batch_size
if rest > 0:
logger.info("Ignoring %d samples from bucket %s with %d samples due to incomplete batch",
rest, bucket, num_samples)
idxs.extend([(buck_idx, j) for j in range(0, num_samples - batch_size + 1, batch_size)])
return idxs
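# Illustrative sketch (hypothetical helper and toy shapes, not part of the
# original module): one bucket with 5 samples and batch size 2 yields batches
# starting at rows 0 and 2; the final incomplete batch of 1 sample is ignored.
def _example_get_batch_indices():  # pragma: no cover - illustrative sketch
    data = ParallelDataSet([mx.nd.zeros((5, 4))], [mx.nd.zeros((5, 4))], [mx.nd.zeros((5, 4))])
    sizes = [BucketBatchSize(bucket=(4, 4), batch_size=2, average_words_per_batch=8.0)]
    assert get_batch_indices(data, sizes) == [(0, 0), (0, 2)]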
class BaseParallelSampleIter(mx.io.DataIter, ABC):
"""
Base parallel sample iterator.
"""
def __init__(self,
buckets,
batch_size,
bucket_batch_sizes,
source_data_name,
target_data_name,
label_name,
dtype='float32') -> None:
super().__init__(batch_size=batch_size)
self.buckets = list(buckets)
self.default_bucket_key = get_default_bucket_key(self.buckets)
self.bucket_batch_sizes = bucket_batch_sizes
self.source_data_name = source_data_name
self.target_data_name = target_data_name
self.label_name = label_name
self.dtype = dtype
# "Staging area" that needs to fit any size batch we're using by total number of elements.
# When computing per-bucket batch sizes, we guarantee that the default bucket will have the
# largest total batch size.
# Note: this guarantees memory sharing for input data and is generally a good heuristic for
# other parts of the model, but it is possible that some architectures will have intermediate
# operations that produce shapes larger than the default bucket size. In these cases, MXNet
# will silently allocate additional memory.
self.provide_data = [
mx.io.DataDesc(name=self.source_data_name,
shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[0]),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(name=self.target_data_name,
shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[1]),
layout=C.BATCH_MAJOR)]
self.provide_label = [
mx.io.DataDesc(name=self.label_name,
shape=(self.bucket_batch_sizes[-1].batch_size, self.default_bucket_key[1]),
layout=C.BATCH_MAJOR)]
self.data_names = [self.source_data_name, self.target_data_name]
self.label_names = [self.label_name]
@abstractmethod
def reset(self):
pass
@abstractmethod
def iter_next(self) -> bool:
pass
@abstractmethod
def next(self) -> mx.io.DataBatch:
pass
@abstractmethod
def save_state(self, fname: str):
pass
@abstractmethod
def load_state(self, fname: str):
pass
class ShardedParallelSampleIter(BaseParallelSampleIter):
"""
Goes through the data one shard at a time. The memory consumption is limited by the memory consumption of the
largest shard. The order in which shards are traversed is changed with each reset.
"""
def __init__(self,
shards_fnames: List[str],
buckets,
batch_size,
bucket_batch_sizes,
fill_up: str,
source_data_name=C.SOURCE_NAME,
target_data_name=C.TARGET_NAME,
label_name=C.TARGET_LABEL_NAME,
dtype='float32') -> None:
super().__init__(buckets, batch_size, bucket_batch_sizes,
source_data_name, target_data_name, label_name, dtype)
assert len(shards_fnames) > 0
self.shards_fnames = list(shards_fnames)
self.shard_index = -1
self.fill_up = fill_up
self.reset()
def _load_shard(self):
shard_fname = self.shards_fnames[self.shard_index]
logger.info("Loading shard %s.", shard_fname)
dataset = ParallelDataSet.load(self.shards_fnames[self.shard_index]).fill_up(self.bucket_batch_sizes,
self.fill_up,
seed=self.shard_index)
self.shard_iter = ParallelSampleIter(dataset,
self.buckets,
self.batch_size,
self.bucket_batch_sizes)
def reset(self):
if len(self.shards_fnames) > 1:
logger.info("Shuffling the shards.")
# Making sure to not repeat a shard:
if self.shard_index < 0:
current_shard_fname = ""
else:
current_shard_fname = self.shards_fnames[self.shard_index]
remaining_shards = [shard for shard in self.shards_fnames if shard != current_shard_fname]
next_shard_fname = random.choice(remaining_shards)
remaining_shards = [shard for shard in self.shards_fnames if shard != next_shard_fname]
random.shuffle(remaining_shards)
self.shards_fnames = [next_shard_fname] + remaining_shards
self.shard_index = 0
self._load_shard()
else:
if self.shard_index < 0:
self.shard_index = 0
self._load_shard()
# We can just reset the shard_iter as we only have a single shard
self.shard_iter.reset()
def iter_next(self) -> bool:
next_shard_index = self.shard_index + 1
return self.shard_iter.iter_next() or next_shard_index < len(self.shards_fnames)
def next(self) -> mx.io.DataBatch:
if not self.shard_iter.iter_next():
if self.shard_index < len(self.shards_fnames) - 1:
self.shard_index += 1
self._load_shard()
else:
raise StopIteration
return self.shard_iter.next()
def save_state(self, fname: str):
with open(fname, "wb") as fp:
pickle.dump(self.shards_fnames, fp)
pickle.dump(self.shard_index, fp)
self.shard_iter.save_state(fname + ".sharditer")
def load_state(self, fname: str):
with open(fname, "rb") as fp:
self.shards_fnames = pickle.load(fp)
self.shard_index = pickle.load(fp)
self._load_shard()
self.shard_iter.load_state(fname + ".sharditer")
class ParallelSampleIter(BaseParallelSampleIter):
"""
Data iterator on a bucketed ParallelDataSet. Shuffles data at every reset and supports saving and loading the
iterator state.
"""
def __init__(self,
data: ParallelDataSet,
buckets,
batch_size,
bucket_batch_sizes,
source_data_name=C.SOURCE_NAME,
target_data_name=C.TARGET_NAME,
label_name=C.TARGET_LABEL_NAME,
dtype='float32') -> None:
super().__init__(buckets, batch_size, bucket_batch_sizes,
source_data_name, target_data_name, label_name, dtype)
# create independent lists to be shuffled
self.data = ParallelDataSet(list(data.source), list(data.target), list(data.label))
# create index tuples (buck_idx, batch_start_pos) into buckets. These will be shuffled.
self.batch_indices = get_batch_indices(self.data, bucket_batch_sizes)
self.curr_batch_index = 0
self.inverse_data_permutations = [mx.nd.arange(0, max(1, self.data.source[i].shape[0]))
for i in range(len(self.data))]
self.data_permutations = [mx.nd.arange(0, max(1, self.data.source[i].shape[0]))
for i in range(len(self.data))]
self.reset()
def reset(self):
"""
Resets and reshuffles the data.
"""
self.curr_batch_index = 0
# shuffle batch start indices
random.shuffle(self.batch_indices)
# restore
self.data = self.data.permute(self.inverse_data_permutations)
self.data_permutations, self.inverse_data_permutations = get_permutations(self.data.get_bucket_counts())
self.data = self.data.permute(self.data_permutations)
def iter_next(self) -> bool:
"""
True if iterator can return another batch
"""
return self.curr_batch_index != len(self.batch_indices)
def next(self) -> mx.io.DataBatch:
"""
Returns the next batch from the data iterator.
"""
if not self.iter_next():
raise StopIteration
i, j = self.batch_indices[self.curr_batch_index]
self.curr_batch_index += 1
batch_size = self.bucket_batch_sizes[i].batch_size
source = self.data.source[i][j:j + batch_size]
target = self.data.target[i][j:j + batch_size]
data = [source, target]
label = [self.data.label[i][j:j + batch_size]]
provide_data = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR) for n, x in
zip(self.data_names, data)]
provide_label = [mx.io.DataDesc(name=n, shape=x.shape, layout=C.BATCH_MAJOR) for n, x in
zip(self.label_names, label)]
# TODO: num pad examples is not set here if fillup strategy would be padding
return mx.io.DataBatch(data, label,
pad=0, index=None, bucket_key=self.buckets[i],
provide_data=provide_data, provide_label=provide_label)
def save_state(self, fname: str):
"""
Saves the current state of iterator to a file, so that iteration can be
continued. Note that the data is not saved, i.e. the iterator must be
initialized with the same parameters as in the first call.
:param fname: File name to save the information to.
"""
with open(fname, "wb") as fp:
pickle.dump(self.batch_indices, fp)
pickle.dump(self.curr_batch_index, fp)
np.save(fp, [a.asnumpy() for a in self.inverse_data_permutations])
np.save(fp, [a.asnumpy() for a in self.data_permutations])
def load_state(self, fname: str):
"""
Loads the state of the iterator from a file.
:param fname: File name to load the information from.
"""
# restore order
self.data = self.data.permute(self.inverse_data_permutations)
with open(fname, "rb") as fp:
self.batch_indices = pickle.load(fp)
self.curr_batch_index = pickle.load(fp)
inverse_data_permutations = np.load(fp)
data_permutations = np.load(fp)
        # Because of how checkpointing is done (pre-fetching the next batch in
        # each iteration), curr_batch_index should always be >= 1
assert self.curr_batch_index >= 1
# Right after loading the iterator state, next() should be called
self.curr_batch_index -= 1
# load previous permutations
self.inverse_data_permutations = []
self.data_permutations = []
for bucket in range(len(self.data)):
inverse_permutation = mx.nd.array(inverse_data_permutations[bucket])
self.inverse_data_permutations.append(inverse_permutation)
permutation = mx.nd.array(data_permutations[bucket])
self.data_permutations.append(permutation)
self.data = self.data.permute(self.data_permutations)
| [
"int",
"int",
"int",
"int",
"List[int]",
"Tuple[int, int]",
"int",
"float",
"List[Tuple[int, int]]",
"int",
"bool",
"int",
"List[Optional[float]]",
"Iterable[List[Any]]",
"Iterable[List[Any]]",
"int",
"int",
"str",
"str",
"Dict[str, int]",
"Dict[str, int]",
"int",
"int",
"List[Tuple[int, int]]",
"Dict[str, int]",
"Dict[str, int]",
"float",
"float",
"List[int]",
"List[int]",
"Optional[int]",
"str",
"str",
"Dict[str, int]",
"Dict[str, int]",
"int",
"List[Tuple[int, int]]",
"float",
"float",
"str",
"List[Tuple[int, int]]",
"int",
"int",
"Iterable[List[Any]]",
"Iterable[List[Any]]",
"List[int]",
"int",
"int",
"int",
"str",
"str",
"Dict[str, int]",
"Dict[str, int]",
"Optional[str]",
"Optional[str]",
"bool",
"int",
"int",
"bool",
"int",
"int",
"int",
"str",
"Iterable[List[int]]",
"Iterable[List[int]]",
"List[Tuple[int, int]]",
"float",
"float",
"vocab.Vocab",
"vocab.Vocab",
"RawParallelDatasetLoader",
"str",
"str",
"List[Tuple[int, int]]",
"List[BucketBatchSize]",
"vocab.Vocab",
"vocab.Vocab",
"int",
"int",
"int",
"str",
"str",
"str",
"str",
"bool",
"int",
"bool",
"int",
"str",
"str",
"str",
"str",
"str",
"vocab.Vocab",
"vocab.Vocab",
"Optional[str]",
"Optional[str]",
"bool",
"int",
"bool",
"int",
"str",
"int",
"int",
"bool",
"int",
"int",
"float",
"float",
"int",
"List[Tuple[int, int]]",
"List[int]",
"List[Optional[float]]",
"DataStatistics",
"List[BucketBatchSize]",
"str",
"str",
"Optional[str]",
"Optional[str]",
"bool",
"int",
"DataStatistics",
"int",
"int",
"str",
"Iterable[str]",
"Dict[str, int]",
"Iterable[str]",
"Iterable[int]",
"str",
"Optional[Dict[str, int]]",
"List[Tuple[int, int]]",
"List[Tuple[int, int]]",
"int",
"int",
"List[mx.nd.array]",
"List[mx.nd.array]",
"List[mx.nd.array]",
"str",
"str",
"List[BucketBatchSize]",
"str",
"List[mx.nd.NDArray]",
"List[int]",
"ParallelDataSet",
"List[BucketBatchSize]",
"str",
"str",
"List[str]",
"str",
"str",
"str",
"ParallelDataSet",
"str",
"str"
] | [
1192,
1877,
1930,
3930,
3944,
4578,
4607,
4637,
4823,
4888,
4939,
4994,
5054,
8313,
8384,
8457,
8514,
9577,
9619,
9667,
9726,
9791,
9845,
11236,
11290,
11337,
11389,
11431,
12259,
12300,
12345,
14500,
14534,
14568,
14613,
14656,
14685,
14742,
14782,
14819,
18302,
18350,
18380,
18602,
18654,
18712,
21648,
21672,
21693,
22054,
22067,
22103,
22133,
22185,
22219,
22265,
22308,
22350,
22383,
22420,
22461,
22499,
22536,
26302,
26365,
26419,
26485,
26534,
26579,
26630,
27157,
27231,
27284,
27327,
27399,
27465,
27521,
27583,
27637,
27683,
27726,
29687,
29739,
29763,
29810,
29856,
29905,
29958,
30000,
34348,
34361,
34413,
34437,
34484,
34511,
34571,
34605,
34662,
34708,
34757,
34810,
34852,
34905,
34958,
35002,
35050,
39939,
39980,
40022,
40291,
40721,
40783,
40839,
42509,
42545,
43521,
43551,
43587,
43633,
43679,
43714,
43753,
43806,
43848,
44285,
44790,
44812,
45148,
45393,
46129,
46158,
48022,
48303,
48365,
48409,
49247,
49291,
49334,
49795,
49979,
50394,
50442,
52422,
53248,
54444,
54503,
57941,
58013,
58364,
58493,
61239,
61467,
61940,
64887,
65523
] | [
1195,
1880,
1933,
3933,
3953,
4593,
4610,
4642,
4844,
4891,
4943,
4997,
5075,
8332,
8403,
8460,
8517,
9580,
9622,
9681,
9740,
9794,
9848,
11257,
11304,
11351,
11394,
11436,
12268,
12309,
12358,
14503,
14537,
14582,
14627,
14659,
14706,
14747,
14787,
14822,
18323,
18353,
18383,
18621,
18673,
18721,
21651,
21675,
21696,
22057,
22070,
22117,
22147,
22198,
22232,
22269,
22311,
22353,
22387,
22423,
22464,
22502,
22539,
26321,
26384,
26440,
26490,
26539,
26590,
26641,
27181,
27234,
27287,
27348,
27420,
27476,
27532,
27586,
27640,
27686,
27729,
29690,
29742,
29766,
29814,
29859,
29909,
29961,
30003,
34351,
34364,
34416,
34440,
34495,
34522,
34584,
34618,
34666,
34711,
34761,
34813,
34855,
34908,
34961,
35006,
35053,
39942,
39985,
40027,
40294,
40742,
40792,
40860,
42523,
42566,
43524,
43554,
43600,
43646,
43683,
43717,
43767,
43809,
43851,
44288,
44803,
44826,
45161,
45406,
46132,
46182,
48043,
48324,
48368,
48412,
49264,
49308,
49351,
49798,
49982,
50415,
50445,
52441,
53257,
54459,
54524,
57944,
58016,
58373,
58496,
61242,
61470,
61955,
64890,
65526
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/decoder.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Decoders for sequence-to-sequence models.
"""
import logging
from abc import ABC, abstractmethod
from typing import Callable, Dict, List, NamedTuple, Tuple, Union
from typing import Optional
import mxnet as mx
from sockeye.config import Config
from . import constants as C
from . import convolution
from . import encoder
from . import layers
from . import rnn
from . import rnn_attention
from . import transformer
from . import utils
logger = logging.getLogger(__name__)
DecoderConfig = Union['RecurrentDecoderConfig', transformer.TransformerConfig, 'ConvolutionalDecoderConfig']
def get_decoder(config: DecoderConfig) -> 'Decoder':
if isinstance(config, RecurrentDecoderConfig):
return RecurrentDecoder(config=config, prefix=C.RNN_DECODER_PREFIX)
elif isinstance(config, ConvolutionalDecoderConfig):
return ConvolutionalDecoder(config=config, prefix=C.CNN_DECODER_PREFIX)
elif isinstance(config, transformer.TransformerConfig):
return TransformerDecoder(config=config, prefix=C.TRANSFORMER_DECODER_PREFIX)
else:
raise ValueError("Unsupported decoder configuration")
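# Dispatch in get_decoder() is purely on the configuration type (sketch;
# config construction elided):
#     get_decoder(transformer.TransformerConfig(...))  # -> TransformerDecoder
#     get_decoder(RecurrentDecoderConfig(...))         # -> RecurrentDecoder
#     get_decoder(ConvolutionalDecoderConfig(...))     # -> ConvolutionalDecoder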
class Decoder(ABC):
"""
Generic decoder interface.
A decoder needs to implement code to decode a target sequence known in advance (decode_sequence),
and code to decode a single word given its decoder state (decode_step).
The latter is typically used for inference graphs in beam search.
For the inference module to be able to keep track of decoder's states
a decoder provides methods to return initial states (init_states), state variables and their shapes.
"""
@abstractmethod
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (source_encoded_max_length, batch_size, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
pass
@abstractmethod
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
pass
@abstractmethod
def reset(self):
"""
Reset decoder method. Used for inference.
"""
pass
@abstractmethod
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
pass
@abstractmethod
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
pass
@abstractmethod
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
:param target_max_length: Current target sequence lengths.
:return: List of symbolic variables.
"""
pass
@abstractmethod
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
pass
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the decoder if such a restriction exists.
"""
return None
class TransformerDecoder(Decoder):
"""
Transformer decoder as in Vaswani et al, 2017: Attention is all you need.
    In training, scores for each position of the known target sequence are computed in parallel,
    which yields most of the training speedup.
At inference time, the decoder block is evaluated again and again over a maximum length input sequence that is
initially filled with zeros and grows during beam search with predicted tokens. Appropriate masking at every
time-step ensures correct self-attention scores and is updated with every step.
:param config: Transformer configuration.
:param prefix: Name prefix for symbols of this decoder.
"""
def __init__(self,
config: transformer.TransformerConfig,
prefix: str = C.TRANSFORMER_DECODER_PREFIX) -> None:
self.config = config
self.prefix = prefix
self.layers = [transformer.TransformerDecoderBlock(
config, prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
self.final_process = transformer.TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sfinal_process_" % prefix)
self.pos_embedding = encoder.get_positional_embedding(config.positional_embedding_type,
config.model_size,
max_seq_len=config.max_seq_len_target,
fixed_pos_embed_scale_up_input=True,
fixed_pos_embed_scale_down_positions=False,
prefix=C.TARGET_POSITIONAL_EMBEDDING_PREFIX)
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (source_encoded_max_length, batch_size, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
# (batch_size, source_max_length, num_source_embed)
source_encoded = mx.sym.swapaxes(source_encoded, dim1=0, dim2=1)
# (batch_size * heads, max_length)
source_bias = transformer.get_variable_length_bias(lengths=source_encoded_lengths,
max_length=source_encoded_max_length,
num_heads=self.config.attention_heads,
fold_heads=True,
name="%ssource_bias" % self.prefix)
# (batch_size * heads, 1, max_length)
source_bias = mx.sym.expand_dims(source_bias, axis=1)
# (1, target_max_length, target_max_length)
target_bias = transformer.get_autoregressive_bias(target_embed_max_length, name="%starget_bias" % self.prefix)
# target: (batch_size, target_max_length, model_size)
target, _, target_max_length = self.pos_embedding.encode(target_embed, None, target_embed_max_length)
if self.config.dropout_prepost > 0.0:
target = mx.sym.Dropout(data=target, p=self.config.dropout_prepost)
for layer in self.layers:
target = layer(target=target,
target_bias=target_bias,
source=source_encoded,
source_bias=source_bias)
target = self.final_process(data=target, prev=None)
return target
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
# for step > 1, states contains source_encoded, source_encoded_lengths, and a cache tensor
source_encoded, source_encoded_lengths = states[:2] # pylint: disable=unbalanced-tuple-unpacking
# symbolic indices of the previous word
indices = mx.sym.arange(start=step - 1, stop=step, step=1, name='indices')
# (batch_size, num_embed)
target_embed_prev = self.pos_embedding.encode_positions(indices, target_embed_prev)
# (batch_size, 1, num_embed)
target = mx.sym.expand_dims(target_embed_prev, axis=1)
# (batch_size * heads, max_length)
source_bias = transformer.get_variable_length_bias(lengths=source_encoded_lengths,
max_length=source_encoded_max_length,
num_heads=self.config.attention_heads,
fold_heads=True,
name="%ssource_bias" % self.prefix)
# (batch_size * heads, 1, max_length)
source_bias = mx.sym.expand_dims(source_bias, axis=1)
# auto-regressive bias for last position in sequence
# (1, target_max_length, target_max_length)
target_bias = transformer.get_autoregressive_bias(step, name="%sbias" % self.prefix)
target_bias = mx.sym.slice_axis(target_bias, axis=1, begin=-1, end=step)
# retrieve precomputed self-attention keys & values for each layer from states.
layer_caches = self._get_layer_caches_from_states(list(states))
cache = [] # type: List[mx.sym.Symbol]
for layer, layer_cache in zip(self.layers, layer_caches):
target = layer(target=target,
target_bias=target_bias,
source=source_encoded,
source_bias=source_bias,
cache=layer_cache)
# store updated keys and values in the cache.
# (layer.__call__() has the side-effect of updating contents of layer_cache)
cache += [layer_cache['k'], layer_cache['v']]
cache = mx.sym.concat(*cache, dim=1, name='new_cache')
# (batch_size, 1, model_size)
target = self.final_process(data=target, prev=None)
# (batch_size, model_size)
target = mx.sym.reshape(target, shape=(-3, -1))
# TODO(fhieber): no attention probs for now
attention_probs = mx.sym.sum(mx.sym.zeros_like(source_encoded), axis=2, keepdims=False)
new_states = [source_encoded, source_encoded_lengths, cache]
return target, attention_probs, new_states
def _get_layer_caches_from_states(self, states: List[mx.sym.Symbol]) -> List[Dict[str, Optional[mx.sym.Symbol]]]:
"""
For decoder time steps > 1 there will be a cache tensor available that contains
previously computed key & value tensors for each transformer layer.
The cache tensor passed in is concatenated along the time-axis for efficiency.
:param states: List of states passed to decode_step().
:return: List of layer cache dictionaries.
"""
cache = None
if len(states) == 3:
cache = states[2]
# len(self.layers) * 2 cache items
cache = mx.sym.split(cache, num_outputs=len(self.layers) * 2, axis=1, squeeze_axis=False)
if not cache: # first decoder step
return [{'k': None, 'v': None} for _ in range(len(self.layers))]
else:
layer_caches = [] # type: List[Dict[str, Optional[mx.sym.Symbol]]]
for i in range(len(self.layers)):
layer_caches.append({'k': cache[2 * i + 0], 'v': cache[2 * i + 1]})
return layer_caches
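    # Cache layout (sketch): at decode step t > 1 with L layers, the single
    # cache tensor has shape (batch, (t - 1) * 2 * L, model_size) and splits
    # along axis 1 into [k_0, v_0, ..., k_{L-1}, v_{L-1}], i.e. one
    # (t - 1)-step key tensor and value tensor per layer.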
def reset(self):
pass
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
return self.config.model_size
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
return [source_encoded, source_encoded_lengths]
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
:param target_max_length: Current target sequence length.
:return: List of symbolic variables.
"""
variables = [mx.sym.Variable(C.SOURCE_ENCODED_NAME),
mx.sym.Variable(C.SOURCE_LENGTH_NAME)]
if target_max_length > 1: # no cache for initial decoder step
variables.append(mx.sym.Variable('cache'))
return variables
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
shapes = [mx.io.DataDesc(C.SOURCE_ENCODED_NAME,
(batch_size, source_encoded_max_length, source_encoded_depth),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_LENGTH_NAME, (batch_size,), layout="N")]
if target_max_length > 1: # no cache for initial decoder step
# the cache tensor passed in and out of the decoder step module contains
# all cache tensors concatenated along the time axis
            # (as all inputs to the module need to be of the same batch size).
shapes.append(mx.io.DataDesc(name='cache',
shape=(batch_size,
(target_max_length - 1) * len(self.layers) * 2,
self.config.model_size),
layout=C.BATCH_MAJOR))
return shapes
def get_max_seq_len(self) -> Optional[int]:
# The positional embeddings potentially pose a limit on the maximum length at inference time.
return self.pos_embedding.get_max_seq_len()
RecurrentDecoderState = NamedTuple('RecurrentDecoderState', [
('hidden', mx.sym.Symbol),
('layer_states', List[mx.sym.Symbol]),
])
"""
RecurrentDecoder state.
:param hidden: Hidden state after attention mechanism. Shape: (batch_size, num_hidden).
:param layer_states: Hidden states for RNN layers of RecurrentDecoder. Shape: List[(batch_size, rnn_num_hidden)]
"""
class RecurrentDecoderConfig(Config):
"""
Recurrent decoder configuration.
:param max_seq_len_source: Maximum source sequence length
:param rnn_config: RNN configuration.
:param attention_config: Attention configuration.
:param hidden_dropout: Dropout probability on next decoder hidden state.
:param state_init: Type of RNN decoder state initialization: zero, last, average.
:param context_gating: Whether to use context gating.
:param layer_normalization: Apply layer normalization.
:param attention_in_upper_layers: Pass the attention value to all layers in the decoder.
"""
def __init__(self,
max_seq_len_source: int,
rnn_config: rnn.RNNConfig,
attention_config: rnn_attention.AttentionConfig,
hidden_dropout: float = .0, # TODO: move this dropout functionality to OutputLayer
state_init: str = C.RNN_DEC_INIT_LAST,
context_gating: bool = False,
layer_normalization: bool = False,
attention_in_upper_layers: bool = False) -> None:
super().__init__()
self.max_seq_len_source = max_seq_len_source
self.rnn_config = rnn_config
self.attention_config = attention_config
self.hidden_dropout = hidden_dropout
self.state_init = state_init
self.context_gating = context_gating
self.layer_normalization = layer_normalization
self.attention_in_upper_layers = attention_in_upper_layers
class RecurrentDecoder(Decoder):
"""
RNN Decoder with attention.
The architecture is based on Luong et al, 2015: Effective Approaches to Attention-based Neural Machine Translation.
:param config: Configuration for recurrent decoder.
:param prefix: Decoder symbol prefix.
"""
def __init__(self,
config: RecurrentDecoderConfig,
prefix: str = C.RNN_DECODER_PREFIX) -> None:
# TODO: implement variant without input feeding
self.config = config
self.rnn_config = config.rnn_config
self.attention = rnn_attention.get_attention(config.attention_config, config.max_seq_len_source)
self.prefix = prefix
self.num_hidden = self.rnn_config.num_hidden
if self.config.context_gating:
utils.check_condition(not self.config.attention_in_upper_layers,
"Context gating is not supported with attention in upper layers.")
self.gate_w = mx.sym.Variable("%sgate_weight" % prefix)
self.gate_b = mx.sym.Variable("%sgate_bias" % prefix)
self.mapped_rnn_output_w = mx.sym.Variable("%smapped_rnn_output_weight" % prefix)
self.mapped_rnn_output_b = mx.sym.Variable("%smapped_rnn_output_bias" % prefix)
self.mapped_context_w = mx.sym.Variable("%smapped_context_weight" % prefix)
self.mapped_context_b = mx.sym.Variable("%smapped_context_bias" % prefix)
if self.rnn_config.residual:
utils.check_condition(self.config.rnn_config.first_residual_layer >= 2,
"Residual connections on the first decoder layer are not supported as input and "
"output dimensions do not match.")
# Stacked RNN
if self.rnn_config.num_layers == 1 or not self.config.attention_in_upper_layers:
self.rnn_pre_attention = rnn.get_stacked_rnn(self.rnn_config, self.prefix, parallel_inputs=False)
self.rnn_post_attention = None
else:
self.rnn_pre_attention = rnn.get_stacked_rnn(self.rnn_config, self.prefix, parallel_inputs=False,
layers=[0])
self.rnn_post_attention = rnn.get_stacked_rnn(self.rnn_config, self.prefix, parallel_inputs=True,
layers=range(1, self.rnn_config.num_layers))
self.rnn_pre_attention_n_states = len(self.rnn_pre_attention.state_shape)
if self.config.state_init != C.RNN_DEC_INIT_ZERO:
self._create_state_init_parameters()
# Hidden state parameters
self.hidden_w = mx.sym.Variable("%shidden_weight" % prefix)
self.hidden_b = mx.sym.Variable("%shidden_bias" % prefix)
self.hidden_norm = layers.LayerNormalization(self.num_hidden,
prefix="%shidden_norm" % prefix) \
if self.config.layer_normalization else None
def _create_state_init_parameters(self):
"""
Creates parameters for encoder last state transformation into decoder layer initial states.
"""
self.init_ws, self.init_bs, self.init_norms = [], [], []
# shallow copy of the state shapes:
state_shapes = list(self.rnn_pre_attention.state_shape)
if self.rnn_post_attention:
state_shapes += self.rnn_post_attention.state_shape
for state_idx, (_, init_num_hidden) in enumerate(state_shapes):
self.init_ws.append(mx.sym.Variable("%senc2decinit_%d_weight" % (self.prefix, state_idx)))
self.init_bs.append(mx.sym.Variable("%senc2decinit_%d_bias" % (self.prefix, state_idx)))
if self.config.layer_normalization:
self.init_norms.append(layers.LayerNormalization(num_hidden=init_num_hidden,
prefix="%senc2decinit_%d_norm" % (
self.prefix, state_idx)))
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (source_encoded_max_length, batch_size, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Dimension of the embedded target sequence.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
# target_embed: target_seq_len * (batch_size, num_target_embed)
target_embed = mx.sym.split(data=target_embed, num_outputs=target_embed_max_length, axis=1, squeeze_axis=True)
# get recurrent attention function conditioned on source
source_encoded_batch_major = mx.sym.swapaxes(source_encoded, dim1=0, dim2=1, name='source_encoded_batch_major')
attention_func = self.attention.on(source_encoded_batch_major, source_encoded_lengths,
source_encoded_max_length)
attention_state = self.attention.get_initial_state(source_encoded_lengths, source_encoded_max_length)
# initialize decoder states
# hidden: (batch_size, rnn_num_hidden)
        # layer_states: List[(batch_size, state_num_hidden)]
state = self.get_initial_state(source_encoded, source_encoded_lengths)
# hidden_all: target_seq_len * (batch_size, 1, rnn_num_hidden)
hidden_all = []
# TODO: possible alternative: feed back the context vector instead of the hidden (see lamtram)
self.reset()
for seq_idx in range(target_embed_max_length):
# hidden: (batch_size, rnn_num_hidden)
state, attention_state = self._step(target_embed[seq_idx],
state,
attention_func,
attention_state,
seq_idx)
# hidden_expanded: (batch_size, 1, rnn_num_hidden)
hidden_all.append(mx.sym.expand_dims(data=state.hidden, axis=1))
# concatenate along time axis
# hidden_concat: (batch_size, target_seq_len, rnn_num_hidden)
hidden_concat = mx.sym.concat(*hidden_all, dim=1, name="%shidden_concat" % self.prefix)
return hidden_concat
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
source_encoded, prev_dynamic_source, source_encoded_length, prev_hidden, *layer_states = states
attention_func = self.attention.on(source_encoded, source_encoded_length, source_encoded_max_length)
prev_state = RecurrentDecoderState(prev_hidden, list(layer_states))
prev_attention_state = rnn_attention.AttentionState(context=None, probs=None,
dynamic_source=prev_dynamic_source)
# state.hidden: (batch_size, rnn_num_hidden)
# attention_state.dynamic_source: (batch_size, source_seq_len, coverage_num_hidden)
# attention_state.probs: (batch_size, source_seq_len)
state, attention_state = self._step(target_embed_prev,
prev_state,
attention_func,
prev_attention_state)
new_states = [source_encoded,
attention_state.dynamic_source,
source_encoded_length,
state.hidden] + state.layer_states
return state.hidden, attention_state.probs, new_states
def reset(self):
"""
Calls reset on the RNN cell.
"""
self.rnn_pre_attention.reset()
# Shallow copy of cells
cells_to_reset = list(self.rnn_pre_attention._cells)
if self.rnn_post_attention:
self.rnn_post_attention.reset()
cells_to_reset += self.rnn_post_attention._cells
for cell in cells_to_reset:
# TODO remove this once mxnet.rnn.ModifierCell.reset() invokes reset() of base_cell
if isinstance(cell, mx.rnn.ModifierCell):
cell.base_cell.reset()
cell.reset()
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
return self.num_hidden
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
source_encoded_time_major = mx.sym.swapaxes(source_encoded, dim1=0, dim2=1)
hidden, layer_states = self.get_initial_state(source_encoded_time_major, source_encoded_lengths)
context, attention_probs, dynamic_source = self.attention.get_initial_state(source_encoded_lengths,
source_encoded_max_length)
states = [source_encoded, dynamic_source, source_encoded_lengths, hidden] + layer_states
return states
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
:param target_max_length: Current target sequence lengths.
:return: List of symbolic variables.
"""
return [mx.sym.Variable(C.SOURCE_ENCODED_NAME),
mx.sym.Variable(C.SOURCE_DYNAMIC_PREVIOUS_NAME),
mx.sym.Variable(C.SOURCE_LENGTH_NAME),
mx.sym.Variable(C.HIDDEN_PREVIOUS_NAME)] + \
[mx.sym.Variable("%senc2decinit_%d" % (self.prefix, i)) for i in
range(len(sum([rnn.state_info for rnn in self.get_rnn_cells()], [])))]
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
return [mx.io.DataDesc(C.SOURCE_ENCODED_NAME,
(batch_size, source_encoded_max_length, source_encoded_depth),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_DYNAMIC_PREVIOUS_NAME,
(batch_size, source_encoded_max_length, self.attention.dynamic_source_num_hidden),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_LENGTH_NAME,
(batch_size,),
layout="N"),
mx.io.DataDesc(C.HIDDEN_PREVIOUS_NAME,
(batch_size, self.num_hidden),
layout="NC")] + \
[mx.io.DataDesc("%senc2decinit_%d" % (self.prefix, i),
(batch_size, num_hidden),
layout=C.BATCH_MAJOR) for i, (_, num_hidden) in enumerate(
sum([rnn.state_shape for rnn in self.get_rnn_cells()], [])
)]
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this decoder.
"""
cells = [self.rnn_pre_attention]
if self.rnn_post_attention:
cells.append(self.rnn_post_attention)
return cells
def get_initial_state(self,
source_encoded: mx.sym.Symbol,
source_encoded_length: mx.sym.Symbol) -> RecurrentDecoderState:
"""
        Computes the initial states of the decoder: the hidden state and one state for each RNN layer.
        Optionally, the RNN layer states are computed by a single non-linear fully-connected layer
        that takes the last (or average) encoder state as input.
:param source_encoded: Concatenated encoder states. Shape: (source_seq_len, batch_size, encoder_num_hidden).
:param source_encoded_length: Lengths of source sequences. Shape: (batch_size,).
:return: Decoder state.
"""
# we derive the shape of hidden and layer_states from some input to enable
# shape inference for the batch dimension during inference.
# (batch_size, 1)
zeros = mx.sym.expand_dims(mx.sym.zeros_like(source_encoded_length), axis=1)
# last encoder state: (batch, num_hidden)
source_encoded_last = mx.sym.SequenceLast(data=source_encoded,
sequence_length=source_encoded_length,
use_sequence_length=True) \
if self.config.state_init == C.RNN_DEC_INIT_LAST else None
source_masked = mx.sym.SequenceMask(data=source_encoded,
sequence_length=source_encoded_length,
use_sequence_length=True,
value=0.) if self.config.state_init == C.RNN_DEC_INIT_AVG else None
# decoder hidden state
hidden = mx.sym.tile(data=zeros, reps=(1, self.num_hidden))
# initial states for each layer
layer_states = []
for state_idx, (_, init_num_hidden) in enumerate(sum([rnn.state_shape for rnn in self.get_rnn_cells()], [])):
if self.config.state_init == C.RNN_DEC_INIT_ZERO:
init = mx.sym.tile(data=zeros, reps=(1, init_num_hidden))
else:
if self.config.state_init == C.RNN_DEC_INIT_LAST:
init = source_encoded_last
elif self.config.state_init == C.RNN_DEC_INIT_AVG:
# (batch_size, encoder_num_hidden)
init = mx.sym.broadcast_div(mx.sym.sum(source_masked, axis=0, keepdims=False),
mx.sym.expand_dims(source_encoded_length, axis=1))
else:
raise ValueError("Unknown decoder state init type '%s'" % self.config.state_init)
init = mx.sym.FullyConnected(data=init,
num_hidden=init_num_hidden,
weight=self.init_ws[state_idx],
bias=self.init_bs[state_idx],
name="%senc2decinit_%d" % (self.prefix, state_idx))
if self.config.layer_normalization:
init = self.init_norms[state_idx].normalize(init)
init = mx.sym.Activation(data=init, act_type="tanh",
name="%senc2dec_inittanh_%d" % (self.prefix, state_idx))
layer_states.append(init)
return RecurrentDecoderState(hidden, layer_states)
def _step(self, word_vec_prev: mx.sym.Symbol,
state: RecurrentDecoderState,
attention_func: Callable,
attention_state: rnn_attention.AttentionState,
seq_idx: int = 0) -> Tuple[RecurrentDecoderState, rnn_attention.AttentionState]:
"""
        Performs a single time step of the RNN, given the previous word vector, previous hidden state, attention function,
and RNN layer states.
:param word_vec_prev: Embedding of previous target word. Shape: (batch_size, num_target_embed).
:param state: Decoder state consisting of hidden and layer states.
:param attention_func: Attention function to produce context vector.
:param attention_state: Previous attention state.
:param seq_idx: Decoder time step.
:return: (new decoder state, updated attention state).
"""
# (1) RNN step
# concat previous word embedding and previous hidden state
rnn_input = mx.sym.concat(word_vec_prev, state.hidden, dim=1,
name="%sconcat_target_context_t%d" % (self.prefix, seq_idx))
# rnn_pre_attention_output: (batch_size, rnn_num_hidden)
# next_layer_states: num_layers * [batch_size, rnn_num_hidden]
rnn_pre_attention_output, rnn_pre_attention_layer_states = \
self.rnn_pre_attention(rnn_input, state.layer_states[:self.rnn_pre_attention_n_states])
# (2) Attention step
attention_input = self.attention.make_input(seq_idx, word_vec_prev, rnn_pre_attention_output)
attention_state = attention_func(attention_input, attention_state)
# (3) Attention handling (and possibly context gating)
if self.rnn_post_attention:
upper_rnn_output, upper_rnn_layer_states = \
self.rnn_post_attention(rnn_pre_attention_output, attention_state.context,
state.layer_states[self.rnn_pre_attention_n_states:])
hidden_concat = mx.sym.concat(upper_rnn_output, attention_state.context,
dim=1, name='%shidden_concat_t%d' % (self.prefix, seq_idx))
if self.config.hidden_dropout > 0:
hidden_concat = mx.sym.Dropout(data=hidden_concat, p=self.config.hidden_dropout,
name='%shidden_concat_dropout_t%d' % (self.prefix, seq_idx))
hidden = self._hidden_mlp(hidden_concat, seq_idx)
# TODO: add context gating?
else:
upper_rnn_layer_states = []
hidden_concat = mx.sym.concat(rnn_pre_attention_output, attention_state.context,
dim=1, name='%shidden_concat_t%d' % (self.prefix, seq_idx))
if self.config.hidden_dropout > 0:
hidden_concat = mx.sym.Dropout(data=hidden_concat, p=self.config.hidden_dropout,
name='%shidden_concat_dropout_t%d' % (self.prefix, seq_idx))
if self.config.context_gating:
hidden = self._context_gate(hidden_concat, rnn_pre_attention_output, attention_state, seq_idx)
else:
hidden = self._hidden_mlp(hidden_concat, seq_idx)
return RecurrentDecoderState(hidden, rnn_pre_attention_layer_states + upper_rnn_layer_states), attention_state
def _hidden_mlp(self, hidden_concat: mx.sym.Symbol, seq_idx: int) -> mx.sym.Symbol:
hidden = mx.sym.FullyConnected(data=hidden_concat,
num_hidden=self.num_hidden, # to state size of RNN
weight=self.hidden_w,
bias=self.hidden_b,
name='%shidden_fc_t%d' % (self.prefix, seq_idx))
if self.config.layer_normalization:
hidden = self.hidden_norm.normalize(hidden)
# hidden: (batch_size, rnn_num_hidden)
hidden = mx.sym.Activation(data=hidden, act_type="tanh",
name="%snext_hidden_t%d" % (self.prefix, seq_idx))
return hidden
def _context_gate(self,
hidden_concat: mx.sym.Symbol,
rnn_output: mx.sym.Symbol,
attention_state: rnn_attention.AttentionState,
seq_idx: int) -> mx.sym.Symbol:
gate = mx.sym.FullyConnected(data=hidden_concat,
num_hidden=self.num_hidden,
weight=self.gate_w,
bias=self.gate_b,
name='%shidden_gate_t%d' % (self.prefix, seq_idx))
gate = mx.sym.Activation(data=gate, act_type="sigmoid",
name='%shidden_gate_act_t%d' % (self.prefix, seq_idx))
mapped_rnn_output = mx.sym.FullyConnected(data=rnn_output,
num_hidden=self.num_hidden,
weight=self.mapped_rnn_output_w,
bias=self.mapped_rnn_output_b,
name="%smapped_rnn_output_fc_t%d" % (self.prefix, seq_idx))
mapped_context = mx.sym.FullyConnected(data=attention_state.context,
num_hidden=self.num_hidden,
weight=self.mapped_context_w,
bias=self.mapped_context_b,
name="%smapped_context_fc_t%d" % (self.prefix, seq_idx))
hidden = gate * mapped_rnn_output + (1 - gate) * mapped_context
if self.config.layer_normalization:
hidden = self.hidden_norm.normalize(hidden)
# hidden: (batch_size, rnn_num_hidden)
hidden = mx.sym.Activation(data=hidden, act_type="tanh",
name="%snext_hidden_t%d" % (self.prefix, seq_idx))
return hidden
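# Editorial sketch (not part of the original model code): a minimal NumPy
# rendering of the context gate computed in RecurrentDecoder._context_gate
# above. The weight/bias arguments are hypothetical stand-ins for the
# mx.sym.Variable parameters; mx.sym.FullyConnected computes x @ w.T + b.
def _context_gate_numpy_sketch(hidden_concat, rnn_output, context,
                               w_gate, b_gate, w_rnn, b_rnn, w_ctx, b_ctx):
    import numpy as np
    # sigmoid gate over the concatenated RNN output and attention context
    gate = 1.0 / (1.0 + np.exp(-(hidden_concat @ w_gate.T + b_gate)))
    # project both inputs to the decoder hidden size
    mapped_rnn = rnn_output @ w_rnn.T + b_rnn
    mapped_ctx = context @ w_ctx.T + b_ctx
    # interpolate and squash: gate * mapped_rnn + (1 - gate) * mapped_ctx
    return np.tanh(gate * mapped_rnn + (1.0 - gate) * mapped_ctx)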
class ConvolutionalDecoderConfig(Config):
"""
Convolutional decoder configuration.
:param cnn_config: Configuration for the convolution block.
:param max_seq_len_target: Maximum target sequence length.
:param num_embed: Target word embedding size.
:param encoder_num_hidden: Number of hidden units of the encoder.
:param num_layers: The number of convolutional layers.
:param positional_embedding_type: The type of positional embedding.
    :param project_qkv: Whether to project query, key, and value for the attention layers.
    :param hidden_dropout: Dropout probability on next decoder hidden state.
"""
def __init__(self,
cnn_config: convolution.ConvolutionConfig,
max_seq_len_target: int,
num_embed: int,
encoder_num_hidden: int,
num_layers: int,
positional_embedding_type: str,
project_qkv: bool = False,
hidden_dropout: float = .0) -> None:
super().__init__()
self.cnn_config = cnn_config
self.max_seq_len_target = max_seq_len_target
self.num_embed = num_embed
self.encoder_num_hidden = encoder_num_hidden
self.num_layers = num_layers
self.positional_embedding_type = positional_embedding_type
self.project_qkv = project_qkv
self.hidden_dropout = hidden_dropout
class ConvolutionalDecoder(Decoder):
"""
Convolutional decoder similar to Gehring et al. 2017.
The decoder consists of an embedding layer, positional embeddings, and layers
of convolutional blocks with residual connections.
Notable differences to Gehring et al. 2017:
* Here the context vectors are created from the last encoder state (instead of using the last encoder state as the
key and the sum of the encoder state and the source embedding as the value)
* The encoder gradients are not scaled down by 1/(2 * num_attention_layers).
* Residual connections are not scaled down by math.sqrt(0.5).
* Attention is computed in the hidden dimension instead of the embedding dimension (removes need for training
several projection matrices)
:param config: Configuration for convolutional decoder.
:param prefix: Name prefix for symbols of this decoder.
"""
def __init__(self,
config: ConvolutionalDecoderConfig,
prefix: str = C.DECODER_PREFIX) -> None:
super().__init__()
self.config = config
self.prefix = prefix
# TODO: potentially project the encoder hidden size to the decoder hidden size.
utils.check_condition(config.encoder_num_hidden == config.cnn_config.num_hidden,
"We need to have the same number of hidden units in the decoder "
"as we have in the encoder")
self.pos_embedding = encoder.get_positional_embedding(config.positional_embedding_type,
num_embed=config.num_embed,
max_seq_len=config.max_seq_len_target,
fixed_pos_embed_scale_up_input=False,
fixed_pos_embed_scale_down_positions=True,
prefix=C.TARGET_POSITIONAL_EMBEDDING_PREFIX)
self.layers = [convolution.ConvolutionBlock(
config.cnn_config,
pad_type='left',
prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
if self.config.project_qkv:
self.attention_layers = [layers.ProjectedDotAttention("%s%d_" % (prefix, i),
self.config.cnn_config.num_hidden)
for i in range(config.num_layers)]
else:
self.attention_layers = [layers.PlainDotAttention() for _ in range(config.num_layers)] # type: ignore
self.i2h_weight = mx.sym.Variable('%si2h_weight' % prefix)
def decode_sequence(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decodes a sequence of embedded target words and returns sequence of last decoder
representations for each time step.
:param source_encoded: Encoded source: (source_encoded_max_length, batch_size, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
        :param target_embed_max_length: Size of embedded target sequence time dimension.
:return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
"""
# (batch_size, source_encoded_max_length, encoder_depth).
source_encoded_batch_major = mx.sym.swapaxes(source_encoded, dim1=0, dim2=1, name='source_encoded_batch_major')
# (batch_size, target_seq_len, num_hidden)
target_hidden = self._decode(source_encoded=source_encoded_batch_major,
source_encoded_lengths=source_encoded_lengths,
target_embed=target_embed,
target_embed_lengths=target_embed_lengths,
target_embed_max_length=target_embed_max_length)
return target_hidden
def _decode(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
target_embed: mx.sym.Symbol,
target_embed_lengths: mx.sym.Symbol,
target_embed_max_length: int) -> mx.sym.Symbol:
"""
Decode the target and produce a sequence of hidden states.
:param source_encoded: Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Shape: (batch_size,).
        :param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, num_embed).
:param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
:param target_embed_max_length: Size of embedded target sequence dimension.
:return: The target hidden states. Shape: (batch_size, target_seq_len, num_hidden).
"""
target_embed, target_embed_lengths, target_embed_max_length = self.pos_embedding.encode(target_embed,
target_embed_lengths,
target_embed_max_length)
# target_hidden: (batch_size, target_seq_len, num_hidden)
target_hidden = mx.sym.FullyConnected(data=target_embed,
num_hidden=self.config.cnn_config.num_hidden,
no_bias=True,
flatten=False,
weight=self.i2h_weight)
target_hidden_prev = target_hidden
drop_prob = self.config.hidden_dropout
for layer, att_layer in zip(self.layers, self.attention_layers):
# (batch_size, target_seq_len, num_hidden)
target_hidden = layer(mx.sym.Dropout(target_hidden, p=drop_prob) if drop_prob > 0 else target_hidden,
target_embed_lengths, target_embed_max_length)
# (batch_size, target_seq_len, num_embed)
context = att_layer(target_hidden, source_encoded, source_encoded_lengths)
# residual connection:
target_hidden = target_hidden_prev + target_hidden + context
target_hidden_prev = target_hidden
return target_hidden
def decode_step(self,
step: int,
target_embed_prev: mx.sym.Symbol,
source_encoded_max_length: int,
*states: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, List[mx.sym.Symbol]]:
"""
Decodes a single time step given the current step, the previous embedded target word,
and previous decoder states.
Returns decoder representation for the next prediction, attention probabilities, and next decoder states.
Implementations can maintain an arbitrary number of states.
:param step: Global step of inference procedure, starts with 1.
:param target_embed_prev: Previous target word embedding. Shape: (batch_size, target_num_embed).
:param source_encoded_max_length: Length of encoded source time dimension.
:param states: Arbitrary list of decoder states.
:return: logit inputs, attention probabilities, next decoder states.
"""
# Source_encoded: (batch_size, source_encoded_max_length, encoder_depth)
source_encoded, source_encoded_lengths, *layer_states = states
# The last layer doesn't keep any state as we only need the last hidden vector for the next word prediction
# but none of the previous hidden vectors
last_layer_state = None
embed_layer_state = layer_states[0]
cnn_layer_states = list(layer_states[1:]) + [last_layer_state]
kernel_width = self.config.cnn_config.kernel_width
new_layer_states = []
# symbolic indices of the previous word
# (batch_size, num_embed)
indices = mx.sym.arange(start=step - 1, stop=step, step=1, name='indices')
target_embed_prev = self.pos_embedding.encode_positions(indices, target_embed_prev)
# (batch_size, num_hidden)
target_hidden_step = mx.sym.FullyConnected(data=target_embed_prev,
num_hidden=self.config.cnn_config.num_hidden,
no_bias=True,
weight=self.i2h_weight)
        # add a time axis to the step output so it can be concatenated with the layer state along time
# (batch_size, 1, num_hidden)
target_hidden_step = mx.sym.expand_dims(target_hidden_step, axis=1)
# (batch_size, kernel_width, num_hidden)
target_hidden = mx.sym.concat(embed_layer_state, target_hidden_step, dim=1)
new_layer_states.append(mx.sym.slice_axis(data=target_hidden, axis=1, begin=1, end=kernel_width))
target_hidden_step_prev = target_hidden_step
drop_prob = self.config.hidden_dropout
for layer, att_layer, layer_state in zip(self.layers, self.attention_layers, cnn_layer_states):
# (batch_size, kernel_width, num_hidden) -> (batch_size, 1, num_hidden)
target_hidden_step = layer.step(mx.sym.Dropout(target_hidden, p=drop_prob)
if drop_prob > 0 else target_hidden)
# (batch_size, 1, num_embed)
# TODO: compute the source encoded projection only once for efficiency reasons
context_step = att_layer(target_hidden_step, source_encoded, source_encoded_lengths)
# residual connection:
target_hidden_step = target_hidden_step_prev + target_hidden_step + context_step
target_hidden_step_prev = target_hidden_step
if layer_state is not None:
# combine with layer state
# (batch_size, kernel_width, num_hidden)
target_hidden = mx.sym.concat(layer_state, target_hidden_step, dim=1)
new_layer_states.append(mx.sym.slice_axis(data=target_hidden, axis=1, begin=1, end=kernel_width))
else:
# last state, here we only care about the latest hidden state:
# (batch_size, 1, num_hidden) -> (batch_size, num_hidden)
target_hidden = mx.sym.reshape(target_hidden_step, shape=(-3, -1))
# (batch_size, source_encoded_max_length)
attention_probs = mx.sym.reshape(mx.sym.slice_axis(mx.sym.zeros_like(source_encoded),
axis=2, begin=0, end=1),
shape=(0, -1))
return target_hidden, attention_probs, [source_encoded, source_encoded_lengths] + new_layer_states
def reset(self):
pass
def get_num_hidden(self) -> int:
"""
:return: The representation size of this decoder.
"""
return self.config.cnn_config.num_hidden
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
# Initially all layers get pad symbols as input (zeros)
# (batch_size, kernel_width, num_hidden)
num_hidden = self.config.cnn_config.num_hidden
kernel_width = self.config.cnn_config.kernel_width
        # Note: We cannot use mx.sym.zeros, as otherwise shape inference fails.
# Therefore we need to get a zero array of the right size through other means.
# (batch_size, 1, 1)
zeros = mx.sym.expand_dims(mx.sym.expand_dims(mx.sym.zeros_like(source_encoded_lengths), axis=1), axis=2)
# (batch_size, kernel_width-1, num_hidden)
next_layer_inputs = [mx.sym.tile(data=zeros, reps=(1, kernel_width - 1, num_hidden),
name="%s%d_init" % (self.prefix, layer_idx))
for layer_idx in range(0, self.config.num_layers)]
return [source_encoded, source_encoded_lengths] + next_layer_inputs
def state_variables(self, target_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns the list of symbolic variables for this decoder to be used during inference.
        :param target_max_length: Current target sequence length.
:return: List of symbolic variables.
"""
# we keep a fixed slice of the layer inputs as a state for all upper layers:
next_layer_inputs = [mx.sym.Variable("cnn_layer%d_in" % layer_idx)
for layer_idx in range(0, self.config.num_layers)]
return [mx.sym.Variable(C.SOURCE_ENCODED_NAME),
mx.sym.Variable(C.SOURCE_LENGTH_NAME)] + next_layer_inputs
def state_shapes(self,
batch_size: int,
target_max_length: int,
source_encoded_max_length: int,
source_encoded_depth: int) -> List[mx.io.DataDesc]:
"""
Returns a list of shape descriptions given batch size, encoded source max length and encoded source depth.
Used for inference.
:param batch_size: Batch size during inference.
:param target_max_length: Current target sequence length.
:param source_encoded_max_length: Size of encoder time dimension.
:param source_encoded_depth: Depth of encoded source.
:return: List of shape descriptions.
"""
num_hidden = self.config.cnn_config.num_hidden
kernel_width = self.config.cnn_config.kernel_width
next_layer_inputs = [mx.io.DataDesc("cnn_layer%d_in" % layer_idx,
shape=(batch_size, kernel_width - 1, num_hidden),
layout="NTW")
for layer_idx in range(0, self.config.num_layers)]
return [mx.io.DataDesc(C.SOURCE_ENCODED_NAME,
(batch_size, source_encoded_max_length, source_encoded_depth),
layout=C.BATCH_MAJOR),
mx.io.DataDesc(C.SOURCE_LENGTH_NAME, (batch_size,), layout="N")] + next_layer_inputs
def get_max_seq_len(self) -> Optional[int]:
# The positional embeddings potentially pose a limit on the maximum length at inference time.
return self.pos_embedding.get_max_seq_len()
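# Editorial sketch (not part of the original model code): the per-layer state
# update used by ConvolutionalDecoder.decode_step above, in NumPy. Each layer
# keeps the last (kernel_width - 1) hidden vectors; a step appends the newest
# vector along the time axis and drops the oldest, mirroring concat + slice_axis.
def _cnn_decoder_state_update_sketch(layer_state, hidden_step):
    import numpy as np
    # layer_state: (batch_size, kernel_width - 1, num_hidden)
    # hidden_step: (batch_size, 1, num_hidden)
    window = np.concatenate([layer_state, hidden_step], axis=1)  # (batch, kernel_width, hidden)
    next_state = window[:, 1:, :]  # keep the most recent kernel_width - 1 steps
    return window, next_state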
| [
"DecoderConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"int",
"int",
"int",
"int",
"transformer.TransformerConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"List[mx.sym.Symbol]",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"int",
"int",
"int",
"int",
"int",
"rnn.RNNConfig",
"rnn_attention.AttentionConfig",
"RecurrentDecoderConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"int",
"int",
"int",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"RecurrentDecoderState",
"Callable",
"rnn_attention.AttentionState",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"rnn_attention.AttentionState",
"int",
"convolution.ConvolutionConfig",
"int",
"int",
"int",
"int",
"str",
"ConvolutionalDecoderConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"int",
"int",
"int",
"int"
] | [
1179,
2277,
2340,
2406,
2449,
2510,
2574,
3507,
3551,
3613,
3647,
4833,
4892,
4954,
5544,
5897,
5942,
5995,
6043,
7484,
8845,
8908,
8974,
9017,
9078,
9142,
11570,
11614,
11676,
11710,
15276,
16596,
16655,
16717,
17330,
17925,
17970,
18023,
18071,
20792,
20826,
20876,
21996,
25788,
25851,
25917,
25960,
26021,
26085,
28872,
28916,
28978,
29012,
31825,
31884,
31946,
33030,
33754,
33799,
33852,
33900,
35818,
35882,
39160,
39196,
39249,
39290,
42560,
42584,
43352,
43401,
43455,
43516,
45846,
45914,
45947,
45989,
46023,
46072,
47542,
49403,
49466,
49532,
49575,
49636,
49700,
51264,
51319,
51364,
51417,
51473,
53677,
53721,
53783,
53817,
58343,
58402,
58464,
59945,
60636,
60681,
60734,
60782
] | [
1192,
2290,
2353,
2409,
2462,
2523,
2577,
3510,
3564,
3616,
3660,
4846,
4905,
4957,
5547,
5900,
5945,
5998,
6046,
7513,
8858,
8921,
8977,
9030,
9091,
9145,
11573,
11627,
11679,
11723,
15295,
16609,
16668,
16720,
17333,
17928,
17973,
18026,
18074,
20795,
20839,
20905,
22018,
25801,
25864,
25920,
25973,
26034,
26088,
28875,
28929,
28981,
29025,
31838,
31897,
31949,
33033,
33757,
33802,
33855,
33903,
35831,
35895,
39173,
39217,
39257,
39318,
42573,
42587,
43365,
43414,
43483,
43519,
45875,
45917,
45950,
45992,
46026,
46075,
47568,
49416,
49479,
49535,
49588,
49649,
49703,
51277,
51332,
51377,
51430,
51476,
53680,
53734,
53786,
53830,
58356,
58415,
58467,
59948,
60639,
60684,
60737,
60785
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/embeddings.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Command-line tool to inspect model embeddings.
"""
import argparse
import sys
from typing import Iterable, Tuple
import mxnet as mx
import numpy as np
import sockeye.constants as C
import sockeye.translate
import sockeye.utils
import sockeye.vocab
from sockeye.log import setup_main_logger
from sockeye.utils import check_condition
logger = setup_main_logger(__name__, file_logging=False)
def compute_sims(inputs: mx.nd.NDArray, normalize: bool) -> mx.nd.NDArray:
"""
Returns a matrix with pair-wise similarity scores between inputs.
    Similarity score is the dot product (equivalent to cosine similarity when the
    inputs are normalized to unit length). 'Similarity with self' is masked to a
    large negative value.
    :param inputs: NDArray of inputs. Shape: (num_inputs, num_embed).
    :param normalize: Whether to normalize inputs to unit length.
    :return: NDArray of pairwise similarities. Shape: (num_inputs, num_inputs).
"""
if normalize:
logger.info("Normalizing embeddings to unit length")
inputs = mx.nd.L2Normalization(inputs, mode='instance')
sims = mx.nd.dot(inputs, inputs, transpose_b=True)
sims_np = sims.asnumpy()
np.fill_diagonal(sims_np, -9999999.)
sims = mx.nd.array(sims_np)
return sims
def nearest_k(similarity_matrix: mx.nd.NDArray,
query_word_id: int,
k: int,
gamma: float = 1.0) -> Iterable[Tuple[int, float]]:
"""
Returns values and indices of k items with largest similarity.
:param similarity_matrix: Similarity matrix.
:param query_word_id: Query word id.
:param k: Number of closest items to retrieve.
:param gamma: Parameter to control distribution steepness.
:return: List of indices and values of k nearest elements.
"""
# pylint: disable=unbalanced-tuple-unpacking
values, indices = mx.nd.topk(mx.nd.softmax(similarity_matrix[query_word_id] / gamma), k=k, ret_typ='both')
return zip(indices.asnumpy(), values.asnumpy())
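# Editorial usage sketch (not part of the original tool): printing the k nearest
# neighbours of a token, given the similarity matrix from compute_sims and the
# vocab/vocab_inv mappings built in main() below.
def print_nearest_neighbours(sims: mx.nd.NDArray, vocab, vocab_inv, token: str,
                             k: int = 5, gamma: float = 1.0) -> None:
    for word_id, score in nearest_k(sims, vocab[token], k, gamma):
        # word_id comes back as a numpy scalar; cast for the reverse lookup
        print("%s[%d] %.4f" % (vocab_inv[int(word_id)], int(word_id), score))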
def main():
"""
Command-line tool to inspect model embeddings.
"""
params = argparse.ArgumentParser(description='Shows nearest neighbours of input tokens in the embedding space.')
params.add_argument('--params', '-p', required=True, help='params file to read parameters from')
params.add_argument('--vocab', '-v', required=True, help='vocab file')
params.add_argument('--side', '-s', required=True, choices=['source', 'target'], help='what embeddings to look at')
params.add_argument('--norm', '-n', action='store_true', help='normalize embeddings to unit length')
params.add_argument('-k', type=int, default=5, help='Number of neighbours to print')
params.add_argument('--gamma', '-g', type=float, default=1.0, help='Softmax distribution steepness.')
args = params.parse_args()
logger.info("Arguments: %s", args)
vocab = sockeye.vocab.vocab_from_json_or_pickle(args.vocab)
vocab_inv = sockeye.vocab.reverse_vocab(vocab)
params, _ = sockeye.utils.load_params(args.params)
weights = params[C.SOURCE_EMBEDDING_PREFIX + "weight"]
if args.side == 'target':
weights = params[C.TARGET_EMBEDDING_PREFIX + "weight"]
logger.info("Embedding size: %d", weights.shape[1])
sims = compute_sims(weights, args.norm)
# weights (vocab, num_target_embed)
check_condition(weights.shape[0] == len(vocab),
"vocab and embeddings matrix do not match: %d vs. %d" % (weights.shape[0], len(vocab)))
for line in sys.stdin:
line = line.rstrip()
for token in line.split():
if token not in vocab:
sys.stdout.write("\n")
logger.error("'%s' not in vocab", token)
continue
sys.stdout.write("Token: %s [%d]: " % (token, vocab[token]))
neighbours = nearest_k(sims, vocab[token], args.k, args.gamma)
for i, (wid, score) in enumerate(neighbours, 1):
sys.stdout.write("%d. %s[%d] %.4f\t" % (i, vocab_inv[wid], wid, score))
sys.stdout.write("\n")
sys.stdout.flush()
if __name__ == '__main__':
main()
| [
"mx.nd.NDArray",
"bool",
"mx.nd.NDArray",
"int",
"int"
] | [
989,
1015,
1766,
1810,
1832
] | [
1002,
1019,
1779,
1813,
1835
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/encoder.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Encoders for sequence-to-sequence models.
"""
import logging
from abc import ABC, abstractmethod
from math import ceil, floor
from typing import Callable, List, Optional, Tuple, Union
import mxnet as mx
from sockeye.config import Config
from sockeye.convolution import ConvolutionBlock
from . import constants as C
from . import rnn
from . import convolution
from . import transformer
from . import utils
logger = logging.getLogger(__name__)
EncoderConfigs = Union['RecurrentEncoderConfig', transformer.TransformerConfig, 'ConvolutionalEncoderConfig']
def get_encoder(config: EncoderConfigs) -> 'Encoder':
if isinstance(config, RecurrentEncoderConfig):
return get_recurrent_encoder(config)
elif isinstance(config, transformer.TransformerConfig):
return get_transformer_encoder(config)
elif isinstance(config, ConvolutionalEncoderConfig):
return get_convolutional_encoder(config)
else:
raise ValueError("Unsupported encoder configuration")
class RecurrentEncoderConfig(Config):
"""
Recurrent encoder configuration.
:param rnn_config: RNN configuration.
:param conv_config: Optional configuration for convolutional embedding.
:param reverse_input: Reverse embedding sequence before feeding into RNN.
"""
def __init__(self,
rnn_config: rnn.RNNConfig,
conv_config: Optional['ConvolutionalEmbeddingConfig'] = None,
reverse_input: bool = False) -> None:
super().__init__()
self.rnn_config = rnn_config
self.conv_config = conv_config
self.reverse_input = reverse_input
class ConvolutionalEncoderConfig(Config):
"""
Convolutional encoder configuration.
:param cnn_config: CNN configuration.
:param num_layers: The number of convolutional layers on top of the embeddings.
:param positional_embedding_type: The type of positional embedding.
"""
def __init__(self,
num_embed: int,
max_seq_len_source: int,
cnn_config: convolution.ConvolutionConfig,
num_layers: int,
positional_embedding_type: str) -> None:
super().__init__()
self.num_embed = num_embed
self.num_layers = num_layers
self.cnn_config = cnn_config
self.max_seq_len_source = max_seq_len_source
self.positional_embedding_type = positional_embedding_type
def get_recurrent_encoder(config: RecurrentEncoderConfig) -> 'Encoder':
"""
Returns an encoder stack with a bi-directional RNN, and a variable number of uni-directional forward RNNs.
:param config: Configuration for recurrent encoder.
:return: Encoder instance.
"""
# TODO give more control on encoder architecture
encoders = list() # type: List[Encoder]
if config.conv_config is not None:
encoders.append(ConvolutionalEmbeddingEncoder(config.conv_config, prefix=C.CHAR_SEQ_ENCODER_PREFIX))
if config.conv_config.add_positional_encoding:
# If specified, add positional encodings to segment embeddings
encoders.append(AddSinCosPositionalEmbeddings(num_embed=config.conv_config.num_embed,
scale_up_input=False,
scale_down_positions=False,
prefix="%sadd_positional_encodings" % C.CHAR_SEQ_ENCODER_PREFIX))
encoders.append(BatchMajor2TimeMajor())
if config.reverse_input:
encoders.append(ReverseSequence())
if config.rnn_config.residual:
utils.check_condition(config.rnn_config.first_residual_layer >= 2,
"Residual connections on the first encoder layer are not supported")
# One layer bi-directional RNN:
encoders.append(BiDirectionalRNNEncoder(rnn_config=config.rnn_config.copy(num_layers=1),
prefix=C.BIDIRECTIONALRNN_PREFIX,
layout=C.TIME_MAJOR))
if config.rnn_config.num_layers > 1:
# Stacked uni-directional RNN:
# Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.
remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,
first_residual_layer=config.rnn_config.first_residual_layer - 1)
encoders.append(RecurrentEncoder(rnn_config=remaining_rnn_config,
prefix=C.STACKEDRNN_PREFIX,
layout=C.TIME_MAJOR))
return EncoderSequence(encoders)
def get_convolutional_encoder(config: ConvolutionalEncoderConfig) -> 'Encoder':
"""
Creates a convolutional encoder.
:param config: Configuration for convolutional encoder.
:return: Encoder instance.
"""
encoders = list() # type: List[Encoder]
encoders.append(get_positional_embedding(config.positional_embedding_type,
config.num_embed,
max_seq_len=config.max_seq_len_source,
fixed_pos_embed_scale_up_input=False,
fixed_pos_embed_scale_down_positions=True,
prefix=C.SOURCE_POSITIONAL_EMBEDDING_PREFIX))
encoders.append(ConvolutionalEncoder(config=config))
encoders.append(BatchMajor2TimeMajor())
return EncoderSequence(encoders)
def get_transformer_encoder(config: transformer.TransformerConfig) -> 'Encoder':
"""
Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
:param config: Configuration for transformer encoder.
:return: Encoder instance.
"""
encoders = list() # type: List[Encoder]
encoders.append(get_positional_embedding(config.positional_embedding_type,
config.model_size,
config.max_seq_len_source,
fixed_pos_embed_scale_up_input=True,
fixed_pos_embed_scale_down_positions=False,
prefix=C.SOURCE_POSITIONAL_EMBEDDING_PREFIX))
if config.conv_config is not None:
encoders.append(ConvolutionalEmbeddingEncoder(config.conv_config))
encoders.append(TransformerEncoder(config))
encoders.append(BatchMajor2TimeMajor())
return EncoderSequence(encoders)
class Encoder(ABC):
"""
Generic encoder interface.
"""
@abstractmethod
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
pass
def get_num_hidden(self) -> int:
"""
:return: The representation size of this encoder.
"""
raise NotImplementedError()
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
:return: The size of the encoded sequence.
"""
return seq_len
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the encoder if such a restriction exists.
"""
return None
class BatchMajor2TimeMajor(Encoder):
"""
Converts batch major data to time major.
"""
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
with mx.AttrScope(__layout__=C.TIME_MAJOR):
return mx.sym.swapaxes(data=data, dim1=0, dim2=1), data_length, seq_len
class ReverseSequence(Encoder):
"""
Reverses the input sequence. Requires time-major layout.
"""
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
data = mx.sym.SequenceReverse(data=data, sequence_length=data_length, use_sequence_length=True)
return data, data_length, seq_len
class EmbeddingConfig(Config):
def __init__(self,
vocab_size: int,
num_embed: int,
dropout: float) -> None:
super().__init__()
self.vocab_size = vocab_size
self.num_embed = num_embed
self.dropout = dropout
class Embedding(Encoder):
"""
Thin wrapper around MXNet's Embedding symbol. Works with both time- and batch-major data layouts.
:param config: Embedding config.
:param prefix: Name prefix for symbols of this encoder.
:param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
"""
def __init__(self,
config: EmbeddingConfig,
prefix: str,
embed_weight: Optional[mx.sym.Symbol] = None) -> None:
self.config = config
self.prefix = prefix
self.embed_weight = embed_weight
if self.embed_weight is None:
self.embed_weight = mx.sym.Variable(prefix + "weight",
shape=(self.config.vocab_size, self.config.num_embed))
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
embedding = mx.sym.Embedding(data=data,
input_dim=self.config.vocab_size,
weight=self.embed_weight,
output_dim=self.config.num_embed,
name=self.prefix + "embed")
if self.config.dropout > 0:
embedding = mx.sym.Dropout(data=embedding, p=self.config.dropout, name="source_embed_dropout")
return embedding, data_length, seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.config.num_embed
class PositionalEncoder(Encoder):
@abstractmethod
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Add positional encodings to the data using the provided positions.
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
"""
pass
class AddSinCosPositionalEmbeddings(PositionalEncoder):
"""
Takes an encoded sequence and adds fixed positional embeddings as in Vaswani et al, 2017 to it.
:param num_embed: Embedding size.
:param prefix: Name prefix for symbols of this encoder.
:param scale_up_input: If True, scales input data up by num_embed ** 0.5.
:param scale_down_positions: If True, scales positional embeddings down by num_embed ** -0.5.
"""
def __init__(self,
num_embed: int,
prefix: str,
scale_up_input: bool,
scale_down_positions: bool) -> None:
        utils.check_condition(num_embed % 2 == 0, "Positional embeddings require an even embedding size, "
                                                  "but it is %d." % num_embed)
self.scale_up_input = scale_up_input
self.scale_down_positions = scale_down_positions
self.num_embed = num_embed
self.prefix = prefix
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
:param data: (batch_size, source_seq_len, num_embed)
:param data_length: (batch_size,)
:param seq_len: sequence length.
:return: (batch_size, source_seq_len, num_embed)
"""
# add positional embeddings to data
if self.scale_up_input:
data = data * (self.num_embed ** 0.5)
positions = mx.sym.BlockGrad(mx.symbol.Custom(length=seq_len,
depth=self.num_embed,
name="%spositional_encodings" % self.prefix,
op_type='positional_encodings'))
if self.scale_down_positions:
positions = positions * (self.num_embed ** -0.5)
embedding = mx.sym.broadcast_add(data, positions)
return embedding, data_length, seq_len
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
"""
# (batch_size, 1)
positions = mx.sym.expand_dims(positions, axis=1)
# (num_embed,)
channels = mx.sym.arange(0, self.num_embed // 2)
# (1, num_embed,)
scaling = mx.sym.expand_dims(1. / mx.sym.pow(10000, (2 * channels) / self.num_embed), axis=0)
# (batch_size, num_embed/2)
scaled_positions = mx.sym.dot(positions, scaling)
sin = mx.sym.sin(scaled_positions)
cos = mx.sym.cos(scaled_positions)
# (batch_size, num_embed/2)
pos_embedding = mx.sym.concat(sin, cos, dim=1)
if self.scale_up_input:
data = data * (self.num_embed ** 0.5)
if self.scale_down_positions:
pos_embedding = pos_embedding * (self.num_embed ** -0.5)
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix)
def get_num_hidden(self) -> int:
return self.num_embed
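# Editorial sketch (not part of the original model code): the fixed sin/cos
# position encoding computed by encode_positions above, in NumPy. Assumes an
# even num_embed (enforced in __init__): sine features fill the first half of
# the dimensions and cosine features the second, with wavelengths spaced as in
# Vaswani et al. 2017.
def _sincos_position_sketch(positions, num_embed):
    import numpy as np
    channels = np.arange(num_embed // 2)
    # (num_embed/2,): 1 / 10000^(2i / num_embed)
    scaling = 1.0 / np.power(10000.0, (2 * channels) / num_embed)
    # (batch_size, num_embed/2)
    scaled = np.asarray(positions)[:, None] * scaling[None, :]
    # (batch_size, num_embed): [sin | cos] along the feature axis
    return np.concatenate([np.sin(scaled), np.cos(scaled)], axis=1)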
class AddLearnedPositionalEmbeddings(PositionalEncoder):
"""
    Takes an encoded sequence and adds positional embeddings to it, which are learned jointly. Note that this will
    limit the maximum sentence length during decoding.
:param num_embed: Embedding size.
:param max_seq_len: Maximum sequence length.
:param prefix: Name prefix for symbols of this encoder.
:param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
"""
def __init__(self,
num_embed: int,
max_seq_len: int,
prefix: str,
embed_weight: Optional[mx.sym.Symbol] = None) -> None:
self.num_embed = num_embed
self.max_seq_len = max_seq_len
self.prefix = prefix
if embed_weight is not None:
self.embed_weight = embed_weight
else:
self.embed_weight = mx.sym.Variable(prefix + "weight")
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
:param data: (batch_size, source_seq_len, num_embed)
:param data_length: (batch_size,)
:param seq_len: sequence length.
:return: (batch_size, source_seq_len, num_embed)
"""
# (1, source_seq_len)
positions = mx.sym.expand_dims(data=mx.sym.arange(start=0, stop=seq_len, step=1), axis=0)
# (1, source_seq_len, num_embed)
pos_embedding = mx.sym.Embedding(data=positions,
input_dim=self.max_seq_len,
weight=self.embed_weight,
output_dim=self.num_embed,
name=self.prefix + "pos_embed")
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix), data_length, seq_len
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
:param positions: (batch_size,)
:param data: (batch_size, num_embed)
:return: (batch_size, num_embed)
"""
# (batch_size, source_seq_len, num_embed)
pos_embedding = mx.sym.Embedding(data=positions,
input_dim=self.max_seq_len,
weight=self.embed_weight,
output_dim=self.num_embed,
name=self.prefix + "pos_embed")
return mx.sym.broadcast_add(data, pos_embedding, name="%s_add" % self.prefix)
def get_num_hidden(self) -> int:
return self.num_embed
def get_max_seq_len(self) -> Optional[int]:
# we can only support sentences as long as the maximum length during training.
return self.max_seq_len
class NoOpPositionalEmbeddings(PositionalEncoder):
"""
Simple NoOp pos embedding. It does not modify the data, but avoids lots of if statements.
"""
    def __init__(self, num_embed: int) -> None:
self.num_embed = num_embed
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
return data, data_length, seq_len
def encode_positions(self,
positions: mx.sym.Symbol,
data: mx.sym.Symbol) -> mx.sym.Symbol:
return data
def get_num_hidden(self) -> int:
return self.num_embed
def get_positional_embedding(positional_embedding_type: str,
num_embed: int,
max_seq_len: int,
fixed_pos_embed_scale_up_input: bool = False,
fixed_pos_embed_scale_down_positions: bool = False,
prefix: str = '') -> PositionalEncoder:
if positional_embedding_type == C.FIXED_POSITIONAL_EMBEDDING:
return AddSinCosPositionalEmbeddings(num_embed=num_embed,
scale_up_input=fixed_pos_embed_scale_up_input,
scale_down_positions=fixed_pos_embed_scale_down_positions,
prefix=prefix)
elif positional_embedding_type == C.LEARNED_POSITIONAL_EMBEDDING:
return AddLearnedPositionalEmbeddings(num_embed=num_embed,
max_seq_len=max_seq_len,
prefix=prefix)
elif positional_embedding_type == C.NO_POSITIONAL_EMBEDDING:
return NoOpPositionalEmbeddings(num_embed=num_embed)
else:
raise ValueError("Unknown positional embedding type %s" % positional_embedding_type)
class EncoderSequence(Encoder):
"""
A sequence of encoders is itself an encoder.
:param encoders: List of encoders.
"""
def __init__(self, encoders: List[Encoder]) -> None:
self.encoders = encoders
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
for encoder in self.encoders:
data, data_length, seq_len = encoder.encode(data, data_length, seq_len)
return data, data_length, seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
if isinstance(self.encoders[-1], BatchMajor2TimeMajor):
utils.check_condition(len(self.encoders) > 1,
"Cannot return num_hidden from a BatchMajor2TimeMajor encoder only")
return self.encoders[-2].get_num_hidden()
else:
return self.encoders[-1].get_num_hidden()
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
Returns the size of the encoded sequence.
"""
for encoder in self.encoders:
seq_len = encoder.get_encoded_seq_len(seq_len)
return seq_len
def get_max_seq_len(self) -> Optional[int]:
"""
:return: The maximum length supported by the encoder if such a restriction exists.
"""
max_seq_len = min((encoder.get_max_seq_len()
for encoder in self.encoders if encoder.get_max_seq_len() is not None), default=None)
return max_seq_len
class RecurrentEncoder(Encoder):
"""
Uni-directional (multi-layered) recurrent encoder.
:param rnn_config: RNN configuration.
:param prefix: Prefix.
:param layout: Data layout.
"""
def __init__(self,
rnn_config: rnn.RNNConfig,
prefix: str = C.STACKEDRNN_PREFIX,
layout: str = C.TIME_MAJOR) -> None:
self.rnn_config = rnn_config
self.layout = layout
self.rnn = rnn.get_stacked_rnn(rnn_config, prefix)
def encode(self,
data: mx.sym.Symbol,
data_length: Optional[mx.sym.Symbol],
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
outputs, _ = self.rnn.unroll(seq_len, inputs=data, merge_outputs=True, layout=self.layout)
return outputs, data_length, seq_len
def get_rnn_cells(self):
"""
Returns RNNCells used in this encoder.
"""
return [self.rnn]
def get_num_hidden(self):
"""
Return the representation size of this encoder.
"""
return self.rnn_config.num_hidden
class BiDirectionalRNNEncoder(Encoder):
"""
An encoder that runs a forward and a reverse RNN over input data.
States from both RNNs are concatenated together.
:param rnn_config: RNN configuration.
:param prefix: Prefix.
:param layout: Data layout.
:param encoder_class: Recurrent encoder class to use.
"""
def __init__(self,
rnn_config: rnn.RNNConfig,
prefix=C.BIDIRECTIONALRNN_PREFIX,
layout=C.TIME_MAJOR,
encoder_class: Callable = RecurrentEncoder) -> None:
utils.check_condition(rnn_config.num_hidden % 2 == 0,
"num_hidden must be a multiple of 2 for BiDirectionalRNNEncoders.")
self.rnn_config = rnn_config
self.internal_rnn_config = rnn_config.copy(num_hidden=rnn_config.num_hidden // 2)
if layout[0] == 'N':
logger.warning("Batch-major layout for encoder input. Consider using time-major layout for faster speed")
# time-major layout as _encode needs to swap layout for SequenceReverse
self.forward_rnn = encoder_class(rnn_config=self.internal_rnn_config,
prefix=prefix + C.FORWARD_PREFIX,
layout=C.TIME_MAJOR)
self.reverse_rnn = encoder_class(rnn_config=self.internal_rnn_config,
prefix=prefix + C.REVERSE_PREFIX,
layout=C.TIME_MAJOR)
self.layout = layout
self.prefix = prefix
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
if self.layout[0] == 'N':
data = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
data = self._encode(data, data_length, seq_len)
if self.layout[0] == 'N':
data = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
return data, data_length, seq_len
def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:
"""
Bidirectionally encodes time-major data.
"""
# (seq_len, batch_size, num_embed)
data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,
use_sequence_length=True)
# (seq_length, batch, cell_num_hidden)
hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)
# (seq_length, batch, cell_num_hidden)
hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)
# (seq_length, batch, cell_num_hidden)
hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,
use_sequence_length=True)
# (seq_length, batch, 2 * cell_num_hidden)
hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name="%s_rnn" % self.prefix)
return hidden_concat
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.rnn_config.num_hidden
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this encoder.
"""
return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells()
class ConvolutionalEncoder(Encoder):
"""
Encoder that uses convolution instead of recurrent connections, similar to Gehring et al. 2017.
:param config: Configuration for convolutional encoder.
:param prefix: Name prefix for operations in this encoder.
"""
def __init__(self,
config: ConvolutionalEncoderConfig,
prefix: str = C.CNN_ENCODER_PREFIX) -> None:
self.config = config
# initialize the weights of the linear transformation required for the residual connections
self.i2h_weight = mx.sym.Variable('%si2h_weight' % prefix)
# initialize the layers of blocks containing a convolution and a GLU, since
# every layer is shared over all encode calls
self.layers = [ConvolutionBlock(
config.cnn_config,
pad_type='centered',
prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data with a stack of Convolution+GLU blocks given sequence lengths of individual examples
and maximum sequence length.
:param data: Input data. Shape: (batch_size, seq_len, input_num_hidden).
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded version of the data.
"""
# data: (batch_size, seq_len, num_hidden)
data = mx.sym.FullyConnected(data=data,
num_hidden=self.config.cnn_config.num_hidden,
no_bias=True,
flatten=False,
weight=self.i2h_weight)
# Multiple layers with residual connections:
for layer in self.layers:
data = data + layer(data, data_length, seq_len)
return data, data_length, seq_len
def get_num_hidden(self) -> int:
return self.config.cnn_config.num_hidden
class TransformerEncoder(Encoder):
"""
Non-recurrent encoder based on the transformer architecture in:
Attention Is All You Need, Figure 1 (left)
Vaswani et al. (https://arxiv.org/pdf/1706.03762.pdf).
:param config: Configuration for transformer encoder.
:param prefix: Name prefix for operations in this encoder.
"""
def __init__(self,
config: transformer.TransformerConfig,
prefix: str = C.TRANSFORMER_ENCODER_PREFIX) -> None:
self.config = config
self.prefix = prefix
self.layers = [transformer.TransformerEncoderBlock(
config, prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
self.final_process = transformer.TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sfinal_process_" % prefix)
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data data, data_length, seq_len.
"""
if self.config.dropout_prepost > 0.0:
data = mx.sym.Dropout(data=data, p=self.config.dropout_prepost)
# (batch_size * heads, 1, max_length)
bias = mx.sym.expand_dims(transformer.get_variable_length_bias(lengths=data_length,
max_length=seq_len,
num_heads=self.config.attention_heads,
fold_heads=True,
name="%sbias"% self.prefix), axis=1)
for i, layer in enumerate(self.layers):
# (batch_size, seq_len, config.model_size)
data = layer(data, bias)
data = self.final_process(data=data, prev=None)
return data, data_length, seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.config.model_size
class ConvolutionalEmbeddingConfig(Config):
"""
Convolutional embedding encoder configuration.
:param num_embed: Input embedding size.
:param output_dim: Output segment embedding size.
:param max_filter_width: Maximum filter width for convolutions.
:param num_filters: Number of filters of each width.
:param pool_stride: Stride for pooling layer after convolutions.
:param num_highway_layers: Number of highway layers for segment embeddings.
:param dropout: Dropout probability.
    :param add_positional_encoding: If True, add fixed positional encodings to the segment embeddings.
"""
def __init__(self,
num_embed: int,
                 output_dim: Optional[int] = None,
max_filter_width: int = 8,
num_filters: Tuple[int, ...] = (200, 200, 250, 250, 300, 300, 300, 300),
pool_stride: int = 5,
num_highway_layers: int = 4,
dropout: float = 0.0,
add_positional_encoding: bool = False) -> None:
super().__init__()
self.num_embed = num_embed
self.output_dim = output_dim
self.max_filter_width = max_filter_width
self.num_filters = num_filters
self.pool_stride = pool_stride
self.num_highway_layers = num_highway_layers
self.dropout = dropout
self.add_positional_encoding = add_positional_encoding
if self.output_dim is None:
self.output_dim = sum(self.num_filters)
class ConvolutionalEmbeddingEncoder(Encoder):
"""
An encoder developed to map a sequence of character embeddings to a shorter sequence of segment
embeddings using convolutional, pooling, and highway layers. More generally, it maps a sequence
of input embeddings to a sequence of span embeddings.
* "Fully Character-Level Neural Machine Translation without Explicit Segmentation"
Jason Lee; Kyunghyun Cho; Thomas Hofmann (https://arxiv.org/pdf/1610.03017.pdf)
:param config: Convolutional embedding config.
:param prefix: Name prefix for symbols of this encoder.
"""
def __init__(self,
config: ConvolutionalEmbeddingConfig,
prefix: str = C.CHAR_SEQ_ENCODER_PREFIX) -> None:
utils.check_condition(len(config.num_filters) == config.max_filter_width,
"num_filters must have max_filter_width elements.")
self.num_embed = config.num_embed
self.output_dim = config.output_dim
self.max_filter_width = config.max_filter_width
self.num_filters = config.num_filters[:]
self.pool_stride = config.pool_stride
self.num_highway_layers = config.num_highway_layers
self.prefix = prefix
self.dropout = config.dropout
self.add_positional_encoding = config.add_positional_encoding
self.conv_weight = {filter_width: mx.sym.Variable("%s%s%d%s" % (self.prefix, "conv_", filter_width, "_weight"))
for filter_width in range(1, self.max_filter_width + 1)}
self.conv_bias = {filter_width: mx.sym.Variable("%s%s%d%s" % (self.prefix, "conv_", filter_width, "_bias"))
for filter_width in range(1, self.max_filter_width + 1)}
self.project_weight = mx.sym.Variable(self.prefix + "project_weight")
self.project_bias = mx.sym.Variable(self.prefix + "project_bias")
self.gate_weight = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "gate_", i, "_weight"))
for i in range(self.num_highway_layers)]
self.gate_bias = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "gate_", i, "_bias"))
for i in range(self.num_highway_layers)]
self.transform_weight = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "transform_", i, "_weight"))
for i in range(self.num_highway_layers)]
self.transform_bias = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "transform_", i, "_bias"))
for i in range(self.num_highway_layers)]
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data data, data_length, seq_len.
"""
total_num_filters = sum(self.num_filters)
encoded_seq_len = self.get_encoded_seq_len(seq_len)
# (batch_size, channel=1, seq_len, num_embed)
data = mx.sym.Reshape(data=data, shape=(-1, 1, seq_len, self.num_embed))
# Convolution filters of width 1..N
conv_outputs = []
for filter_width, num_filter in enumerate(self.num_filters, 1):
# "half" padding: output length == input length
pad_before = ceil((filter_width - 1) / 2)
pad_after = floor((filter_width - 1) / 2)
# (batch_size, channel=1, seq_len + (filter_width - 1), num_embed)
padded = mx.sym.pad(data=data,
mode="constant",
constant_value=0,
pad_width=(0, 0, 0, 0, pad_before, pad_after, 0, 0))
# (batch_size, num_filter, seq_len, num_scores=1)
conv = mx.sym.Convolution(data=padded,
# cudnn_tune="off",
kernel=(filter_width, self.num_embed),
num_filter=num_filter,
weight=self.conv_weight[filter_width],
bias=self.conv_bias[filter_width])
conv = mx.sym.Activation(data=conv, act_type="relu")
conv_outputs.append(conv)
# (batch_size, total_num_filters, seq_len, num_scores=1)
conv_concat = mx.sym.concat(*conv_outputs, dim=1)
# Max pooling with stride
uncovered = seq_len % self.pool_stride
if uncovered > 0:
pad_after = self.pool_stride - uncovered
# (batch_size, total_num_filters, seq_len + pad_to_final_stride, num_scores=1)
conv_concat = mx.sym.pad(data=conv_concat,
mode="constant",
constant_value=0,
pad_width=(0, 0, 0, 0, 0, pad_after, 0, 0))
# (batch_size, total_num_filters, seq_len/stride, num_scores=1)
pool = mx.sym.Pooling(data=conv_concat,
pool_type="max",
kernel=(self.pool_stride, 1),
stride=(self.pool_stride, 1))
# (batch_size, total_num_filters, seq_len/stride)
pool = mx.sym.reshape(data=pool,
shape=(-1, total_num_filters, encoded_seq_len))
# (batch_size, seq_len/stride, total_num_filters)
pool = mx.sym.swapaxes(data=pool, dim1=1, dim2=2)
if self.dropout > 0:
pool = mx.sym.Dropout(data=pool, p=self.dropout)
# Raw segment embeddings reshaped for highway network
# (batch_size * seq_len/stride, total_num_filters)
seg_embedding = mx.sym.Reshape(data=pool, shape=(-3, total_num_filters))
# Projection layer if requested output dimension is different from total number of filters
# (TransformerEncoder compatibility, not in original paper)
if self.output_dim != total_num_filters:
            # (batch_size * seq_len/stride, output_dim)
seg_embedding = mx.sym.FullyConnected(data=seg_embedding,
num_hidden=self.output_dim,
weight=self.project_weight,
bias=self.project_bias)
seg_embedding = mx.sym.Activation(data=seg_embedding, act_type="relu")
if self.dropout > 0:
seg_embedding = mx.sym.Dropout(data=seg_embedding, p=self.dropout)
# Highway network
for i in range(self.num_highway_layers):
# Gate
gate = mx.sym.FullyConnected(data=seg_embedding,
num_hidden=self.output_dim,
weight=self.gate_weight[i],
bias=self.gate_bias[i])
gate = mx.sym.Activation(data=gate, act_type="sigmoid")
if self.dropout > 0:
gate = mx.sym.Dropout(data=gate, p=self.dropout)
# Transform
transform = mx.sym.FullyConnected(data=seg_embedding,
num_hidden=self.output_dim,
weight=self.transform_weight[i],
bias=self.transform_bias[i])
transform = mx.sym.Activation(data=transform, act_type="relu")
if self.dropout > 0:
transform = mx.sym.Dropout(data=transform, p=self.dropout)
# Connection
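            # Highway connection (Srivastava et al. 2015): the sigmoid gate interpolates
            # elementwise between the transformed input and the untouched input,
            # y = gate * transform(x) + (1 - gate) * x.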
seg_embedding = gate * transform + (1 - gate) * seg_embedding
        # (batch_size, seq_len/stride, output_dim) aka
        # (batch_size, encoded_seq_len, num_segment_embed)
seg_embedding = mx.sym.Reshape(data=seg_embedding,
shape=(-1, encoded_seq_len, self.output_dim))
# Dropout on final segment embeddings
if self.dropout > 0:
seg_embedding = mx.sym.Dropout(data=seg_embedding, p=self.dropout)
# Ceiling function isn't differentiable so this will throw errors if we
# attempt to compute gradients. Fortunately we aren't updating inputs
# so we can just block the backward pass here.
encoded_data_length = mx.sym.BlockGrad(mx.sym.ceil(data_length / self.pool_stride))
return seg_embedding, encoded_data_length, encoded_seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.output_dim
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
Returns the size of the encoded sequence.
"""
return int(ceil(seq_len / self.pool_stride))
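        # Illustrative example (not part of the original module): with pool_stride=3 a
        # character sequence of length 10 is mapped to ceil(10 / 3) == 4 segment embeddings.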
| [
"EncoderConfigs",
"rnn.RNNConfig",
"int",
"int",
"convolution.ConvolutionConfig",
"int",
"str",
"RecurrentEncoderConfig",
"ConvolutionalEncoderConfig",
"transformer.TransformerConfig",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"int",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"int",
"float",
"EmbeddingConfig",
"str",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"str",
"bool",
"bool",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"str",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"str",
"int",
"int",
"List[Encoder]",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"rnn.RNNConfig",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"rnn.RNNConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"ConvolutionalEncoderConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"transformer.TransformerConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"ConvolutionalEmbeddingConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int"
] | [
1151,
1905,
2552,
2594,
2628,
2688,
2737,
3043,
5355,
6351,
7534,
7577,
7626,
8228,
8666,
8709,
8758,
9437,
9480,
9519,
9801,
9834,
9865,
10404,
10446,
10873,
10916,
10965,
12141,
12187,
12959,
12989,
13027,
13072,
13485,
13528,
13577,
14585,
14631,
16273,
16308,
16338,
16724,
16767,
16816,
17772,
17818,
18993,
19036,
19085,
19245,
19291,
19470,
19515,
19562,
20851,
20951,
20994,
21033,
22095,
22920,
23209,
23252,
23301,
24509,
25729,
25772,
25811,
26519,
26547,
26571,
28252,
28903,
28946,
28985,
30458,
31200,
31243,
31282,
33358,
34853,
36825,
36868,
36907,
43088
] | [
1165,
1918,
2555,
2597,
2657,
2691,
2740,
3065,
5381,
6380,
7547,
7600,
7629,
8231,
8679,
8732,
8761,
9450,
9493,
9522,
9804,
9837,
9870,
10419,
10449,
10886,
10939,
10968,
12154,
12200,
12962,
12992,
13031,
13076,
13498,
13551,
13580,
14598,
14644,
16276,
16311,
16341,
16737,
16790,
16819,
17785,
17831,
19006,
19059,
19088,
19258,
19304,
19473,
19518,
19565,
20864,
20964,
21007,
21036,
22098,
22933,
23222,
23275,
23304,
24522,
25742,
25785,
25814,
26532,
26560,
26574,
28278,
28916,
28959,
28988,
30487,
31213,
31256,
31285,
33361,
34881,
36838,
36881,
36910,
43091
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/evaluate.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Evaluation CLI. Prints corpus BLEU
"""
import argparse
import logging
import sys
from typing import Iterable, Optional
from contrib import sacrebleu
from sockeye.log import setup_main_logger, log_sockeye_version
from . import arguments
from . import chrf
from . import constants as C
from . import data_io
from . import utils
logger = setup_main_logger(__name__, file_logging=False)
def raw_corpus_bleu(hypotheses: Iterable[str], references: Iterable[str], offset: Optional[float] = 0.01) -> float:
"""
Simple wrapper around sacreBLEU's BLEU without tokenization and smoothing.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:param offset: Smoothing constant.
:return: BLEU score as float between 0 and 1.
"""
return sacrebleu.raw_corpus_bleu(hypotheses, [references], smooth_floor=offset).score / 100
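# Minimal usage sketch (hypothetical strings, not part of the original module):
#   hyps = ["the cat sat on the mat"]
#   refs = ["the cat sat on the mat"]
#   raw_corpus_bleu(hyps, refs)  # -> 1.0 for an exact match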
def main():
params = argparse.ArgumentParser(description='Evaluate translations by calculating metrics with '
'respect to a reference set.')
arguments.add_evaluate_args(params)
arguments.add_logging_args(params)
args = params.parse_args()
if args.quiet:
logger.setLevel(logging.ERROR)
utils.check_condition(args.offset >= 0, "Offset should be non-negative.")
log_sockeye_version(logger)
logger.info("Command: %s", " ".join(sys.argv))
logger.info("Arguments: %s", args)
references = [' '.join(e) for e in data_io.read_content(args.references)]
hypotheses = [h.strip() for h in args.hypotheses]
logger.info("%d hypotheses | %d references", len(hypotheses), len(references))
if not args.not_strict:
utils.check_condition(len(hypotheses) == len(references),
"Number of hypotheses (%d) and references (%d) does not match." % (len(hypotheses),
len(references)))
if not args.sentence:
scores = []
for metric in args.metrics:
if metric == C.BLEU:
bleu_score = raw_corpus_bleu(hypotheses, references, args.offset)
scores.append("%.6f" % bleu_score)
elif metric == C.CHRF:
chrf_score = chrf.corpus_chrf(hypotheses, references, trim_whitespaces=True)
scores.append("%.6f" % chrf_score)
print("\t".join(scores), file=sys.stdout)
else:
for h, r in zip(hypotheses, references):
scores = []
for metric in args.metrics:
if metric == C.BLEU:
bleu = raw_corpus_bleu(h, r, args.offset)
scores.append("%.6f" % bleu)
elif metric == C.CHRF:
chrf_score = chrf.corpus_chrf(h, r, trim_whitespaces=True)
scores.append("%.6f" % chrf_score)
print("\t".join(scores), file=sys.stdout)
if __name__ == '__main__':
main()
| [
"Iterable[str]",
"Iterable[str]"
] | [
989,
1016
] | [
1002,
1029
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/extract_parameters.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Extract specific parameters.
"""
import argparse
import os
from typing import Dict, List
import mxnet as mx
import numpy as np
from sockeye.log import setup_main_logger, log_sockeye_version
from . import arguments
from . import constants as C
from . import utils
logger = setup_main_logger(__name__, console=True, file_logging=False)
def _extract(param_names: List[str],
params: Dict[str, mx.nd.NDArray],
ext_params: Dict[str, np.ndarray]) -> List[str]:
"""
Extract specific parameters from a given base.
:param param_names: Names of parameters to be extracted.
:param params: Mapping from parameter names to the actual NDArrays parameters.
:param ext_params: Extracted parameter dictionary.
:return: Remaining names of parameters to be extracted.
"""
remaining_param_names = list(param_names)
for name in param_names:
if name in params:
logger.info("\tFound '%s': shape=%s", name, str(params[name].shape))
ext_params[name] = params[name].asnumpy()
remaining_param_names.remove(name)
return remaining_param_names
def extract(param_path: str,
param_names: List[str],
list_all: bool) -> Dict[str, np.ndarray]:
"""
Extract specific parameters given their names.
:param param_path: Path to the parameter file.
:param param_names: Names of parameters to be extracted.
:param list_all: List names of all available parameters.
:return: Extracted parameter dictionary.
"""
logger.info("Loading parameters from '%s'", param_path)
arg_params, aux_params = utils.load_params(param_path)
ext_params = {} # type: Dict[str, np.ndarray]
param_names = _extract(param_names, arg_params, ext_params)
param_names = _extract(param_names, aux_params, ext_params)
if len(param_names) > 0:
logger.info("The following parameters were not found:")
for name in param_names:
logger.info("\t%s", name)
logger.info("Check the following availabilities")
list_all = True
if list_all:
if arg_params:
logger.info("Available arg parameters:")
for name in arg_params:
logger.info("\t%s: shape=%s", name, str(arg_params[name].shape))
if aux_params:
logger.info("Available aux parameters:")
for name in aux_params:
logger.info("\t%s: shape=%s", name, str(aux_params[name].shape))
return ext_params
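# Usage sketch (hypothetical file and parameter names, not part of the original module):
#   ext = extract("model/params.best", ["source_embed_weight"], list_all=False)
#   if ext:
#       np.savez_compressed("extracted.npz", **ext)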
def main():
"""
Commandline interface to extract parameters.
"""
log_sockeye_version(logger)
params = argparse.ArgumentParser(description="Extract specific parameters.")
arguments.add_extract_args(params)
args = params.parse_args()
if os.path.isdir(args.input):
param_path = os.path.join(args.input, C.PARAMS_BEST_NAME)
else:
param_path = args.input
ext_params = extract(param_path, args.names, args.list_all)
if len(ext_params) > 0:
        utils.check_condition(args.output is not None, "An output filename must be specified. (Use --output)")
        logger.info("Writing extracted parameters to '%s'", args.output)
np.savez_compressed(args.output, **ext_params)
if __name__ == "__main__":
main()
| [
"List[str]",
"Dict[str, mx.nd.NDArray]",
"Dict[str, np.ndarray]",
"str",
"List[str]",
"bool"
] | [
935,
967,
1018,
1726,
1756,
1789
] | [
944,
991,
1039,
1729,
1765,
1793
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/inference.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Code for inference/translation
"""
import itertools
import logging
import os
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union, Set
import mxnet as mx
import numpy as np
from . import constants as C
from . import data_io
from . import lexicon
from . import model
from . import utils
from . import vocab
logger = logging.getLogger(__name__)
class InferenceModel(model.SockeyeModel):
"""
    InferenceModel is a SockeyeModel that supports two operations used for inference/decoding:
    (1) Encoder forward call: encode source sentence and return initial decoder states.
    (2) Decoder forward call: a single decoder step that predicts the next word.
:param model_folder: Folder to load model from.
:param context: MXNet context to bind modules to.
:param beam_size: Beam size.
:param batch_size: Batch size.
:param checkpoint: Checkpoint to load. If None, finds best parameters in model_folder.
:param softmax_temperature: Optional parameter to control steepness of softmax distribution.
:param max_output_length_num_stds: Number of standard deviations as safety margin for maximum output length.
:param decoder_return_logit_inputs: Decoder returns inputs to logit computation instead of softmax over target
vocabulary. Used when logits/softmax are handled separately.
:param cache_output_layer_w_b: Cache weights and biases for logit computation.
"""
def __init__(self,
model_folder: str,
context: mx.context.Context,
beam_size: int,
batch_size: int,
checkpoint: Optional[int] = None,
softmax_temperature: Optional[float] = None,
max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
decoder_return_logit_inputs: bool = False,
cache_output_layer_w_b: bool = False) -> None:
self.model_version = utils.load_version(os.path.join(model_folder, C.VERSION_NAME))
logger.info("Model version: %s", self.model_version)
utils.check_version(self.model_version)
config = model.SockeyeModel.load_config(os.path.join(model_folder, C.CONFIG_NAME))
super().__init__(config)
self.fname_params = os.path.join(model_folder, C.PARAMS_NAME % checkpoint if checkpoint else C.PARAMS_BEST_NAME)
utils.check_condition(beam_size < self.config.vocab_target_size,
'The beam size must be smaller than the target vocabulary size.')
self.beam_size = beam_size
self.softmax_temperature = softmax_temperature
self.batch_size = batch_size
self.context = context
self._build_model_components()
self.max_input_length, self.get_max_output_length = models_max_input_output_length([self],
max_output_length_num_stds)
self.encoder_module = None # type: Optional[mx.mod.BucketingModule]
self.encoder_default_bucket_key = None # type: Optional[int]
self.decoder_module = None # type: Optional[mx.mod.BucketingModule]
self.decoder_default_bucket_key = None # type: Optional[Tuple[int, int]]
self.decoder_data_shapes_cache = None # type: Optional[Dict]
self.decoder_return_logit_inputs = decoder_return_logit_inputs
self.cache_output_layer_w_b = cache_output_layer_w_b
self.output_layer_w = None # type: mx.nd.NDArray
self.output_layer_b = None # type: mx.nd.NDArray
def initialize(self, max_input_length: int, get_max_output_length_function: Callable):
"""
Delayed construction of modules to ensure multiple Inference models can agree on computing a common
maximum output length.
:param max_input_length: Maximum input length.
:param get_max_output_length_function: Callable to compute maximum output length.
"""
self.max_input_length = max_input_length
if self.max_input_length > self.training_max_seq_len_source:
logger.warning("Model was only trained with sentences up to a length of %d, "
"but a max_input_len of %d is used.",
self.training_max_seq_len_source, self.max_input_length)
self.get_max_output_length = get_max_output_length_function
# check the maximum supported length of the encoder & decoder:
if self.max_supported_seq_len_source is not None:
utils.check_condition(self.max_input_length <= self.max_supported_seq_len_source,
"Encoder only supports a maximum length of %d" % self.max_supported_seq_len_source)
if self.max_supported_seq_len_target is not None:
decoder_max_len = self.get_max_output_length(max_input_length)
utils.check_condition(decoder_max_len <= self.max_supported_seq_len_target,
"Decoder only supports a maximum length of %d, but %d was requested. Note that the "
"maximum output length depends on the input length and the source/target length "
"ratio observed during training." % (self.max_supported_seq_len_target,
decoder_max_len))
self.encoder_module, self.encoder_default_bucket_key = self._get_encoder_module()
self.decoder_module, self.decoder_default_bucket_key = self._get_decoder_module()
self.decoder_data_shapes_cache = dict() # bucket_key -> shape cache
max_encoder_data_shapes = self._get_encoder_data_shapes(self.encoder_default_bucket_key)
max_decoder_data_shapes = self._get_decoder_data_shapes(self.decoder_default_bucket_key)
self.encoder_module.bind(data_shapes=max_encoder_data_shapes, for_training=False, grad_req="null")
self.decoder_module.bind(data_shapes=max_decoder_data_shapes, for_training=False, grad_req="null")
self.load_params_from_file(self.fname_params)
self.encoder_module.init_params(arg_params=self.params, allow_missing=False)
self.decoder_module.init_params(arg_params=self.params, allow_missing=False)
if self.cache_output_layer_w_b:
if self.output_layer.weight_normalization:
# precompute normalized output layer weight imperatively
assert self.output_layer.weight_norm is not None
weight = self.params[self.output_layer.weight_norm.weight.name].as_in_context(self.context)
scale = self.params[self.output_layer.weight_norm.scale.name].as_in_context(self.context)
self.output_layer_w = self.output_layer.weight_norm(weight, scale)
else:
self.output_layer_w = self.params[self.output_layer.w.name].as_in_context(self.context)
self.output_layer_b = self.params[self.output_layer.b.name].as_in_context(self.context)
def _get_encoder_module(self) -> Tuple[mx.mod.BucketingModule, int]:
"""
Returns a BucketingModule for the encoder. Given a source sequence, it returns
the initial decoder states of the model.
The bucket key for this module is the length of the source sequence.
:return: Tuple of encoder module and default bucket key.
"""
def sym_gen(source_seq_len: int):
source = mx.sym.Variable(C.SOURCE_NAME)
source_length = utils.compute_lengths(source)
# source embedding
(source_embed,
source_embed_length,
source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len)
# encoder
# source_encoded: (source_encoded_length, batch_size, encoder_depth)
(source_encoded,
source_encoded_length,
source_encoded_seq_len) = self.encoder.encode(source_embed,
source_embed_length,
source_embed_seq_len)
# source_encoded: (batch_size, source_encoded_length, encoder_depth)
# TODO(fhieber): Consider standardizing encoders to return batch-major data to avoid this line.
source_encoded = mx.sym.swapaxes(source_encoded, dim1=0, dim2=1)
# initial decoder states
decoder_init_states = self.decoder.init_states(source_encoded,
source_encoded_length,
source_encoded_seq_len)
data_names = [C.SOURCE_NAME]
label_names = [] # type: List[str]
return mx.sym.Group(decoder_init_states), data_names, label_names
default_bucket_key = self.max_input_length
module = mx.mod.BucketingModule(sym_gen=sym_gen,
default_bucket_key=default_bucket_key,
context=self.context)
return module, default_bucket_key
def _get_decoder_module(self) -> Tuple[mx.mod.BucketingModule, Tuple[int, int]]:
"""
Returns a BucketingModule for a single decoder step.
Given previously predicted word and previous decoder states, it returns
a distribution over the next predicted word and the next decoder states.
The bucket key for this module is the length of the source sequence
and the current time-step in the inference procedure (e.g. beam search).
The latter corresponds to the current length of the target sequences.
:return: Tuple of decoder module and default bucket key.
"""
def sym_gen(bucket_key: Tuple[int, int]):
"""
Returns either softmax output (probs over target vocabulary) or inputs to logit
computation, controlled by decoder_return_logit_inputs
"""
source_seq_len, decode_step = bucket_key
source_embed_seq_len = self.embedding_source.get_encoded_seq_len(source_seq_len)
source_encoded_seq_len = self.encoder.get_encoded_seq_len(source_embed_seq_len)
self.decoder.reset()
target_prev = mx.sym.Variable(C.TARGET_NAME)
states = self.decoder.state_variables(decode_step)
state_names = [state.name for state in states]
# embedding for previous word
# (batch_size, num_embed)
target_embed_prev, _, _ = self.embedding_target.encode(data=target_prev, data_length=None, seq_len=1)
# decoder
# target_decoded: (batch_size, decoder_depth)
(target_decoded,
attention_probs,
states) = self.decoder.decode_step(decode_step,
target_embed_prev,
source_encoded_seq_len,
*states)
if self.decoder_return_logit_inputs:
# skip output layer in graph
outputs = mx.sym.identity(target_decoded, name=C.LOGIT_INPUTS_NAME)
else:
# logits: (batch_size, target_vocab_size)
logits = self.output_layer(target_decoded)
if self.softmax_temperature is not None:
logits /= self.softmax_temperature
outputs = mx.sym.softmax(data=logits, name=C.SOFTMAX_NAME)
data_names = [C.TARGET_NAME] + state_names
label_names = [] # type: List[str]
return mx.sym.Group([outputs, attention_probs] + states), data_names, label_names
# pylint: disable=not-callable
default_bucket_key = (self.max_input_length, self.get_max_output_length(self.max_input_length))
module = mx.mod.BucketingModule(sym_gen=sym_gen,
default_bucket_key=default_bucket_key,
context=self.context)
return module, default_bucket_key
def _get_encoder_data_shapes(self, bucket_key: int) -> List[mx.io.DataDesc]:
"""
Returns data shapes of the encoder module.
:param bucket_key: Maximum input length.
:return: List of data descriptions.
"""
return [mx.io.DataDesc(name=C.SOURCE_NAME,
shape=(self.batch_size, bucket_key),
layout=C.BATCH_MAJOR)]
def _get_decoder_data_shapes(self, bucket_key: Tuple[int, int]) -> List[mx.io.DataDesc]:
"""
Returns data shapes of the decoder module.
Caches results for bucket_keys if called iteratively.
:param bucket_key: Tuple of (maximum input length, maximum target length).
:return: List of data descriptions.
"""
source_max_length, target_max_length = bucket_key
return self.decoder_data_shapes_cache.setdefault(
bucket_key,
[mx.io.DataDesc(name=C.TARGET_NAME, shape=(self.batch_size * self.beam_size,), layout="NT")] +
self.decoder.state_shapes(self.batch_size * self.beam_size,
target_max_length,
self.encoder.get_encoded_seq_len(source_max_length),
self.encoder.get_num_hidden()))
def run_encoder(self,
source: mx.nd.NDArray,
source_max_length: int) -> 'ModelState':
"""
Runs forward pass of the encoder.
Encodes source given source length and bucket key.
Returns encoder representation of the source, source_length, initial hidden state of decoder RNN,
and initial decoder states tiled to beam size.
:param source: Integer-coded input tokens. Shape (batch_size, source length).
:param source_max_length: Bucket key.
:return: Initial model state.
"""
batch = mx.io.DataBatch(data=[source],
label=None,
bucket_key=source_max_length,
provide_data=self._get_encoder_data_shapes(source_max_length))
self.encoder_module.forward(data_batch=batch, is_train=False)
decoder_states = self.encoder_module.get_outputs()
# replicate encoder/init module results beam size times
decoder_states = [mx.nd.repeat(s, repeats=self.beam_size, axis=0) for s in decoder_states]
return ModelState(decoder_states)
def run_decoder(self,
prev_word: mx.nd.NDArray,
bucket_key: Tuple[int, int],
model_state: 'ModelState') -> Tuple[mx.nd.NDArray, mx.nd.NDArray, 'ModelState']:
"""
Runs forward pass of the single-step decoder.
:return: Decoder stack output (logit inputs or probability distribution), attention scores, updated model state.
"""
batch = mx.io.DataBatch(
data=[prev_word.as_in_context(self.context)] + model_state.states,
label=None,
bucket_key=bucket_key,
provide_data=self._get_decoder_data_shapes(bucket_key))
self.decoder_module.forward(data_batch=batch, is_train=False)
out, attention_probs, *model_state.states = self.decoder_module.get_outputs()
return out, attention_probs, model_state
@property
def training_max_seq_len_source(self) -> int:
""" The maximum sequence length on the source side during training. """
if self.config.config_data.data_statistics.max_observed_len_source is not None:
return self.config.config_data.data_statistics.max_observed_len_source
else:
return self.config.max_seq_len_source
@property
def training_max_seq_len_target(self) -> int:
""" The maximum sequence length on the target side during training. """
if self.config.config_data.data_statistics.max_observed_len_target is not None:
return self.config.config_data.data_statistics.max_observed_len_target
else:
return self.config.max_seq_len_target
@property
def max_supported_seq_len_source(self) -> Optional[int]:
""" If not None this is the maximally supported source length during inference (hard constraint). """
return self.encoder.get_max_seq_len()
@property
def max_supported_seq_len_target(self) -> Optional[int]:
""" If not None this is the maximally supported target length during inference (hard constraint). """
return self.decoder.get_max_seq_len()
@property
def length_ratio_mean(self) -> float:
return self.config.config_data.data_statistics.length_ratio_mean
@property
def length_ratio_std(self) -> float:
return self.config.config_data.data_statistics.length_ratio_std
def load_models(context: mx.context.Context,
max_input_len: Optional[int],
beam_size: int,
batch_size: int,
model_folders: List[str],
checkpoints: Optional[List[int]] = None,
softmax_temperature: Optional[float] = None,
max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
decoder_return_logit_inputs: bool = False,
cache_output_layer_w_b: bool = False) -> Tuple[List[InferenceModel], Dict[str, int], Dict[str, int]]:
"""
Loads a list of models for inference.
:param context: MXNet context to bind modules to.
:param max_input_len: Maximum input length.
:param beam_size: Beam size.
:param model_folders: List of model folders to load models from.
:param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint.
:param softmax_temperature: Optional parameter to control steepness of softmax distribution.
:param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio
to compute maximum output length.
:param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target
vocabulary. Used when logits/softmax are handled separately.
:param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with
restrict lexicon).
:return: List of models, source vocabulary, target vocabulary.
"""
models, source_vocabs, target_vocabs = [], [], []
if checkpoints is None:
checkpoints = [None] * len(model_folders)
for model_folder, checkpoint in zip(model_folders, checkpoints):
source_vocabs.append(vocab.vocab_from_json_or_pickle(os.path.join(model_folder, C.VOCAB_SRC_NAME)))
target_vocabs.append(vocab.vocab_from_json_or_pickle(os.path.join(model_folder, C.VOCAB_TRG_NAME)))
model = InferenceModel(model_folder=model_folder,
context=context,
beam_size=beam_size,
batch_size=batch_size,
softmax_temperature=softmax_temperature,
checkpoint=checkpoint,
decoder_return_logit_inputs=decoder_return_logit_inputs,
cache_output_layer_w_b=cache_output_layer_w_b)
models.append(model)
utils.check_condition(vocab.are_identical(*source_vocabs), "Source vocabulary ids do not match")
utils.check_condition(vocab.are_identical(*target_vocabs), "Target vocabulary ids do not match")
    # set a common max output length for all models.
max_input_len, get_max_output_length = models_max_input_output_length(models,
max_output_length_num_stds,
max_input_len)
for model in models:
model.initialize(max_input_len, get_max_output_length)
return models, source_vocabs[0], target_vocabs[0]
def models_max_input_output_length(models: List[InferenceModel],
num_stds: int,
forced_max_input_len: Optional[int] = None) -> Tuple[int, Callable]:
"""
Returns a function to compute maximum output length given a fixed number of standard deviations as a
safety margin, and the current input length.
Mean and std are taken from the model with the largest values to allow proper ensembling of models
trained on different data sets.
:param models: List of models.
:param num_stds: Number of standard deviations to add as a safety margin. If -1, returned maximum output lengths
will always be 2 * input_length.
:param forced_max_input_len: An optional overwrite of the maximum input length.
:return: The maximum input length and a function to get the output length given the input length.
"""
max_mean = max(model.length_ratio_mean for model in models)
max_std = max(model.length_ratio_std for model in models)
supported_max_seq_len_source = min((model.max_supported_seq_len_source for model in models
if model.max_supported_seq_len_source is not None),
default=None)
supported_max_seq_len_target = min((model.max_supported_seq_len_target for model in models
if model.max_supported_seq_len_target is not None),
default=None)
training_max_seq_len_source = min(model.training_max_seq_len_source for model in models)
return get_max_input_output_length(supported_max_seq_len_source,
supported_max_seq_len_target,
training_max_seq_len_source,
forced_max_input_len=forced_max_input_len,
length_ratio_mean=max_mean,
length_ratio_std=max_std,
num_stds=num_stds)
def get_max_input_output_length(supported_max_seq_len_source: Optional[int],
supported_max_seq_len_target: Optional[int],
training_max_seq_len_source: Optional[int],
forced_max_input_len: Optional[int],
length_ratio_mean: float,
length_ratio_std: float,
num_stds: int) -> Tuple[int, Callable]:
"""
Returns a function to compute maximum output length given a fixed number of standard deviations as a
safety margin, and the current input length. It takes into account optional maximum source and target lengths.
:param supported_max_seq_len_source: The maximum source length supported by the models.
:param supported_max_seq_len_target: The maximum target length supported by the models.
:param training_max_seq_len_source: The maximum source length observed during training.
:param forced_max_input_len: An optional overwrite of the maximum input length.
:param length_ratio_mean: The mean of the length ratio that was calculated on the raw sequences with special
symbols such as EOS or BOS.
:param length_ratio_std: The standard deviation of the length ratio.
:param num_stds: The number of standard deviations the target length may exceed the mean target length (as long as
the supported maximum length allows for this).
:return: The maximum input length and a function to get the output length given the input length.
"""
space_for_bos = 1
space_for_eos = 1
if num_stds < 0:
factor = C.TARGET_MAX_LENGTH_FACTOR # type: float
else:
factor = length_ratio_mean + (length_ratio_std * num_stds)
if forced_max_input_len is None:
# Make sure that if there is a hard constraint on the maximum source or target length we never exceed this
# constraint. This is for example the case for learned positional embeddings, which are only defined for the
# maximum source and target sequence length observed during training.
if supported_max_seq_len_source is not None and supported_max_seq_len_target is None:
max_input_len = supported_max_seq_len_source
elif supported_max_seq_len_source is None and supported_max_seq_len_target is not None:
max_output_len = supported_max_seq_len_target - space_for_bos - space_for_eos
if np.ceil(factor * training_max_seq_len_source) > max_output_len:
max_input_len = int(np.floor(max_output_len / factor))
else:
max_input_len = training_max_seq_len_source
elif supported_max_seq_len_source is not None or supported_max_seq_len_target is not None:
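            # Reaching this branch implies that both supported_max_seq_len_source and
            # supported_max_seq_len_target are not None: the two branches above already
            # handled the mixed None/not-None cases.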
max_output_len = supported_max_seq_len_target - space_for_bos - space_for_eos
if np.ceil(factor * supported_max_seq_len_source) > max_output_len:
max_input_len = int(np.floor(max_output_len / factor))
else:
max_input_len = supported_max_seq_len_source
else:
# Any source/target length is supported and max_input_len was not manually set, therefore we use the
# maximum length from training.
max_input_len = training_max_seq_len_source
else:
max_input_len = forced_max_input_len
def get_max_output_length(input_length: int):
"""
Returns the maximum output length for inference given the input length.
Explicitly includes space for BOS and EOS sentence symbols in the target sequence, because we assume
        that the mean length ratio computed on the training data does not include these special symbols.
(see data_io.analyze_sequence_lengths)
"""
return int(np.ceil(factor * input_length)) + space_for_bos + space_for_eos
return max_input_len, get_max_output_length
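# Worked example (illustrative numbers, not from the original module): with
# length_ratio_mean=1.1, length_ratio_std=0.2 and num_stds=2, factor = 1.1 + 0.2 * 2 = 1.5,
# so an input of length 10 yields a maximum output length of ceil(1.5 * 10) + 1 + 1 = 17
# (one extra position each for BOS and EOS).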
Tokens = List[str]
TranslatorInput = NamedTuple('TranslatorInput', [
('id', int),
('sentence', str),
('tokens', Tokens),
])
"""
Required input for Translator.
:param id: Sentence id.
:param sentence: Input sentence.
:param tokens: List of input tokens.
"""
InputChunk = NamedTuple("InputChunk",
[("id", int),
("chunk_id", int),
("tokens", Tokens)])
"""
A chunk of a TranslatorInput.
:param id: Sentence id.
:param chunk_id: The id of the chunk.
:param tokens: List of input tokens.
"""
TranslatorOutput = NamedTuple('TranslatorOutput', [
('id', int),
('translation', str),
('tokens', List[str]),
('attention_matrix', np.ndarray),
('score', float),
])
"""
Output structure from Translator.
:param id: Id of input sentence.
:param translation: Translation string without sentence boundary tokens.
:param tokens: List of translated tokens.
:param attention_matrix: Attention matrix. Shape: (target_length, source_length).
:param score: Negative log probability of generated translation.
"""
TokenIds = List[int]
Translation = NamedTuple('Translation', [
('target_ids', TokenIds),
('attention_matrix', np.ndarray),
('score', float)
])
TranslatedChunk = NamedTuple('TranslatedChunk', [
('id', int),
('chunk_id', int),
('translation', Translation),
])
"""
Translation of a chunk of a sentence.
:param id: Id of the sentence.
:param chunk_id: Id of the chunk.
:param translation: The translation of the input chunk.
"""
class ModelState:
"""
A ModelState encapsulates information about the decoder states of an InferenceModel.
"""
def __init__(self, states: List[mx.nd.NDArray]) -> None:
self.states = states
def sort_state(self, best_hyp_indices: mx.nd.NDArray):
"""
Sorts states according to k-best order from last step in beam search.
"""
self.states = [mx.nd.take(ds, best_hyp_indices) for ds in self.states]
class LengthPenalty:
"""
Calculates the length penalty as:
(beta + len(Y))**alpha / (beta + 1)**alpha
See Wu et al. 2016 (note that in the paper beta has a different meaning,
and a fixed value 5 was used for this parameter)
:param alpha: The alpha factor for the length penalty (see above).
:param beta: The beta factor for the length penalty (see above).
"""
def __init__(self, alpha: float = 1.0, beta: float = 0.0) -> None:
self.alpha = alpha
self.beta = beta
self.denominator = (self.beta + 1.) ** self.alpha
def __call__(self, lengths: Union[mx.nd.NDArray, int, float]) -> Union[mx.nd.NDArray, float]:
"""
Calculate the length penalty for the given vector of lengths.
:param lengths: A scalar or a matrix of sentence lengths of dimensionality (batch_size, 1).
:return: The length penalty. A scalar or a matrix (batch_size, 1) depending on the input.
"""
if self.alpha == 0.0:
if isinstance(lengths, mx.nd.NDArray):
# no length penalty:
return mx.nd.ones_like(lengths)
else:
return 1.0
else:
# note: we avoid unnecessary addition or pow operations
numerator = self.beta + lengths if self.beta != 0.0 else lengths
numerator = numerator ** self.alpha if self.alpha != 1.0 else numerator
return numerator / self.denominator
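    # Illustrative sketch (not part of the original module): with the defaults
    # alpha=1.0, beta=0.0 the denominator is 1 and the penalty equals the length itself,
    #   LengthPenalty()(10)  # -> 10.0
    # while alpha=0.0 disables length normalization entirely (penalty == 1 for any length).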
def _concat_translations(translations: List[Translation], start_id: int, stop_ids: Set[int],
length_penalty: LengthPenalty) -> Translation:
"""
Combine translations through concatenation.
:param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length.
    :param start_id: The BOS symbol id.
    :param stop_ids: The stop symbol ids (e.g. the EOS id).
    :param length_penalty: LengthPenalty instance used to (un)normalize scores.
    :return: A concatenation of the translations with a score.
"""
# Concatenation of all target ids without BOS and EOS
target_ids = [start_id]
attention_matrices = []
for idx, translation in enumerate(translations):
assert translation.target_ids[0] == start_id
if idx == len(translations) - 1:
target_ids.extend(translation.target_ids[1:])
attention_matrices.append(translation.attention_matrix[1:, :])
else:
if translation.target_ids[-1] in stop_ids:
target_ids.extend(translation.target_ids[1:-1])
attention_matrices.append(translation.attention_matrix[1:-1, :])
else:
target_ids.extend(translation.target_ids[1:])
attention_matrices.append(translation.attention_matrix[1:, :])
# Combine attention matrices:
attention_shapes = [attention_matrix.shape for attention_matrix in attention_matrices]
# Adding another row for the empty BOS alignment vector
bos_align_shape = np.asarray([1, 0])
attention_matrix_combined = np.zeros(np.sum(np.asarray(attention_shapes), axis=0) + bos_align_shape)
# We start at position 1 as position 0 is for the BOS, which is kept zero
pos_t, pos_s = 1, 0
for attention_matrix, (len_t, len_s) in zip(attention_matrices, attention_shapes):
attention_matrix_combined[pos_t:pos_t + len_t, pos_s:pos_s + len_s] = attention_matrix
pos_t += len_t
pos_s += len_s
# Unnormalize + sum and renormalize the score:
score = sum(translation.score * length_penalty(len(translation.target_ids))
for translation in translations)
score = score / length_penalty(len(target_ids))
return Translation(target_ids, attention_matrix_combined, score)
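# Note on the score arithmetic above (explanatory, not part of the original module): each
# chunk score is stored length-normalized, so it is first multiplied by its own length
# penalty to recover the raw log-probability, the raw scores are summed, and the sum is
# re-normalized by the penalty of the full concatenated length.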
class Translator:
"""
Translator uses one or several models to translate input.
    It holds references to vocabularies and takes care of encoding input strings as word ids and of
    converting target ids into a translation string.
:param context: MXNet context to bind modules to.
:param ensemble_mode: Ensemble mode: linear or log_linear combination.
:param length_penalty: Length penalty instance.
:param models: List of models.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:param restrict_lexicon: Top-k lexicon to use for target vocabulary restriction.
"""
def __init__(self,
context: mx.context.Context,
ensemble_mode: str,
bucket_source_width: int,
length_penalty: LengthPenalty,
models: List[InferenceModel],
vocab_source: Dict[str, int],
vocab_target: Dict[str, int],
restrict_lexicon: Optional[lexicon.TopKLexicon] = None) -> None:
self.context = context
self.length_penalty = length_penalty
self.vocab_source = vocab_source
self.vocab_target = vocab_target
self.vocab_target_inv = vocab.reverse_vocab(self.vocab_target)
self.restrict_lexicon = restrict_lexicon
self.start_id = self.vocab_target[C.BOS_SYMBOL]
self.stop_ids = {self.vocab_target[C.EOS_SYMBOL], C.PAD_ID} # type: Set[int]
self.models = models
self.interpolation_func = self._get_interpolation_func(ensemble_mode)
self.beam_size = self.models[0].beam_size
self.batch_size = self.models[0].batch_size
# after models are loaded we ensured that they agree on max_input_length, max_output_length and batch size
self.max_input_length = self.models[0].max_input_length
max_output_length = self.models[0].get_max_output_length(self.max_input_length)
if bucket_source_width > 0:
self.buckets_source = data_io.define_buckets(self.max_input_length, step=bucket_source_width)
else:
self.buckets_source = [self.max_input_length]
self.pad_dist = mx.nd.full((self.batch_size * self.beam_size, len(self.vocab_target)), val=np.inf,
ctx=self.context)
logger.info("Translator (%d model(s) beam_size=%d ensemble_mode=%s batch_size=%d "
"buckets_source=%s)",
len(self.models),
self.beam_size,
"None" if len(self.models) == 1 else ensemble_mode,
self.batch_size,
self.buckets_source)
@staticmethod
def _get_interpolation_func(ensemble_mode):
if ensemble_mode == 'linear':
return Translator._linear_interpolation
elif ensemble_mode == 'log_linear':
return Translator._log_linear_interpolation
else:
raise ValueError("unknown interpolation type")
@staticmethod
def _linear_interpolation(predictions):
# pylint: disable=invalid-unary-operand-type
return -mx.nd.log(utils.average_arrays(predictions))
@staticmethod
def _log_linear_interpolation(predictions):
"""
Returns averaged and re-normalized log probabilities
"""
log_probs = utils.average_arrays([mx.nd.log(p) for p in predictions])
# pylint: disable=invalid-unary-operand-type
return -mx.nd.log(mx.nd.softmax(log_probs))
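    # Illustrative contrast between the two ensembling modes (sketch with hypothetical
    # values): for model distributions p1 = [0.9, 0.1] and p2 = [0.5, 0.5],
    #   linear:     -log((p1 + p2) / 2)                      # -> -log([0.7, 0.3])
    #   log_linear: -log(softmax((log(p1) + log(p2)) / 2))   # geometric-mean style
    # log_linear averages in log space and re-normalizes, rewarding tokens on which the
    # models agree.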
@staticmethod
def make_input(sentence_id: int, sentence: str) -> TranslatorInput:
"""
Returns TranslatorInput from input_string
:param sentence_id: Input sentence id.
:param sentence: Input sentence.
:return: Input for translate method.
"""
tokens = list(data_io.get_tokens(sentence))
return TranslatorInput(id=sentence_id, sentence=sentence.rstrip(), tokens=tokens)
def translate(self, trans_inputs: List[TranslatorInput]) -> List[TranslatorOutput]:
"""
Batch-translates a list of TranslatorInputs, returns a list of TranslatorOutputs.
Splits oversized sentences to sentence chunks of size less than max_input_length.
:param trans_inputs: List of TranslatorInputs as returned by make_input().
:return: List of translation results.
"""
translated_chunks = []
# split into chunks
input_chunks = [] # type: List[InputChunk]
for input_idx, trans_input in enumerate(trans_inputs):
if len(trans_input.tokens) == 0:
empty_translation = Translation(target_ids=[],
attention_matrix=np.asarray([[0]]),
score=-np.inf)
translated_chunks.append(TranslatedChunk(id=input_idx,
chunk_id=0,
translation=empty_translation))
elif len(trans_input.tokens) > self.max_input_length:
logger.debug(
"Input %d has length (%d) that exceeds max input length (%d). Splitting into chunks of size %d.",
trans_input.id, len(trans_input.tokens), self.buckets_source[-1], self.max_input_length)
token_chunks = utils.chunks(trans_input.tokens, self.max_input_length)
input_chunks.extend(InputChunk(input_idx, chunk_id, chunk)
for chunk_id, chunk in enumerate(token_chunks))
else:
input_chunks.append(InputChunk(input_idx, 0, trans_input.tokens))
# Sort longest to shortest (to rather fill batches of shorter than longer sequences)
input_chunks = sorted(input_chunks, key=lambda chunk: len(chunk.tokens), reverse=True)
# translate in batch-sized blocks over input chunks
for batch_id, chunks in enumerate(utils.grouper(input_chunks, self.batch_size)):
batch = [chunk.tokens for chunk in chunks]
logger.debug("Translating batch %d", batch_id)
# underfilled batch will be filled to a full batch size with copies of the 1st input
rest = self.batch_size - len(batch)
if rest > 0:
logger.debug("Extending the last batch to the full batch size (%d)", self.batch_size)
batch = batch + [batch[0]] * rest
batch_translations = self.translate_nd(*self._get_inference_input(batch))
# truncate to remove filler translations
if rest > 0:
batch_translations = batch_translations[:-rest]
for chunk, translation in zip(chunks, batch_translations):
translated_chunks.append(TranslatedChunk(chunk.id, chunk.chunk_id, translation))
# Sort by input idx and then chunk id
translated_chunks = sorted(translated_chunks)
# Concatenate results
results = []
chunks_by_input_idx = itertools.groupby(translated_chunks, key=lambda translation: translation.id)
for trans_input, (input_idx, chunks) in zip(trans_inputs, chunks_by_input_idx):
chunks = list(chunks)
if len(chunks) == 1:
translation = chunks[0].translation
else:
translations_to_concat = [translated_chunk.translation for translated_chunk in chunks]
translation = self._concat_translations(translations_to_concat)
results.append(self._make_result(trans_input, translation))
return results
def _get_inference_input(self, sequences: List[List[str]]) -> Tuple[mx.nd.NDArray, int]:
"""
Returns NDArray of source ids (shape=(batch_size, bucket_key)) and corresponding bucket_key.
:param sequences: List of lists of input tokens.
        :return: NDArray of source ids and bucket key.
"""
bucket_key = data_io.get_bucket(max(len(tokens) for tokens in sequences), self.buckets_source)
utils.check_condition(C.PAD_ID == 0, "pad id should be 0")
source = mx.nd.zeros((len(sequences), bucket_key))
for j, tokens in enumerate(sequences):
source[j, :len(tokens)] = data_io.tokens2ids(tokens, self.vocab_source)
return source, bucket_key
def _make_result(self,
trans_input: TranslatorInput,
translation: Translation) -> TranslatorOutput:
"""
Returns a translator result from generated target-side word ids, attention matrix, and score.
Strips stop ids from translation string.
:param trans_input: Translator input.
:param translation: The translation + attention and score.
:return: TranslatorOutput.
"""
# remove special sentence start symbol (<s>) from the output:
target_ids = translation.target_ids[1:]
attention_matrix = translation.attention_matrix[1:, :]
target_tokens = [self.vocab_target_inv[target_id] for target_id in target_ids]
target_string = C.TOKEN_SEPARATOR.join(
target_token for target_id, target_token in zip(target_ids, target_tokens) if
target_id not in self.stop_ids)
attention_matrix = attention_matrix[:, :len(trans_input.tokens)]
return TranslatorOutput(id=trans_input.id,
translation=target_string,
tokens=target_tokens,
attention_matrix=attention_matrix,
score=translation.score)
def _concat_translations(self, translations: List[Translation]) -> Translation:
"""
Combine translations through concatenation.
:param translations: A list of translations (sequence, attention_matrix), score and length.
        :return: A concatenation of the translations with a score.
"""
return _concat_translations(translations, self.start_id, self.stop_ids, self.length_penalty)
def translate_nd(self,
source: mx.nd.NDArray,
source_length: int) -> List[Translation]:
"""
Translates source of source_length, given a bucket_key.
:param source: Source ids. Shape: (batch_size, bucket_key).
:param source_length: Bucket key.
:return: Sequence of translations.
"""
return self._get_best_from_beam(*self._beam_search(source, source_length))
def _encode(self, sources: mx.nd.NDArray, source_length: int) -> List[ModelState]:
"""
Returns a ModelState for each model representing the state of the model after encoding the source.
:param sources: Source ids. Shape: (batch_size, bucket_key).
:param source_length: Bucket key.
:return: List of ModelStates.
"""
return [model.run_encoder(sources, source_length) for model in self.models]
def _decode_step(self,
sequences: mx.nd.NDArray,
step: int,
source_length: int,
states: List[ModelState],
models_output_layer_w: List[mx.nd.NDArray],
models_output_layer_b: List[mx.nd.NDArray]) \
-> Tuple[mx.nd.NDArray, mx.nd.NDArray, List[ModelState]]:
"""
Returns decoder predictions (combined from all models), attention scores, and updated states.
:param sequences: Sequences of current hypotheses. Shape: (batch_size * beam_size, max_output_length).
:param step: Beam search iteration.
:param source_length: Length of the input sequence.
:param states: List of model states.
:param models_output_layer_w: Custom model weights for logit computation (empty for none).
:param models_output_layer_b: Custom model biases for logit computation (empty for none).
:return: (probs, attention scores, list of model states)
"""
bucket_key = (source_length, step)
prev_word = sequences[:, step - 1]
model_probs, model_attention_probs, model_states = [], [], []
# We use zip_longest here since we'll have empty lists when not using restrict_lexicon
for model, out_w, out_b, state in itertools.zip_longest(
self.models, models_output_layer_w, models_output_layer_b, states):
decoder_outputs, attention_probs, state = model.run_decoder(prev_word, bucket_key, state)
# Compute logits and softmax with restricted vocabulary
if self.restrict_lexicon:
logits = model.output_layer(decoder_outputs, out_w, out_b)
probs = mx.nd.softmax(logits)
else:
# Otherwise decoder outputs are already target vocab probs
probs = decoder_outputs
model_probs.append(probs)
model_attention_probs.append(attention_probs)
model_states.append(state)
neg_logprobs, attention_probs = self._combine_predictions(model_probs, model_attention_probs)
return neg_logprobs, attention_probs, model_states
def _combine_predictions(self,
probs: List[mx.nd.NDArray],
attention_probs: List[mx.nd.NDArray]) -> Tuple[mx.nd.NDArray, mx.nd.NDArray]:
"""
Returns combined predictions of models as negative log probabilities and averaged attention prob scores.
:param probs: List of Shape(beam_size, target_vocab_size).
:param attention_probs: List of Shape(beam_size, bucket_key).
:return: Combined negative log probabilities, averaged attention scores.
"""
# average attention prob scores. TODO: is there a smarter way to do this?
attention_prob_score = utils.average_arrays(attention_probs)
# combine model predictions and convert to neg log probs
if len(self.models) == 1:
neg_logprobs = -mx.nd.log(probs[0]) # pylint: disable=invalid-unary-operand-type
else:
neg_logprobs = self.interpolation_func(probs)
return neg_logprobs, attention_prob_score
def _beam_search(self,
source: mx.nd.NDArray,
source_length: int) -> Tuple[mx.nd.NDArray, mx.nd.NDArray, mx.nd.NDArray, mx.nd.NDArray]:
"""
Translates multiple sentences using beam search.
:param source: Source ids. Shape: (batch_size, bucket_key).
:param source_length: Max source length.
        :return: List of lists of word ids, list of attentions, array of accumulated length-normalized
            negative log-probs.
"""
# Length of encoded sequence (may differ from initial input length)
encoded_source_length = self.models[0].encoder.get_encoded_seq_len(source_length)
utils.check_condition(all(encoded_source_length ==
model.encoder.get_encoded_seq_len(source_length) for model in self.models),
"Models must agree on encoded sequence length")
# Maximum output length
max_output_length = self.models[0].get_max_output_length(source_length)
# General data structure: each row has batch_size * beam blocks for the 1st sentence, with a full beam,
# then the next block for the 2nd sentence and so on
# sequences: (batch_size * beam_size, output_length), pre-filled with <s> symbols on index 0
sequences = mx.nd.full((self.batch_size * self.beam_size, max_output_length), val=C.PAD_ID, ctx=self.context,
dtype='int32')
sequences[:, 0] = self.start_id
lengths = mx.nd.ones((self.batch_size * self.beam_size, 1), ctx=self.context)
finished = mx.nd.zeros((self.batch_size * self.beam_size,), ctx=self.context, dtype='int32')
# attentions: (batch_size * beam_size, output_length, encoded_source_length)
attentions = mx.nd.zeros((self.batch_size * self.beam_size, max_output_length, encoded_source_length),
ctx=self.context)
# best_hyp_indices: row indices of smallest scores (ascending).
best_hyp_indices = mx.nd.zeros((self.batch_size * self.beam_size,), ctx=self.context, dtype='int32')
# best_word_indices: column indices of smallest scores (ascending).
best_word_indices = mx.nd.zeros((self.batch_size * self.beam_size,), ctx=self.context, dtype='int32')
# scores_accumulated: chosen smallest scores in scores (ascending).
scores_accumulated = mx.nd.zeros((self.batch_size * self.beam_size, 1), ctx=self.context)
# reset all padding distribution cells to np.inf
self.pad_dist[:] = np.inf
# If using a top-k lexicon, select param rows for logit computation that correspond to the
# target vocab for this sentence.
models_output_layer_w = list()
models_output_layer_b = list()
pad_dist = self.pad_dist
vocab_slice_ids = None # type: mx.nd.NDArray
if self.restrict_lexicon:
# TODO: See note in method about migrating to pure MXNet when set operations are supported.
# We currently convert source to NumPy and target ids back to NDArray.
vocab_slice_ids = mx.nd.array(self.restrict_lexicon.get_trg_ids(source.astype("int32").asnumpy()),
ctx=self.context)
if vocab_slice_ids.shape[0] < self.beam_size + 1:
# This fixes an edge case for toy models, where the number of vocab ids from the lexicon is
# smaller than the beam size.
logger.warning("Padding vocab_slice_ids (%d) with EOS to have at least %d+1 elements to expand",
vocab_slice_ids.shape[0], self.beam_size)
n = self.beam_size - vocab_slice_ids.shape[0] + 1
vocab_slice_ids = mx.nd.concat(vocab_slice_ids,
mx.nd.full((n,), val=self.vocab_target[C.EOS_SYMBOL], ctx=self.context),
dim=0)
pad_dist = mx.nd.full((self.batch_size * self.beam_size, vocab_slice_ids.shape[0]),
val=np.inf, ctx=self.context)
for m in self.models:
models_output_layer_w.append(m.output_layer_w.take(vocab_slice_ids))
models_output_layer_b.append(m.output_layer_b.take(vocab_slice_ids))
# (0) encode source sentence, returns a list
model_states = self._encode(source, source_length)
for t in range(1, max_output_length):
# (1) obtain next predictions and advance models' state
# scores: (batch_size * beam_size, target_vocab_size)
# attention_scores: (batch_size * beam_size, bucket_key)
scores, attention_scores, model_states = self._decode_step(sequences,
t,
source_length,
model_states,
models_output_layer_w,
models_output_layer_b)
# (2) compute length-normalized accumulated scores in place
if t == 1 and self.batch_size == 1: # only one hypothesis at t==1
scores = scores[:1] / self.length_penalty(lengths[:1])
else:
# renormalize scores by length ...
scores = (scores + scores_accumulated * self.length_penalty(lengths - 1)) / self.length_penalty(lengths)
# ... but not for finished hyps.
# their predicted distribution is set to their accumulated scores at C.PAD_ID.
pad_dist[:, C.PAD_ID] = scores_accumulated[:, 0]
# this is equivalent to doing this in numpy:
# pad_dist[finished, :] = np.inf
# pad_dist[finished, C.PAD_ID] = scores_accumulated[finished]
scores = mx.nd.where(finished, pad_dist, scores)
# (3) get beam_size winning hypotheses for each sentence block separately
# TODO(fhieber): once mx.nd.topk is sped-up no numpy conversion necessary anymore.
scores = scores.asnumpy() # convert to numpy once to minimize cross-device copying
for sent in range(self.batch_size):
rows = slice(sent * self.beam_size, (sent + 1) * self.beam_size)
sliced_scores = scores if t == 1 and self.batch_size == 1 else scores[rows]
# TODO we could save some tiny amount of time here by not running smallest_k for a finished sent
(best_hyp_indices[rows], best_word_indices[rows]), \
scores_accumulated[rows, 0] = utils.smallest_k(sliced_scores, self.beam_size, t == 1)
# offsetting since the returned smallest_k() indices were slice-relative
best_hyp_indices[rows] += rows.start
# Map from restricted to full vocab ids if needed
if self.restrict_lexicon:
best_word_indices[:] = vocab_slice_ids.take(best_word_indices)
# (4) get hypotheses and their properties for beam_size winning hypotheses (ascending)
sequences = mx.nd.take(sequences, best_hyp_indices)
lengths = mx.nd.take(lengths, best_hyp_indices)
finished = mx.nd.take(finished, best_hyp_indices)
attention_scores = mx.nd.take(attention_scores, best_hyp_indices)
attentions = mx.nd.take(attentions, best_hyp_indices)
# (5) update best hypotheses, their attention lists and lengths (only for non-finished hyps)
# pylint: disable=unsupported-assignment-operation
sequences[:, t] = best_word_indices
attentions[:, t, :] = attention_scores
lengths += mx.nd.cast(1 - mx.nd.expand_dims(finished, axis=1), dtype='float32')
# (6) determine which hypotheses in the beam are now finished
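            # The elementwise sum of the two comparison masks below acts as a logical OR:
            # a hypothesis counts as finished once it emitted either the padding id or EOS.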
finished = ((best_word_indices == C.PAD_ID) + (best_word_indices == self.vocab_target[C.EOS_SYMBOL]))
if mx.nd.sum(finished).asscalar() == self.batch_size * self.beam_size: # all finished
break
# (7) update models' state with winning hypotheses (ascending)
for ms in model_states:
ms.sort_state(best_hyp_indices)
return sequences, attentions, scores_accumulated, lengths
def _get_best_from_beam(self,
sequences: mx.nd.NDArray,
attention_lists: mx.nd.NDArray,
accumulated_scores: mx.nd.NDArray,
lengths: mx.nd.NDArray) -> List[Translation]:
"""
Return the best (aka top) entry from the n-best list.
:param sequences: Array of word ids. Shape: (batch_size * beam_size, bucket_key).
:param attention_lists: Array of attentions over source words.
Shape: (batch_size * self.beam_size, max_output_length, encoded_source_length).
:param accumulated_scores: Array of length-normalized negative log-probs.
:return: Top sequence, top attention matrix, top accumulated score (length-normalized
negative log-probs) and length.
"""
utils.check_condition(sequences.shape[0] == attention_lists.shape[0] \
== accumulated_scores.shape[0] == lengths.shape[0], "Shape mismatch")
# sequences & accumulated scores are in latest 'k-best order', thus 0th element is best
best = 0
result = []
for sent in range(self.batch_size):
idx = sent * self.beam_size + best
length = int(lengths[idx].asscalar())
sequence = sequences[idx][:length].asnumpy().tolist()
# attention_matrix: (target_seq_len, source_seq_len)
attention_matrix = np.stack(attention_lists[idx].asnumpy()[:length, :], axis=0)
score = accumulated_scores[idx].asscalar()
result.append(Translation(sequence, attention_matrix, score))
return result
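# Illustrative sketch (not part of the original file): hypotheses are stored
# best-first in contiguous blocks of beam_size, so the best entry of sentence
# `sent` lives at flat index sent * beam_size. The name below is hypothetical.
def _demo_best_flat_indices(batch_size: int, beam_size: int) -> List[int]:
    """E.g. batch_size=2, beam_size=5 -> [0, 5]."""
    return [sent * beam_size for sent in range(batch_size)]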
| [
"str",
"mx.context.Context",
"int",
"int",
"int",
"Callable",
"int",
"Tuple[int, int]",
"int",
"Tuple[int, int]",
"mx.nd.NDArray",
"int",
"mx.nd.NDArray",
"Tuple[int, int]",
"'ModelState'",
"mx.context.Context",
"Optional[int]",
"int",
"int",
"List[str]",
"List[InferenceModel]",
"int",
"Optional[int]",
"Optional[int]",
"Optional[int]",
"Optional[int]",
"float",
"float",
"int",
"int",
"List[mx.nd.NDArray]",
"mx.nd.NDArray",
"Union[mx.nd.NDArray, int, float]",
"List[Translation]",
"int",
"Set[int]",
"LengthPenalty",
"mx.context.Context",
"str",
"int",
"LengthPenalty",
"List[InferenceModel]",
"Dict[str, int]",
"Dict[str, int]",
"int",
"str",
"List[TranslatorInput]",
"List[List[str]]",
"TranslatorInput",
"Translation",
"List[Translation]",
"mx.nd.NDArray",
"int",
"mx.nd.NDArray",
"int",
"mx.nd.NDArray",
"int",
"int",
"List[ModelState]",
"List[mx.nd.NDArray]",
"List[mx.nd.NDArray]",
"List[mx.nd.NDArray]",
"List[mx.nd.NDArray]",
"mx.nd.NDArray",
"int",
"mx.nd.NDArray",
"mx.nd.NDArray",
"mx.nd.NDArray",
"mx.nd.NDArray"
] | [
2086,
2117,
2165,
2199,
4235,
4272,
8069,
10456,
12843,
13267,
14166,
14220,
15340,
15387,
15437,
17658,
17709,
17751,
17784,
17820,
20975,
21042,
23096,
23173,
23249,
23318,
23384,
23441,
23490,
26482,
28694,
28797,
29605,
30507,
30536,
30551,
30602,
33383,
33435,
33478,
33516,
33556,
33609,
33656,
36274,
36289,
36703,
40396,
41137,
41188,
42409,
42846,
42897,
43282,
43312,
43763,
43805,
43846,
43880,
43942,
44007,
45980,
46047,
46988,
47039,
55567,
55627,
55690,
55742
] | [
2089,
2135,
2168,
2202,
4238,
4280,
8072,
10471,
12846,
13282,
14179,
14223,
15353,
15402,
15449,
17676,
17722,
17754,
17787,
17829,
20995,
21045,
23109,
23186,
23262,
23331,
23389,
23446,
23493,
26485,
28713,
28810,
29637,
30524,
30539,
30559,
30615,
33401,
33438,
33481,
33529,
33576,
33623,
33670,
36277,
36292,
36724,
40411,
41152,
41199,
42426,
42859,
42900,
43295,
43315,
43776,
43808,
43849,
43896,
43961,
44026,
45999,
46066,
47001,
47042,
55580,
55640,
55703,
55755
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/init_embedding.py | #!/usr/bin/env python3
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Initializing Sockeye embedding weights with pretrained word representations.
Quick usage:
python3 -m contrib.utils.init_embedding \
-e embed-in-src.npy embed-in-tgt.npy \
-i vocab-in-src.json vocab-in-tgt.json \
-o vocab-out-src.json vocab-out-tgt.json \
-n source_embed_weight target_embed_weight \
-f params.init
Optional arguments:
--embeddings, -e
list of input embedding weights in .npy format
shape=(vocab-in-size, embedding-size)
--vocabularies-in, -i
list of input vocabularies as token-index dictionaries in .json format
--vocabularies-out, -o
list of output vocabularies as token-index dictionaries in .json format
They can be generated using sockeye.vocab before actual Sockeye training.
--names, -n
list of Sockeye parameter names for embedding weights
Most common ones are source_embed_weight, target_embed_weight and source_target_embed_weight.
The sizes of the above four lists must be exactly the same - they are aligned positionally.
--file, -f
file to write initialized parameters
--encoding, -c
open input vocabularies with specified encoding (default: utf-8)
"""
import argparse
import sys
from typing import Dict
import numpy as np
import mxnet as mx
from sockeye.log import setup_main_logger, log_sockeye_version
from . import arguments
from . import utils
from . import vocab
logger = setup_main_logger(__name__, console=True, file_logging=False)
def init_embedding(embed: np.ndarray,
vocab_in: Dict[str, int],
vocab_out: Dict[str, int],
initializer: mx.initializer.Initializer=mx.init.Constant(value=0.0)) -> mx.nd.NDArray:
"""
Initialize embedding weight by existing word representations given input and output vocabularies.
:param embed: Input embedding weight.
:param vocab_in: Input vocabulary.
:param vocab_out: Output vocabulary.
:param initializer: MXNet initializer.
:return: Initialized output embedding weight.
"""
embed_init = mx.nd.empty((len(vocab_out), embed.shape[1]), dtype='float32')
embed_desc = mx.init.InitDesc("embed_weight")
initializer(embed_desc, embed_init)
for token in vocab_out:
if token in vocab_in:
embed_init[vocab_out[token]] = embed[vocab_in[token]]
return embed_init
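# Hedged usage sketch for init_embedding (toy vocabularies and weights; every
# name and value below is made up for illustration):
def _example_init_embedding() -> mx.nd.NDArray:
    vocab_in = {'<unk>': 0, 'hello': 1, 'world': 2}
    vocab_out = {'<unk>': 0, 'world': 1, 'foo': 2}
    embed_in = np.ones((len(vocab_in), 4), dtype='float32')
    # rows for '<unk>' and 'world' are copied from embed_in; 'foo' keeps the
    # default constant-zero initialization.
    return init_embedding(embed_in, vocab_in, vocab_out)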
def main():
"""
Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
"""
log_sockeye_version(logger)
params = argparse.ArgumentParser(description='Quick usage: python3 -m contrib.utils.init_embedding '
'-e embed-in-src.npy embed-in-tgt.npy '
'-i vocab-in-src.json vocab-in-tgt.json '
'-o vocab-out-src.json vocab-out-tgt.json '
'-n source_embed_weight target_embed_weight '
'-f params.init')
arguments.add_init_embedding_args(params)
args = params.parse_args()
if len(args.embeddings) != len(args.vocabularies_in) or \
len(args.embeddings) != len(args.vocabularies_out) or \
len(args.embeddings) != len(args.names):
logger.error("Exactly the same number of 'input embedding weights', 'input vocabularies', "
"'output vocabularies' and 'Sockeye parameter names' should be provided.")
sys.exit(1)
params = {} # type: Dict[str, mx.nd.NDArray]
for embed_file, vocab_in_file, vocab_out_file, name in zip(args.embeddings, args.vocabularies_in, \
args.vocabularies_out, args.names):
logger.info('Loading input embedding weight: %s', embed_file)
embed = np.load(embed_file)
logger.info('Loading input/output vocabularies: %s %s', vocab_in_file, vocab_out_file)
vocab_in = vocab.vocab_from_json(vocab_in_file, encoding=args.encoding)
vocab_out = vocab.vocab_from_json(vocab_out_file)
logger.info('Initializing parameter: %s', name)
initializer = mx.init.Normal(sigma=np.std(embed))
params[name] = init_embedding(embed, vocab_in, vocab_out, initializer)
logger.info('Saving initialized parameters to %s', args.file)
utils.save_params(params, args.file)
if __name__ == '__main__':
main()
| [
"np.ndarray",
"Dict[str, int]",
"Dict[str, int]"
] | [
2171,
2212,
2258
] | [
2181,
2226,
2272
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/initializer.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from typing import Optional
import mxnet as mx
import numpy as np
import sockeye.constants as C
logger = logging.getLogger(__name__)
def get_initializer(default_init_type: str, default_init_scale: float, default_init_xavier_rand_type: str,
default_init_xavier_factor_type: str, embed_init_type: str, embed_init_sigma: float,
rnn_init_type: str) -> mx.initializer.Initializer:
"""
Returns a mixed MXNet initializer.
:param default_init_type: The default weight initializer type.
:param default_init_scale: The scale used for default weight initialization (only used with uniform initialization).
:param default_init_xavier_rand_type: Xavier random number generator type.
:param default_init_xavier_factor_type: Xavier factor type.
:param embed_init_type: Embedding matrix initialization type.
:param embed_init_sigma: Sigma for normal initialization of embedding matrix.
:param rnn_init_type: Initialization type for RNN h2h matrices.
:return: Mixed initializer.
"""
# default initializer
if default_init_type == C.INIT_XAVIER:
default_init = [(C.DEFAULT_INIT_PATTERN,
mx.init.Xavier(rnd_type=default_init_xavier_rand_type,
factor_type=default_init_xavier_factor_type,
magnitude=default_init_scale))]
elif default_init_type == C.INIT_UNIFORM:
default_init = [(C.DEFAULT_INIT_PATTERN, mx.init.Uniform(scale=default_init_scale))]
else:
raise ValueError("Unknown default initializer %s." % default_init_type)
# embedding initializer
if embed_init_type == C.EMBED_INIT_NORMAL:
embed_init = [(C.EMBED_INIT_PATTERN, mx.init.Normal(sigma=embed_init_sigma))]
elif embed_init_type == C.EMBED_INIT_DEFAULT:
embed_init = []
else:
raise ValueError('Unknown embedding initializer: %s' % embed_init_type)
# rnn initializer
if rnn_init_type == C.RNN_INIT_ORTHOGONAL:
rnn_init = [(C.RNN_INIT_PATTERN, mx.initializer.Orthogonal())]
elif rnn_init_type == C.RNN_INIT_ORTHOGONAL_STACKED:
rnn_init = [(C.RNN_INIT_PATTERN, StackedOrthogonalInit(scale=1.0, rand_type="eye"))]
elif rnn_init_type == C.RNN_INIT_DEFAULT:
rnn_init = []
else:
raise ValueError('Unknown RNN initializer: %s' % rnn_init_type)
params_init_pairs = embed_init + rnn_init + default_init
return mx.initializer.Mixed(*zip(*params_init_pairs))
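# Hedged usage sketch (not in the original source): constructing the mixed
# initializer above with common settings; the numeric values are illustrative only.
def _example_get_initializer() -> mx.initializer.Initializer:
    return get_initializer(default_init_type=C.INIT_XAVIER,
                           default_init_scale=3.0,
                           default_init_xavier_rand_type="uniform",
                           default_init_xavier_factor_type="avg",
                           embed_init_type=C.EMBED_INIT_DEFAULT,
                           embed_init_sigma=0.01,
                           rnn_init_type=C.RNN_INIT_DEFAULT)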
@mx.init.register
class StackedOrthogonalInit(mx.initializer.Initializer):
"""
Initializes weight as Orthogonal matrix. Here we assume that the weight consists of stacked square matrices of
the same size.
For example one could have 3 (2,2) matrices resulting in a (6,2) matrix. This situation arises in RNNs when one
wants to perform multiple h2h transformations in a single matrix multiplication.
Reference:
Exact solutions to the nonlinear dynamics of learning in deep linear neural networks
arXiv preprint arXiv:1312.6120 (2013).
:param scale: Scaling factor of weight.
:param rand_type: use "uniform" or "normal" random number to initialize weight.
"eye" simply sets the matrix to an identity matrix.
"""
def __init__(self, scale=1.414, rand_type="uniform"):
super().__init__()
self.scale = scale
self.rand_type = rand_type
def _init_weight(self, sym_name, arr):
assert len(arr.shape) == 2, "Only 2d weight matrices supported."
base_dim = arr.shape[1]
stacked_dim = arr.shape[0] # base_dim * num_sub_matrices
assert stacked_dim % base_dim == 0, \
"Dim1 must be a multiple of dim2 (as weight = stacked square matrices)."
num_sub_matrices = stacked_dim // base_dim
logger.info("Initializing weight %s (shape=%s, num_sub_matrices=%d) with an orthogonal weight matrix.",
sym_name, arr.shape, num_sub_matrices)
for mat_idx in range(0, num_sub_matrices):
if self.rand_type == "uniform":
tmp = np.random.uniform(-1.0, 1.0, (base_dim, base_dim))
_, __, q = np.linalg.svd(tmp)
elif self.rand_type == "normal":
tmp = np.random.normal(0.0, 1.0, (base_dim, base_dim))
_, __, q = np.linalg.svd(tmp)
elif self.rand_type == "eye":
q = np.eye(base_dim)
else:
raise ValueError("unknown rand_type %s" % self.rand_type)
q = self.scale * q
arr[mat_idx * base_dim:mat_idx * base_dim + base_dim] = q
| [
"str",
"float",
"str",
"str",
"str",
"float",
"str"
] | [
758,
783,
821,
879,
901,
924,
966
] | [
761,
788,
824,
882,
904,
929,
969
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/layers.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import math
from typing import Dict, Optional, Tuple, Union
import mxnet as mx
import numpy as np
from . import constants as C
from . import utils
logger = logging.getLogger(__name__)
def activation(data: mx.sym.Symbol, act_type: str) -> mx.sym.Symbol:
"""
Apply custom or standard activation.
Custom activation types include:
- Swish-1, also called Sigmoid-Weighted Linear Unit (SiLU): Ramachandran et
al. (https://arxiv.org/pdf/1710.05941.pdf), Elfwing et al.
(https://arxiv.org/pdf/1702.03118.pdf)
- Gaussian Error Linear Unit (GELU): Hendrycks and Gimpel
(https://arxiv.org/pdf/1606.08415.pdf)
:param data: input Symbol of any shape.
:param act_type: Type of activation.
:return: output Symbol with same shape as input.
"""
# TODO: Contribute these to MXNet? For now it appears that registered activation types must be implemented in C++.
if act_type == C.SWISH1:
return data * mx.sym.Activation(data, act_type="sigmoid")
elif act_type == C.GELU:
# Approximation of x * gaussian_cdf(x) used by Hendrycks and Gimpel
return 0.5 * data * (1 + mx.sym.Activation((math.sqrt(2 / math.pi) * (data + (0.044715 * (data**3)))),
act_type="tanh"))
else:
return mx.sym.Activation(data, act_type=act_type)
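# NumPy reference for the custom activations above (an illustrative sketch;
# the _np_* helpers are hypothetical and not part of the original API):
def _np_swish1(x: np.ndarray) -> np.ndarray:
    return x / (1.0 + np.exp(-x))  # equals x * sigmoid(x)

def _np_gelu(x: np.ndarray) -> np.ndarray:
    # same tanh approximation of x * gaussian_cdf(x) as used above
    return 0.5 * x * (1 + np.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x ** 3)))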
class LayerNormalization:
"""
Implements Ba et al, Layer Normalization (https://arxiv.org/abs/1607.06450).
:param num_hidden: Number of hidden units of layer to be normalized.
:param prefix: Optional prefix of layer name.
:param scale: Optional variable for scaling of shape (num_hidden,). Will be created if None.
:param shift: Optional variable for shifting of shape (num_hidden,). Will be created if None.
:param scale_init: Initial value of scale variable if scale is None. Default 1.0.
:param shift_init: Initial value of shift variable if shift is None. Default 0.0.
"""
# TODO(fhieber): this should eventually go to MXNet
def __init__(self,
num_hidden: int,
prefix: Optional[str] = None,
scale: Optional[mx.sym.Symbol] = None,
shift: Optional[mx.sym.Symbol] = None,
scale_init: float = 1.0,
shift_init: float = 0.0) -> None:
utils.check_condition(num_hidden > 1,
"Layer normalization should only be applied to layers with more than 1 neuron.")
self.prefix = prefix
self.scale = scale if scale is not None else mx.sym.Variable('%s_gamma' % prefix, shape=(num_hidden,),
init=mx.init.Constant(value=scale_init))
self.shift = shift if shift is not None else mx.sym.Variable('%s_beta' % prefix, shape=(num_hidden,),
init=mx.init.Constant(value=shift_init))
@staticmethod
def moments(inputs: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol]:
"""
Computes mean and variance of the last dimension of a Symbol.
:param inputs: Shape: (d0, ..., dn, hidden).
:return: mean, var: Shape: (d0, ..., dn, 1).
"""
mean = mx.sym.mean(data=inputs, axis=-1, keepdims=True)
# TODO(fhieber): MXNet should have this.
var = mx.sym.mean(mx.sym.square(mx.sym.broadcast_minus(inputs, mean)), axis=-1, keepdims=True)
return mean, var
def normalize(self, inputs: mx.sym.Symbol, eps: float = 0.000001) -> mx.sym.Symbol:
"""
Normalizes hidden units of inputs as follows:
inputs = scale * (inputs - mean) / sqrt(var + eps) + shift
Normalization is performed over the last dimension of the input data.
:param inputs: Inputs to normalize. Shape: (d0, ..., dn, num_hidden).
:param eps: Variance epsilon.
:return: inputs_norm: Normalized inputs. Shape: (d0, ..., dn, num_hidden).
"""
mean, var = self.moments(inputs)
inputs_norm = mx.sym.broadcast_minus(inputs, mean, name='%sinp_minus_mean' % self.prefix)
inputs_norm = mx.sym.broadcast_mul(inputs_norm, mx.sym.rsqrt(var + eps), name='%sinp_norm' % self.prefix)
inputs_norm = mx.sym.broadcast_mul(inputs_norm, self.scale, name='%sinp_norm_scaled' % self.prefix)
inputs_norm = mx.sym.broadcast_add(inputs_norm, self.shift, name='%sinp_norm_scaled_shifted' % self.prefix)
return inputs_norm
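# NumPy reference for LayerNormalization.normalize (illustrative sketch only;
# not part of the original API):
def _np_layer_norm(x: np.ndarray, scale: np.ndarray, shift: np.ndarray,
                   eps: float = 0.000001) -> np.ndarray:
    mean = x.mean(axis=-1, keepdims=True)
    var = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
    return scale * (x - mean) / np.sqrt(var + eps) + shift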
class WeightNormalization:
"""
Implements Weight Normalization, see Salimans & Kingma 2016 (https://arxiv.org/abs/1602.07868).
For a given tensor the normalization is done per hidden dimension.
:param weight: Weight tensor of shape: (num_hidden, d1, d2, ...).
:param num_hidden: Size of the first dimension.
:param ndim: The total number of dimensions of the weight tensor.
:param prefix: The prefix used for naming.
"""
def __init__(self, weight, num_hidden, ndim=2, prefix: str = '') -> None:
self.prefix = prefix
self.weight = weight
self.num_hidden = num_hidden
self.scale = mx.sym.Variable("%swn_scale" % prefix,
shape=tuple([num_hidden] + [1] * (ndim - 1)),
init=mx.init.Constant(value=1.0))
def __call__(self, weight: Optional[mx.nd.NDArray] = None, scale: Optional[mx.nd.NDArray] = None) -> mx.sym.Symbol:
"""
Normalize each hidden dimension and scale afterwards
:return: A weight normalized weight tensor.
"""
if weight is None and scale is None:
return mx.sym.broadcast_mul(lhs=mx.sym.L2Normalization(self.weight, mode='instance'),
rhs=self.scale, name="%swn_scale" % self.prefix)
else:
assert isinstance(weight, mx.nd.NDArray)
assert isinstance(scale, mx.nd.NDArray)
return mx.nd.broadcast_mul(lhs=mx.nd.L2Normalization(weight, mode='instance'), rhs=scale)
class OutputLayer:
"""
Defines the output layer of Sockeye decoders. Supports weight tying and weight normalization.
:param hidden_size: Decoder hidden size.
:param vocab_size: Target vocabulary size.
:param weight_normalization: Whether to apply weight normalization.
:param prefix: Prefix used for naming.
"""
def __init__(self,
hidden_size: int,
vocab_size: int,
weight: Optional[mx.sym.Symbol],
weight_normalization: bool,
prefix: str = C.DEFAULT_OUTPUT_LAYER_PREFIX) -> None:
self.vocab_size = vocab_size
self.prefix = prefix
if weight is None:
self.w = mx.sym.Variable("%sweight" % self.prefix, shape=(vocab_size, hidden_size))
else:
self.w = weight
self.weight_normalization = weight_normalization
if weight_normalization:
logger.info("Normalizing output layer weights.")
self.weight_norm = WeightNormalization(self.w,
num_hidden=vocab_size,
ndim=2,
prefix=self.prefix)
self.w = self.weight_norm()
self.b = mx.sym.Variable("%sbias" % self.prefix)
def __call__(self,
hidden: Union[mx.sym.Symbol, mx.nd.NDArray],
weight: Optional[mx.nd.NDArray] = None,
bias: Optional[mx.nd.NDArray] = None):
"""
Linear transformation to vocab size. Returns logits.
:param hidden: Decoder representation for n elements. Shape: (n, self.num_hidden).
:return: Logits. Shape(n, self.vocab_size).
"""
if isinstance(hidden, mx.sym.Symbol):
# TODO dropout?
return mx.sym.FullyConnected(data=hidden,
num_hidden=self.vocab_size,
weight=self.w,
bias=self.b,
flatten=False,
name=C.LOGITS_NAME)
# Equivalent NDArray implementation (requires passed weights/biases)
assert isinstance(hidden, mx.nd.NDArray)
utils.check_condition(weight is not None and bias is not None,
"OutputLayer NDArray implementation requires passing weight and bias NDArrays.")
return mx.nd.FullyConnected(data=hidden,
num_hidden=bias.shape[0],
weight=weight,
bias=bias,
flatten=False)
def split_heads(x: mx.sym.Symbol, depth_per_head: int, heads: int) -> mx.sym.Symbol:
"""
Returns a symbol with head dimension folded into batch and depth divided by the number of heads.
:param x: Symbol of shape (batch, length, depth).
:param depth_per_head: Depth per head.
:param heads: Number of heads.
:return: Symbol of shape (batch * heads, length, depth_per_heads).
"""
# (batch, length, heads, depth_per_head)
x = mx.sym.reshape(data=x, shape=(0, -1, heads, depth_per_head))
# (batch, heads, length, depth/heads)
x = mx.sym.transpose(data=x, axes=(0, 2, 1, 3))
# (batch * heads, length, depth/heads)
return mx.sym.reshape(data=x, shape=(-3, -1, depth_per_head))
def combine_heads(x: mx.sym.Symbol, depth_per_head: int, heads: int) -> mx.sym.Symbol:
"""
Returns a symbol with both batch & length, and head & depth dimensions combined.
:param x: Symbol of shape (batch * heads, length, depth_per_head).
:param depth_per_head: Depth per head.
:param heads: Number of heads.
:return: Symbol of shape (batch, length, depth).
"""
# (batch, heads, length, depth_per_head)
x = mx.sym.reshape(data=x, shape=(-4, -1, heads, 0, depth_per_head))
# (batch, length, heads, depth_per_head)
x = mx.sym.transpose(x, axes=(0, 2, 1, 3))
# (batch, length, depth)
return mx.sym.reshape(x, shape=(-1, 0, depth_per_head * heads))
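# Shape walk-through (illustrative): with batch=2, length=5, heads=4, depth=8
# (so depth_per_head=2):
#   x                              : (2, 5, 8)
#   split_heads(x, 2, 4)           : (8, 5, 2)   # heads folded into batch
#   combine_heads(x_split, 2, 4)   : (2, 5, 8)   # exact inverse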
def broadcast_to_heads(x: mx.sym.Symbol, num_heads: int, ndim: int, fold_heads: bool = True) -> mx.sym.Symbol:
"""
Broadcasts batch-major input of shape (batch, d1 ... dn-1) to (batch*heads, d1 ... dn-1).
:param x: Batch-major input. Shape: (batch, d1 ... dn-1).
:param num_heads: Number of heads.
:param ndim: Number of dimensions in x.
:param fold_heads: Whether to fold heads dimension into batch dimension.
:return: Tensor with each sample repeated heads-many times.
Shape: (batch * heads, d1 ... dn-1) if fold_heads == True, (batch, heads, d1 ... dn-1) else.
"""
dims = [0] * (ndim - 1)
# x: (batch, 1)
x = mx.sym.expand_dims(x, axis=1)
# x: (batch, heads, dims...)
x = mx.sym.broadcast_to(x, shape=[0, num_heads] + dims)
if fold_heads:
# (batch * heads, dims...)
return mx.sym.reshape(x, shape=[-3] + dims)
else:
# x: (batch, heads, dims...)
return x
def dot_attention(queries: mx.sym.Symbol,
keys: mx.sym.Symbol,
values: mx.sym.Symbol,
lengths: Optional[mx.sym.Symbol] = None,
dropout: float = 0.0,
bias: Optional[mx.sym.Symbol] = None,
prefix: Optional[str] = ''):
"""
Computes dot attention for a set of queries, keys, and values.
:param queries: Attention queries. Shape: (n, lq, d).
:param keys: Attention keys. Shape: (n, lk, d).
:param values: Attention values. Shape: (n, lk, dv).
:param lengths: Optional sequence lengths of the keys. Shape: (n,).
:param dropout: Dropout probability.
:param bias: Optional 3d bias tensor.
:param prefix: Optional prefix
:return: 'Context' vectors for each query. Shape: (n, lq, dv).
"""
utils.check_condition(lengths is not None or bias is not None,
"Must provide either length or bias argument for masking")
# (n, lq, lk)
logits = mx.sym.batch_dot(lhs=queries, rhs=keys, transpose_b=True, name='%sdot' % prefix)
if lengths is not None:
# mask lk dimension
# (lk, n, lq)
logits = mx.sym.transpose(data=logits, axes=(2, 0, 1))
logits = mx.sym.SequenceMask(data=logits,
use_sequence_length=True,
sequence_length=lengths,
value=C.LARGE_NEGATIVE_VALUE)
# (n, lq, lk)
logits = mx.sym.transpose(data=logits, axes=(1, 2, 0))
if bias is not None:
logits = mx.sym.broadcast_add(logits, bias, name='%sbias_add' % prefix)
probs = mx.sym.softmax(logits, axis=-1)
probs = mx.sym.Dropout(probs, p=dropout) if dropout > 0.0 else probs
# (n, lq, lk) x (n, lk, dv) -> (n, lq, dv)
return mx.sym.batch_dot(lhs=probs, rhs=values, name='%scontexts' % prefix)
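# NumPy reference for the unmasked core of dot_attention (a sketch for
# intuition; length masking, bias, and dropout are omitted here):
def _np_dot_attention(q: np.ndarray, k: np.ndarray, v: np.ndarray) -> np.ndarray:
    logits = np.matmul(q, k.transpose(0, 2, 1))            # (n, lq, lk)
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs = probs / probs.sum(axis=-1, keepdims=True)      # softmax over lk
    return np.matmul(probs, v)                             # (n, lq, dv)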
class MultiHeadAttentionBase:
"""
Base class for Multi-head attention.
:param prefix: Attention prefix.
:param depth_att: Attention depth / number of hidden units.
:param heads: Number of attention heads.
:param depth_out: Output depth / number of output units.
:param dropout: Dropout probability on attention scores
"""
def __init__(self,
prefix: str,
depth_att: int = 512,
heads: int = 8,
depth_out: int = 512,
dropout: float = 0.0) -> None:
self.prefix = prefix
utils.check_condition(depth_att % heads == 0,
"Number of heads (%d) must divide attention depth (%d)" % (heads, depth_att))
self.depth = depth_att
self.heads = heads
self.depth_out = depth_out
self.dropout = dropout
self.depth_per_head = self.depth // self.heads
self.w_h2o = mx.sym.Variable("%sh2o_weight" % prefix)
self.b_h2o = mx.sym.Variable("%sh2o_bias" % prefix)
def _attend(self,
queries: mx.sym.Symbol,
keys: mx.sym.Symbol,
values: mx.sym.Symbol,
lengths: Optional[mx.sym.Symbol] = None,
bias: Optional[mx.sym.Symbol] = None) -> mx.sym.Symbol:
"""
Returns context vectors of multi-head dot attention.
:param queries: Query tensor. Shape: (batch_size, query_max_length, depth).
:param keys: Keys. Shape: (batch_size, memory_max_length, depth).
:param values: Values. Shape: (batch_size, memory_max_length, depth).
:param lengths: Optional lengths of keys. Shape: (batch_size,).
:param bias: Optional 3d bias.
:return: Context vectors. Shape: (batch_size, query_max_length, output_depth).
"""
# scale by sqrt(depth_per_head)
queries = queries * (self.depth_per_head ** -0.5)
# (batch*heads, length, depth/heads)
queries = split_heads(queries, self.depth_per_head, self.heads)
keys = split_heads(keys, self.depth_per_head, self.heads)
values = split_heads(values, self.depth_per_head, self.heads)
lengths = broadcast_to_heads(lengths, self.heads, ndim=1, fold_heads=True) if lengths is not None else lengths
# (batch*heads, query_max_length, depth_per_head)
contexts = dot_attention(queries, keys, values,
lengths=lengths, dropout=self.dropout, bias=bias, prefix=self.prefix)
# (batch, query_max_length, depth)
contexts = combine_heads(contexts, self.depth_per_head, self.heads)
# contexts: (batch, query_max_length, output_depth)
contexts = mx.sym.FullyConnected(data=contexts,
weight=self.w_h2o,
bias=self.b_h2o,
num_hidden=self.depth_out,
flatten=False)
return contexts
class MultiHeadSelfAttention(MultiHeadAttentionBase):
"""
Multi-head self-attention. Independent linear projections of inputs serve as
queries, keys, and values for the attention.
:param prefix: Attention prefix.
:param depth_att: Attention depth / number of hidden units.
:param heads: Number of attention heads.
:param depth_out: Output depth / number of output units.
:param dropout: Dropout probability on attention scores
"""
def __init__(self,
prefix: str,
depth_att: int = 512,
heads: int = 8,
depth_out: int = 512,
dropout: float = 0.0) -> None:
super().__init__(prefix, depth_att, heads, depth_out, dropout)
self.w_i2h = mx.sym.Variable("%si2h_weight" % prefix)
self.b_i2h = mx.sym.Variable("%si2h_bias" % prefix)
def __call__(self,
inputs: mx.sym.Symbol,
input_lengths: Optional[mx.sym.Symbol] = None,
bias: Optional[mx.sym.Symbol] = None,
cache: Optional[Dict[str, Optional[mx.sym.Symbol]]] = None) -> mx.sym.Symbol:
"""
Computes multi-head attention on a set of inputs, serving as queries, keys, and values.
If sequence lengths are provided, they will be used to mask the attention scores.
A bias mask may also be used to mask the attention scores.
May also use a cache of previously computed inputs.
Returns a symbol of shape (batch, max_length, output_depth).
:param inputs: Input Data. Shape: (batch, max_length, input_depth).
:param input_lengths: Optional lengths of inputs to mask attention scores. Shape: (batch, 1).
:param bias: Optional 3d bias tensor to mask attention scores.
:param cache: Optional dictionary of previously computed keys and values.
:return: Symbol of shape (batch, max_length, output_depth).
"""
# combined: (batch, max_length, depth * 3)
combined = mx.sym.FullyConnected(data=inputs,
weight=self.w_i2h,
bias=self.b_i2h,
num_hidden=self.depth * 3,
flatten=False,
name="%sqkv_transform" % self.prefix)
# split into query, keys and values
# (batch, max_length, depth)
# pylint: disable=unbalanced-tuple-unpacking
queries, keys, values = mx.sym.split(data=combined, num_outputs=3, axis=2)
if cache is not None:
# append new keys & values to cache, update the cache
keys = cache['k'] = keys if cache['k'] is None else mx.sym.concat(cache['k'], keys, dim=1)
values = cache['v'] = values if cache['v'] is None else mx.sym.concat(cache['v'], values, dim=1)
return self._attend(queries,
keys,
values,
lengths=input_lengths,
bias=bias)
class MultiHeadAttention(MultiHeadAttentionBase):
"""
Multi-head attention layer for queries independent from keys/values.
:param prefix: Attention prefix.
:param depth_att: Attention depth / number of hidden units.
:param heads: Number of attention heads.
:param depth_out: Output depth / number of output units.
:param dropout: Dropout probability on attention scores
"""
def __init__(self,
prefix: str,
depth_att: int = 512,
heads: int = 8,
depth_out: int = 512,
dropout: float = 0.0) -> None:
super().__init__(prefix, depth_att, heads, depth_out, dropout)
self.w_q2h = mx.sym.Variable("%sq2h_weight" % prefix)
self.b_q2h = mx.sym.Variable("%sq2h_bias" % prefix)
self.w_kv2h = mx.sym.Variable("%skv2h_weight" % prefix)
self.b_kv2h = mx.sym.Variable("%skv2h_bias" % prefix)
def __call__(self,
queries: mx.sym.Symbol,
memory: mx.sym.Symbol,
memory_lengths: Optional[mx.sym.Symbol] = None,
bias: Optional[mx.sym.Symbol] = None) -> mx.sym.Symbol:
"""
Computes multi-head attention for queries given a memory tensor.
If sequence lengths are provided, they will be used to mask the attention scores.
A bias mask may also be used to mask the attention scores.
Returns a symbol of shape (batch, max_length, output_depth).
:param queries: Query tensor. Shape: (batch, query_max_length, input_depth).
:param memory: Memory data to attend to. Shape: (batch, memory_max_length, input_depth).
:param memory_lengths: Optional lengths of memory to mask attention scores. Shape: (batch, 1).
:param bias: Optional 3d bias tensor to mask attention scores.
:return: Symbol of shape (batch, query_seq_len, output_depth).
"""
# (batch, memory_max_length, depth * 2)
combined = mx.sym.FullyConnected(data=memory,
weight=self.w_kv2h,
bias=self.b_kv2h,
num_hidden=self.depth * 2,
flatten=False,
name="%skv_transform" % self.prefix)
        # split into keys and values
        # (batch, memory_max_length, depth)
        # NOTE: requires keys and values to have equal depth.
# pylint: disable=unbalanced-tuple-unpacking
keys, values = mx.sym.split(data=combined, num_outputs=2, axis=2)
        # (batch, query_max_length, depth)
queries = mx.sym.FullyConnected(data=queries,
weight=self.w_q2h,
bias=self.b_q2h,
num_hidden=self.depth,
flatten=False,
name="%sq_transform" % self.prefix)
return self._attend(queries,
keys,
values,
bias=bias)
class ProjectedDotAttention:
"""
Dot attention layer for queries independent from keys/values.
:param prefix: Attention prefix.
:param num_hidden: Attention depth / number of hidden units.
"""
def __init__(self,
prefix: str,
num_hidden) -> None:
self.prefix = prefix
self.num_hidden = num_hidden
self.w_q2h = mx.sym.Variable("%sq2h_weight" % prefix)
self.b_q2h = mx.sym.Variable("%sq2h_bias" % prefix)
self.w_kv2h = mx.sym.Variable("%skv2h_weight" % prefix)
self.b_kv2h = mx.sym.Variable("%skv2h_bias" % prefix)
def __call__(self,
queries: mx.sym.Symbol,
memory: mx.sym.Symbol,
memory_lengths: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Apply project, apply dot attention and return new context vectors.
:param queries: Symbol of shape (batch, queries_max_length, input_num_hidden).
:param memory: Symbol of shape (batch, memory_max_length, input_num_hidden).
:param memory_lengths: Symbol of shape (batch, 1).
:return: Symbol of shape (batch, queries_max_length, num_hidden).
"""
# (batch, memory_max_length, num_hidden * 2)
combined = mx.sym.FullyConnected(data=memory,
weight=self.w_kv2h,
bias=self.b_kv2h,
num_hidden=self.num_hidden * 2,
flatten=False,
name="%skv_transform" % self.prefix)
# split into keys and values
# pylint: disable=unbalanced-tuple-unpacking
keys, values = mx.sym.split(data=combined, num_outputs=2, axis=2)
# (batch, queries_max_length, num_hidden)
queries = mx.sym.FullyConnected(data=queries,
weight=self.w_q2h,
bias=self.b_q2h,
num_hidden=self.num_hidden,
flatten=False,
name="%sq_transform" % self.prefix)
# scale by sqrt(num_hidden)
queries = queries * (self.num_hidden ** -0.5)
# (batch, queries_max_length, num_hidden)
contexts = dot_attention(queries, keys, values, memory_lengths)
return contexts
class PlainDotAttention:
"""
Dot attention layer for queries independent from keys/values.
"""
def __call__(self,
queries: mx.sym.Symbol,
memory: mx.sym.Symbol,
memory_lengths: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Returns a symbol of shape (batch, max_length, output_depth).
:param queries: Symbol of shape (batch, queries_max_length, input_depth).
:param memory: Symbol of shape (batch, memory_max_length, input_depth).
:param memory_lengths: Symbol of shape (batch, 1).
:return: Symbol of shape (batch, queries_max_length, output_depth).
"""
# (batch*heads, queries_max_length, depth_per_head)
contexts = dot_attention(queries, memory, memory, memory_lengths)
return contexts
class PositionalEncodings(mx.operator.CustomOp):
"""
Returns a symbol of shape (1, max_seq_len, num_embed)
with positional encodings as in Vaswani et al, 2017.
:param length: Maximum sequence length.
:param depth: Embedding size.
"""
def __init__(self, length: int, depth: int) -> None:
super().__init__()
self.encodings = self.get_encodings(length, depth)
@staticmethod
def get_encodings(length, depth) -> np.ndarray:
        utils.check_condition(depth % 2 == 0, "Positional embeddings require an even embedding size, "
                                              "but got %d." % depth)
# (1, depth)
channels = np.arange(depth // 2).reshape((1, -1))
# (length, 1)
positions = np.arange(0, length).reshape((-1, 1))
scaled_positions = positions / np.power(10000, (2 * channels) / depth)
# sinusoids:
sin = np.sin(scaled_positions)
# cosines:
cos = np.cos(scaled_positions)
# interleave: (1, length, num_embed)
encodings = np.hstack([sin, cos]).reshape(1, length, depth)
return encodings
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.encodings)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("positional_encodings")
class PositionalEncodingsProp(mx.operator.CustomOpProp):
def __init__(self, length: str, depth: str) -> None:
super().__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(1, self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return PositionalEncodings(length=self.length, depth=self.depth)
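# Usage sketch (sizes are hypothetical): the encodings can be inspected
# directly, or instantiated as a symbol through MXNet's Custom operator.
#     encodings = PositionalEncodings.get_encodings(length=50, depth=512)
#     # encodings.shape == (1, 50, 512)
#     pos_sym = mx.sym.Custom(length=50, depth=512, op_type="positional_encodings")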
| [
"mx.sym.Symbol",
"str",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"Optional[mx.sym.Symbol]",
"bool",
"Union[mx.sym.Symbol, mx.nd.NDArray]",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"str",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"str",
"mx.sym.Symbol",
"str",
"mx.sym.Symbol",
"mx.sym.Symbol",
"str",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"int",
"str",
"str"
] | [
791,
816,
2664,
3591,
4121,
7059,
7093,
7123,
7187,
8058,
9452,
9483,
9495,
10179,
10210,
10222,
10884,
10910,
10921,
11850,
11889,
11930,
14145,
14850,
14887,
14926,
17301,
17707,
20350,
20883,
20923,
23353,
23760,
23800,
23848,
25702,
25742,
25790,
26668,
26680,
27866,
27878
] | [
804,
819,
2667,
3604,
4134,
7062,
7096,
7146,
7191,
8093,
9465,
9486,
9498,
10192,
10213,
10225,
10897,
10913,
10924,
11863,
11902,
11943,
14148,
14863,
14900,
14939,
17304,
17720,
20353,
20896,
20936,
23356,
23773,
23813,
23861,
25715,
25755,
25803,
26671,
26683,
27869,
27881
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/lexicon.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import collections
import json
import logging
import operator
import os
from typing import Dict, Generator, Tuple
import mxnet as mx
import numpy as np
from . import arguments
from . import constants as C
from .data_io import smart_open
from .log import setup_main_logger, log_sockeye_version
from .utils import check_condition
from . import vocab
logger = logging.getLogger(__name__)
class Lexicon:
"""
Lexicon model component. Stores lexicon and supports two operations:
(1) Given source batch, lookup translation distributions in the lexicon
(2) Given attention score vector and lexicon lookups, compute the lexical bias for the decoder
:param source_vocab_size: Source vocabulary size.
:param target_vocab_size: Target vocabulary size.
:param learn: Whether to adapt lexical biases during training.
"""
def __init__(self, source_vocab_size: int, target_vocab_size: int, learn: bool = False) -> None:
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
# TODO: once half-precision works, use float16 for this variable to save memory
self.lexicon = mx.sym.Variable(name=C.LEXICON_NAME,
shape=(self.source_vocab_size,
self.target_vocab_size))
if not learn:
logger.info("Fixed lexicon bias terms")
self.lexicon = mx.sym.BlockGrad(self.lexicon)
else:
logger.info("Learning lexicon bias terms")
def lookup(self, source: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Lookup lexicon distributions for source.
:param source: Input. Shape: (batch_size, source_seq_len).
:return: Lexicon distributions for input. Shape: (batch_size, target_vocab_size, source_seq_len).
"""
return mx.sym.swapaxes(data=mx.sym.Embedding(data=source,
input_dim=self.source_vocab_size,
weight=self.lexicon,
output_dim=self.target_vocab_size,
name=C.LEXICON_NAME + "_lookup"), dim1=1, dim2=2)
@staticmethod
def calculate_lex_bias(source_lexicon: mx.sym.Symbol, attention_prob_score: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Given attention/alignment scores, calculates a weighted sum over lexical distributions
that serve as a bias for the decoder softmax.
* https://arxiv.org/pdf/1606.02006.pdf
* http://www.aclweb.org/anthology/W/W16/W16-4610.pdf
:param source_lexicon: Lexical biases for sentence Shape: (batch_size, target_vocab_size, source_seq_len).
:param attention_prob_score: Attention score. Shape: (batch_size, source_seq_len).
:return: Lexical bias. Shape: (batch_size, 1, target_vocab_size).
"""
# attention_prob_score: (batch_size, source_seq_len) -> (batch_size, source_seq_len, 1)
attention_prob_score = mx.sym.expand_dims(attention_prob_score, axis=2)
# lex_bias: (batch_size, target_vocab_size, 1)
lex_bias = mx.sym.batch_dot(source_lexicon, attention_prob_score)
# lex_bias: (batch_size, 1, target_vocab_size)
lex_bias = mx.sym.swapaxes(data=lex_bias, dim1=1, dim2=2)
return lex_bias
def initialize_lexicon(cmdline_arg: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int]) -> mx.nd.NDArray:
"""
Reads a probabilistic word lexicon as given by the commandline argument and converts
to log probabilities.
If specified, smooths with custom value, uses 0.001 otherwise.
:param cmdline_arg: Commandline argument.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:return: Lexicon array. Shape: (vocab_source_size, vocab_target_size).
"""
fields = cmdline_arg.split(":", 1)
path = fields[0]
lexicon = read_lexicon(path, vocab_source, vocab_target)
assert lexicon.shape == (len(vocab_source), len(vocab_target)), "Invalid lexicon shape"
eps = 0.001
if len(fields) == 2:
eps = float(fields[1])
check_condition(eps > 0, "epsilon must be >0")
logger.info("Smoothing lexicon with eps=%.4f", eps)
lexicon = mx.nd.array(np.log(lexicon + eps))
return lexicon
def lexicon_iterator(path: str,
vocab_source: Dict[str, int],
vocab_target: Dict[str, int]) -> Generator[Tuple[int, int, float], None, None]:
"""
Yields lines from a translation table of format: src, trg, logprob.
:param path: Path to lexicon file.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:return: Generator returning tuples (src_id, trg_id, prob).
"""
assert C.UNK_SYMBOL in vocab_source
assert C.UNK_SYMBOL in vocab_target
src_unk_id = vocab_source[C.UNK_SYMBOL]
trg_unk_id = vocab_target[C.UNK_SYMBOL]
with smart_open(path) as fin:
for line in fin:
src, trg, logprob = line.rstrip("\n").split("\t")
prob = np.exp(float(logprob))
src_id = vocab_source.get(src, src_unk_id)
trg_id = vocab_target.get(trg, trg_unk_id)
yield src_id, trg_id, prob
def read_lexicon(path: str, vocab_source: Dict[str, int], vocab_target: Dict[str, int]) -> np.ndarray:
"""
Loads lexical translation probabilities from a translation table of format: src, trg, logprob.
Source words unknown to vocab_source are discarded.
Target words unknown to vocab_target contribute to p(unk|source_word).
See Incorporating Discrete Translation Lexicons into Neural Machine Translation, Section 3.1 & Equation 5
(https://arxiv.org/pdf/1606.02006.pdf))
:param path: Path to lexicon file.
:param vocab_source: Source vocabulary.
:param vocab_target: Target vocabulary.
:return: Lexicon array. Shape: (vocab_source_size, vocab_target_size).
"""
src_unk_id = vocab_source[C.UNK_SYMBOL]
trg_unk_id = vocab_target[C.UNK_SYMBOL]
lexicon = np.zeros((len(vocab_source), len(vocab_target)))
n = 0
for src_id, trg_id, prob in lexicon_iterator(path, vocab_source, vocab_target):
if src_id == src_unk_id:
continue
if trg_id == trg_unk_id:
lexicon[src_id, trg_unk_id] += prob
else:
lexicon[src_id, trg_id] = prob
n += 1
logger.info("Loaded lexicon from '%s' with %d entries", path, n)
return lexicon
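# Illustrative input sketch: a fast_align-style TSV with one
# "src<TAB>trg<TAB>logprob" entry per line, e.g. (hypothetical content):
#
#     das    the     -0.51
#     das    that    -1.61
#     haus   house   -0.22
#
# which would then be loaded as:
#     lexicon = read_lexicon("lex.tsv", vocab_source, vocab_target)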
class LexiconInitializer(mx.initializer.Initializer):
"""
Given a lexicon NDArray, initialize the variable named C.LEXICON_NAME with it.
:param lexicon: Lexicon array.
"""
def __init__(self, lexicon: mx.nd.NDArray) -> None:
super().__init__()
self.lexicon = lexicon
def _init_default(self, sym_name, arr):
assert sym_name == C.LEXICON_NAME, "This initializer should only be used for a lexicon parameter variable"
logger.info("Initializing '%s' with lexicon.", sym_name)
assert len(arr.shape) == 2, "Only 2d weight matrices supported."
self.lexicon.copyto(arr)
class TopKLexicon:
"""
Lexicon component that stores the k most likely target words for each source word. Used during
decoding to restrict target vocabulary for each source sequence.
:param vocab_source: Trained model source vocabulary.
    :param vocab_target: Trained model target vocabulary.
"""
def __init__(self,
vocab_source: Dict[str, int],
vocab_target: Dict[str, int]) -> None:
self.vocab_source = vocab_source
self.vocab_target = vocab_target
# Shape: (vocab_source_size, k), k determined at create() or load()
self.lex = None # type: np.ndarray
# Always allow special vocab symbols in target vocab
self.always_allow = np.array([vocab_target[symbol] for symbol in C.VOCAB_SYMBOLS], dtype=np.int)
def create(self, path: str, k: int = 20):
"""
Create from a scored lexicon file (fast_align format) using vocab from a trained Sockeye model.
:param path: Path to lexicon file.
:param k: Number of target entries per source to keep.
"""
self.lex = np.zeros((len(self.vocab_source), k), dtype=np.int)
# Read lexicon
src_unk_id = self.vocab_source[C.UNK_SYMBOL]
trg_unk_id = self.vocab_target[C.UNK_SYMBOL]
_lex = collections.defaultdict(dict) # type: Dict[int, Dict[int, float]]
for src_id, trg_id, prob in lexicon_iterator(path, self.vocab_source, self.vocab_target):
# Unk token will always be part of target vocab, so no need to track it here
if src_id == src_unk_id or trg_id == trg_unk_id:
continue
_lex[src_id][trg_id] = prob
# Sort and copy top-k trg_ids to lex array row src_id
for src_id, trg_entries in _lex.items():
top_k = list(sorted(trg_entries.items(), key=operator.itemgetter(1), reverse=True))[:k]
self.lex[src_id, :len(top_k)] = list(sorted(trg_id for (trg_id, _) in top_k))
# Free memory after copy
trg_entries.clear()
logger.info("Created top-k lexicon from \"%s\", k=%d.", path, k)
def save(self, path: str):
"""
Save lexicon in JSON format. Lexicon will be specific to Sockeye model.
:param path: Path to JSON output file.
"""
# Save k, lex array in dict form
to_save = [self.lex.shape[1], dict(enumerate(row.tolist() for row in self.lex))]
with open(path, "w", encoding=C.VOCAB_ENCODING) as out:
json.dump(to_save, out, indent=4, ensure_ascii=False)
def load(self, path: str):
"""
Load lexicon from JSON file.
:param path: Path to JSON file.
"""
with open(path, encoding=C.VOCAB_ENCODING) as inp:
k, loaded = json.load(inp)
self.lex = np.zeros((len(self.vocab_source), k), dtype=np.int)
for (src_id, top_k) in loaded.items():
self.lex[int(src_id), :len(top_k)] = top_k
logger.info("Loaded top-k lexicon from \"%s\".", path)
def get_trg_ids(self, src_ids: np.ndarray) -> np.ndarray:
"""
Lookup possible target ids for input sequence of source ids.
:param src_ids: Sequence(s) of source ids (any shape).
:return: Possible target ids for source (unique sorted, always includes special symbols).
"""
# TODO: When MXNet adds support for set operations, we can migrate to avoid conversions to/from NumPy.
unique_src_ids = np.lib.arraysetops.unique(src_ids)
trg_ids = np.lib.arraysetops.union1d(self.always_allow, self.lex[unique_src_ids, :].reshape(-1))
return trg_ids
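# Hedged usage sketch for TopKLexicon (paths, k, and ids are hypothetical):
#     lexicon = TopKLexicon(vocab_source, vocab_target)
#     lexicon.create("lex.tsv", k=20)
#     lexicon.save("lexicon.json")       # later: lexicon.load("lexicon.json")
#     trg_ids = lexicon.get_trg_ids(np.array([[4, 17, 42]]))  # unique, sorted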
def main():
"""
    Commandline interface for building top-k lexicons for use during decoding.
"""
params = argparse.ArgumentParser(description="Build a top-k lexicon for use during decoding.")
arguments.add_lexicon_args(params)
arguments.add_logging_args(params)
args = params.parse_args()
logger = setup_main_logger(__name__, console=not args.quiet, file_logging=False)
log_sockeye_version(logger)
logger.info("Reading source and target vocab from \"%s\"", args.model)
vocab_source = vocab.vocab_from_json_or_pickle(os.path.join(args.model, C.VOCAB_SRC_NAME))
vocab_target = vocab.vocab_from_json_or_pickle(os.path.join(args.model, C.VOCAB_TRG_NAME))
logger.info("Creating top-k lexicon from \"%s\"", args.input)
lexicon = TopKLexicon(vocab_source, vocab_target)
lexicon.create(args.input, args.k)
lexicon.save(args.output)
if __name__ == "__main__":
main()
| [
"int",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"str",
"Dict[str, int]",
"Dict[str, int]",
"str",
"Dict[str, int]",
"Dict[str, int]",
"str",
"Dict[str, int]",
"Dict[str, int]",
"mx.nd.NDArray",
"Dict[str, int]",
"Dict[str, int]",
"str",
"str",
"str",
"np.ndarray"
] | [
1478,
1502,
2159,
2919,
2956,
4047,
4066,
4096,
5031,
5071,
5122,
5973,
5992,
6022,
7420,
8209,
8256,
8677,
9995,
10440,
10930
] | [
1481,
1505,
2172,
2932,
2969,
4050,
4080,
4110,
5034,
5085,
5136,
5976,
6006,
6036,
7433,
8223,
8270,
8680,
9998,
10443,
10940
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/log.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import logging.config
import sys
from typing import Optional
FORMATTERS = {
'verbose': {
'format': '[%(asctime)s:%(levelname)s:%(name)s:%(funcName)s] %(message)s',
'datefmt': "%Y-%m-%d:%H:%M:%S",
},
'simple': {
'format': '[%(levelname)s:%(name)s] %(message)s'
},
}
FILE_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': FORMATTERS,
'handlers': {
'rotating': {
'level': 'INFO',
'formatter': 'verbose',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 10000000,
'backupCount': 5,
'filename': 'sockeye.log',
}
},
'root': {
'handlers': ['rotating'],
'level': 'DEBUG',
}
}
CONSOLE_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': FORMATTERS,
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'simple',
'class': 'logging.StreamHandler',
'stream': None
},
},
'root': {
'handlers': ['console'],
'level': 'DEBUG',
}
}
FILE_CONSOLE_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': FORMATTERS,
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'simple',
'class': 'logging.StreamHandler',
'stream': None
},
'rotating': {
'level': 'INFO',
'formatter': 'verbose',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 10000000,
'backupCount': 5,
'filename': 'sockeye.log',
}
},
'root': {
'handlers': ['console', 'rotating'],
'level': 'DEBUG',
}
}
LOGGING_CONFIGS = {
"file_only": FILE_LOGGING,
"console_only": CONSOLE_LOGGING,
"file_console": FILE_CONSOLE_LOGGING,
}
def _is_python34() -> bool:
version = sys.version_info
return version[0] == 3 and version[1] == 4
def setup_main_logger(name: str, file_logging=True, console=True, path: Optional[str] = None) -> logging.Logger:
"""
Return a logger that configures logging for the main application.
:param name: Name of the returned logger.
:param file_logging: Whether to log to a file.
:param console: Whether to log to the console.
:param path: Optional path to write logfile to.
"""
if file_logging and console:
log_config = LOGGING_CONFIGS["file_console"]
elif file_logging:
log_config = LOGGING_CONFIGS["file_only"]
else:
log_config = LOGGING_CONFIGS["console_only"]
if path:
log_config["handlers"]["rotating"]["filename"] = path # type: ignore
logging.config.dictConfig(log_config)
logger = logging.getLogger(name)
def exception_hook(exc_type, exc_value, exc_traceback):
if _is_python34():
# Python3.4 does not seem to handle logger.exception() well
            import traceback
            # Exception classes expose __name__, not .name; also avoid shadowing the module.
            tb_text = "".join(traceback.format_tb(exc_traceback)) + exc_type.__name__
            logger.error("Uncaught exception\n%s", tb_text)
else:
logger.exception("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = exception_hook
return logger
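# Usage sketch (the log file name is hypothetical):
#     logger = setup_main_logger(__name__, file_logging=False)              # console only
#     logger = setup_main_logger(__name__, console=True, path="train.log")  # console + file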
def log_sockeye_version(logger):
from sockeye import __version__
try:
from sockeye.git_version import git_hash
except ImportError:
git_hash = "unknown"
logger.info("Sockeye version %s commit %s", __version__, git_hash)
def log_mxnet_version(logger):
from mxnet import __version__
logger.info("MXNet version %s", __version__) | [
"str"
] | [
2675
] | [
2678
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/loss.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Functions to generate loss symbols for sequence-to-sequence models.
"""
import logging
from abc import ABC, abstractmethod
from typing import List, Optional
import mxnet as mx
from mxnet.metric import EvalMetric
from . import config
from . import constants as C
logger = logging.getLogger(__name__)
class LossConfig(config.Config):
"""
Loss configuration.
:param name: Loss name.
:param vocab_size: Target vocab size.
:param normalization_type: How to normalize the loss.
:param label_smoothing: Optional smoothing constant for label smoothing.
"""
def __init__(self,
name: str,
vocab_size: int,
normalization_type: str,
label_smoothing: float = 0.0) -> None:
super().__init__()
self.name = name
self.vocab_size = vocab_size
self.normalization_type = normalization_type
self.label_smoothing = label_smoothing
def get_loss(loss_config: LossConfig) -> 'Loss':
"""
Returns Loss instance.
:param loss_config: Loss configuration.
"""
if loss_config.name == C.CROSS_ENTROPY:
return CrossEntropyLoss(loss_config)
else:
raise ValueError("unknown loss name: %s" % loss_config.name)
class Loss(ABC):
"""
Generic Loss interface.
get_loss() method should return a loss symbol and the softmax outputs.
The softmax outputs (named C.SOFTMAX_NAME) are used by EvalMetrics to compute various metrics,
e.g. perplexity, accuracy. In the special case of cross_entropy, the SoftmaxOutput symbol
provides softmax outputs for forward() AND cross_entropy gradients for backward().
"""
def get_loss(self, logits: mx.sym.Symbol, labels: mx.sym.Symbol) -> List[mx.sym.Symbol]:
"""
Returns loss and softmax output symbols given logits and integer-coded labels.
:param logits: Shape: (batch_size * target_seq_len, target_vocab_size).
:param labels: Shape: (batch_size * target_seq_len,).
:return: List of loss and softmax output symbols.
"""
raise NotImplementedError()
@abstractmethod
def create_metric(self) -> EvalMetric:
"""
Create an instance of the EvalMetric that corresponds to this Loss function.
"""
pass
class CrossEntropyLoss(Loss):
"""
Computes the cross-entropy loss.
:param loss_config: Loss configuration.
"""
def __init__(self, loss_config: LossConfig) -> None:
logger.info("Loss: CrossEntropy(normalization_type=%s, label_smoothing=%s)",
loss_config.normalization_type, loss_config.label_smoothing)
self.loss_config = loss_config
def get_loss(self, logits: mx.sym.Symbol, labels: mx.sym.Symbol) -> List[mx.sym.Symbol]:
"""
Returns loss and softmax output symbols given logits and integer-coded labels.
:param logits: Shape: (batch_size * target_seq_len, target_vocab_size).
:param labels: Shape: (batch_size * target_seq_len,).
        :return: List containing the loss symbol.
"""
if self.loss_config.normalization_type == C.LOSS_NORM_VALID:
normalization = "valid"
elif self.loss_config.normalization_type == C.LOSS_NORM_BATCH:
normalization = "null"
else:
raise ValueError("Unknown loss normalization type: %s" % self.loss_config.normalization_type)
return [mx.sym.SoftmaxOutput(data=logits,
label=labels,
ignore_label=C.PAD_ID,
use_ignore=True,
normalization=normalization,
smooth_alpha=self.loss_config.label_smoothing,
name=C.SOFTMAX_NAME)]
def create_metric(self) -> "CrossEntropyMetric":
return CrossEntropyMetric(self.loss_config)
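# Hedged usage sketch (the logits/labels symbols are hypothetical):
#     loss_config = LossConfig(name=C.CROSS_ENTROPY, vocab_size=50000,
#                              normalization_type=C.LOSS_NORM_BATCH,
#                              label_smoothing=0.1)
#     loss = get_loss(loss_config)
#     outputs = loss.get_loss(logits, labels)  # [SoftmaxOutput symbol]
#     metric = loss.create_metric()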
class CrossEntropyMetric(EvalMetric):
"""
Version of the cross entropy metric that ignores padding tokens.
:param loss_config: The configuration used for the corresponding loss.
:param name: Name of this metric instance for display.
:param output_names: Name of predictions that should be used when updating with update_dict.
:param label_names: Name of labels that should be used when updating with update_dict.
"""
def __init__(self,
loss_config: LossConfig,
name: str = C.CROSS_ENTROPY,
output_names: Optional[List[str]] = None,
label_names: Optional[List[str]] = None) -> None:
super().__init__(name, output_names=output_names, label_names=label_names)
self.loss_config = loss_config
def cross_entropy(self, pred, label, ignore):
prob = mx.nd.pick(pred, label.astype(dtype="int32"))
prob = prob * (1 - ignore) + ignore
loss = -mx.nd.log(prob + 1e-8) # pylint: disable=invalid-unary-operand-type
return loss
def cross_entropy_smoothed(self, pred, label, ignore):
label_dist = mx.nd.one_hot(indices=label.astype(dtype='int32'),
depth=self.loss_config.vocab_size,
on_value=1.0 - self.loss_config.label_smoothing,
off_value=self.loss_config.label_smoothing /
(self.loss_config.vocab_size - 1.0))
label_dist = mx.nd.where(1 - ignore, label_dist, mx.nd.zeros_like(label_dist))
loss = label_dist * (- mx.nd.log(pred + 1e-8)) # pylint: disable=invalid-unary-operand-type
return loss
def update(self, labels, preds):
for label, pred in zip(labels, preds):
batch_size = label.shape[0]
label = label.as_in_context(pred.context).reshape((label.size,))
# Ignore padding
# TODO: contribute ignoring padding for cross-entropy back to MXNet
ignore = (label == C.PAD_ID).astype(dtype=pred.dtype)
if self.loss_config.label_smoothing > 0.0:
loss = self.cross_entropy_smoothed(pred, label, ignore)
else:
loss = self.cross_entropy(pred, label, ignore)
# Sum, normalizing if needed
if self.loss_config.normalization_type == C.LOSS_NORM_VALID:
loss = loss / mx.nd.sum(1 - ignore)
self.num_inst += 1
elif self.loss_config.normalization_type == C.LOSS_NORM_BATCH:
# When not normalizing, we divide by the batch size (number of sequences)
# NOTE: This is different from MXNet's metrics
self.num_inst += batch_size
            self.sum_metric += mx.nd.sum(loss).asscalar()
| [
"str",
"int",
"str",
"LossConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"LossConfig",
"mx.sym.Symbol",
"mx.sym.Symbol",
"LossConfig"
] | [
1200,
1234,
1276,
1554,
2283,
2306,
3046,
3304,
3327,
5021
] | [
1203,
1237,
1279,
1564,
2296,
2319,
3056,
3317,
3340,
5031
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/lr_scheduler.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from math import sqrt
from typing import List, Optional, Tuple
import sockeye.constants as C
from sockeye.utils import check_condition
logger = logging.getLogger(__name__)
class LearningRateScheduler:
def __init__(self, warmup: int = 0) -> None:
self.base_lr = None # Note: will be overwritten by MXNet optimizer
check_condition(warmup >= 0, "warmup needs to be >= 0.")
self.warmup = warmup
        self.log_warmup_every_t = max(self.warmup // 10, 1)  # max() guards against modulo-by-zero for warmup < 10
self.last_warmup_log = -1
def __call__(self, num_updates):
pass
def _warmup(self, num_updates):
"""
        Returns the warmed-up learning rate, i.e. a linearly increasing fraction of base_lr.
"""
assert self.base_lr is not None
if not self.warmup:
return self.base_lr
fraction = (num_updates + 1) * self.base_lr / (self.warmup + 1)
if num_updates > self.last_warmup_log and num_updates % self.log_warmup_every_t == 0:
self.last_warmup_log = num_updates
logger.info("Learning rate warmup: %3.0f%%", fraction/self.base_lr * 100.0)
return fraction
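# Illustrative warmup arithmetic (not part of the original file): with
# base_lr = 0.0003 and warmup = 1000, update 499 yields
# (499 + 1) * 0.0003 / (1000 + 1) ≈ 0.00015, i.e. roughly 50% of base_lr.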
class AdaptiveLearningRateScheduler(LearningRateScheduler):
"""
    Learning rate scheduler that implements `new_evaluation_result` and adaptively
    adjusts the learning rate accordingly.
"""
def new_evaluation_result(self, has_improved: bool) -> bool:
"""
Returns true if the parameters should be reset to the ones with the best validation score.
:param has_improved: Whether the model improved on held-out validation data.
:return: True if parameters should be reset to the ones with best validation score.
"""
return False
class LearningRateSchedulerFixedStep(AdaptiveLearningRateScheduler):
"""
Use a fixed schedule of learning rate steps: lr_1 for N steps, lr_2 for M steps, etc.
:param schedule: List of learning rate step tuples in the form (rate, num_updates).
:param updates_per_checkpoint: Updates per checkpoint.
"""
def __init__(self, schedule: List[Tuple[float, int]], updates_per_checkpoint: int) -> None:
super().__init__()
check_condition(all(num_updates > 0 for (_, num_updates) in schedule),
"num_updates for each step should be > 0.")
check_condition(all(num_updates % updates_per_checkpoint == 0 for (_, num_updates) in schedule),
"num_updates for each step should be divisible by updates_per_checkpoint.")
self.schedule = schedule
self.current_step = 0
self.current_rate = 0.
self.current_step_num_updates = 0
self.current_step_started_at = 0
self.next_step_at = 0
self.latest_t = 0
self._update_rate(self.current_step)
def new_evaluation_result(self, has_improved: bool) -> bool:
"""
Returns true if the parameters should be reset to the ones with the best validation score.
:param has_improved: Whether the model improved on held-out validation data.
:return: True if parameters should be reset to the ones with best validation score.
"""
logger.info("Checkpoint learning rate: %1.2e (%d/%d updates)",
self.current_rate,
self.latest_t - self.current_step_started_at,
self.current_step_num_updates)
if self.latest_t >= self.next_step_at:
self.current_step += 1
self._update_rate(self.current_step)
return False
def _update_rate(self, step: int):
        if step < len(self.schedule):
self.current_rate, self.current_step_num_updates = self.schedule[step]
self.current_step_started_at = self.latest_t
self.next_step_at += self.current_step_num_updates
logger.info("Changing learning rate to %1.2e for %d updates",
self.current_rate,
self.current_step_num_updates)
def __call__(self, t: int):
self.latest_t = max(t, self.latest_t)
return self.current_rate
@staticmethod
def parse_schedule_str(schedule_str: str) -> List[Tuple[float, int]]:
"""
Parse learning schedule string.
:param schedule_str: String in form rate1:num_updates1[,rate2:num_updates2,...]
:return: List of tuples (learning_rate, num_updates).
"""
schedule = list()
for step in schedule_str.split(","):
rate, num_updates = step.split(":")
schedule.append((float(rate), int(num_updates)))
return schedule
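# Illustrative use of the schedule parser (not part of the original file):
#
#     schedule = LearningRateSchedulerFixedStep.parse_schedule_str("0.0003:10000,0.0001:20000")
#     # -> [(0.0003, 10000), (0.0001, 20000)]
#     scheduler = LearningRateSchedulerFixedStep(schedule, updates_per_checkpoint=1000)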
class LearningRateSchedulerInvSqrtT(LearningRateScheduler):
"""
Learning rate schedule: lr / sqrt(1 + factor * t).
Note: The factor is calculated from the half life of the learning rate.
:param updates_per_checkpoint: Number of batches between checkpoints.
:param half_life: Half life of the learning rate in number of checkpoints.
:param warmup: Number of (linear) learning rate increases to warm-up.
"""
def __init__(self, updates_per_checkpoint: int, half_life: int, warmup: int = 0) -> None:
super().__init__(warmup)
check_condition(updates_per_checkpoint > 0, "updates_per_checkpoint needs to be > 0.")
check_condition(half_life > 0, "half_life needs to be > 0.")
# 0.5 base_lr = base_lr * sqrt(1 + T * factor)
# then factor = 3 ./ T, with T = half_life * updates_per_checkpoint
self.factor = 3. / (half_life * updates_per_checkpoint)
self.t_last_log = -1
self.log_every_t = int(half_life * updates_per_checkpoint)
def __call__(self, num_updates: int):
lr = min(self.base_lr / sqrt(1 + num_updates * self.factor), self._warmup(num_updates) if self.warmup > 0 else C.LARGE_POSITIVE_VALUE)
# Note: this method is called once per parameter for the same t. Making sure to just log once.
if num_updates > self.t_last_log and num_updates % self.log_every_t == 0:
logger.info("Learning rate currently at %1.2e", lr)
self.t_last_log = num_updates
return lr
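# Worked half-life example (not part of the original file): with
# updates_per_checkpoint = 1000 and half_life = 3, factor = 3 / 3000 = 0.001, so
# at t = 3000 updates the rate is base_lr / sqrt(1 + 3000 * 0.001) = base_lr / 2,
# i.e. the learning rate is halved after 3 checkpoints, as intended.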
class LearningRateSchedulerInvT(LearningRateScheduler):
"""
Learning rate schedule: lr / (1 + factor * t).
Note: The factor is calculated from the half life of the learning rate.
:param updates_per_checkpoint: Number of batches between checkpoints.
:param half_life: Half life of the learning rate in number of checkpoints.
"""
def __init__(self, updates_per_checkpoint: int, half_life: int, warmup: int = 0) -> None:
super().__init__(warmup)
check_condition(updates_per_checkpoint > 0, "updates_per_checkpoint needs to be > 0.")
check_condition(half_life > 0, "half_life needs to be > 0.")
# 0.5 base_lr = base_lr * (1 + T * factor)
# then factor = 1 ./ T, with T = half_life * updates_per_checkpoint
self.factor = 1. / (half_life * updates_per_checkpoint)
self.t_last_log = -1
self.log_every_t = int(half_life * updates_per_checkpoint)
def __call__(self, num_updates: int):
lr = min(self.base_lr / (1 + num_updates * self.factor), self._warmup(num_updates) if self.warmup > 0 else C.LARGE_POSITIVE_VALUE)
# Note: this method is called once per parameter for the same t. Making sure to just log once.
if num_updates > self.t_last_log and num_updates % self.log_every_t == 0:
logger.info("Learning rate currently at %1.2e", lr)
self.t_last_log = num_updates
return lr
class LearningRateSchedulerPlateauReduce(AdaptiveLearningRateScheduler):
"""
Lower the learning rate as soon as the validation score plateaus.
:param reduce_factor: Factor to reduce learning rate with.
:param reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is reduced.
"""
def __init__(self, reduce_factor: float, reduce_num_not_improved: int, warmup: int = 0) -> None:
super().__init__(warmup)
check_condition(0.0 < reduce_factor <= 1, "reduce_factor should be in ]0,1].")
self.reduce_factor = reduce_factor
self.reduce_num_not_improved = reduce_num_not_improved
self.num_not_improved = 0
        self.lr = None  # type: Optional[float]
self.t_last_log = -1
self.warmed_up = not self.warmup > 0
logger.info("Will reduce the learning rate by a factor of %.2f whenever"
" the validation score doesn't improve %d times.",
reduce_factor, reduce_num_not_improved)
def new_evaluation_result(self, has_improved: bool) -> bool:
"""
Returns true if the parameters should be reset to the ones with the best validation score.
:param has_improved: Whether the model improved on held-out validation data.
:return: True if parameters should be reset to the ones with best validation score.
"""
if self.lr is None:
assert self.base_lr is not None
self.lr = self.base_lr
if has_improved:
self.num_not_improved = 0
else:
self.num_not_improved += 1
if self.num_not_improved >= self.reduce_num_not_improved and self.reduce_factor < 1.0 and self.warmed_up:
old_lr = self.lr
self.lr *= self.reduce_factor
logger.info("%d checkpoints since improvement or rate scaling, "
"lowering learning rate: %1.2e -> %1.2e", self.num_not_improved, old_lr, self.lr)
self.num_not_improved = 0
return True
return False
def __call__(self, t):
if self.lr is None:
assert self.base_lr is not None
self.lr = self.base_lr
lr = self._warmup(t) if self.warmup > 0 and t <= self.warmup else self.lr
if t == self.warmup:
self.warmed_up = True
return lr
def __repr__(self):
return "LearningRateSchedulerPlateauReduce(reduce_factor=%.2f, " \
"reduce_num_not_improved=%d)" % (self.reduce_factor, self.num_not_improved)
def get_lr_scheduler(scheduler_type: str,
updates_per_checkpoint: int,
learning_rate_half_life: int,
learning_rate_reduce_factor: float,
learning_rate_reduce_num_not_improved: int,
learning_rate_schedule: Optional[List[Tuple[float, int]]] = None,
learning_rate_warmup: Optional[int] = 0) -> Optional[LearningRateScheduler]:
"""
Returns a learning rate scheduler.
:param scheduler_type: Scheduler type.
:param updates_per_checkpoint: Number of batches between checkpoints.
:param learning_rate_half_life: Half life of the learning rate in number of checkpoints.
:param learning_rate_reduce_factor: Factor to reduce learning rate with.
:param learning_rate_reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is
reduced.
    :param learning_rate_schedule: Optional fixed schedule of (learning rate, num updates) steps,
           only used with the fixed-step scheduler.
    :param learning_rate_warmup: Number of batches that the learning rate is linearly increased.
:raises: ValueError if unknown scheduler_type
:return: Learning rate scheduler.
"""
check_condition(learning_rate_schedule is None or scheduler_type == C.LR_SCHEDULER_FIXED_STEP,
"Learning rate schedule can only be used with '%s' learning rate scheduler."
% C.LR_SCHEDULER_FIXED_STEP)
if scheduler_type is None:
return None
if scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T:
return LearningRateSchedulerInvSqrtT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup)
elif scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_T:
return LearningRateSchedulerInvT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup)
elif scheduler_type == C.LR_SCHEDULER_FIXED_STEP:
check_condition(learning_rate_schedule is not None,
"learning_rate_schedule needed for %s scheduler" % C.LR_SCHEDULER_FIXED_STEP)
return LearningRateSchedulerFixedStep(learning_rate_schedule, updates_per_checkpoint)
elif scheduler_type == C.LR_SCHEDULER_PLATEAU_REDUCE:
check_condition(learning_rate_reduce_factor is not None,
"learning_rate_reduce_factor needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE)
check_condition(learning_rate_reduce_num_not_improved is not None,
"learning_rate_reduce_num_not_improved needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE)
if learning_rate_reduce_factor >= 1.0:
logger.warning("Not using %s learning rate scheduling: learning_rate_reduce_factor == 1.0"
% C.LR_SCHEDULER_PLATEAU_REDUCE)
return None
return LearningRateSchedulerPlateauReduce(learning_rate_reduce_factor, learning_rate_reduce_num_not_improved,
learning_rate_warmup)
else:
raise ValueError("Unknown learning rate scheduler type %s." % scheduler_type)
| [
"bool",
"List[Tuple[float, int]]",
"int",
"bool",
"int",
"int",
"str",
"int",
"int",
"int",
"int",
"int",
"int",
"float",
"int",
"bool",
"str",
"int",
"int",
"float",
"int"
] | [
1940,
2636,
2685,
3407,
4136,
4595,
4740,
5677,
5693,
6249,
7111,
7127,
7680,
8515,
8547,
9214,
10760,
10810,
10861,
10916,
10983
] | [
1944,
2659,
2688,
3411,
4139,
4598,
4743,
5680,
5696,
6252,
7114,
7130,
7683,
8520,
8550,
9218,
10763,
10813,
10864,
10921,
10986
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/model.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import logging
import os
from typing import cast, Dict, Optional, Tuple
import mxnet as mx
from sockeye import __version__
from sockeye.config import Config
from . import constants as C
from . import data_io
from . import decoder
from . import encoder
from . import layers
from . import loss
from . import utils
logger = logging.getLogger(__name__)
class ModelConfig(Config):
"""
ModelConfig defines model parameters defined at training time which are relevant to model inference.
Add new model parameters here. If you want backwards compatibility for models trained with code that did not
contain these parameters, provide a reasonable default under default_values.
:param config_data: Used training data.
:param max_seq_len_source: Maximum source sequence length to unroll during training.
:param max_seq_len_target: Maximum target sequence length to unroll during training.
:param vocab_source_size: Source vocabulary size.
:param vocab_target_size: Target vocabulary size.
:param config_embed_source: Embedding config for source.
:param config_embed_target: Embedding config for target.
:param config_encoder: Encoder configuration.
:param config_decoder: Decoder configuration.
:param config_loss: Loss configuration.
:param weight_tying: Enables weight tying if True.
:param weight_tying_type: Determines which weights get tied. Must be set if weight_tying is enabled.
"""
def __init__(self,
config_data: data_io.DataConfig,
max_seq_len_source: int,
max_seq_len_target: int,
vocab_source_size: int,
vocab_target_size: int,
config_embed_source: encoder.EmbeddingConfig,
config_embed_target: encoder.EmbeddingConfig,
config_encoder: Config,
config_decoder: Config,
config_loss: loss.LossConfig,
weight_tying: bool = False,
weight_tying_type: Optional[str] = C.WEIGHT_TYING_TRG_SOFTMAX,
weight_normalization: bool = False) -> None:
super().__init__()
self.config_data = config_data
self.max_seq_len_source = max_seq_len_source
self.max_seq_len_target = max_seq_len_target
self.vocab_source_size = vocab_source_size
self.vocab_target_size = vocab_target_size
self.config_embed_source = config_embed_source
self.config_embed_target = config_embed_target
self.config_encoder = config_encoder
self.config_decoder = config_decoder
self.config_loss = config_loss
self.weight_tying = weight_tying
self.weight_tying_type = weight_tying_type
self.weight_normalization = weight_normalization
if weight_tying and weight_tying_type is None:
raise RuntimeError("weight_tying_type must be specified when using weight_tying.")
class SockeyeModel:
"""
SockeyeModel shares components needed for both training and inference.
The main components of a Sockeye model are
1) Source embedding
2) Target embedding
3) Encoder
4) Decoder
5) Output Layer
ModelConfig contains parameters and their values that are fixed at training time and must be re-used at inference
time.
:param config: Model configuration.
"""
def __init__(self, config: ModelConfig) -> None:
self.config = copy.deepcopy(config)
self.config.freeze()
logger.info("%s", self.config)
self.embedding_source = None # type: Optional[encoder.Embedding]
self.encoder = None # type: Optional[encoder.Encoder]
self.embedding_target = None # type: Optional[encoder.Embedding]
self.decoder = None # type: Optional[decoder.Decoder]
self.output_layer = None # type: Optional[layers.OutputLayer]
self._is_built = False
self.params = None # type: Optional[Dict]
def save_config(self, folder: str):
"""
Saves model configuration to <folder>/config
:param folder: Destination folder.
"""
fname = os.path.join(folder, C.CONFIG_NAME)
self.config.save(fname)
logger.info('Saved config to "%s"', fname)
@staticmethod
def load_config(fname: str) -> ModelConfig:
"""
Loads model configuration.
:param fname: Path to load model configuration from.
:return: Model configuration.
"""
config = ModelConfig.load(fname)
logger.info('ModelConfig loaded from "%s"', fname)
return cast(ModelConfig, config) # type: ignore
def save_params_to_file(self, fname: str):
"""
Saves model parameters to file.
:param fname: Path to save parameters to.
"""
assert self._is_built
utils.save_params(self.params.copy(), fname)
logging.info('Saved params to "%s"', fname)
def load_params_from_file(self, fname: str):
"""
Loads and sets model parameters from file.
:param fname: Path to load parameters from.
"""
assert self._is_built
utils.check_condition(os.path.exists(fname), "No model parameter file found under %s. "
"This is either not a model directory or the first training "
"checkpoint has not happened yet." % fname)
self.params, _ = utils.load_params(fname)
logger.info('Loaded params from "%s"', fname)
@staticmethod
def save_version(folder: str):
"""
Saves version to <folder>/version.
:param folder: Destination folder.
"""
fname = os.path.join(folder, C.VERSION_NAME)
with open(fname, "w") as out:
out.write(__version__)
def _get_embed_weights(self) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol]:
"""
Returns embedding parameters for source and target.
:return: Tuple of source and target parameter symbols.
"""
w_embed_source = mx.sym.Variable(C.SOURCE_EMBEDDING_PREFIX + "weight",
shape=(self.config.config_embed_source.vocab_size,
self.config.config_embed_source.num_embed))
w_embed_target = mx.sym.Variable(C.TARGET_EMBEDDING_PREFIX + "weight",
shape=(self.config.config_embed_target.vocab_size,
self.config.config_embed_target.num_embed))
w_out_target = mx.sym.Variable("target_output_weight",
shape=(self.config.vocab_target_size, self.decoder.get_num_hidden()))
if self.config.weight_tying:
if C.WEIGHT_TYING_SRC in self.config.weight_tying_type \
and C.WEIGHT_TYING_TRG in self.config.weight_tying_type:
logger.info("Tying the source and target embeddings.")
w_embed_source = w_embed_target = mx.sym.Variable(C.SHARED_EMBEDDING_PREFIX + "weight",
shape=(self.config.config_embed_source.vocab_size,
self.config.config_embed_source.num_embed))
if C.WEIGHT_TYING_SOFTMAX in self.config.weight_tying_type:
logger.info("Tying the target embeddings and output layer parameters.")
utils.check_condition(self.config.config_embed_target.num_embed == self.decoder.get_num_hidden(),
"Weight tying requires target embedding size and decoder hidden size " +
"to be equal: %d vs. %d" % (self.config.config_embed_target.num_embed,
self.decoder.get_num_hidden()))
w_out_target = w_embed_target
return w_embed_source, w_embed_target, w_out_target
def _build_model_components(self):
"""
Instantiates model components.
"""
# encoder & decoder first (to know the decoder depth)
self.encoder = encoder.get_encoder(self.config.config_encoder)
self.decoder = decoder.get_decoder(self.config.config_decoder)
# source & target embeddings
embed_weight_source, embed_weight_target, out_weight_target = self._get_embed_weights()
self.embedding_source = encoder.Embedding(self.config.config_embed_source,
prefix=C.SOURCE_EMBEDDING_PREFIX,
embed_weight=embed_weight_source)
self.embedding_target = encoder.Embedding(self.config.config_embed_target,
prefix=C.TARGET_EMBEDDING_PREFIX,
embed_weight=embed_weight_target)
# output layer
self.output_layer = layers.OutputLayer(hidden_size=self.decoder.get_num_hidden(),
vocab_size=self.config.vocab_target_size,
weight=out_weight_target,
weight_normalization=self.config.weight_normalization)
self._is_built = True
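# Illustrative usage sketch (not part of the original file; the path and the
# call sequence are assumptions based on how training/inference code typically
# drives this class):
#
#     config = SockeyeModel.load_config("model_dir/config")
#     model = SockeyeModel(config)
#     model._build_model_components()
#     model.load_params_from_file("model_dir/params.best")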
| [
"data_io.DataConfig",
"int",
"int",
"int",
"int",
"encoder.EmbeddingConfig",
"encoder.EmbeddingConfig",
"Config",
"Config",
"loss.LossConfig",
"ModelConfig",
"str",
"str",
"str",
"str",
"str"
] | [
2084,
2141,
2183,
2224,
2265,
2308,
2371,
2429,
2470,
2508,
3984,
4580,
4888,
5267,
5567,
6191
] | [
2102,
2144,
2186,
2227,
2268,
2331,
2394,
2435,
2476,
2523,
3995,
4583,
4891,
5270,
5570,
6194
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/optimizers.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Extra optimizers not included in MXNet.
"""
from abc import abstractmethod
from collections import namedtuple
import math
from typing import Optional, Tuple
import mxnet as mx
from sockeye.utils import check_condition
BatchState = namedtuple("BatchState", ["metric_val"])
CheckpointState = namedtuple("CheckpointState", ["checkpoint", "metric_val"])
class SockeyeOptimizer(mx.optimizer.Optimizer):
"""
Optimizer that has access to additional information from the last batch and the last checkpoint
when updating weights.
:param request_optimized_metric: Whether to request the optimized metric (e.g. perplexity) in
place of optimizer loss (e.g. cross-entropy).
"""
def __init__(self, request_optimized_metric: bool = False, **kwargs) -> None:
self.request_optimized_metric = request_optimized_metric
self.batch_state = None # type: Optional[BatchState]
self.checkpoint_state = None # type: Optional[CheckpointState]
super().__init__(**kwargs)
def pre_update_batch(self, batch_state: BatchState):
"""
Called automatically prior to `update()` for each batch.
"""
self.batch_state = batch_state
def pre_update_checkpoint(self, checkpoint_state: CheckpointState):
"""
Called automatically at each checkpoint.
"""
self.checkpoint_state = checkpoint_state
@abstractmethod
def update(self, index, weight, grad, state):
"""
Called automatically as normal.
"""
pass
class EveState:
"""
Storage class for Eve optimizer state information.
"""
def __init__(self, weight: mx.nd.NDArray) -> None:
# Mean and variance for Adam
self.mean = mx.nd.zeros_like(weight, ctx=weight.context)
self.variance = mx.nd.zeros_like(weight, ctx=weight.context)
# For Nadam warmup
self.m_schedule = 1.
# Values for computing Eve's d term (batch)
self.batch_f_hat_prev = 0.
self.batch_d_prev = 1.
# Values for computing Eve's d term (checkpoint)
self.checkpoint_prev = 0
self.checkpoint_f_hat_prev = 0.
self.checkpoint_d_prev = 1.
@mx.optimizer.Optimizer.register
class Eve(SockeyeOptimizer):
"""
The Eve optimizer is an extended version of Adam that incorporates feedback from the objective
function to further adapt the learning rate.
* "Improving Stochastic Gradient Descent with Feedback"
Jayanth Koushik; Hiroaki Hayashi (https://arxiv.org/abs/1611.01505)
This version allows:
* Using validation checkpoint loss in addition to training batch loss.
* Using Adam or Nesterov Adam (Nadam) as the base algorithm
    Gradient rescaling, gradient clipping, and weight decay are applied to the gradient before the Eve update (see update()).
:param learning_rate: The initial learning rate.
:param beta1: Exponential decay rate for the first moment estimates.
:param beta2: Exponential decay rate for the second moment estimates.
:param beta3_batch: Exponential decay rate for batch objective relative change.
:param beta3_checkpoint: Exponential decay rate for checkpoint objective relative change.
:param epsilon: Small value to avoid division by 0.
:param k_lo: Lower threshold for relative change.
:param k_hi: Upper threshold for relative change.
:param use_batch_objective: Incorporate batch objective (can use both).
:param use_checkpoint_objective: Incorporate checkpoint objective (can use both).
:param use_nesterov_momentum: Use Nesterov-accelerated adaptive moment estimation (update rules
used by "Nadam" optimizer).
"""
def __init__(self,
learning_rate: float = 0.001,
beta1: float = 0.9,
beta2: float = 0.999,
beta3_batch: float = 0.999,
beta3_checkpoint: float = 0.,
epsilon: float = 1e-8,
k_lo: float = 0.1,
k_hi: float = 10,
schedule_decay: float = 0.004,
use_batch_objective: bool = True,
use_checkpoint_objective: bool = False,
use_nesterov_momentum: bool = False,
**kwargs) -> None:
check_condition(any((use_batch_objective, use_checkpoint_objective)),
"Must use at least one of: batch objective, checkpoint objective")
super().__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.beta3_batch = beta3_batch
self.beta3_checkpoint = beta3_checkpoint
self.epsilon = epsilon
self.k_lo = k_lo
self.k_hi = k_hi
self.schedule_decay = schedule_decay
self.use_batch_objective = use_batch_objective
self.use_checkpoint_objective = use_checkpoint_objective
self.use_nesterov_momentum = use_nesterov_momentum
def create_state(self, index: int, weight: mx.nd.NDArray) -> EveState:
return EveState(weight)
def update(self, index: int, weight: mx.nd.NDArray, grad: mx.nd.NDArray, state: EveState):
assert isinstance(weight, mx.nd.NDArray)
assert isinstance(grad, mx.nd.NDArray)
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
t = self._index_update_count[index]
# Preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -1. * self.clip_gradient, self.clip_gradient)
# First compute Eve's f_hat and d terms
def compute_d(t: int, f: float, f_hat_prev: float, d_prev: float, beta: float) -> Tuple[float, float]:
"""Compute Eve's f_hat and d terms as described in paper"""
if t > 1:
# The original paper has a typo in the algorithm here. The following lines are re-
# written to reflect the actual logic presented in the authors' longer explanation.
if f <= f_hat_prev:
delta_lo = 1. / (self.k_hi + 1.)
delta_hi = 1. / (self.k_lo + 1.)
else:
delta_lo = self.k_lo + 1.
delta_hi = self.k_hi + 1.
# ^ End modified section ^
c = min(max(delta_lo, f / f_hat_prev), delta_hi)
f_hat = c * f_hat_prev
r = abs(f_hat - f_hat_prev) / min(f_hat, f_hat_prev)
d = beta * d_prev + (1. - beta) * r
else:
f_hat = f
d = 1.
return (f_hat, d)
batch_d, checkpoint_d = None, None
# Computation occurs for each batch
if self.use_batch_objective:
batch_f_hat, batch_d = compute_d(t,
self.batch_state.metric_val,
state.batch_f_hat_prev,
state.batch_d_prev,
self.beta3_batch)
state.batch_f_hat_prev = batch_f_hat
state.batch_d_prev = batch_d
# Computation occurs once per checkpoint using the checkpoint number as t. Prior to the
# first checkpoint, d = 1.
if self.use_checkpoint_objective:
# Only need to recompute if we've seen a new checkpoint since the previous batch update
if (isinstance(self.checkpoint_state, CheckpointState) and
self.checkpoint_state.checkpoint != state.checkpoint_prev):
checkpoint = self.checkpoint_state.checkpoint
checkpoint_f_hat, checkpoint_d = compute_d(checkpoint,
self.checkpoint_state.metric_val,
state.checkpoint_f_hat_prev,
state.checkpoint_d_prev,
self.beta3_checkpoint)
state.checkpoint_prev = checkpoint
state.checkpoint_f_hat_prev = checkpoint_f_hat
state.checkpoint_d_prev = checkpoint_d
else:
checkpoint_d = state.checkpoint_d_prev
# Batch and checkpoint contribute equally when both are used
if self.use_batch_objective and self.use_checkpoint_objective:
d = (batch_d + checkpoint_d) / 2.
elif self.use_batch_objective:
d = batch_d
elif self.use_checkpoint_objective:
d = checkpoint_d
else:
raise ValueError
# Update mean and variance (Adam/Nadam)
m_t, v_t = state.mean, state.variance
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
# Finally apply either Adam or Nadam update
if self.use_nesterov_momentum:
# Nadam warming momentum schedule
momentum_t = self.beta1 * (1. - 0.5 * 0.96**(t * self.schedule_decay))
momentum_t_1 = self.beta1 * (1. - 0.5 * 0.96**((t + 1) * self.schedule_decay))
state.m_schedule = state.m_schedule * momentum_t
m_schedule_next = state.m_schedule * momentum_t_1
# Nadam update terms
grad_prime = grad / (1. - state.m_schedule)
m_t_prime = m_t / (1. - m_schedule_next)
v_t_prime = v_t / (1. - self.beta2**t)
m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
# Final weight update with extra d term
weight[:] -= lr * m_t_bar / (d * mx.nd.sqrt(v_t_prime) + self.epsilon)
else:
# Adam warmup
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2) / coef1
# Final weight update with extra d term
weight[:] = weight - lr * m_t / (d * mx.nd.sqrt(v_t) + self.epsilon)
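# Illustrative usage sketch (not part of the original file; assumes the standard
# MXNet optimizer registry and an arbitrary batch_loss value):
#
#     optimizer = mx.optimizer.create('eve', learning_rate=0.001,
#                                     use_batch_objective=True,
#                                     use_nesterov_momentum=True)
#     optimizer.pre_update_batch(BatchState(metric_val=batch_loss))
#     # module.update() then calls optimizer.update() once per parameter.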
| [
"BatchState",
"CheckpointState",
"mx.nd.NDArray",
"int",
"mx.nd.NDArray",
"int",
"mx.nd.NDArray",
"mx.nd.NDArray",
"EveState",
"int",
"float",
"float",
"float",
"float"
] | [
1658,
1854,
2262,
5611,
5624,
5713,
5726,
5747,
5769,
6303,
6311,
6330,
6345,
6358
] | [
1668,
1869,
2275,
5614,
5637,
5716,
5739,
5760,
5777,
6306,
6316,
6335,
6350,
6363
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/output_handler.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from abc import ABC, abstractmethod
import sys
from typing import Optional
import sockeye.constants as C
from . import data_io
from . import inference
from sockeye.utils import plot_attention, print_attention_text, get_alignments
def get_output_handler(output_type: str,
output_fname: Optional[str],
sure_align_threshold: float) -> 'OutputHandler':
"""
:param output_type: Type of output handler.
:param output_fname: Output filename. If none sys.stdout is used.
:param sure_align_threshold: Threshold to consider an alignment link as 'sure'.
:raises: ValueError for unknown output_type.
:return: Output handler.
"""
output_stream = sys.stdout if output_fname is None else data_io.smart_open(output_fname, mode='w')
if output_type == C.OUTPUT_HANDLER_TRANSLATION:
return StringOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_TRANSLATION_WITH_SCORE:
return StringWithScoreOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENTS:
return StringWithAlignmentsOutputHandler(output_stream, sure_align_threshold)
elif output_type == C.OUTPUT_HANDLER_TRANSLATION_WITH_ALIGNMENT_MATRIX:
return StringWithAlignmentMatrixOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_BENCHMARK:
return BenchmarkOutputHandler(output_stream)
elif output_type == C.OUTPUT_HANDLER_ALIGN_PLOT:
return AlignPlotHandler(plot_prefix="align" if output_fname is None else output_fname)
elif output_type == C.OUTPUT_HANDLER_ALIGN_TEXT:
return AlignTextHandler(sure_align_threshold)
else:
raise ValueError("unknown output type")
class OutputHandler(ABC):
"""
Abstract output handler interface
"""
@abstractmethod
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
pass
class StringOutputHandler(OutputHandler):
"""
Output handler to write translation to a stream
:param stream: Stream to write translations to (e.g. sys.stdout).
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
self.stream.write("%s\n" % t_output.translation)
self.stream.flush()
class StringWithScoreOutputHandler(OutputHandler):
"""
Output handler to write translation score and translation to a stream. The score and translation
string are tab-delimited.
:param stream: Stream to write translations to (e.g. sys.stdout).
"""
def __init__(self, stream):
self.stream = stream
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
self.stream.write("{:.3f}\t{}\n".format(t_output.score, t_output.translation))
self.stream.flush()
class StringWithAlignmentsOutputHandler(StringOutputHandler):
"""
Output handler to write translations and alignments to a stream. Translation and alignment string
are separated by a tab.
Alignments are written in the format:
<src_index>-<trg_index> ...
An alignment link is included if its probability is above the threshold.
:param stream: Stream to write translations and alignments to.
:param threshold: Threshold for including alignment links.
"""
def __init__(self, stream, threshold: float) -> None:
super().__init__(stream)
self.threshold = threshold
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
alignments = " ".join(
["%d-%d" % (s, t) for s, t in get_alignments(t_output.attention_matrix, threshold=self.threshold)])
self.stream.write("%s\t%s\n" % (t_output.translation, alignments))
self.stream.flush()
class StringWithAlignmentMatrixOutputHandler(StringOutputHandler):
"""
Output handler to write translations and an alignment matrix to a stream.
Note that unlike other output handlers each input sentence will result in an output
consisting of multiple lines.
More concretely the format is:
```
sentence id ||| target words ||| score ||| source words ||| number of source words ||| number of target words
ALIGNMENT FOR T_1
ALIGNMENT FOR T_2
...
ALIGNMENT FOR T_n
```
where the alignment is a list of probabilities of alignment to the source words.
:param stream: Stream to write translations and alignments to.
"""
def __init__(self, stream) -> None:
super().__init__(stream)
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
line = "{sent_id:d} ||| {target} ||| {score:f} ||| {source} ||| {source_len:d} ||| {target_len:d}\n"
self.stream.write(line.format(sent_id=t_input.id,
target=" ".join(t_output.tokens),
score=t_output.score,
source=" ".join(t_input.tokens),
source_len=len(t_input.tokens),
target_len=len(t_output.tokens)))
attention_matrix = t_output.attention_matrix.T
for i in range(0, attention_matrix.shape[0]):
attention_vector = attention_matrix[i]
self.stream.write(" ".join(["%f" % value for value in attention_vector]))
self.stream.write("\n")
self.stream.write("\n")
self.stream.flush()
class BenchmarkOutputHandler(StringOutputHandler):
"""
Output handler to write detailed benchmark information to a stream.
"""
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
self.stream.write("input=%s\toutput=%s\tinput_tokens=%d\toutput_tokens=%d\ttranslation_time=%0.4f\n" %
(t_input.sentence,
t_output.translation,
len(t_input.tokens),
len(t_output.tokens),
t_walltime))
self.stream.flush()
class AlignPlotHandler(OutputHandler):
"""
Output handler to plot alignment matrices to PNG files.
:param plot_prefix: Prefix for generated PNG files.
"""
def __init__(self, plot_prefix: str) -> None:
self.plot_prefix = plot_prefix
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
plot_attention(t_output.attention_matrix,
t_input.tokens,
t_output.tokens,
"%s_%d.png" % (self.plot_prefix, t_input.id))
class AlignTextHandler(OutputHandler):
"""
Output handler to write alignment matrices as ASCII art.
:param threshold: Threshold for considering alignment links as sure.
"""
def __init__(self, threshold: float) -> None:
self.threshold = threshold
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total wall-clock time for translation.
"""
print_attention_text(t_output.attention_matrix,
t_input.tokens,
t_output.tokens,
self.threshold)
| [
"str",
"Optional[str]",
"float",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"float",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"str",
"inference.TranslatorInput",
"inference.TranslatorOutput",
"float",
"inference.TranslatorInput",
"inference.TranslatorOutput"
] | [
835,
877,
937,
2457,
2509,
3057,
3109,
3810,
3862,
4749,
4879,
4931,
6220,
6272,
7558,
7610,
8428,
8527,
8579,
9248,
9345,
9397
] | [
838,
890,
942,
2482,
2535,
3082,
3135,
3835,
3888,
4754,
4904,
4957,
6245,
6298,
7583,
7636,
8431,
8552,
8605,
9253,
9370,
9423
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/prepare_data.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import os
from . import arguments
from . import constants as C
from . import data_io
from . import utils
from . import vocab
from .log import setup_main_logger
logger = setup_main_logger(__name__, file_logging=False, console=True)
def main():
params = argparse.ArgumentParser(description='Preprocesses and shards training data.')
arguments.add_prepare_data_cli_args(params)
args = params.parse_args()
output_folder = os.path.abspath(args.output)
os.makedirs(output_folder, exist_ok=True)
global logger
logger = setup_main_logger(__name__, file_logging=True, path=os.path.join(output_folder, C.LOG_NAME))
utils.seedRNGs(args.seed)
minimum_num_shards = args.min_num_shards
samples_per_shard = args.num_samples_per_shard
bucketing = not args.no_bucketing
bucket_width = args.bucket_width
shared_vocab = args.shared_vocab
vocab_source_path = args.source_vocab
vocab_target_path = args.target_vocab
num_words_source, num_words_target = args.num_words
word_min_count_source, word_min_count_target = args.word_min_count
max_len_source, max_len_target = args.max_seq_len
vocab_source, vocab_target = vocab.load_or_create_vocabs(source=args.source,
target=args.target,
source_vocab_path=args.source_vocab,
target_vocab_path=args.target_vocab,
shared_vocab=args.shared_vocab,
num_words_source=num_words_source,
word_min_count_source=word_min_count_source,
num_words_target=num_words_target,
word_min_count_target=word_min_count_target)
data_io.prepare_data(args.source, args.target,
vocab_source, vocab_target,
vocab_source_path, vocab_target_path,
shared_vocab,
max_len_source,
max_len_target,
bucketing=bucketing,
bucket_width=bucket_width,
samples_per_shard=samples_per_shard,
min_num_shards=minimum_num_shards,
output_prefix=output_folder)
if __name__ == "__main__":
main()
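# Illustrative invocation (not part of the original file; the exact flag
# spellings live in sockeye.arguments and are assumed here):
#
#     python -m sockeye.prepare_data --source train.src --target train.trg \
#         --output prepared_data --shared-vocab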
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/rnn.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# List is needed for mypy, but not used in the code, only in special comments
from typing import Optional, List, Iterable # NOQA pylint: disable=unused-import
import mxnet as mx
from sockeye.config import Config
from sockeye.layers import LayerNormalization
from . import constants as C
from . import utils
class RNNConfig(Config):
"""
RNN configuration.
:param cell_type: RNN cell type.
:param num_hidden: Number of RNN hidden units.
:param num_layers: Number of RNN layers.
:param dropout_inputs: Dropout probability on RNN inputs (Gal, 2015).
:param dropout_states: Dropout probability on RNN states (Gal, 2015).
:param dropout_recurrent: Dropout probability on cell update (Semeniuta, 2016).
:param residual: Whether to add residual connections between multi-layered RNNs.
:param first_residual_layer: First layer with a residual connection (1-based indexes).
Default is to start at the second layer.
:param forget_bias: Initial value of forget biases.
"""
def __init__(self,
cell_type: str,
num_hidden: int,
num_layers: int,
dropout_inputs: float,
dropout_states: float,
dropout_recurrent: float = 0,
residual: bool = False,
first_residual_layer: int = 2,
forget_bias: float = 0.0) -> None:
super().__init__()
self.cell_type = cell_type
self.num_hidden = num_hidden
self.num_layers = num_layers
self.dropout_inputs = dropout_inputs
self.dropout_states = dropout_states
self.dropout_recurrent = dropout_recurrent
self.residual = residual
self.first_residual_layer = first_residual_layer
self.forget_bias = forget_bias
class SequentialRNNCellParallelInput(mx.rnn.SequentialRNNCell):
"""
    A SequentialRNNCell where an additional "parallel" input can be given at
    call time; it is concatenated to the input of each layer by the wrapped
    modifier cells.
"""
def __call__(self, inputs, parallel_inputs, states):
# Adapted copy of mx.rnn.SequentialRNNCell.__call__()
self._counter += 1
next_states = []
pos = 0
for cell in self._cells:
assert not isinstance(cell, mx.rnn.BidirectionalCell)
length = len(cell.state_info)
state = states[pos:pos + length]
pos += length
inputs, state = cell(inputs, parallel_inputs, state)
next_states.append(state)
return inputs, sum(next_states, [])
class ParallelInputCell(mx.rnn.ModifierCell):
"""
A modifier cell that accepts two input vectors and concatenates them before
calling the original cell. Typically it is used for concatenating the
normal and the parallel input in a stacked rnn.
"""
def __call__(self, inputs, parallel_inputs, states):
concat_inputs = mx.sym.concat(inputs, parallel_inputs)
output, states = self.base_cell(concat_inputs, states)
return output, states
class ResidualCellParallelInput(mx.rnn.ResidualCell):
"""
    A ResidualCell where an additional "parallel" input can be given at call
    time; it is concatenated to the input of the wrapped cell but not considered
    for the residual connection itself.
"""
def __call__(self, inputs, parallel_inputs, states):
concat_inputs = mx.sym.concat(inputs, parallel_inputs)
output, states = self.base_cell(concat_inputs, states)
output = mx.symbol.elemwise_add(output, inputs, name="%s_plus_residual" % output.name)
return output, states
def get_stacked_rnn(config: RNNConfig, prefix: str,
parallel_inputs: bool = False,
layers: Optional[Iterable[int]] = None) -> mx.rnn.SequentialRNNCell:
"""
Returns (stacked) RNN cell given parameters.
:param config: rnn configuration.
:param prefix: Symbol prefix for RNN.
:param parallel_inputs: Support parallel inputs for the stacked RNN cells.
:param layers: Specify which layers to create as a list of layer indexes.
:return: RNN cell.
"""
rnn = mx.rnn.SequentialRNNCell() if not parallel_inputs else SequentialRNNCellParallelInput()
if not layers:
layers = range(config.num_layers)
for layer_idx in layers:
# fhieber: the 'l' in the prefix does NOT stand for 'layer' but for the direction 'l' as in mx.rnn.rnn_cell::517
# this ensures parameter name compatibility of training w/ FusedRNN and decoding with 'unfused' RNN.
cell_prefix = "%sl%d_" % (prefix, layer_idx)
if config.cell_type == C.LSTM_TYPE:
if config.dropout_recurrent > 0.0:
cell = RecurrentDropoutLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix,
forget_bias=config.forget_bias, dropout=config.dropout_recurrent)
else:
cell = mx.rnn.LSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias)
elif config.cell_type == C.LNLSTM_TYPE:
cell = LayerNormLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix, forget_bias=config.forget_bias)
elif config.cell_type == C.LNGLSTM_TYPE:
cell = LayerNormPerGateLSTMCell(num_hidden=config.num_hidden, prefix=cell_prefix,
forget_bias=config.forget_bias)
elif config.cell_type == C.GRU_TYPE:
cell = mx.rnn.GRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
elif config.cell_type == C.LNGRU_TYPE:
cell = LayerNormGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
elif config.cell_type == C.LNGGRU_TYPE:
cell = LayerNormPerGateGRUCell(num_hidden=config.num_hidden, prefix=cell_prefix)
else:
raise NotImplementedError()
if config.dropout_inputs > 0 or config.dropout_states > 0:
cell = VariationalDropoutCell(cell,
dropout_inputs=config.dropout_inputs,
dropout_states=config.dropout_states)
# layer_idx is 0 based, whereas first_residual_layer is 1-based
if config.residual and layer_idx + 1 >= config.first_residual_layer:
cell = mx.rnn.ResidualCell(cell) if not parallel_inputs else ResidualCellParallelInput(cell)
elif parallel_inputs:
cell = ParallelInputCell(cell)
rnn.add(cell)
return rnn
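# Illustrative construction (not part of the original file):
#
#     config = RNNConfig(cell_type=C.LSTM_TYPE, num_hidden=512, num_layers=2,
#                        dropout_inputs=0.1, dropout_states=0.1, residual=True)
#     rnn = get_stacked_rnn(config, prefix="encoder_rnn_")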
class LayerNormLSTMCell(mx.rnn.LSTMCell):
"""
Long-Short Term Memory (LSTM) network cell with layer normalization across gates.
Based on Jimmy Lei Ba et al: Layer Normalization (https://arxiv.org/pdf/1607.06450.pdf)
:param num_hidden: number of RNN hidden units. Number of units in output symbol.
:param prefix: prefix for name of layers (and name of weight if params is None).
:param params: RNNParams or None. Container for weight sharing between cells. Created if None.
:param forget_bias: bias added to forget gate, default 1.0. Jozefowicz et al. 2015 recommends setting this to 1.0.
:param norm_scale: scale/gain for layer normalization.
:param norm_shift: shift/bias after layer normalization.
"""
def __init__(self,
num_hidden: int,
prefix: str = 'lnlstm_',
params: Optional[mx.rnn.RNNParams] = None,
forget_bias: float = 1.0,
norm_scale: float = 1.0,
norm_shift: float = 0.0) -> None:
super(LayerNormLSTMCell, self).__init__(num_hidden, prefix, params, forget_bias)
self._iN = LayerNormalization(num_hidden=num_hidden * 4,
prefix="%si2h" % self._prefix,
scale=self.params.get('i2h_scale', shape=(num_hidden * 4,),
init=mx.init.Constant(value=norm_scale)),
shift=self.params.get('i2h_shift', shape=(num_hidden * 4,),
init=mx.init.Constant(value=norm_shift)))
self._hN = LayerNormalization(num_hidden=num_hidden * 4,
prefix="%sh2h" % self._prefix,
scale=self.params.get('h2h_scale', shape=(num_hidden * 4,),
init=mx.init.Constant(value=norm_scale)),
shift=self.params.get('h2h_shift', shape=(num_hidden * 4,),
init=mx.init.Constant(value=norm_shift)))
self._cN = LayerNormalization(num_hidden=num_hidden,
prefix="%sc" % self._prefix,
scale=self.params.get('c_scale', shape=(num_hidden,),
init=mx.init.Constant(value=norm_scale)),
shift=self.params.get('c_shift', shape=(num_hidden,),
init=mx.init.Constant(value=norm_shift)))
self._shape_fix = None
def __call__(self, inputs, states):
self._counter += 1
name = '%st%d_' % (self._prefix, self._counter)
i2h = mx.sym.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
num_hidden=self._num_hidden * 4,
name='%si2h' % name)
if self._counter == 0:
self._shape_fix = mx.sym.zeros_like(i2h)
else:
assert self._shape_fix is not None
h2h = mx.sym.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
num_hidden=self._num_hidden * 4,
name='%sh2h' % name)
gates = self._iN.normalize(i2h) + self._hN.normalize(self._shape_fix + h2h)
# pylint: disable=unbalanced-tuple-unpacking
in_gate, forget_gate, in_transform, out_gate = mx.sym.split(gates,
num_outputs=4,
axis=1,
name="%sslice" % name)
in_gate = mx.sym.Activation(in_gate, act_type="sigmoid",
name='%si' % name)
forget_gate = mx.sym.Activation(forget_gate, act_type="sigmoid",
name='%sf' % name)
in_transform = mx.sym.Activation(in_transform, act_type="tanh",
name='%sc' % name)
out_gate = mx.sym.Activation(out_gate, act_type="sigmoid",
name='%so' % name)
next_c = mx.sym._internal._plus(forget_gate * states[1], in_gate * in_transform,
name='%sstate' % name)
next_h = mx.sym._internal._mul(out_gate,
mx.sym.Activation(self._cN.normalize(next_c),
act_type="tanh"),
name='%sout' % name)
return next_h, [next_h, next_c]
class LayerNormPerGateLSTMCell(mx.rnn.LSTMCell):
"""
Long-Short Term Memory (LSTM) network cell with layer normalization per gate.
Based on Jimmy Lei Ba et al: Layer Normalization (https://arxiv.org/pdf/1607.06450.pdf)
:param num_hidden: number of RNN hidden units. Number of units in output symbol.
:param prefix: prefix for name of layers (and name of weight if params is None).
:param params: RNNParams or None. Container for weight sharing between cells. Created if None.
:param forget_bias: bias added to forget gate, default 1.0. Jozefowicz et al. 2015 recommends setting this to 1.0.
:param norm_scale: scale/gain for layer normalization.
:param norm_shift: shift/bias after layer normalization.
"""
def __init__(self,
num_hidden: int,
prefix: str = 'lnglstm_',
params: Optional[mx.rnn.RNNParams] = None,
forget_bias: float = 1.0,
norm_scale: float = 1.0,
norm_shift: float = 0.0) -> None:
super(LayerNormPerGateLSTMCell, self).__init__(num_hidden, prefix, params, forget_bias)
self._norm_layers = list() # type: List[LayerNormalization]
for name in ['i', 'f', 'c', 'o', 's']:
            scale = self.params.get('%s_scale' % name, shape=(num_hidden,),
                                    init=mx.init.Constant(value=norm_scale))
            shift = self.params.get('%s_shift' % name, shape=(num_hidden,),
                                    init=mx.init.Constant(value=norm_shift if name != "f" else forget_bias))
self._norm_layers.append(
LayerNormalization(num_hidden, prefix="%s%s" % (self._prefix, name), scale=scale, shift=shift))
def __call__(self, inputs, states):
self._counter += 1
name = '%st%d_' % (self._prefix, self._counter)
i2h = mx.sym.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
num_hidden=self._num_hidden * 4,
name='%si2h' % name)
h2h = mx.sym.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
num_hidden=self._num_hidden * 4,
name='%sh2h' % name)
gates = i2h + h2h
# pylint: disable=unbalanced-tuple-unpacking
in_gate, forget_gate, in_transform, out_gate = mx.sym.split(
gates, num_outputs=4, name="%sslice" % name)
in_gate = self._norm_layers[0].normalize(in_gate)
forget_gate = self._norm_layers[1].normalize(forget_gate)
in_transform = self._norm_layers[2].normalize(in_transform)
out_gate = self._norm_layers[3].normalize(out_gate)
in_gate = mx.sym.Activation(in_gate, act_type="sigmoid",
name='%si' % name)
forget_gate = mx.sym.Activation(forget_gate, act_type="sigmoid",
name='%sf' % name)
in_transform = mx.sym.Activation(in_transform, act_type="tanh",
name='%sc' % name)
out_gate = mx.sym.Activation(out_gate, act_type="sigmoid",
name='%so' % name)
next_c = mx.sym._internal._plus(forget_gate * states[1], in_gate * in_transform,
name='%sstate' % name)
next_h = mx.sym._internal._mul(out_gate,
mx.sym.Activation(self._norm_layers[4].normalize(next_c), act_type="tanh"),
name='%sout' % name)
return next_h, [next_h, next_c]
class RecurrentDropoutLSTMCell(mx.rnn.LSTMCell):
"""
LSTMCell with recurrent dropout without memory loss as in:
http://aclanthology.coli.uni-saarland.de/pdf/C/C16/C16-1165.pdf
"""
def __init__(self, num_hidden, prefix='lstm_', params=None, forget_bias=1.0, dropout: float = 0.0) -> None:
super().__init__(num_hidden, prefix, params, forget_bias)
        utils.check_condition(dropout > 0.0, "RecurrentDropoutLSTMCell should have dropout > 0.0")
self.dropout = dropout
def __call__(self, inputs, states):
self._counter += 1
name = '%st%d_' % (self._prefix, self._counter)
i2h = mx.sym.FullyConnected(data=inputs, weight=self._iW, bias=self._iB,
num_hidden=self._num_hidden * 4,
name='%si2h' % name)
h2h = mx.sym.FullyConnected(data=states[0], weight=self._hW, bias=self._hB,
num_hidden=self._num_hidden * 4,
name='%sh2h' % name)
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
name="%sslice" % name)
in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid",
name='%si' % name)
forget_gate = mx.sym.Activation(slice_gates[1], act_type="sigmoid",
name='%sf' % name)
in_transform = mx.sym.Activation(slice_gates[2], act_type="tanh",
name='%sc' % name)
if self.dropout > 0.0:
in_transform = mx.sym.Dropout(in_transform, p=self.dropout, name='%sc_dropout' % name)
out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid",
name='%so' % name)
next_c = mx.sym._internal._plus(forget_gate * states[1], in_gate * in_transform,
name='%sstate' % name)
next_h = mx.sym._internal._mul(out_gate, mx.sym.Activation(next_c, act_type="tanh"),
name='%sout' % name)
return next_h, [next_h, next_c]
class LayerNormGRUCell(mx.rnn.GRUCell):
"""
Gated Recurrent Unit (GRU) network cell with layer normalization across gates.
Based on Jimmy Lei Ba et al: Layer Normalization (https://arxiv.org/pdf/1607.06450.pdf)
:param num_hidden: number of RNN hidden units. Number of units in output symbol.
:param prefix: prefix for name of layers (and name of weight if params is None).
:param params: RNNParams or None. Container for weight sharing between cells. Created if None.
:param norm_scale: scale/gain for layer normalization.
:param norm_shift: shift/bias after layer normalization.
"""
def __init__(self,
num_hidden: int,
prefix: str = 'lngru_',
params: Optional[mx.rnn.RNNParams] = None,
norm_scale: float = 1.0,
norm_shift: float = 0.0) -> None:
super(LayerNormGRUCell, self).__init__(num_hidden, prefix, params)
self._iN = LayerNormalization(num_hidden=num_hidden * 3,
prefix="%si2h" % self._prefix,
scale=self.params.get('i2h_scale', shape=(num_hidden * 3,),
init=mx.init.Constant(value=norm_scale)),
shift=self.params.get('i2h_shift', shape=(num_hidden * 3,),
init=mx.init.Constant(value=norm_shift)))
self._hN = LayerNormalization(num_hidden=num_hidden * 3,
prefix="%sh2h" % self._prefix,
scale=self.params.get('h2h_scale', shape=(num_hidden * 3,),
init=mx.init.Constant(value=norm_scale)),
shift=self.params.get('h2h_shift', shape=(num_hidden * 3,),
init=mx.init.Constant(value=norm_shift)))
self._shape_fix = None
def __call__(self, inputs, states):
self._counter += 1
seq_idx = self._counter
name = '%st%d_' % (self._prefix, seq_idx)
prev_state_h = states[0]
i2h = mx.sym.FullyConnected(data=inputs,
weight=self._iW,
bias=self._iB,
num_hidden=self._num_hidden * 3,
name="%s_i2h" % name)
h2h = mx.sym.FullyConnected(data=prev_state_h,
weight=self._hW,
bias=self._hB,
num_hidden=self._num_hidden * 3,
name="%s_h2h" % name)
if self._counter == 0:
self._shape_fix = mx.sym.zeros_like(i2h)
else:
assert self._shape_fix is not None
i2h = self._iN.normalize(i2h)
h2h = self._hN.normalize(self._shape_fix + h2h)
# pylint: disable=unbalanced-tuple-unpacking
i2h_r, i2h_z, i2h = mx.sym.split(i2h, num_outputs=3, name="%s_i2h_slice" % name)
h2h_r, h2h_z, h2h = mx.sym.split(h2h, num_outputs=3, name="%s_h2h_slice" % name)
reset_gate = mx.sym.Activation(i2h_r + h2h_r, act_type="sigmoid",
name="%s_r_act" % name)
update_gate = mx.sym.Activation(i2h_z + h2h_z, act_type="sigmoid",
name="%s_z_act" % name)
next_h_tmp = mx.sym.Activation(i2h + reset_gate * h2h, act_type="tanh",
name="%s_h_act" % name)
next_h = mx.sym._internal._plus((1. - update_gate) * next_h_tmp, update_gate * prev_state_h,
name='%sout' % name)
return next_h, [next_h]
class LayerNormPerGateGRUCell(mx.rnn.GRUCell):
"""
Gated Recurrent Unit (GRU) network cell with layer normalization per gate.
Based on Jimmy Lei Ba et al: Layer Normalization (https://arxiv.org/pdf/1607.06450.pdf)
:param num_hidden: number of RNN hidden units. Number of units in output symbol.
:param prefix: prefix for name of layers (and name of weight if params is None).
:param params: RNNParams or None. Container for weight sharing between cells. Created if None.
:param norm_scale: scale/gain for layer normalization.
:param norm_shift: shift/bias after layer normalization.
"""
def __init__(self,
num_hidden: int,
prefix: str = 'lnggru_',
params: Optional[mx.rnn.RNNParams] = None,
norm_scale: float = 1.0,
norm_shift: float = 0.0) -> None:
super(LayerNormPerGateGRUCell, self).__init__(num_hidden, prefix, params)
self._norm_layers = list() # type: List[LayerNormalization]
for name in ['r', 'z', 'o']:
scale = self.params.get('%s_scale' % name, shape=(num_hidden,), init=mx.init.Constant(value=norm_scale))
shift = self.params.get('%s_shift' % name, shape=(num_hidden,), init=mx.init.Constant(value=norm_shift))
self._norm_layers.append(
LayerNormalization(num_hidden, prefix="%s%s" % (self._prefix, name), scale=scale, shift=shift))
def __call__(self, inputs, states):
self._counter += 1
seq_idx = self._counter
name = '%st%d_' % (self._prefix, seq_idx)
prev_state_h = states[0]
i2h = mx.sym.FullyConnected(data=inputs,
weight=self._iW,
bias=self._iB,
num_hidden=self._num_hidden * 3,
name="%s_i2h" % name)
h2h = mx.sym.FullyConnected(data=prev_state_h,
weight=self._hW,
bias=self._hB,
num_hidden=self._num_hidden * 3,
name="%s_h2h" % name)
# pylint: disable=unbalanced-tuple-unpacking
i2h_r, i2h_z, i2h = mx.sym.split(i2h, num_outputs=3, name="%s_i2h_slice" % name)
h2h_r, h2h_z, h2h = mx.sym.split(h2h, num_outputs=3, name="%s_h2h_slice" % name)
reset_gate = mx.sym.Activation(self._norm_layers[0].normalize(i2h_r + h2h_r),
act_type="sigmoid", name="%s_r_act" % name)
update_gate = mx.sym.Activation(self._norm_layers[1].normalize(i2h_z + h2h_z),
act_type="sigmoid", name="%s_z_act" % name)
next_h_tmp = mx.sym.Activation(self._norm_layers[2].normalize(i2h + reset_gate * h2h),
act_type="tanh", name="%s_h_act" % name)
next_h = mx.sym._internal._plus((1. - update_gate) * next_h_tmp, update_gate * prev_state_h,
name='%sout' % name)
return next_h, [next_h]
class VariationalDropoutCell(mx.rnn.ModifierCell):
"""
Apply Bayesian Dropout on input and states separately. The dropout mask does not change when applied sequentially.
:param base_cell: Base cell to be modified.
:param dropout_inputs: Dropout probability for inputs.
:param dropout_states: Dropout probability for state inputs.
"""
def __init__(self,
base_cell: mx.rnn.BaseRNNCell,
dropout_inputs: float,
dropout_states: float) -> None:
super().__init__(base_cell)
self.dropout_inputs = dropout_inputs
self.dropout_states = dropout_states
self.mask_inputs = None
self.mask_states = None
def __call__(self, inputs, states):
if self.dropout_inputs > 0:
if self.mask_inputs is None:
self.mask_inputs = mx.sym.Dropout(data=mx.sym.ones_like(inputs), p=self.dropout_inputs)
inputs = inputs * self.mask_inputs
if self.dropout_states > 0:
if self.mask_states is None:
self.mask_states = mx.sym.Dropout(data=mx.sym.ones_like(states[0]), p=self.dropout_states)
states[0] = states[0] * self.mask_states
output, states = self.base_cell(inputs, states)
return output, states
def reset(self):
super(VariationalDropoutCell, self).reset()
self.mask_inputs = None
self.mask_states = None
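# Usage sketch (illustrative): wrap a base cell so that one dropout mask per
# sequence is sampled and reused across all time steps; call reset() between
# sequences so fresh masks are drawn. Sizes and names are assumptions:
#
#   base_cell = mx.rnn.LSTMCell(num_hidden=512)
#   cell = VariationalDropoutCell(base_cell, dropout_inputs=0.1, dropout_states=0.1)
#   cell.reset()
#   outputs, states = cell.unroll(length=10, inputs=mx.sym.Variable('data'))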
| [
"str",
"int",
"int",
"float",
"float",
"RNNConfig",
"str",
"int",
"int",
"int",
"int",
"mx.rnn.BaseRNNCell",
"float",
"float"
] | [
1644,
1678,
1712,
1750,
1790,
4250,
4269,
7941,
12807,
18553,
22445,
25326,
25379,
25419
] | [
1647,
1681,
1715,
1755,
1795,
4259,
4272,
7944,
12810,
18556,
22448,
25344,
25384,
25424
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/rnn_attention.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implementations of different attention mechanisms in sequence-to-sequence models.
"""
import logging
from typing import Callable, NamedTuple, Optional, Tuple
import mxnet as mx
from . import config
from . import constants as C
from . import coverage
from . import layers
from . import utils
logger = logging.getLogger(__name__)
class AttentionConfig(config.Config):
"""
Attention configuration.
:param type: Attention name.
:param num_hidden: Number of hidden units for attention networks.
:param input_previous_word: Feeds the previous target embedding into the attention mechanism.
:param source_num_hidden: Number of hidden units of the source.
:param query_num_hidden: Number of hidden units of the query.
:param layer_normalization: Apply layer normalization to MLP attention.
:param config_coverage: Optional coverage configuration.
:param num_heads: Number of attention heads. Only used for Multi-head dot attention.
"""
def __init__(self,
type: str,
num_hidden: int,
input_previous_word: bool,
source_num_hidden: int,
query_num_hidden: int,
layer_normalization: bool,
config_coverage: Optional[coverage.CoverageConfig] = None,
num_heads: Optional[int] = None) -> None:
super().__init__()
self.type = type
self.num_hidden = num_hidden
self.input_previous_word = input_previous_word
self.source_num_hidden = source_num_hidden
self.query_num_hidden = query_num_hidden
self.layer_normalization = layer_normalization
self.config_coverage = config_coverage
self.num_heads = num_heads
def get_attention(config: AttentionConfig, max_seq_len: int) -> 'Attention':
"""
Returns an Attention instance based on attention_type.
:param config: Attention configuration.
:param max_seq_len: Maximum length of source sequences.
:return: Instance of Attention.
"""
if config.type == C.ATT_BILINEAR:
if config.input_previous_word:
logger.warning("bilinear attention does not support input_previous_word")
return BilinearAttention(config.query_num_hidden)
elif config.type == C.ATT_DOT:
return DotAttention(config.input_previous_word, config.source_num_hidden, config.query_num_hidden,
config.num_hidden)
elif config.type == C.ATT_MH_DOT:
utils.check_condition(config.num_heads is not None, "%s requires setting num-heads." % C.ATT_MH_DOT)
return MultiHeadDotAttention(config.input_previous_word,
num_hidden=config.num_hidden,
heads=config.num_heads)
elif config.type == C.ATT_DOT_SCALED:
return DotAttention(config.input_previous_word, config.source_num_hidden, config.query_num_hidden,
config.num_hidden, scale=config.num_hidden ** -0.5)
elif config.type == C.ATT_FIXED:
return EncoderLastStateAttention(config.input_previous_word)
elif config.type == C.ATT_LOC:
return LocationAttention(config.input_previous_word, max_seq_len)
elif config.type == C.ATT_MLP:
return MlpAttention(input_previous_word=config.input_previous_word,
attention_num_hidden=config.num_hidden,
layer_normalization=config.layer_normalization)
elif config.type == C.ATT_COV:
return MlpAttention(input_previous_word=config.input_previous_word,
attention_num_hidden=config.num_hidden,
layer_normalization=config.layer_normalization,
config_coverage=config.config_coverage)
else:
raise ValueError("Unknown attention type %s" % config.type)
AttentionInput = NamedTuple('AttentionInput', [('seq_idx', int), ('query', mx.sym.Symbol)])
"""
Input to attention callables.
:param seq_idx: Decoder time step / sequence index.
:param query: Query input to attention mechanism, e.g. decoder hidden state (plus previous word).
"""
AttentionState = NamedTuple('AttentionState', [
('context', mx.sym.Symbol),
('probs', mx.sym.Symbol),
('dynamic_source', mx.sym.Symbol),
])
"""
Results returned from attention callables.
:param context: Context vector (Bahdanau et al, 15). Shape: (batch_size, encoder_num_hidden)
:param probs: Attention distribution over source encoder states. Shape: (batch_size, source_seq_len).
:param dynamic_source: Dynamically updated source encoding.
Shape: (batch_size, source_seq_len, dynamic_source_num_hidden)
"""
class Attention(object):
"""
Generic attention interface that returns a callable for attending to source states.
:param input_previous_word: Feed the previous target embedding into the attention mechanism.
:param dynamic_source_num_hidden: Number of hidden units of dynamic source encoding update mechanism.
"""
def __init__(self,
input_previous_word: bool,
dynamic_source_num_hidden: int = 1,
prefix: str = C.ATTENTION_PREFIX) -> None:
self.dynamic_source_num_hidden = dynamic_source_num_hidden
self._input_previous_word = input_previous_word
self.prefix = prefix
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
raise NotImplementedError()
return attend
def get_initial_state(self, source_length: mx.sym.Symbol, source_seq_len: int) -> AttentionState:
"""
Returns initial attention state. Dynamic source encoding is initialized with zeros.
:param source_length: Source length. Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
"""
dynamic_source = mx.sym.expand_dims(mx.sym.expand_dims(mx.sym.zeros_like(source_length), axis=1), axis=2)
# dynamic_source: (batch_size, source_seq_len, num_hidden_dynamic_source)
dynamic_source = mx.sym.broadcast_to(dynamic_source, shape=(0, source_seq_len, self.dynamic_source_num_hidden))
return AttentionState(context=None, probs=None, dynamic_source=dynamic_source)
def make_input(self,
seq_idx: int,
word_vec_prev: mx.sym.Symbol,
decoder_state: mx.sym.Symbol) -> AttentionInput:
"""
Returns AttentionInput to be fed into the attend callable returned by the on() method.
:param seq_idx: Decoder time step.
:param word_vec_prev: Embedding of previously predicted word.
:param decoder_state: Current decoder state
:return: Attention input.
"""
query = decoder_state
if self._input_previous_word:
# (batch_size, num_target_embed + rnn_num_hidden)
query = mx.sym.concat(word_vec_prev, decoder_state, dim=1,
name='%sconcat_prev_word_%d' % (self.prefix, seq_idx))
return AttentionInput(seq_idx=seq_idx, query=query)
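# Recurrence sketch (illustrative): the calling pattern shared by all
# Attention subclasses inside a decoder time-step loop; source, word_vec_prev
# and decoder_state are assumed to be existing symbols:
#
#   attend = attention.on(source, source_length, source_seq_len)
#   att_state = attention.get_initial_state(source_length, source_seq_len)
#   for t in range(target_seq_len):
#       att_input = attention.make_input(t, word_vec_prev, decoder_state)
#       att_state = attend(att_input, att_state)
#       # att_state.context: (batch_size, encoder_num_hidden)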
class BilinearAttention(Attention):
"""
Bilinear attention based on Luong et al. 2015.
:math:`score(h_t, h_s) = h_t^T \\mathbf{W} h_s`
For implementation reasons we modify to:
:math:`score(h_t, h_s) = h_s^T \\mathbf{W} h_t`
:param num_hidden: Number of hidden units the source will be projected to.
"""
def __init__(self, num_hidden: int) -> None:
super().__init__(False)
self.num_hidden = num_hidden
self.s2t_weight = mx.sym.Variable("%ss2t_weight" % self.prefix)
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
# (batch_size, seq_len, self.num_hidden)
source_hidden = mx.sym.FullyConnected(data=source,
weight=self.s2t_weight,
num_hidden=self.num_hidden,
no_bias=True,
flatten=False,
name="%ssource_hidden_fc" % self.prefix)
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
# (batch_size, decoder_num_hidden, 1)
query = mx.sym.expand_dims(att_input.query, axis=2)
# in: (batch_size, source_seq_len, self.num_hidden) X (batch_size, self.num_hidden, 1)
# out: (batch_size, source_seq_len, 1).
attention_scores = mx.sym.batch_dot(lhs=source_hidden, rhs=query, name="%sbatch_dot" % self.prefix)
context, attention_probs = get_context_and_attention_probs(source, source_length, attention_scores)
return AttentionState(context=context,
probs=attention_probs,
dynamic_source=att_state.dynamic_source)
return attend
class DotAttention(Attention):
"""
Attention mechanism with dot product between encoder and decoder hidden states [Luong et al. 2015].
:math:`score(h_t, h_s) = \\langle h_t, h_s \\rangle`
:math:`a = softmax(score(*, h_s))`
If source_num_hidden or query_num_hidden differ from num_hidden, states are projected with additional parameters to num_hidden.
:math:`score(h_t, h_s) = \\langle \\mathbf{W}_t h_t, \\mathbf{W}_s h_s \\rangle`
:param input_previous_word: Feed the previous target embedding into the attention mechanism.
:param source_num_hidden: Number of hidden units in source.
:param query_num_hidden: Number of hidden units in query.
:param num_hidden: Number of hidden units.
:param scale: Optionally scale query before dot product [Vaswani et al, 2017].
"""
def __init__(self,
input_previous_word: bool,
source_num_hidden: int,
query_num_hidden: int,
num_hidden: int,
scale: Optional[float] = None) -> None:
super().__init__(input_previous_word)
self.project_source = source_num_hidden != num_hidden
self.project_query = query_num_hidden != num_hidden
self.num_hidden = num_hidden
self.scale = scale
self.s2h_weight = mx.sym.Variable("%ss2h_weight" % self.prefix) if self.project_source else None
self.t2h_weight = mx.sym.Variable("%st2h_weight" % self.prefix) if self.project_query else None
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
if self.project_source:
# (batch_size, seq_len, self.num_hidden)
source_hidden = mx.sym.FullyConnected(data=source,
weight=self.s2h_weight,
num_hidden=self.num_hidden,
no_bias=True,
flatten=False,
name="%ssource_hidden_fc" % self.prefix)
else:
source_hidden = source
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
query = att_input.query
if self.project_query:
# query: (batch_size, self.num_hidden)
query = mx.sym.FullyConnected(data=query,
weight=self.t2h_weight,
num_hidden=self.num_hidden,
no_bias=True, name="%squery_hidden_fc" % self.prefix)
# scale down dot product by sqrt(num_hidden) [Vaswani et al, 17]
if self.scale is not None:
query = query * self.scale
# (batch_size, decoder_num_hidden, 1)
expanded_decoder_state = mx.sym.expand_dims(query, axis=2)
# batch_dot: (batch, M, K) X (batch, K, N) -> (batch, M, N).
# (batch_size, seq_len, 1)
attention_scores = mx.sym.batch_dot(lhs=source_hidden, rhs=expanded_decoder_state,
name="%sbatch_dot" % self.prefix)
context, attention_probs = get_context_and_attention_probs(source, source_length, attention_scores)
return AttentionState(context=context,
probs=attention_probs,
dynamic_source=att_state.dynamic_source)
return attend
class MultiHeadDotAttention(Attention):
"""
Dot product attention with multiple heads as proposed in Vaswani et al, Attention is all you need.
Can be used with a RecurrentDecoder.
:param input_previous_word: Feed the previous target embedding into the attention mechanism.
:param num_hidden: Number of hidden units.
:param heads: Number of attention heads / independently computed attention scores.
"""
def __init__(self,
input_previous_word: bool,
num_hidden: int,
heads: int) -> None:
super().__init__(input_previous_word)
utils.check_condition(num_hidden % heads == 0,
"Number of heads (%d) must divide attention depth (%d)" % (heads, num_hidden))
self.num_hidden = num_hidden
self.heads = heads
self.num_hidden_per_head = self.num_hidden // self.heads
self.s2h_weight = mx.sym.Variable("%ss2h_weight" % self.prefix)
self.s2h_bias = mx.sym.Variable("%ss2h_bias" % self.prefix)
self.t2h_weight = mx.sym.Variable("%st2h_weight" % self.prefix)
self.t2h_bias = mx.sym.Variable("%st2h_bias" % self.prefix)
self.h2o_weight = mx.sym.Variable("%sh2o_weight" % self.prefix)
self.h2o_bias = mx.sym.Variable("%sh2o_bias" % self.prefix)
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
# (batch, length, num_hidden * 2)
source_hidden = mx.sym.FullyConnected(data=source,
weight=self.s2h_weight,
bias=self.s2h_bias,
num_hidden=self.num_hidden * 2,
flatten=False,
name="%ssource_hidden_fc" % self.prefix)
# split keys and values
# (batch, length, num_hidden)
# pylint: disable=unbalanced-tuple-unpacking
keys, values = mx.sym.split(data=source_hidden, num_outputs=2, axis=2)
# (batch*heads, length, num_hidden/head)
keys = layers.split_heads(keys, self.num_hidden_per_head, self.heads)
values = layers.split_heads(values, self.num_hidden_per_head, self.heads)
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
# (batch, num_hidden)
query = mx.sym.FullyConnected(data=att_input.query,
weight=self.t2h_weight, bias=self.t2h_bias,
num_hidden=self.num_hidden, name="%squery_hidden_fc" % self.prefix)
# (batch, length, heads, num_hidden/head)
query = mx.sym.reshape(query, shape=(0, 1, self.heads, self.num_hidden_per_head))
# (batch, heads, num_hidden/head, length)
query = mx.sym.transpose(query, axes=(0, 2, 3, 1))
# (batch * heads, num_hidden/head, 1)
query = mx.sym.reshape(query, shape=(-3, self.num_hidden_per_head, 1))
# scale dot product
query = query * (self.num_hidden_per_head ** -0.5)
# (batch*heads, length, num_hidden/head) X (batch*heads, num_hidden/head, 1)
# -> (batch*heads, length, 1)
attention_scores = mx.sym.batch_dot(lhs=keys, rhs=query, name="%sdot" % self.prefix)
# (batch*heads, 1)
lengths = layers.broadcast_to_heads(source_length, self.heads, ndim=1, fold_heads=True)
# context: (batch*heads, num_hidden/head)
# attention_probs: (batch*heads, length)
context, attention_probs = get_context_and_attention_probs(values, lengths, attention_scores)
# combine heads
# (batch*heads, 1, num_hidden/head)
context = mx.sym.expand_dims(context, axis=1)
# (batch, 1, num_hidden)
context = layers.combine_heads(context, self.num_hidden_per_head, heads=self.heads)
# (batch, num_hidden)
context = mx.sym.reshape(context, shape=(-3, -1))
# (batch, heads, length)
attention_probs = mx.sym.reshape(data=attention_probs, shape=(-4, -1, self.heads, source_seq_len))
# just average over distributions
attention_probs = mx.sym.mean(attention_probs, axis=1, keepdims=False)
return AttentionState(context=context,
probs=attention_probs,
dynamic_source=att_state.dynamic_source)
return attend
class EncoderLastStateAttention(Attention):
"""
Always returns the last encoder state independent of the query vector.
Equivalent to no attention.
"""
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
source = mx.sym.swapaxes(source, dim1=0, dim2=1)
encoder_last_state = mx.sym.SequenceLast(data=source, sequence_length=source_length,
use_sequence_length=True)
fixed_probs = mx.sym.one_hot(source_length - 1, depth=source_seq_len)
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
return AttentionState(context=encoder_last_state,
probs=fixed_probs,
dynamic_source=att_state.dynamic_source)
return attend
class LocationAttention(Attention):
"""
Attends to locations in the source [Luong et al, 2015]
:math:`a_t = softmax(\\mathbf{W}_a h_t)` for decoder hidden state at time t.
:note: :math:`\\mathbf{W}_a` is of shape (max_source_seq_len, decoder_num_hidden).
:param input_previous_word: Feed the previous target embedding into the attention mechanism.
:param max_source_seq_len: Maximum length of source sequences.
"""
def __init__(self,
input_previous_word: bool,
max_source_seq_len: int) -> None:
super().__init__(input_previous_word)
self.max_source_seq_len = max_source_seq_len
self.location_weight = mx.sym.Variable("%sloc_weight" % self.prefix)
self.location_bias = mx.sym.Variable("%sloc_bias" % self.prefix)
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
# attention_scores: (batch_size, max_source_seq_len)
attention_scores = mx.sym.FullyConnected(data=att_input.query,
num_hidden=self.max_source_seq_len,
weight=self.location_weight,
bias=self.location_bias)
# attention_scores: (batch_size, seq_len)
attention_scores = mx.sym.slice_axis(data=attention_scores,
axis=1,
begin=0,
end=source_seq_len)
# attention_scores: (batch_size, seq_len, 1)
attention_scores = mx.sym.expand_dims(data=attention_scores, axis=2)
context, attention_probs = get_context_and_attention_probs(source, source_length, attention_scores)
return AttentionState(context=context,
probs=attention_probs,
dynamic_source=att_state.dynamic_source)
return attend
class MlpAttention(Attention):
"""
Attention computed through a one-layer MLP with num_hidden units [Luong et al, 2015].
:math:`score(h_t, h_s) = \\mathbf{W}_a tanh(\\mathbf{W}_c [h_t, h_s] + b)`
:math:`a = softmax(score(*, h_s))`
Optionally, if attention_coverage_type is not None, attention uses dynamic source encoding ('coverage' mechanism)
as in Tu et al. (2016): Modeling Coverage for Neural Machine Translation.
:math:`score(h_t, h_s) = \\mathbf{W}_a tanh(\\mathbf{W}_c [h_t, h_s, c_s] + b)`
:math:`c_s` is the decoder time-step dependent source encoding which is updated using the current
decoder state.
:param input_previous_word: Feed the previous target embedding into the attention mechanism.
:param attention_num_hidden: Number of hidden units.
:param layer_normalization: If true, normalizes hidden layer outputs before tanh activation.
:param config_coverage: Optional coverage config.
"""
def __init__(self,
input_previous_word: bool,
attention_num_hidden: int,
layer_normalization: bool = False,
config_coverage: Optional[coverage.CoverageConfig] = None) -> None:
dynamic_source_num_hidden = 1 if config_coverage is None else config_coverage.num_hidden
super().__init__(input_previous_word=input_previous_word,
dynamic_source_num_hidden=dynamic_source_num_hidden)
self.attention_num_hidden = attention_num_hidden
# input (encoder) to hidden
self.att_e2h_weight = mx.sym.Variable("%se2h_weight" % self.prefix)
# input (query) to hidden
self.att_q2h_weight = mx.sym.Variable("%sq2h_weight" % self.prefix)
# hidden to score
self.att_h2s_weight = mx.sym.Variable("%sh2s_weight" % self.prefix)
# coverage
self.coverage = coverage.get_coverage(config_coverage) if config_coverage is not None else None
# dynamic source (coverage) weights and settings
# input (coverage) to hidden
self.att_c2h_weight = mx.sym.Variable("%sc2h_weight" % self.prefix) if config_coverage is not None else None
# layer normalization
self._ln = layers.LayerNormalization(num_hidden=attention_num_hidden,
prefix="%snorm" % self.prefix) if layer_normalization else None
def on(self, source: mx.sym.Symbol, source_length: mx.sym.Symbol, source_seq_len: int) -> Callable:
"""
Returns callable to be used for recurrent attention in a sequence decoder.
The callable is a recurrent function of the form:
AttentionState = attend(AttentionInput, AttentionState).
:param source: Shape: (batch_size, seq_len, encoder_num_hidden).
:param source_length: Shape: (batch_size,).
:param source_seq_len: Maximum length of source sequences.
:return: Attention callable.
"""
coverage_func = self.coverage.on(source, source_length, source_seq_len) if self.coverage else None
# (batch_size, seq_len, attention_num_hidden)
source_hidden = mx.sym.FullyConnected(data=source,
weight=self.att_e2h_weight,
num_hidden=self.attention_num_hidden,
no_bias=True,
flatten=False,
name="%ssource_hidden_fc" % self.prefix)
def attend(att_input: AttentionInput, att_state: AttentionState) -> AttentionState:
"""
Returns updated attention state given attention input and current attention state.
:param att_input: Attention input as returned by make_input().
:param att_state: Current attention state
:return: Updated attention state.
"""
# (batch_size, attention_num_hidden)
query_hidden = mx.sym.FullyConnected(data=att_input.query,
weight=self.att_q2h_weight,
num_hidden=self.attention_num_hidden,
no_bias=True,
name="%squery_hidden" % self.prefix)
# (batch_size, 1, attention_num_hidden)
query_hidden = mx.sym.expand_dims(data=query_hidden,
axis=1,
name="%squery_hidden_expanded" % self.prefix)
attention_hidden_lhs = source_hidden
if self.coverage:
# (batch_size, seq_len, attention_num_hidden)
dynamic_hidden = mx.sym.FullyConnected(data=att_state.dynamic_source,
weight=self.att_c2h_weight,
num_hidden=self.attention_num_hidden,
no_bias=True,
flatten=False,
name="%sdynamic_source_hidden_fc" % self.prefix)
# (batch_size, seq_len, attention_num_hidden)
attention_hidden_lhs = dynamic_hidden + source_hidden
# (batch_size, seq_len, attention_num_hidden)
attention_hidden = mx.sym.broadcast_add(lhs=attention_hidden_lhs, rhs=query_hidden,
name="%squery_plus_input" % self.prefix)
if self._ln is not None:
attention_hidden = self._ln.normalize(attention_hidden)
# (batch_size, seq_len, attention_num_hidden)
attention_hidden = mx.sym.Activation(attention_hidden, act_type="tanh",
name="%shidden" % self.prefix)
# (batch_size, seq_len, 1)
attention_scores = mx.sym.FullyConnected(data=attention_hidden,
weight=self.att_h2s_weight,
num_hidden=1,
no_bias=True,
flatten=False,
name="%sraw_att_score_fc" % self.prefix)
context, attention_probs = get_context_and_attention_probs(source, source_length, attention_scores)
dynamic_source = att_state.dynamic_source
if self.coverage:
# update dynamic source encoding
# Note: this is a slight change to the Tu et al, 2016 paper: input to the coverage update
# is the attention input query, not the previous decoder state.
dynamic_source = coverage_func(prev_hidden=att_input.query,
attention_prob_scores=attention_probs,
prev_coverage=att_state.dynamic_source)
return AttentionState(context=context,
probs=attention_probs,
dynamic_source=dynamic_source)
return attend
def mask_attention_scores(logits: mx.sym.Symbol,
length: mx.sym.Symbol) -> mx.sym.Symbol:
"""
Masks attention scores according to sequence length.
:param logits: Shape: (batch_size, seq_len, 1).
:param length: Shape: (batch_size,).
:return: Masked logits: (batch_size, seq_len, 1).
"""
# TODO: Masking with 0-1 mask, to avoid the multiplication
logits = mx.sym.swapaxes(data=logits, dim1=0, dim2=1)
logits = mx.sym.SequenceMask(data=logits,
use_sequence_length=True,
sequence_length=length,
value=C.LARGE_NEGATIVE_VALUE)
# (batch_size, seq_len, 1)
return mx.sym.swapaxes(data=logits, dim1=0, dim2=1)
def get_context_and_attention_probs(values: mx.sym.Symbol,
length: mx.sym.Symbol,
logits: mx.sym.Symbol) -> Tuple[mx.sym.Symbol, mx.sym.Symbol]:
"""
Returns context vector and attention probabilities
via a weighted sum over values.
:param values: Shape: (batch_size, seq_len, encoder_num_hidden).
:param length: Shape: (batch_size,).
:param logits: Shape: (batch_size, seq_len, 1).
:return: context: (batch_size, encoder_num_hidden), attention_probs: (batch_size, seq_len).
"""
# (batch_size, seq_len, 1)
logits = mask_attention_scores(logits, length)
# (batch_size, seq_len, 1)
probs = mx.sym.softmax(logits, axis=1, name='attention_softmax')
# batch_dot: (batch, M, K) X (batch, K, N) -> (batch, M, N).
# (batch_size, seq_len, num_hidden) X (batch_size, seq_len, 1) -> (batch_size, num_hidden, 1)
context = mx.sym.batch_dot(lhs=values, rhs=probs, transpose_a=True)
# (batch_size, encoder_num_hidden, 1)-> (batch_size, encoder_num_hidden)
context = mx.sym.reshape(data=context, shape=(0, 0))
probs = mx.sym.reshape(data=probs, shape=(0, 0))
return context, probs
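# Shape walk-through (illustrative): masking fills positions beyond each
# sequence length with C.LARGE_NEGATIVE_VALUE so softmax assigns them ~0
# probability, and the transposed batch_dot sums values weighted over seq_len:
#
#   values = mx.sym.Variable('values')  # (batch_size, seq_len, num_hidden)
#   length = mx.sym.Variable('length')  # (batch_size,)
#   logits = mx.sym.Variable('logits')  # (batch_size, seq_len, 1)
#   context, probs = get_context_and_attention_probs(values, length, logits)
#   # context: (batch_size, num_hidden), probs: (batch_size, seq_len)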
| [
"str",
"int",
"bool",
"int",
"int",
"bool",
"AttentionConfig",
"int",
"bool",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"mx.sym.Symbol",
"int",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"bool",
"int",
"int",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"bool",
"int",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"bool",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"bool",
"int",
"mx.sym.Symbol",
"mx.sym.Symbol",
"int",
"AttentionInput",
"AttentionState",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol"
] | [
1594,
1628,
1671,
1713,
1753,
1796,
2346,
2376,
5673,
5970,
6000,
6031,
6540,
6567,
7016,
7047,
7779,
7818,
7867,
8941,
9122,
9152,
9183,
10153,
10180,
12072,
12114,
12154,
12188,
12718,
12748,
12779,
13858,
13885,
16040,
16075,
16104,
16903,
16933,
16964,
18348,
18375,
21122,
21152,
21183,
21995,
22022,
22780,
22823,
23112,
23142,
23173,
23682,
23709,
26225,
26270,
27611,
27641,
27672,
28769,
28796,
32600,
32649,
33382,
33441,
33500
] | [
1597,
1631,
1675,
1716,
1756,
1800,
2361,
2379,
5677,
5983,
6013,
6034,
6554,
6581,
7029,
7050,
7782,
7831,
7880,
8944,
9135,
9165,
9186,
10167,
10194,
12076,
12117,
12157,
12191,
12731,
12761,
12782,
13872,
13899,
16044,
16078,
16107,
16916,
16946,
16967,
18362,
18389,
21135,
21165,
21186,
22009,
22036,
22784,
22826,
23125,
23155,
23176,
23696,
23723,
26229,
26273,
27624,
27654,
27675,
28783,
28810,
32613,
32662,
33395,
33454,
33513
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/train.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Simple Training CLI.
"""
import argparse
import json
import os
import pickle
import shutil
import sys
from contextlib import ExitStack
from typing import Any, cast, Optional, Dict, List, Tuple
import mxnet as mx
from sockeye.config import Config
from sockeye.log import setup_main_logger
from sockeye.utils import check_condition
from . import arguments
from . import constants as C
from . import convolution
from . import coverage
from . import data_io
from . import decoder
from . import encoder
from . import initializer
from . import loss
from . import lr_scheduler
from . import model
from . import rnn
from . import rnn_attention
from . import training
from . import transformer
from . import utils
from . import vocab
# Temporary logger, the real one (logging to a file probably, will be created in the main function)
logger = setup_main_logger(__name__, file_logging=False, console=True)
def none_if_negative(val):
return None if val < 0 else val
def _list_to_tuple(v):
"""Convert v to a tuple if it is a list."""
if isinstance(v, list):
return tuple(v)
return v
def _dict_difference(dict1: Dict, dict2: Dict):
diffs = set()
for k, v in dict1.items():
# Note: A list and a tuple with the same values is considered equal
# (this is due to json deserializing former tuples as list).
if k not in dict2 or _list_to_tuple(dict2[k]) != _list_to_tuple(v):
diffs.add(k)
return diffs
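# Illustrative behaviour (hypothetical values): a list and a tuple with the
# same content compare equal, so only 'b' is reported as differing:
#
#   _dict_difference({'a': [1, 2], 'b': 3}, {'a': (1, 2), 'b': 4}) == {'b'}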
def check_arg_compatibility(args: argparse.Namespace):
"""
Check if some arguments are incompatible with each other.
:param args: Arguments as returned by argparse.
"""
check_condition(args.optimized_metric == C.BLEU or args.optimized_metric in args.metrics,
"Must optimize either BLEU or one of tracked metrics (--metrics)")
if args.encoder == C.TRANSFORMER_TYPE:
check_condition(args.transformer_model_size == args.num_embed[0],
"Source embedding size must match transformer model size: %s vs. %s"
% (args.transformer_model_size, args.num_embed[0]))
if args.decoder == C.TRANSFORMER_TYPE:
check_condition(args.transformer_model_size == args.num_embed[1],
"Target embedding size must match transformer model size: %s vs. %s"
% (args.transformer_model_size, args.num_embed[1]))
def check_resume(args: argparse.Namespace, output_folder: str) -> Tuple[bool, str]:
"""
Check if we should resume a broken training run.
:param args: Arguments as returned by argparse.
:param output_folder: Main output folder for the model.
:return: Flag signaling if we are resuming training and the directory with
the training status.
"""
resume_training = False
training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
if os.path.exists(output_folder):
if args.overwrite_output:
logger.info("Removing existing output folder %s.", output_folder)
shutil.rmtree(output_folder)
os.makedirs(output_folder)
elif os.path.exists(training_state_dir):
with open(os.path.join(output_folder, C.ARGS_STATE_NAME), "r") as fp:
old_args = json.load(fp)
arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
# Remove args that may differ without affecting the training.
arg_diffs -= set(C.ARGS_MAY_DIFFER)
# allow different device-ids provided their total count is the same
if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
arg_diffs.discard('device_ids')
if not arg_diffs:
resume_training = True
else:
# We do not have the logger yet
logger.error("Mismatch in arguments for training continuation.")
logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
sys.exit(1)
elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
sys.exit(1)
else:
logger.info("The output folder %s already exists, but no training state or parameter file was found. "
"Will start training from scratch.", output_folder)
else:
os.makedirs(output_folder)
return resume_training, training_state_dir
def determine_context(args: argparse.Namespace, exit_stack: ExitStack) -> List[mx.Context]:
"""
Determine the context we should run on (CPU or GPU).
:param args: Arguments as returned by argparse.
:param exit_stack: An ExitStack from contextlib.
:return: A list with the context(s) to run on.
"""
if args.use_cpu:
logger.info("Training Device: CPU")
context = [mx.cpu()]
else:
num_gpus = utils.get_num_gpus()
check_condition(num_gpus >= 1,
"No GPUs found, consider running on the CPU with --use-cpu "
"(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
"binary isn't on the path).")
if args.disable_device_locking:
context = utils.expand_requested_device_ids(args.device_ids)
else:
context = exit_stack.enter_context(utils.acquire_gpus(args.device_ids, lock_dir=args.lock_dir))
if args.batch_type == C.BATCH_TYPE_SENTENCE:
check_condition(args.batch_size % len(context) == 0, "When using multiple devices the batch size must be "
"divisible by the number of devices. Choose a batch "
"size that is a multiple of %d." % len(context))
logger.info("Training Device(s): GPU %s", context)
context = [mx.gpu(gpu_id) for gpu_id in context]
return context
def determine_decode_and_evaluate_context(args: argparse.Namespace,
exit_stack: ExitStack,
train_context: List[mx.Context]) -> Tuple[int, Optional[mx.Context]]:
"""
Determine the number of sentences to decode and the context we should run on (CPU or GPU).
:param args: Arguments as returned by argparse.
:param exit_stack: An ExitStack from contextlib.
:param train_context: Context for training.
:return: The number of sentences to decode and the context to run on (or None if decoding is disabled).
"""
num_to_decode = args.decode_and_evaluate
if args.optimized_metric == C.BLEU and num_to_decode == 0:
logger.info("You chose BLEU as the optimized metric, will turn on BLEU monitoring during training. "
"To control how many validation sentences are used for calculating bleu use "
"the --decode-and-evaluate argument.")
num_to_decode = -1
if num_to_decode == 0:
return 0, None
if args.use_cpu or args.decode_and_evaluate_use_cpu:
context = mx.cpu()
elif args.decode_and_evaluate_device_id is not None:
# decode device is defined from the commandline
num_gpus = utils.get_num_gpus()
check_condition(num_gpus >= 1,
"No GPUs found, consider running on the CPU with --use-cpu "
"(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
"binary isn't on the path).")
if args.disable_device_locking:
context = utils.expand_requested_device_ids([args.decode_and_evaluate_device_id])
else:
context = exit_stack.enter_context(utils.acquire_gpus([args.decode_and_evaluate_device_id],
lock_dir=args.lock_dir))
context = mx.gpu(context[0])
else:
# default decode context is the last training device
context = train_context[-1]
logger.info("Decode and Evaluate Device(s): %s", context)
return num_to_decode, context
def load_or_create_vocabs(args: argparse.Namespace, resume_training: bool, output_folder: str) -> Tuple[Dict, Dict]:
"""
Load the vocabularies from disks if given, create them if not.
:param args: Arguments as returned by argparse.
:param resume_training: When True, the vocabulary will be loaded from an existing output folder.
:param output_folder: Main output folder for the training.
:return: The source and target vocabularies.
"""
if resume_training:
vocab_source = vocab.vocab_from_json_or_pickle(os.path.join(output_folder, C.VOCAB_SRC_NAME))
vocab_target = vocab.vocab_from_json_or_pickle(os.path.join(output_folder, C.VOCAB_TRG_NAME))
else:
num_words_source, num_words_target = args.num_words
word_min_count_source, word_min_count_target = args.word_min_count
# if the source and target embeddings are tied we build a joint vocabulary:
if args.weight_tying and C.WEIGHT_TYING_SRC in args.weight_tying_type \
and C.WEIGHT_TYING_TRG in args.weight_tying_type:
vocab_source = vocab_target = _build_or_load_vocab(args.source_vocab,
[args.source, args.target],
num_words_source,
word_min_count_source)
else:
vocab_source = _build_or_load_vocab(args.source_vocab, [args.source],
num_words_source, word_min_count_source)
vocab_target = _build_or_load_vocab(args.target_vocab, [args.target],
num_words_target, word_min_count_target)
return vocab_source, vocab_target
def _build_or_load_vocab(existing_vocab_path: Optional[str], data_paths: List[str],
num_words: int, word_min_count: int) -> vocab.Vocab:
if existing_vocab_path is None:
vocabulary = vocab.build_from_paths(paths=data_paths,
num_words=num_words,
min_count=word_min_count)
else:
vocabulary = vocab.vocab_from_json(existing_vocab_path)
return vocabulary
def use_shared_vocab(args: argparse.Namespace) -> bool:
""" Determine whether the source and target vocabulary should be shared. """
weight_tying = args.weight_tying
weight_tying_type = args.weight_tying_type
shared_vocab = args.shared_vocab
if weight_tying and C.WEIGHT_TYING_SRC in weight_tying_type and C.WEIGHT_TYING_TRG in weight_tying_type:
if not shared_vocab:
logger.info("A shared source/target vocabulary will be used as weight tying source/target weight tying "
"is enabled")
shared_vocab = True
return shared_vocab
def create_data_iters_and_vocab(args: argparse.Namespace,
shared_vocab: bool,
resume_training: bool,
output_folder: str) -> Tuple['data_io.BaseParallelSampleIter',
'data_io.BaseParallelSampleIter',
'data_io.DataConfig', Dict, Dict]:
"""
Create the data iterators and the vocabularies.
:param args: Arguments as returned by argparse.
:param shared_vocab: Whether to create a shared vocabulary.
:param resume_training: Whether to resume training.
:param output_folder: Output folder.
:return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
"""
max_seq_len_source, max_seq_len_target = args.max_seq_len
num_words_source, num_words_target = args.num_words
word_min_count_source, word_min_count_target = args.word_min_count
batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
either_raw_or_prepared_error_msg = "Either specify a raw training corpus with %s and %s or a preprocessed corpus " \
"with %s." % (C.TRAINING_ARG_SOURCE,
C.TRAINING_ARG_TARGET,
C.TRAINING_ARG_PREPARED_DATA)
if args.prepared_data is not None:
utils.check_condition(args.source is None and args.target is None, either_raw_or_prepared_error_msg)
if not resume_training:
utils.check_condition(args.source_vocab is None and args.target_vocab is None,
"You are using a prepared data folder, which is tied to a vocabulary. "
"To change it you need to rerun data preparation with a different vocabulary.")
train_iter, validation_iter, data_config, vocab_source, vocab_target = data_io.get_prepared_data_iters(
prepared_data_dir=args.prepared_data,
validation_source=os.path.abspath(args.validation_source),
validation_target=os.path.abspath(args.validation_target),
shared_vocab=shared_vocab,
batch_size=args.batch_size,
batch_by_words=batch_by_words,
batch_num_devices=batch_num_devices,
fill_up=args.fill_up)
if resume_training:
# resuming training. Making sure the vocabs in the model and in the prepared data match up
model_vocab_source = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_SRC_NAME + C.JSON_SUFFIX))
model_vocab_target = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_TRG_NAME + C.JSON_SUFFIX))
utils.check_condition(vocab.are_identical(vocab_source, model_vocab_source),
"Prepared data and resumed model source vocabs do not match.")
utils.check_condition(vocab.are_identical(vocab_target, model_vocab_target),
"Prepared data and resumed model target vocabs do not match.")
return train_iter, validation_iter, data_config, vocab_source, vocab_target
else:
utils.check_condition(args.prepared_data is None and args.source is not None and args.target is not None,
either_raw_or_prepared_error_msg)
if resume_training:
# Load the existing vocab created when starting the training run.
vocab_source = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_SRC_NAME + C.JSON_SUFFIX))
vocab_target = vocab.vocab_from_json(os.path.join(output_folder, C.VOCAB_TRG_NAME + C.JSON_SUFFIX))
# Recover the vocabulary path from the existing config file:
orig_config = cast(model.ModelConfig, Config.load(os.path.join(output_folder, C.CONFIG_NAME)))
vocab_source_path = orig_config.config_data.vocab_source
vocab_target_path = orig_config.config_data.vocab_target
else:
# Load vocab:
vocab_source_path = args.source_vocab
vocab_target_path = args.target_vocab
vocab_source, vocab_target = vocab.load_or_create_vocabs(source=args.source, target=args.target,
source_vocab_path=vocab_source_path,
target_vocab_path=vocab_target_path,
shared_vocab=shared_vocab,
num_words_source=num_words_source,
num_words_target=num_words_target,
word_min_count_source=word_min_count_source,
word_min_count_target=word_min_count_target)
train_iter, validation_iter, config_data = data_io.get_training_data_iters(
source=os.path.abspath(args.source),
target=os.path.abspath(args.target),
validation_source=os.path.abspath(args.validation_source),
validation_target=os.path.abspath(args.validation_target),
vocab_source=vocab_source,
vocab_target=vocab_target,
vocab_source_path=vocab_source_path,
vocab_target_path=vocab_target_path,
shared_vocab=shared_vocab,
batch_size=args.batch_size,
batch_by_words=batch_by_words,
batch_num_devices=batch_num_devices,
fill_up=args.fill_up,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
bucketing=not args.no_bucketing,
bucket_width=args.bucket_width)
return train_iter, validation_iter, config_data, vocab_source, vocab_target
def create_lr_scheduler(args: argparse.Namespace, resume_training: bool,
training_state_dir: str) -> lr_scheduler.LearningRateScheduler:
"""
Create the learning rate scheduler.
:param args: Arguments as returned by argparse.
:param resume_training: When True, the scheduler will be loaded from disk.
:param training_state_dir: Directory where the training state is stored.
:return: The learning rate scheduler.
"""
learning_rate_half_life = none_if_negative(args.learning_rate_half_life)
# TODO: The loading for continuation of the scheduler is done separately from the other parts
if not resume_training:
lr_scheduler_instance = lr_scheduler.get_lr_scheduler(args.learning_rate_scheduler_type,
args.checkpoint_frequency,
learning_rate_half_life,
args.learning_rate_reduce_factor,
args.learning_rate_reduce_num_not_improved,
args.learning_rate_schedule,
args.learning_rate_warmup)
else:
with open(os.path.join(training_state_dir, C.SCHEDULER_STATE_NAME), "rb") as fp:
lr_scheduler_instance = pickle.load(fp)
return lr_scheduler_instance
def create_encoder_config(args: argparse.Namespace,
config_conv: Optional[encoder.ConvolutionalEmbeddingConfig]) -> Tuple[Config, int]:
"""
Create the encoder config.
:param args: Arguments as returned by argparse.
:param config_conv: The config for the convolutional encoder (optional).
:return: The encoder config and the number of hidden units of the encoder.
"""
encoder_num_layers, _ = args.num_layers
max_seq_len_source, max_seq_len_target = args.max_seq_len
num_embed_source, _ = args.num_embed
config_encoder = None # type: Optional[Config]
if args.encoder in (C.TRANSFORMER_TYPE, C.TRANSFORMER_WITH_CONV_EMBED_TYPE):
encoder_transformer_preprocess, _ = args.transformer_preprocess
encoder_transformer_postprocess, _ = args.transformer_postprocess
config_encoder = transformer.TransformerConfig(
model_size=args.transformer_model_size,
attention_heads=args.transformer_attention_heads,
feed_forward_num_hidden=args.transformer_feed_forward_num_hidden,
act_type=args.transformer_activation_type,
num_layers=encoder_num_layers,
dropout_attention=args.transformer_dropout_attention,
dropout_act=args.transformer_dropout_act,
dropout_prepost=args.transformer_dropout_prepost,
positional_embedding_type=args.transformer_positional_embedding_type,
preprocess_sequence=encoder_transformer_preprocess,
postprocess_sequence=encoder_transformer_postprocess,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
conv_config=config_conv)
encoder_num_hidden = args.transformer_model_size
elif args.encoder == C.CONVOLUTION_TYPE:
cnn_kernel_width_encoder, _ = args.cnn_kernel_width
cnn_config = convolution.ConvolutionConfig(kernel_width=cnn_kernel_width_encoder,
num_hidden=args.cnn_num_hidden,
act_type=args.cnn_activation_type,
weight_normalization=args.weight_normalization)
config_encoder = encoder.ConvolutionalEncoderConfig(num_embed=num_embed_source,
max_seq_len_source=max_seq_len_source,
cnn_config=cnn_config,
num_layers=encoder_num_layers,
positional_embedding_type=args.cnn_positional_embedding_type)
encoder_num_hidden = args.cnn_num_hidden
else:
encoder_rnn_dropout_inputs, _ = args.rnn_dropout_inputs
encoder_rnn_dropout_states, _ = args.rnn_dropout_states
encoder_rnn_dropout_recurrent, _ = args.rnn_dropout_recurrent
config_encoder = encoder.RecurrentEncoderConfig(
rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
num_hidden=args.rnn_num_hidden,
num_layers=encoder_num_layers,
dropout_inputs=encoder_rnn_dropout_inputs,
dropout_states=encoder_rnn_dropout_states,
dropout_recurrent=encoder_rnn_dropout_recurrent,
residual=args.rnn_residual_connections,
first_residual_layer=args.rnn_first_residual_layer,
forget_bias=args.rnn_forget_bias),
conv_config=config_conv,
reverse_input=args.rnn_encoder_reverse_input)
encoder_num_hidden = args.rnn_num_hidden
return config_encoder, encoder_num_hidden
def create_decoder_config(args: argparse.Namespace, encoder_num_hidden: int) -> Config:
"""
Create the config for the decoder.
:param args: Arguments as returned by argparse.
:param encoder_num_hidden: Number of hidden units of the encoder.
:return: The config for the decoder.
"""
_, decoder_num_layers = args.num_layers
max_seq_len_source, max_seq_len_target = args.max_seq_len
_, num_embed_target = args.num_embed
config_decoder = None # type: Optional[Config]
if args.decoder == C.TRANSFORMER_TYPE:
_, decoder_transformer_preprocess = args.transformer_preprocess
_, decoder_transformer_postprocess = args.transformer_postprocess
config_decoder = transformer.TransformerConfig(
model_size=args.transformer_model_size,
attention_heads=args.transformer_attention_heads,
feed_forward_num_hidden=args.transformer_feed_forward_num_hidden,
act_type=args.transformer_activation_type,
num_layers=decoder_num_layers,
dropout_attention=args.transformer_dropout_attention,
dropout_act=args.transformer_dropout_act,
dropout_prepost=args.transformer_dropout_prepost,
positional_embedding_type=args.transformer_positional_embedding_type,
preprocess_sequence=decoder_transformer_preprocess,
postprocess_sequence=decoder_transformer_postprocess,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
conv_config=None)
elif args.decoder == C.CONVOLUTION_TYPE:
_, cnn_kernel_width_decoder = args.cnn_kernel_width
convolution_config = convolution.ConvolutionConfig(kernel_width=cnn_kernel_width_decoder,
num_hidden=args.cnn_num_hidden,
act_type=args.cnn_activation_type,
weight_normalization=args.weight_normalization)
config_decoder = decoder.ConvolutionalDecoderConfig(cnn_config=convolution_config,
max_seq_len_target=max_seq_len_target,
num_embed=num_embed_target,
encoder_num_hidden=encoder_num_hidden,
num_layers=decoder_num_layers,
positional_embedding_type=args.cnn_positional_embedding_type,
project_qkv=args.cnn_project_qkv,
hidden_dropout=args.cnn_hidden_dropout)
else:
rnn_attention_num_hidden = args.rnn_num_hidden if args.rnn_attention_num_hidden is None else args.rnn_attention_num_hidden
config_coverage = None
if args.rnn_attention_type == C.ATT_COV:
config_coverage = coverage.CoverageConfig(type=args.rnn_attention_coverage_type,
num_hidden=args.rnn_attention_coverage_num_hidden,
layer_normalization=args.layer_normalization)
config_attention = rnn_attention.AttentionConfig(type=args.rnn_attention_type,
num_hidden=rnn_attention_num_hidden,
input_previous_word=args.rnn_attention_use_prev_word,
source_num_hidden=encoder_num_hidden,
query_num_hidden=args.rnn_num_hidden,
layer_normalization=args.layer_normalization,
config_coverage=config_coverage,
num_heads=args.rnn_attention_mhdot_heads)
_, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
_, decoder_rnn_dropout_states = args.rnn_dropout_states
_, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
config_decoder = decoder.RecurrentDecoderConfig(
max_seq_len_source=max_seq_len_source,
rnn_config=rnn.RNNConfig(cell_type=args.rnn_cell_type,
num_hidden=args.rnn_num_hidden,
num_layers=decoder_num_layers,
dropout_inputs=decoder_rnn_dropout_inputs,
dropout_states=decoder_rnn_dropout_states,
dropout_recurrent=decoder_rnn_dropout_recurrent,
residual=args.rnn_residual_connections,
first_residual_layer=args.rnn_first_residual_layer,
forget_bias=args.rnn_forget_bias),
attention_config=config_attention,
hidden_dropout=args.rnn_decoder_hidden_dropout,
state_init=args.rnn_decoder_state_init,
context_gating=args.rnn_context_gating,
layer_normalization=args.layer_normalization,
attention_in_upper_layers=args.rnn_attention_in_upper_layers)
return config_decoder
def check_encoder_decoder_args(args) -> None:
"""
Check possible encoder-decoder argument conflicts.
:param args: Arguments as returned by argparse.
"""
encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
"two dropout layers on top of each other.")
if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
"two dropout layers on top of each other.")
encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
check_condition(args.rnn_cell_type == C.LSTM_TYPE,
"Recurrent dropout without memory loss only supported for LSTMs right now.")
def create_model_config(args: argparse.Namespace,
vocab_source_size: int, vocab_target_size: int,
config_data: data_io.DataConfig) -> model.ModelConfig:
"""
Create a ModelConfig from the argument given in the command line.
:param args: Arguments as returned by argparse.
:param vocab_source_size: The size of the source vocabulary.
:param vocab_target_size: The size of the target vocabulary.
:param config_data: Data config.
:return: The model configuration.
"""
max_seq_len_source, max_seq_len_target = args.max_seq_len
num_embed_source, num_embed_target = args.num_embed
embed_dropout_source, embed_dropout_target = args.embed_dropout
check_encoder_decoder_args(args)
config_conv = None
if args.encoder == C.RNN_WITH_CONV_EMBED_NAME:
config_conv = encoder.ConvolutionalEmbeddingConfig(num_embed=num_embed_source,
max_filter_width=args.conv_embed_max_filter_width,
num_filters=args.conv_embed_num_filters,
pool_stride=args.conv_embed_pool_stride,
num_highway_layers=args.conv_embed_num_highway_layers,
dropout=args.conv_embed_dropout)
config_encoder, encoder_num_hidden = create_encoder_config(args, config_conv)
config_decoder = create_decoder_config(args, encoder_num_hidden)
config_embed_source = encoder.EmbeddingConfig(vocab_size=vocab_source_size,
num_embed=num_embed_source,
dropout=embed_dropout_source)
config_embed_target = encoder.EmbeddingConfig(vocab_size=vocab_target_size,
num_embed=num_embed_target,
dropout=embed_dropout_target)
config_loss = loss.LossConfig(name=args.loss,
vocab_size=vocab_target_size,
normalization_type=args.loss_normalization_type,
label_smoothing=args.label_smoothing)
model_config = model.ModelConfig(config_data=config_data,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
vocab_source_size=vocab_source_size,
vocab_target_size=vocab_target_size,
config_embed_source=config_embed_source,
config_embed_target=config_embed_target,
config_encoder=config_encoder,
config_decoder=config_decoder,
config_loss=config_loss,
weight_tying=args.weight_tying,
weight_tying_type=args.weight_tying_type if args.weight_tying else None,
weight_normalization=args.weight_normalization)
return model_config
def create_training_model(model_config: model.ModelConfig,
args: argparse.Namespace,
context: List[mx.Context],
train_iter: data_io.BaseParallelSampleIter,
lr_scheduler_instance: lr_scheduler.LearningRateScheduler,
resume_training: bool,
training_state_dir: str) -> training.TrainingModel:
"""
Create a training model and load the parameters from disk if needed.
:param model_config: The configuration for the model.
:param args: Arguments as returned by argparse.
:param context: The context(s) to run on.
:param train_iter: The training data iterator.
:param lr_scheduler_instance: The learning rate scheduler.
:param resume_training: When True, the model will be loaded from disk.
:param training_state_dir: Directory where the training state is stored.
:return: The training model.
"""
training_model = training.TrainingModel(config=model_config,
context=context,
train_iter=train_iter,
bucketing=not args.no_bucketing,
lr_scheduler=lr_scheduler_instance,
gradient_compression_params=gradient_compression_params(args))
# We may consider loading the params in TrainingModule, for consistency
# with the training state saving
if resume_training:
logger.info("Found partial training in directory %s. Resuming from saved state.", training_state_dir)
training_model.load_params_from_file(os.path.join(training_state_dir, C.TRAINING_STATE_PARAMS_NAME))
elif args.params:
logger.info("Training will initialize from parameters loaded from '%s'", args.params)
training_model.load_params_from_file(args.params)
return training_model
def gradient_compression_params(args: argparse.Namespace) -> Optional[Dict[str, Any]]:
"""
:param args: Arguments as returned by argparse.
:return: Gradient compression parameters or None.
"""
if args.gradient_compression_type is None:
return None
else:
return {'type': args.gradient_compression_type, 'threshold': args.gradient_compression_threshold}
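# Illustrative sketch (not part of the original source; values hypothetical): with
# MXNet's 2-bit compression the returned dict looks like
#
#   args.gradient_compression_type = '2bit'
#   args.gradient_compression_threshold = 0.5
#   gradient_compression_params(args)  # -> {'type': '2bit', 'threshold': 0.5}
#
# and is passed straight through as compression_params to the Module in TrainingModel.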
def define_optimizer(args, lr_scheduler_instance) -> Tuple[str, Dict, str, str, float]:
"""
Defines the optimizer to use and its parameters.
:param args: Arguments as returned by argparse.
:param lr_scheduler_instance: The learning rate scheduler.
    :return: The optimizer type and its parameters, the kvstore, and the gradient clipping type and threshold.
"""
optimizer = args.optimizer
optimizer_params = {'wd': args.weight_decay,
"learning_rate": args.initial_learning_rate}
if lr_scheduler_instance is not None:
optimizer_params["lr_scheduler"] = lr_scheduler_instance
gradient_clipping_threshold = none_if_negative(args.gradient_clipping_threshold)
if gradient_clipping_threshold is None:
logger.info("Gradient clipping threshold set to negative value. Will not perform gradient clipping.")
gradient_clipping_type = C.GRADIENT_CLIPPING_TYPE_NONE
else:
gradient_clipping_type = args.gradient_clipping_type
# Note: for 'abs' we use the implementation inside of MXNet's optimizer and 'norm_*' we implement ourselves
# inside the TrainingModel.
if gradient_clipping_threshold is not None and gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_ABS:
optimizer_params["clip_gradient"] = gradient_clipping_threshold
if args.momentum is not None:
optimizer_params["momentum"] = args.momentum
if args.loss_normalization_type == C.LOSS_NORM_VALID:
# When we normalize by the number of non-PAD symbols in a batch we need to disable rescale_grad.
optimizer_params["rescale_grad"] = 1.0
elif args.loss_normalization_type == C.LOSS_NORM_BATCH:
# Making MXNet module API's default scaling factor explicit
optimizer_params["rescale_grad"] = 1.0 / args.batch_size
# Manually specified params
if args.optimizer_params:
optimizer_params.update(args.optimizer_params)
logger.info("Optimizer: %s", optimizer)
logger.info("Optimizer Parameters: %s", optimizer_params)
logger.info("kvstore: %s", args.kvstore)
logger.info("Gradient Compression: %s", gradient_compression_params(args))
return optimizer, optimizer_params, args.kvstore, gradient_clipping_type, gradient_clipping_threshold
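# Worked example of the loss-normalization interaction above (hypothetical values,
# illustration only): with --loss-normalization-type batch and --batch-size 64,
# rescale_grad = 1/64, so the summed batch gradient is averaged over 64 samples;
# with 'valid' normalization the loss is already divided by the number of non-PAD
# tokens, hence rescale_grad stays at 1.0.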
def main():
params = argparse.ArgumentParser(description='CLI to train sockeye sequence-to-sequence models.')
arguments.add_train_cli_args(params)
args = params.parse_args()
utils.seedRNGs(args.seed)
check_arg_compatibility(args)
output_folder = os.path.abspath(args.output)
resume_training, training_state_dir = check_resume(args, output_folder)
global logger
logger = setup_main_logger(__name__,
file_logging=True,
console=not args.quiet, path=os.path.join(output_folder, C.LOG_NAME))
utils.log_basic_info(args)
with open(os.path.join(output_folder, C.ARGS_STATE_NAME), "w") as fp:
json.dump(vars(args), fp)
with ExitStack() as exit_stack:
context = determine_context(args, exit_stack)
shared_vocab = use_shared_vocab(args)
train_iter, eval_iter, config_data, vocab_source, vocab_target = create_data_iters_and_vocab(
args=args,
shared_vocab=shared_vocab,
resume_training=resume_training,
output_folder=output_folder)
if not resume_training:
vocab.vocab_to_json(vocab_source, os.path.join(output_folder, C.VOCAB_SRC_NAME) + C.JSON_SUFFIX)
vocab.vocab_to_json(vocab_target, os.path.join(output_folder, C.VOCAB_TRG_NAME) + C.JSON_SUFFIX)
vocab_source_size = len(vocab_source)
vocab_target_size = len(vocab_target)
logger.info("Vocabulary sizes: source=%d target=%d", vocab_source_size, vocab_target_size)
lr_scheduler_instance = create_lr_scheduler(args, resume_training, training_state_dir)
model_config = create_model_config(args, vocab_source_size, vocab_target_size, config_data)
model_config.freeze()
training_model = create_training_model(model_config, args,
context, train_iter, lr_scheduler_instance,
resume_training, training_state_dir)
weight_initializer = initializer.get_initializer(
default_init_type=args.weight_init,
default_init_scale=args.weight_init_scale,
default_init_xavier_rand_type=args.weight_init_xavier_rand_type,
default_init_xavier_factor_type=args.weight_init_xavier_factor_type,
embed_init_type=args.embed_weight_init,
embed_init_sigma=vocab_source_size ** -0.5, # TODO
rnn_init_type=args.rnn_h2h_init)
optimizer, optimizer_params, kvstore, gradient_clipping_type, gradient_clipping_threshold = define_optimizer(args, lr_scheduler_instance)
# Handle options that override training settings
max_updates = args.max_updates
max_num_checkpoint_not_improved = args.max_num_checkpoint_not_improved
min_num_epochs = args.min_num_epochs
max_num_epochs = args.max_num_epochs
if min_num_epochs is not None and max_num_epochs is not None:
check_condition(min_num_epochs <= max_num_epochs,
"Minimum number of epochs must be smaller than maximum number of epochs")
# Fixed training schedule always runs for a set number of updates
if args.learning_rate_schedule:
max_updates = sum(num_updates for (_, num_updates) in args.learning_rate_schedule)
max_num_checkpoint_not_improved = -1
min_num_epochs = None
max_num_epochs = None
decode_and_evaluate, decode_and_evaluate_context = determine_decode_and_evaluate_context(args,
exit_stack,
context)
training_model.fit(train_iter, eval_iter,
output_folder=output_folder,
max_params_files_to_keep=args.keep_last_params,
metrics=args.metrics,
initializer=weight_initializer,
allow_missing_params=args.allow_missing_params,
max_updates=max_updates,
checkpoint_frequency=args.checkpoint_frequency,
optimizer=optimizer, optimizer_params=optimizer_params,
optimized_metric=args.optimized_metric,
gradient_clipping_type=gradient_clipping_type,
clip_gradient_threshold=gradient_clipping_threshold,
kvstore=kvstore,
max_num_not_improved=max_num_checkpoint_not_improved,
min_num_epochs=min_num_epochs,
max_num_epochs=max_num_epochs,
decode_and_evaluate=decode_and_evaluate,
decode_and_evaluate_fname_source=args.validation_source,
decode_and_evaluate_fname_target=args.validation_target,
decode_and_evaluate_context=decode_and_evaluate_context,
use_tensorboard=args.use_tensorboard,
mxmonitor_pattern=args.monitor_pattern,
mxmonitor_stat_func=args.monitor_stat_func,
lr_decay_param_reset=args.learning_rate_decay_param_reset,
lr_decay_opt_states_reset=args.learning_rate_decay_optimizer_states_reset)
if __name__ == "__main__":
main()
| [
"Dict",
"Dict",
"argparse.Namespace",
"argparse.Namespace",
"str",
"argparse.Namespace",
"ExitStack",
"argparse.Namespace",
"ExitStack",
"List[mx.Context]",
"argparse.Namespace",
"bool",
"str",
"Optional[str]",
"List[str]",
"int",
"int",
"argparse.Namespace",
"argparse.Namespace",
"bool",
"bool",
"str",
"argparse.Namespace",
"bool",
"str",
"argparse.Namespace",
"Optional[encoder.ConvolutionalEmbeddingConfig]",
"argparse.Namespace",
"int",
"argparse.Namespace",
"int",
"int",
"data_io.DataConfig",
"model.ModelConfig",
"argparse.Namespace",
"List[mx.Context]",
"data_io.BaseParallelSampleIter",
"lr_scheduler.LearningRateScheduler",
"bool",
"str",
"argparse.Namespace"
] | [
1702,
1715,
2070,
3001,
3036,
5196,
5228,
6744,
6818,
6886,
8890,
8927,
8948,
10710,
10737,
10784,
10805,
11184,
11800,
11866,
11921,
11974,
17979,
18016,
18066,
19493,
19552,
23416,
23457,
30049,
30112,
30136,
30178,
33412,
33463,
33518,
33574,
33655,
33734,
33786,
35418
] | [
1706,
1719,
2088,
3019,
3039,
5214,
5237,
6762,
6827,
6902,
8908,
8931,
8951,
10723,
10746,
10787,
10808,
11202,
11818,
11870,
11925,
11977,
17997,
18020,
18069,
19511,
19598,
23434,
23460,
30067,
30115,
30139,
30196,
33429,
33481,
33534,
33604,
33689,
33738,
33789,
35436
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/training.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Code for training
"""
import glob
import logging
import os
import pickle
import random
import shutil
import time
from functools import reduce
from typing import Any, AnyStr, Dict, List, Optional
from math import sqrt
import mxnet as mx
import numpy as np
from . import callback
from . import checkpoint_decoder
from . import constants as C
from . import data_io
from . import loss
from . import lr_scheduler
from .optimizers import BatchState, CheckpointState, SockeyeOptimizer
from . import model
from . import utils
logger = logging.getLogger(__name__)
def global_norm(ndarrays: List[mx.nd.NDArray]) -> float:
# accumulate in a list, as asscalar is blocking and this way we can run the norm calculation in parallel.
norms = [mx.nd.square(mx.nd.norm(arr)) for arr in ndarrays if arr is not None]
return sqrt(sum(norm.asscalar() for norm in norms))
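# Minimal numeric sketch of global_norm (illustration, not original source):
#   a = mx.nd.array([3.0, 4.0])    # ||a|| = 5
#   b = mx.nd.array([0.0, 12.0])   # ||b|| = 12
#   global_norm([a, b])            # sqrt(5**2 + 12**2) = 13.0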
class _TrainingState:
"""
Stores the state of the training process. These are the variables that will
be stored to disk when resuming training.
"""
def __init__(self,
num_not_improved: int,
epoch: int,
checkpoint: int,
updates: int,
samples: int) -> None:
self.num_not_improved = num_not_improved
self.epoch = epoch
self.checkpoint = checkpoint
self.updates = updates
self.samples = samples
class TrainingModel(model.SockeyeModel):
"""
Defines an Encoder/Decoder model (with attention).
RNN configuration (number of hidden units, number of layers, cell type)
is shared between encoder & decoder.
:param config: Configuration object holding details about the model.
:param context: The context(s) that MXNet will be run in (GPU(s)/CPU)
:param train_iter: The iterator over the training data.
:param bucketing: If True bucketing will be used, if False the computation graph will always be
unrolled to the full length.
:param lr_scheduler: The scheduler that lowers the learning rate during training.
:param gradient_compression_params: Gradient compression parameters.
"""
def __init__(self,
config: model.ModelConfig,
context: List[mx.context.Context],
train_iter: data_io.BaseParallelSampleIter,
bucketing: bool,
lr_scheduler,
gradient_compression_params: Optional[Dict[str, Any]] = None) -> None:
super().__init__(config)
self.context = context
self.lr_scheduler = lr_scheduler
self.bucketing = bucketing
self.gradient_compression_params = gradient_compression_params
self._build_model_components()
self.module = self._build_module(train_iter)
self.training_monitor = None # type: Optional[callback.TrainingMonitor]
def _build_module(self, train_iter: data_io.BaseParallelSampleIter):
"""
Initializes model components, creates training symbol and module, and binds it.
"""
#utils.check_condition(train_iter.pad_id == C.PAD_ID == 0, "pad id should be 0")
source = mx.sym.Variable(C.SOURCE_NAME)
source_length = utils.compute_lengths(source)
target = mx.sym.Variable(C.TARGET_NAME)
target_length = utils.compute_lengths(target)
labels = mx.sym.reshape(data=mx.sym.Variable(C.TARGET_LABEL_NAME), shape=(-1,))
model_loss = loss.get_loss(self.config.config_loss)
data_names = [x[0] for x in train_iter.provide_data]
label_names = [x[0] for x in train_iter.provide_label]
def sym_gen(seq_lens):
"""
Returns a (grouped) loss symbol given source & target input lengths.
Also returns data and label names for the BucketingModule.
"""
source_seq_len, target_seq_len = seq_lens
# source embedding
(source_embed,
source_embed_length,
source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len)
# target embedding
(target_embed,
target_embed_length,
target_embed_seq_len) = self.embedding_target.encode(target, target_length, target_seq_len)
# encoder
# source_encoded: (source_encoded_length, batch_size, encoder_depth)
(source_encoded,
source_encoded_length,
source_encoded_seq_len) = self.encoder.encode(source_embed,
source_embed_length,
source_embed_seq_len)
# decoder
# target_decoded: (batch-size, target_len, decoder_depth)
target_decoded = self.decoder.decode_sequence(source_encoded, source_encoded_length, source_encoded_seq_len,
target_embed, target_embed_length, target_embed_seq_len)
# target_decoded: (batch_size * target_seq_len, rnn_num_hidden)
target_decoded = mx.sym.reshape(data=target_decoded, shape=(-3, 0))
# output layer
# logits: (batch_size * target_seq_len, target_vocab_size)
logits = self.output_layer(target_decoded)
probs = model_loss.get_loss(logits, labels)
return mx.sym.Group(probs), data_names, label_names
if self.bucketing:
logger.info("Using bucketing. Default max_seq_len=%s", train_iter.default_bucket_key)
return mx.mod.BucketingModule(sym_gen=sym_gen,
logger=logger,
default_bucket_key=train_iter.default_bucket_key,
context=self.context,
compression_params=self.gradient_compression_params)
else:
logger.info("No bucketing. Unrolled to (%d,%d)",
self.config.max_seq_len_source, self.config.max_seq_len_target)
symbol, _, __ = sym_gen(train_iter.buckets[0])
return mx.mod.Module(symbol=symbol,
data_names=data_names,
label_names=label_names,
logger=logger,
context=self.context,
compression_params=self.gradient_compression_params)
@staticmethod
def create_eval_metric(metric_name: AnyStr) -> mx.metric.EvalMetric:
"""
        Creates an EvalMetric given a metric name.
"""
# output_names refers to the list of outputs this metric should use to update itself, e.g. the softmax output
if metric_name == C.ACCURACY:
return utils.Accuracy(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME])
elif metric_name == C.PERPLEXITY:
return mx.metric.Perplexity(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME])
else:
raise ValueError("unknown metric name")
@staticmethod
def create_eval_metric_composite(metric_names: List[AnyStr]) -> mx.metric.CompositeEvalMetric:
"""
Creates a composite EvalMetric given a list of metric names.
"""
metrics = [TrainingModel.create_eval_metric(metric_name) for metric_name in metric_names]
return mx.metric.create(metrics)
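    # Usage sketch (illustration only; metric names assumed to match C.PERPLEXITY
    # and C.ACCURACY):
    #   TrainingModel.create_eval_metric_composite(['perplexity', 'accuracy'])
    # returns a CompositeEvalMetric that updates both metrics from the softmax output.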
def fit(self,
train_iter: data_io.BaseParallelSampleIter,
val_iter: data_io.BaseParallelSampleIter,
output_folder: str,
max_params_files_to_keep: int,
metrics: List[AnyStr],
initializer: mx.initializer.Initializer,
allow_missing_params: bool,
max_updates: Optional[int],
checkpoint_frequency: int,
optimizer: str,
optimizer_params: dict,
optimized_metric: str = "perplexity",
gradient_clipping_type: str = "abs",
clip_gradient_threshold: float = 1.0,
kvstore: str = C.KVSTORE_DEVICE,
max_num_not_improved: int = 3,
min_num_epochs: Optional[int] = None,
max_num_epochs: Optional[int] = None,
decode_and_evaluate: int = 0,
decode_and_evaluate_fname_source: Optional[str] = None,
decode_and_evaluate_fname_target: Optional[str] = None,
decode_and_evaluate_context: Optional[mx.Context] = None,
use_tensorboard: bool = False,
mxmonitor_pattern: Optional[str] = None,
mxmonitor_stat_func: Optional[str] = None,
lr_decay_param_reset: bool = False,
lr_decay_opt_states_reset: str = C.LR_DECAY_OPT_STATES_RESET_OFF):
"""
Fits model to data given by train_iter using early-stopping w.r.t data given by val_iter.
Saves all intermediate and final output to output_folder
:param train_iter: The training data iterator.
:param val_iter: The validation data iterator.
:param output_folder: The folder in which all model artifacts will be stored in (parameters, checkpoints, etc.).
:param max_params_files_to_keep: Maximum number of params files to keep in the output folder (last n are kept).
:param metrics: The metrics that will be evaluated during training.
:param initializer: The parameter initializer.
        :param allow_missing_params: Allow missing parameters when initializing model parameters from file.
:param max_updates: Optional maximum number of batches to process.
:param checkpoint_frequency: Frequency of checkpointing in number of updates.
:param optimizer: The MXNet optimizer that will update the parameters.
:param optimizer_params: The parameters for the optimizer.
:param optimized_metric: The metric that is tracked for early stopping.
        :param gradient_clipping_type: The type of gradient clipping to perform.
        :param clip_gradient_threshold: The threshold above which gradients are clipped.
        :param kvstore: The MXNet kvstore used.
:param max_num_not_improved: Stop training if the optimized_metric does not improve for this many checkpoints,
-1: do not use early stopping.
:param min_num_epochs: Optional minimum number of epochs to train, even if validation scores did not improve.
:param max_num_epochs: Optional maximum number of epochs to train.
        :param decode_and_evaluate: Monitor BLEU during training (0: off, >0: the number of sentences to decode for BLEU
evaluation, -1: decode the full validation set.).
:param decode_and_evaluate_fname_source: Filename of source data to decode and evaluate.
:param decode_and_evaluate_fname_target: Filename of target data (references) to decode and evaluate.
:param decode_and_evaluate_context: Optional MXNet context for decode and evaluate.
:param use_tensorboard: If True write tensorboard compatible logs for monitoring training and
validation metrics.
:param mxmonitor_pattern: Optional pattern to match to monitor weights/gradients/outputs
with MXNet's monitor. Default is None which means no monitoring.
:param mxmonitor_stat_func: Choice of statistics function to run on monitored weights/gradients/outputs
when using MXNEt's monitor.
:param lr_decay_param_reset: Reset parameters to previous best after learning rate decay.
:param lr_decay_opt_states_reset: How to reset optimizer states after learning rate decay.
:return: Best score on validation data observed during training.
"""
self.save_version(output_folder)
self.save_config(output_folder)
if 'dist' in kvstore:
self._check_dist_kvstore_requirements(lr_decay_opt_states_reset, lr_decay_param_reset, optimizer)
utils.check_condition(gradient_clipping_type in C.GRADIENT_CLIPPING_TYPES,
"Unknown gradient clipping type %s" % gradient_clipping_type)
self.module.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label,
for_training=True, force_rebind=True, grad_req='write')
self.module.symbol.save(os.path.join(output_folder, C.SYMBOL_NAME))
self.module.init_params(initializer=initializer, arg_params=self.params, aux_params=None,
allow_missing=allow_missing_params, force_init=False)
self._log_params()
self.module.init_optimizer(kvstore=kvstore, optimizer=optimizer, optimizer_params=optimizer_params)
cp_decoder = checkpoint_decoder.CheckpointDecoder(decode_and_evaluate_context,
decode_and_evaluate_fname_source,
decode_and_evaluate_fname_target,
output_folder,
sample_size=decode_and_evaluate) \
if decode_and_evaluate else None
logger.info("Training started.")
self.training_monitor = callback.TrainingMonitor(train_iter.batch_size, output_folder,
optimized_metric=optimized_metric,
use_tensorboard=use_tensorboard,
cp_decoder=cp_decoder)
monitor = None
if mxmonitor_pattern is not None:
monitor = mx.monitor.Monitor(interval=C.MEASURE_SPEED_EVERY,
stat_func=C.MONITOR_STAT_FUNCS.get(mxmonitor_stat_func),
pattern=mxmonitor_pattern,
sort=True)
self.module.install_monitor(monitor)
logger.info("Installed MXNet monitor; pattern='%s'; statistics_func='%s'",
mxmonitor_pattern, mxmonitor_stat_func)
self._fit(train_iter, val_iter, output_folder,
kvstore=kvstore,
max_params_files_to_keep=max_params_files_to_keep,
metrics=metrics,
max_updates=max_updates,
checkpoint_frequency=checkpoint_frequency,
gradient_clipping_type=gradient_clipping_type,
clip_gradient_threshold=clip_gradient_threshold,
max_num_not_improved=max_num_not_improved,
min_num_epochs=min_num_epochs,
max_num_epochs=max_num_epochs,
mxmonitor=monitor,
lr_decay_param_reset=lr_decay_param_reset,
lr_decay_opt_states_reset=lr_decay_opt_states_reset)
logger.info("Training finished. Best checkpoint: %d. Best validation %s: %.6f",
self.training_monitor.get_best_checkpoint(),
self.training_monitor.optimized_metric,
self.training_monitor.get_best_validation_score())
return self.training_monitor.get_best_validation_score()
def _check_dist_kvstore_requirements(self, lr_decay_opt_states_reset, lr_decay_param_reset, optimizer):
        # In distributed training the optimizer will run remotely. For Eve, however, we need to pass information
        # about the loss, which is no longer possible by accessing self.module._curr_module._optimizer.
utils.check_condition(optimizer != C.OPTIMIZER_EVE, "Eve optimizer not supported with distributed training.")
utils.check_condition(not issubclass(type(self.lr_scheduler), lr_scheduler.AdaptiveLearningRateScheduler),
"Adaptive learning rate schedulers not supported with a dist kvstore. "
"Try a fixed schedule such as %s." % C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T)
utils.check_condition(not lr_decay_param_reset, "Parameter reset when the learning rate decays not "
"supported with distributed training.")
utils.check_condition(not lr_decay_opt_states_reset, "Optimizer state reset when the learning rate decays "
"not supported with distributed training.")
def _get_curr_module(self):
# As the BucketingModule does not expose all methods of the underlying Module we need to directly access
# the currently active module, when we use bucketing.
return self.module._curr_module if self.bucketing else self.module
def _get_executors(self):
return self._get_curr_module()._exec_group.execs
def _get_optimizer(self):
# TODO: Push update to MXNet to expose the optimizer (Module should have a get_optimizer method)
return self._get_curr_module()._optimizer
def _fit(self,
train_iter: data_io.BaseParallelSampleIter,
val_iter: data_io.BaseParallelSampleIter,
output_folder: str,
kvstore: str,
max_params_files_to_keep: int,
metrics: List[AnyStr],
max_updates: Optional[int],
checkpoint_frequency: int,
gradient_clipping_type: str,
clip_gradient_threshold: float,
max_num_not_improved: int,
min_num_epochs: Optional[int] = None,
max_num_epochs: Optional[int] = None,
mxmonitor: Optional[mx.monitor.Monitor] = None,
lr_decay_param_reset: bool = False,
lr_decay_opt_states_reset: str = C.LR_DECAY_OPT_STATES_RESET_OFF):
"""
Internal fit method. Runtime determined by early stopping.
:param train_iter: Training data iterator.
:param val_iter: Validation data iterator.
:param output_folder: Model output folder.
:param kvstore: The MXNet kvstore.
:param max_params_files_to_keep: Maximum number of params files to keep in the output folder (last n are kept).
:param metrics: List of metric names to track on training and validation data.
:param max_updates: Optional maximum number of batches to process.
        :param checkpoint_frequency: Frequency of checkpointing.
        :param gradient_clipping_type: The type of gradient clipping to perform.
        :param clip_gradient_threshold: The threshold above which gradients are clipped.
:param max_num_not_improved: Maximum number of checkpoints until fitting is stopped if model does not improve,
-1 for no early stopping.
:param min_num_epochs: Optional minimum number of epochs to train, even if validation scores did not improve.
:param max_num_epochs: Optional maximum number of epochs to train.
:param mxmonitor: Optional MXNet monitor instance.
:param lr_decay_param_reset: Reset parameters to previous best after learning rate decay.
:param lr_decay_opt_states_reset: How to reset optimizer states after learning rate decay.
"""
optimizer = self._get_optimizer()
if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_INITIAL:
self.save_optimizer_states(os.path.join(output_folder, C.OPT_STATES_INITIAL))
metric_train = self.create_eval_metric_composite(metrics)
metric_val = self.create_eval_metric_composite(metrics)
# If optimizer requires it, track loss as metric
if isinstance(optimizer, SockeyeOptimizer):
# Select training loss or optimized metric
if optimizer.request_optimized_metric:
metric_loss = self.create_eval_metric(self.training_monitor.optimized_metric)
else:
metric_loss = loss.get_loss(self.config.config_loss).create_metric()
tic = time.time()
training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
if os.path.exists(training_state_dir):
utils.check_condition('dist' not in kvstore, "Training continuation not supported with "
"distributed training.")
train_state = self.load_checkpoint(training_state_dir, train_iter)
else:
train_state = _TrainingState(num_not_improved=0, epoch=0, checkpoint=0, updates=0, samples=0)
next_data_batch = train_iter.next()
speedometer = Speedometer(frequency=C.MEASURE_SPEED_EVERY, auto_reset=False)
while True:
if not train_iter.iter_next():
train_state.epoch += 1
train_iter.reset()
if (max_updates is not None and train_state.updates == max_updates) or \
(max_num_epochs is not None and train_state.epoch == max_num_epochs):
logger.info("Maximum # of updates (%s) or epochs (%s) reached.", max_updates, max_num_epochs)
break
# process batch
batch = next_data_batch
if mxmonitor is not None:
mxmonitor.tic()
# Forward-backward to get outputs, gradients
self.module.forward_backward(batch)
gradient_norm = None
if train_state.updates > 0 and train_state.updates % checkpoint_frequency == 0:
# compute values for logging to metrics (before rescaling...)
gradient_norm = self.get_global_grad_norm()
# note: C.GRADIENT_CLIPPING_TYPE_ABS is handled by the mxnet optimizer directly
if gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_NORM:
if gradient_norm is None:
gradient_norm = self.get_global_grad_norm()
if gradient_norm > clip_gradient_threshold:
ratio = clip_gradient_threshold / gradient_norm
self.rescale_grad(ratio)
# Update aggregate training loss
self.module.update_metric(metric_train, batch.label)
# If using an extended optimizer, provide extra state information about the current batch
# Loss: training loss
if isinstance(optimizer, SockeyeOptimizer):
# Loss for this batch
metric_loss.reset()
metric_loss.update(batch.label, self.module.get_outputs())
[(_, m_val)] = metric_loss.get_name_value()
batch_state = BatchState(metric_val=m_val)
optimizer.pre_update_batch(batch_state)
# Call optimizer to update weights given gradients, current state
self.module.update()
if mxmonitor is not None:
results = mxmonitor.toc()
if results:
for _, k, v in results:
logger.info('Monitor: Batch [{:d}] {:s} {:s}'.format(train_state.updates, k, v))
if train_iter.iter_next():
# pre-fetch next batch
next_data_batch = train_iter.next()
self.module.prepare(next_data_batch)
batch_num_samples = batch.data[0].shape[0]
batch_num_tokens = batch.data[0].shape[1] * batch_num_samples
train_state.updates += 1
train_state.samples += batch_num_samples
speedometer(train_state.epoch, train_state.updates, batch_num_samples, batch_num_tokens, metric_train)
if train_state.updates > 0 and train_state.updates % checkpoint_frequency == 0:
train_state.checkpoint += 1
self._save_params(output_folder, train_state.checkpoint)
cleanup_params_files(output_folder, max_params_files_to_keep,
train_state.checkpoint, self.training_monitor.get_best_checkpoint())
metric_train_dict = {k: v for k, v in metric_train.get_name_value()}
if gradient_norm is not None:
metric_train_dict['gradient-norm'] = gradient_norm
self.training_monitor.checkpoint_callback(train_state.checkpoint, metric_train_dict,
memory_data=utils.get_gpu_memory_usage(self.context))
toc = time.time()
logger.info("Checkpoint [%d]\tUpdates=%d Epoch=%d Samples=%d Time-cost=%.3f",
train_state.checkpoint, train_state.updates, train_state.epoch,
train_state.samples, (toc - tic))
tic = time.time()
for name, val in metric_train.get_name_value():
logger.info('Checkpoint [%d]\tTrain-%s=%f', train_state.checkpoint, name, val)
metric_train.reset()
# evaluation on validation set
has_improved, best_checkpoint = self._evaluate(train_state, val_iter, metric_val)
# If using an extended optimizer, provide extra state information about the current checkpoint
# Loss: optimized metric
if isinstance(optimizer, SockeyeOptimizer):
m_val = 0
for name, val in metric_val.get_name_value():
if name == self.training_monitor.optimized_metric:
m_val = val
checkpoint_state = CheckpointState(checkpoint=train_state.checkpoint, metric_val=m_val)
optimizer.pre_update_checkpoint(checkpoint_state)
# learning rate adjustment
if self.lr_scheduler is not None:
if issubclass(type(self.lr_scheduler), lr_scheduler.AdaptiveLearningRateScheduler):
lr_adjusted = self.lr_scheduler.new_evaluation_result(has_improved)
else:
lr_adjusted = False
if lr_adjusted and not has_improved:
if lr_decay_param_reset:
logger.info("Loading parameters from last best checkpoint: %d", best_checkpoint)
self._load_params(output_folder, best_checkpoint)
if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_INITIAL:
logger.info("Resetting optimizer states to initial")
self.load_optimizer_states(os.path.join(output_folder, C.OPT_STATES_INITIAL))
elif lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST:
logger.info("Resetting optimizer states to last best checkpoint: %d", best_checkpoint)
self.load_optimizer_states(os.path.join(output_folder, C.OPT_STATES_BEST))
if has_improved:
best_params_path = os.path.join(output_folder, C.PARAMS_BEST_NAME)
if os.path.lexists(best_params_path):
os.remove(best_params_path)
actual_best_params_fname = C.PARAMS_NAME % best_checkpoint
os.symlink(actual_best_params_fname, best_params_path)
if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST:
best_opt_states_fname = os.path.join(output_folder, C.OPT_STATES_BEST)
if os.path.exists(best_opt_states_fname):
os.remove(best_opt_states_fname)
self.save_optimizer_states(best_opt_states_fname)
train_state.num_not_improved = 0
else:
train_state.num_not_improved += 1
logger.info("Model has not improved for %d checkpoints", train_state.num_not_improved)
if max_num_not_improved >= 0 and train_state.num_not_improved >= max_num_not_improved:
logger.info("Maximum number of not improved checkpoints (%d) reached: %d",
max_num_not_improved, train_state.num_not_improved)
stop_fit = True
if min_num_epochs is not None and train_state.epoch < min_num_epochs:
logger.info("Minimum number of epochs (%d) not reached yet: %d",
min_num_epochs,
train_state.epoch)
stop_fit = False
if stop_fit:
break
self._checkpoint(train_state, output_folder, train_iter)
cleanup_params_files(output_folder, max_params_files_to_keep,
train_state.checkpoint, self.training_monitor.get_best_checkpoint())
logger.info('Training stopped')
self.training_monitor.stop_fit_callback()
final_training_state_dirname = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
if os.path.exists(final_training_state_dirname):
shutil.rmtree(final_training_state_dirname)
if lr_decay_opt_states_reset == C.LR_DECAY_OPT_STATES_RESET_BEST:
best_opt_states_fname = os.path.join(output_folder, C.OPT_STATES_BEST)
if os.path.exists(best_opt_states_fname):
os.remove(best_opt_states_fname)
def _log_params(self):
"""
Logs information about model parameters.
"""
arg_params, aux_params = self.module.get_params()
total_parameters = 0
info = []
for name, array in sorted(arg_params.items()):
info.append("%s: %s" % (name, array.shape))
total_parameters += reduce(lambda x, y: x * y, array.shape)
logger.info("Model parameters: %s", ", ".join(info))
logger.info("Total # of parameters: %d", total_parameters)
def _save_params(self, output_folder: str, checkpoint: int):
"""
Synchronizes parameters across devices, saves the parameters to disk, and updates self.params.
"""
arg_params, aux_params = self.module.get_params()
self.module.set_params(arg_params, aux_params)
self.params = arg_params
params_base_fname = C.PARAMS_NAME % checkpoint
self.save_params_to_file(os.path.join(output_folder, params_base_fname))
def _load_params(self, output_folder: str, checkpoint: int):
"""
Loads parameters from disk, sets self.params and module's parameters.
"""
params_fname = os.path.join(output_folder, C.PARAMS_NAME % checkpoint)
self.load_params_from_file(params_fname) # sets self.params
self.module.set_params(arg_params=self.params, aux_params={})
def _evaluate(self, training_state, val_iter, val_metric):
"""
Computes val_metric on val_iter. Returns whether model improved or not.
"""
val_iter.reset()
val_metric.reset()
for nbatch, eval_batch in enumerate(val_iter):
self.module.forward(eval_batch, is_train=False)
self.module.update_metric(val_metric, eval_batch.label)
for name, val in val_metric.get_name_value():
logger.info('Checkpoint [%d]\tValidation-%s=%f', training_state.checkpoint, name, val)
return self.training_monitor.eval_end_callback(training_state.checkpoint, val_metric)
def _checkpoint(self, training_state: _TrainingState, output_folder: str,
train_iter: data_io.BaseParallelSampleIter):
"""
Saves checkpoint. Note that the parameters are saved in _save_params.
"""
# Create temporary directory for storing the state of the optimization process
training_state_dirname = os.path.join(output_folder, C.TRAINING_STATE_TEMP_DIRNAME)
if not os.path.exists(training_state_dirname):
os.mkdir(training_state_dirname)
# Link current parameter file
params_base_fname = C.PARAMS_NAME % training_state.checkpoint
os.symlink(os.path.join("..", params_base_fname),
os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME))
# Save current optimizer states
opt_state_fname = os.path.join(training_state_dirname, C.OPT_STATES_LAST)
self.save_optimizer_states(opt_state_fname)
# State of the bucket iterator
train_iter.save_state(os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME))
# RNG states: python's random and np.random provide functions for
# storing the state, mxnet does not, but inside our code mxnet's RNG is
# not used AFAIK
with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "wb") as fp:
pickle.dump(random.getstate(), fp)
pickle.dump(np.random.get_state(), fp) # Yes, one uses _, the other does not
# Monitor state, in order to get the full information about the metrics
self.training_monitor.save_state(os.path.join(training_state_dirname, C.MONITOR_STATE_NAME))
# Our own state
self.save_state(training_state, os.path.join(training_state_dirname, C.TRAINING_STATE_NAME))
# The lr scheduler
with open(os.path.join(training_state_dirname, C.SCHEDULER_STATE_NAME), "wb") as fp:
pickle.dump(self.lr_scheduler, fp)
# We are now finished with writing. Rename the temporary directory to
# the actual directory
final_training_state_dirname = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
# First we rename the existing directory to minimize the risk of state
# loss if the process is aborted during deletion (which will be slower
# than directory renaming)
delete_training_state_dirname = os.path.join(output_folder, C.TRAINING_STATE_TEMP_DELETENAME)
if os.path.exists(final_training_state_dirname):
os.rename(final_training_state_dirname, delete_training_state_dirname)
os.rename(training_state_dirname, final_training_state_dirname)
if os.path.exists(delete_training_state_dirname):
shutil.rmtree(delete_training_state_dirname)
@staticmethod
def save_state(training_state: _TrainingState, fname: str):
"""
Saves the state (of the TrainingModel class) to disk.
:param training_state: The training state to save.
:param fname: File name to save the state to.
"""
with open(fname, "wb") as fp:
pickle.dump(training_state, fp)
@staticmethod
def load_state(fname: str) -> _TrainingState:
"""
Loads the training state (of the TrainingModel class) from disk.
:param fname: File name to load the state from.
:return: Training state.
"""
training_state = None
with open(fname, "rb") as fp:
training_state = pickle.load(fp)
return training_state
def save_optimizer_states(self, fname: str):
"""
Saves optimizer states to a file.
:param fname: File name to save optimizer states to.
"""
self._get_curr_module().save_optimizer_states(fname)
def load_optimizer_states(self, fname: str):
"""
Loads optimizer states from file.
:param fname: File name to load optimizer states from.
"""
self._get_curr_module().load_optimizer_states(fname)
def load_checkpoint(self, directory: str, train_iter: data_io.BaseParallelSampleIter) -> _TrainingState:
"""
Loads the full training state from disk. This includes optimizer,
random number generators and everything needed. Note that params
should have been loaded already by the initializer.
:param directory: directory where the state has been saved.
:param train_iter: training data iterator.
"""
        # Optimizer state (from mxnet)
opt_state_fname = os.path.join(directory, C.OPT_STATES_LAST)
self.load_optimizer_states(opt_state_fname)
# State of the bucket iterator
train_iter.load_state(os.path.join(directory, C.BUCKET_ITER_STATE_NAME))
# RNG states: python's random and np.random provide functions for
# storing the state, mxnet does not, but inside our code mxnet's RNG is
# not used AFAIK
with open(os.path.join(directory, C.RNG_STATE_NAME), "rb") as fp:
random.setstate(pickle.load(fp))
np.random.set_state(pickle.load(fp))
# Monitor state, in order to get the full information about the metrics
self.training_monitor.load_state(os.path.join(directory, C.MONITOR_STATE_NAME))
# And our own state
return self.load_state(os.path.join(directory, C.TRAINING_STATE_NAME))
def get_global_grad_norm(self) -> float:
executors = self._get_executors()
rescale_grad = self._get_optimizer().rescale_grad
# average norm across executors:
exec_norms = [global_norm([arr for arr in exe.grad_arrays if arr is not None]) for exe in executors]
norm_val = sum(exec_norms) / float(len(exec_norms))
norm_val *= rescale_grad
return norm_val
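    # Sketch of the computation above (hypothetical values): two executors with
    # per-executor global norms 3.0 and 5.0 and rescale_grad = 0.5 yield
    # ((3.0 + 5.0) / 2) * 0.5 = 2.0 as the norm used for clipping decisions.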
def rescale_grad(self, scale: float):
for exe in self._get_executors():
for arr in exe.grad_arrays:
if arr is None:
continue
arr *= scale
def cleanup_params_files(output_folder: str, max_to_keep: int, checkpoint: int, best_checkpoint: int):
"""
Cleanup the params files in the output folder.
:param output_folder: folder where param files are created.
:param max_to_keep: maximum number of files to keep, negative to keep all.
:param checkpoint: current checkpoint (i.e. index of last params file created).
:param best_checkpoint: best checkpoint, we will not delete its params.
"""
if max_to_keep <= 0: # We assume we do not want to delete all params
return
existing_files = glob.glob(os.path.join(output_folder, C.PARAMS_PREFIX + "*"))
params_name_with_dir = os.path.join(output_folder, C.PARAMS_NAME)
for n in range(1, max(1, checkpoint - max_to_keep + 1)):
if n != best_checkpoint:
param_fname_n = params_name_with_dir % n
if param_fname_n in existing_files:
os.remove(param_fname_n)
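# Example run (hypothetical numbers): max_to_keep=5, checkpoint=12 and
# best_checkpoint=3 make the loop consider n in 1..7 and delete the params
# files for checkpoints 1, 2, 4, 5, 6 and 7, keeping checkpoint 3 (the best)
# plus the five most recent files (8..12).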
class Speedometer:
"""
Custom Speedometer to log samples and words per second.
"""
def __init__(self, frequency: int = 50, auto_reset: bool = True) -> None:
self.frequency = frequency
self.init = False
self.tic = 0.0
self.last_count = 0
self.auto_reset = auto_reset
self.samples = 0
self.tokens = 0
self.msg = 'Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec %.2f tokens/sec'
def __call__(self, epoch: int, updates: int, samples: int, tokens: int, metric: Optional[mx.metric.EvalMetric]):
count = updates
if self.last_count > count:
self.init = False
self.last_count = count
self.samples += samples
self.tokens += tokens
if self.init:
if count % self.frequency == 0:
samples_per_sec = self.samples / (time.time() - self.tic)
tokens_per_sec = self.tokens / (time.time() - self.tic)
self.samples = 0
self.tokens = 0
if metric is not None:
name_value = metric.get_name_value()
if self.auto_reset:
metric.reset()
logger.info(self.msg + '\t%s=%f' * len(name_value),
epoch, count, samples_per_sec, tokens_per_sec, *sum(name_value, ()))
else:
                    logger.info(self.msg, epoch, count, samples_per_sec, tokens_per_sec)
self.tic = time.time()
else:
self.init = True
self.tic = time.time()
| [
"List[mx.nd.NDArray]",
"int",
"int",
"int",
"int",
"int",
"model.ModelConfig",
"List[mx.context.Context]",
"data_io.BaseParallelSampleIter",
"bool",
"data_io.BaseParallelSampleIter",
"AnyStr",
"List[AnyStr]",
"data_io.BaseParallelSampleIter",
"data_io.BaseParallelSampleIter",
"str",
"int",
"List[AnyStr]",
"mx.initializer.Initializer",
"bool",
"Optional[int]",
"int",
"str",
"dict",
"data_io.BaseParallelSampleIter",
"data_io.BaseParallelSampleIter",
"str",
"str",
"int",
"List[AnyStr]",
"Optional[int]",
"int",
"str",
"float",
"int",
"str",
"int",
"str",
"int",
"_TrainingState",
"str",
"data_io.BaseParallelSampleIter",
"_TrainingState",
"str",
"str",
"str",
"str",
"str",
"data_io.BaseParallelSampleIter",
"float",
"str",
"int",
"int",
"int",
"int",
"int",
"int",
"int",
"Optional[mx.metric.EvalMetric]"
] | [
1156,
1661,
1690,
1724,
1755,
1786,
2763,
2808,
2863,
2923,
3473,
7141,
7780,
8103,
8157,
8216,
8259,
8285,
8324,
8386,
8417,
8466,
8494,
8529,
17524,
17579,
17639,
17666,
17710,
17737,
17777,
17827,
17869,
17912,
17954,
30226,
30243,
30701,
30718,
31740,
31771,
31808,
34544,
34567,
34900,
35298,
35537,
35776,
35793,
37550,
37772,
37790,
37807,
37829,
39172,
39186,
39200,
39213,
39226
] | [
1175,
1664,
1693,
1727,
1758,
1789,
2780,
2832,
2893,
2927,
3503,
7147,
7792,
8133,
8187,
8219,
8262,
8297,
8350,
8390,
8430,
8469,
8497,
8533,
17554,
17609,
17642,
17669,
17713,
17749,
17790,
17830,
17872,
17917,
17957,
30229,
30246,
30704,
30721,
31754,
31774,
31838,
34558,
34570,
34903,
35301,
35540,
35779,
35823,
37555,
37775,
37793,
37810,
37832,
39175,
39189,
39203,
39216,
39256
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/transformer.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Dict, Optional
import mxnet as mx
import numpy as np
from . import config
from . import constants as C
from . import layers
class TransformerConfig(config.Config):
def __init__(self,
model_size: int,
attention_heads: int,
feed_forward_num_hidden: int,
act_type: str,
num_layers: int,
dropout_attention: float,
dropout_act: float,
dropout_prepost: float,
positional_embedding_type: str,
preprocess_sequence: str,
postprocess_sequence: str,
max_seq_len_source: int,
max_seq_len_target: int,
conv_config: Optional['ConvolutionalEmbeddingConfig'] = None) -> None: # type: ignore
super().__init__()
self.model_size = model_size
self.attention_heads = attention_heads
self.feed_forward_num_hidden = feed_forward_num_hidden
self.act_type = act_type
self.num_layers = num_layers
self.dropout_attention = dropout_attention
self.dropout_act = dropout_act
self.dropout_prepost = dropout_prepost
self.positional_embedding_type = positional_embedding_type
self.preprocess_sequence = preprocess_sequence
self.postprocess_sequence = postprocess_sequence
self.max_seq_len_source = max_seq_len_source
self.max_seq_len_target = max_seq_len_target
self.conv_config = conv_config
class TransformerEncoderBlock:
"""
    A transformer encoder block consists of self-attention and a feed-forward layer with pre/post process blocks
    in between.
"""
def __init__(self,
config: TransformerConfig,
prefix: str) -> None:
self.pre_self_attention = TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%satt_self_pre_" % prefix)
self.self_attention = layers.MultiHeadSelfAttention(depth_att=config.model_size,
heads=config.attention_heads,
depth_out=config.model_size,
dropout=config.dropout_attention,
prefix="%satt_self_" % prefix)
self.post_self_attention = TransformerProcessBlock(sequence=config.postprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%satt_self_post_" % prefix)
self.pre_ff = TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sff_pre_" % prefix)
self.ff = TransformerFeedForward(num_hidden=config.feed_forward_num_hidden,
num_model=config.model_size,
act_type=config.act_type,
dropout=config.dropout_act,
prefix="%sff_" % prefix)
self.post_ff = TransformerProcessBlock(sequence=config.postprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sff_post_" % prefix)
def __call__(self, data: mx.sym.Symbol, bias: mx.sym.Symbol) -> mx.sym.Symbol:
# self-attention
data_self_att = self.self_attention(inputs=self.pre_self_attention(data, None),
bias=bias,
cache=None)
data = self.post_self_attention(data_self_att, data)
# feed-forward
data_ff = self.ff(self.pre_ff(data, None))
data = self.post_ff(data_ff, data)
return data
class TransformerDecoderBlock:
"""
    A transformer decoder block consists of self-attention, encoder attention, and a feed-forward layer
    with pre/post process blocks in between.
"""
def __init__(self,
config: TransformerConfig,
prefix: str) -> None:
self.prefix = prefix
self.pre_self_attention = TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%satt_self_pre_" % prefix)
self.self_attention = layers.MultiHeadSelfAttention(depth_att=config.model_size,
heads=config.attention_heads,
depth_out=config.model_size,
dropout=config.dropout_attention,
prefix="%satt_self_" % prefix)
self.post_self_attention = TransformerProcessBlock(sequence=config.postprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%satt_self_post_" % prefix)
self.pre_enc_attention = TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%satt_enc_pre_" % prefix)
self.enc_attention = layers.MultiHeadAttention(depth_att=config.model_size,
heads=config.attention_heads,
depth_out=config.model_size,
dropout=config.dropout_attention,
prefix="%satt_enc_" % prefix)
self.post_enc_attention = TransformerProcessBlock(sequence=config.postprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%satt_enc_post_" % prefix)
self.pre_ff = TransformerProcessBlock(sequence=config.preprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sff_pre_" % prefix)
self.ff = TransformerFeedForward(num_hidden=config.feed_forward_num_hidden,
num_model=config.model_size,
act_type=config.act_type,
dropout=config.dropout_act,
prefix="%sff_" % prefix)
self.post_ff = TransformerProcessBlock(sequence=config.postprocess_sequence,
num_hidden=config.model_size,
dropout=config.dropout_prepost,
prefix="%sff_post_" % prefix)
def __call__(self,
target: mx.sym.Symbol,
target_bias: mx.sym.Symbol,
source: mx.sym.Symbol,
source_bias: mx.sym.Symbol,
cache: Optional[Dict[str, Optional[mx.sym.Symbol]]] = None) -> mx.sym.Symbol:
# self-attention
target_self_att = self.self_attention(inputs=self.pre_self_attention(target, None),
bias=target_bias,
cache=cache)
target = self.post_self_attention(target_self_att, target)
# encoder attention
target_enc_att = self.enc_attention(queries=self.pre_enc_attention(target, None),
memory=source,
bias=source_bias)
target = self.post_enc_attention(target_enc_att, target)
# feed-forward
target_ff = self.ff(self.pre_ff(target, None))
target = self.post_ff(target_ff, target)
return target
class TransformerProcessBlock:
"""
Block to perform pre/post processing on layer inputs.
    The processing steps are determined by the sequence argument, which can contain any of the three operations:
n: layer normalization
r: residual connection
d: dropout
"""
def __init__(self,
sequence: str,
num_hidden: int,
dropout: float,
prefix: str) -> None:
self.sequence = sequence
self.num_hidden = num_hidden
self.dropout = dropout
self.prefix = prefix
self.layer_norm = None
if "n" in sequence:
self.layer_norm = layers.LayerNormalization(num_hidden=self.num_hidden, prefix="%snorm" % self.prefix)
def __call__(self,
data: mx.sym.Symbol,
prev: Optional[mx.sym.Symbol]) -> mx.sym.Symbol:
"""
Apply processing sequence to data with optional previous input.
:param data: Input data. Shape: (batch, length, num_hidden).
:param prev: Previous data. Shape: (batch, length, num_hidden).
:return: Processed data. Shape: (batch, length, num_hidden).
"""
if not self.sequence:
return data
if prev is None:
assert 'r' not in self.sequence, "Residual connection not allowed if no previous value given."
for step in self.sequence:
if step == "r":
data = mx.sym._internal._plus(data, prev, name="%sresidual" % self.prefix)
elif step == "n":
data = self.layer_norm.normalize(data)
elif step == "d":
if self.dropout > 0.0:
data = mx.sym.Dropout(data, p=self.dropout, name="%sdropout" % self.prefix)
else:
raise ValueError("Unknown step in sequence: %s" % step)
return data
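# For instance (illustration only), the usual transformer pre/post settings map to:
#   sequence="n"  : data -> layer_norm(data)              (pre-process)
#   sequence="dr" : data -> dropout(data) + prev          (post-process)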
class TransformerFeedForward:
"""
Position-wise feed-forward network with activation.
"""
def __init__(self,
num_hidden: int,
num_model: int,
act_type: str,
dropout: float,
prefix: str) -> None:
self.num_hidden = num_hidden
self.num_model = num_model
self.dropout = dropout
self.prefix = prefix
self.act_type = act_type
self.w_i2h = mx.sym.Variable('%si2h_weight' % prefix)
self.b_i2h = mx.sym.Variable('%si2h_bias' % prefix)
self.w_h2o = mx.sym.Variable('%sh2o_weight' % prefix)
self.b_h2o = mx.sym.Variable('%sh2o_bias' % prefix)
def __call__(self, x) -> mx.sym.Symbol:
"""
Position-wise feed-forward network with activation.
:param x: Symbol of shape (batch_size, seq_len, num_hidden)
:return: Symbol of shape (batch_size, seq_len, num_hidden)
"""
h = mx.sym.FullyConnected(data=x, num_hidden=self.num_hidden, weight=self.w_i2h, bias=self.b_i2h, flatten=False)
h = layers.activation(h, act_type=self.act_type)
if self.dropout > 0.0:
h = mx.sym.Dropout(h, p=self.dropout)
y = mx.sym.FullyConnected(data=h, num_hidden=self.num_model, weight=self.w_h2o, bias=self.b_h2o, flatten=False)
return y
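# In equation form, this __call__ is the position-wise FFN of the transformer:
#   FFN(x) = act(x W_i2h + b_i2h) W_h2o + b_h2o
# applied independently at every position; flatten=False keeps the
# (batch_size, seq_len, num_hidden) layout intact.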
class VariableLengthBias(mx.operator.CustomOp):
"""
Returns bias/mask given a vector of sequence lengths.
"""
def __init__(self, max_length: int) -> None:
super().__init__()
self.max_length = max_length
def forward(self, is_train, req, in_data, out_data, aux):
# lengths: (batch_size,)
lengths = in_data[0]
# (max_length, batch_size)
data = mx.nd.zeros((self.max_length, lengths.shape[0]), ctx=lengths.context)
data = mx.nd.SequenceMask(data=data,
use_sequence_length=True,
sequence_length=lengths,
value=C.LARGE_NEGATIVE_VALUE)
# (batch_size, max_length)
data = mx.nd.swapaxes(data, dim1=0, dim2=1)
self.assign(out_data[0], req[0], data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("variable_length_bias")
class VariableLengthBiasProp(mx.operator.CustomOpProp):
def __init__(self, max_length: str) -> None:
super().__init__()
self.max_length = int(max_length)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
batch_size = in_shape[0][0]
return in_shape, [(batch_size, self.max_length)], []
def infer_type(self, in_type):
return in_type, [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return VariableLengthBias(max_length=self.max_length)
def get_variable_length_bias(lengths: mx.sym.Symbol,
max_length: int,
num_heads: Optional[int] = None,
fold_heads: bool = True,
name: str = '') -> mx.sym.Symbol:
"""
Returns bias/mask for variable sequence lengths.
:param lengths: Sequence lengths. Shape: (batch,).
:param max_length: Maximum sequence length.
:param num_heads: Number of attention heads.
:param fold_heads: Whether to fold heads dimension into batch dimension.
:param name: Name of symbol.
:return: Bias symbol.
"""
# (batch_size, max_length)
x = mx.symbol.Custom(data=lengths, max_length=max_length, op_type='variable_length_bias')
if num_heads is not None:
# (batch_size, heads, max_length) if fold_heads == False else (batch_size * heads, max_length)
x = layers.broadcast_to_heads(x, num_heads, ndim=2, fold_heads=fold_heads)
return mx.sym.BlockGrad(x, name='%sbias' % name)
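# Example output (illustration only): lengths=[2, 3] with max_length=4 and no
# head broadcasting gives
#   [[0, 0, NEG, NEG],
#    [0, 0, 0,   NEG]]
# where NEG is C.LARGE_NEGATIVE_VALUE, so attention logits at padded positions
# are pushed towards -inf before the softmax.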
def get_autoregressive_bias(max_length: int, name: str) -> mx.sym.Symbol:
"""
Returns bias/mask to ensure position i can only attend to positions <i.
:param max_length: Sequence length.
:param name: Name of symbol.
:return: Bias symbol of shape (1, max_length, max_length).
"""
return mx.sym.BlockGrad(mx.symbol.Custom(length=max_length,
name=name,
op_type='auto_regressive_bias'))
class AutoRegressiveBias(mx.operator.CustomOp):
"""
Returns a symbol of shape (1, length, length) with cells above the main diagonal
set to a large negative value, e.g.
length=4
0 1 1 1
0 0 1 1 * -99999
0 0 0 1
0 0 0 0
"""
def __init__(self, length: int) -> None:
super().__init__()
self.bias = self.get_bias(length)
@staticmethod
def get_bias(length: int):
# matrix with lower triangle and main diagonal set to 0, upper triangle set to 1
upper_triangle = np.triu(np.ones((length, length)), k=1)
# (1, length, length)
bias = -99999999. * np.reshape(upper_triangle, (1, length, length))
return mx.nd.array(bias)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.bias)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("auto_regressive_bias")
class AutoRegressiveBiasProp(mx.operator.CustomOpProp):
def __init__(self, length: str) -> None:
super().__init__()
self.length = int(length)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(1, self.length, self.length)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return AutoRegressiveBias(length=self.length)
| [
"int",
"int",
"int",
"str",
"int",
"float",
"float",
"float",
"str",
"str",
"str",
"int",
"int",
"TransformerConfig",
"str",
"mx.sym.Symbol",
"mx.sym.Symbol",
"TransformerConfig",
"str",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"mx.sym.Symbol",
"str",
"int",
"float",
"str",
"mx.sym.Symbol",
"Optional[mx.sym.Symbol]",
"int",
"int",
"str",
"float",
"str",
"int",
"str",
"mx.sym.Symbol",
"int",
"int",
"str",
"int",
"int",
"str"
] | [
806,
845,
892,
924,
958,
999,
1036,
1077,
1128,
1171,
1215,
1257,
1299,
2337,
2381,
4603,
4624,
5325,
5369,
8795,
8840,
8880,
8925,
10139,
10173,
10204,
10236,
10601,
10639,
11851,
11884,
11916,
11947,
11979,
13223,
14135,
14695,
14751,
15729,
15740,
16486,
16613,
17249
] | [
809,
848,
895,
927,
961,
1004,
1041,
1082,
1131,
1174,
1218,
1260,
1302,
2354,
2384,
4616,
4637,
5342,
5372,
8808,
8853,
8893,
8938,
10142,
10176,
10209,
10239,
10614,
10662,
11854,
11887,
11919,
11952,
11982,
13226,
14138,
14708,
14754,
15732,
15743,
16489,
16616,
17252
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/translate.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Translation CLI.
"""
import argparse
import sys
import time
from math import ceil
from contextlib import ExitStack
from typing import Optional, Iterable
import mxnet as mx
import sockeye
import sockeye.arguments as arguments
import sockeye.constants as C
import sockeye.data_io
import sockeye.inference
from sockeye.lexicon import TopKLexicon
import sockeye.output_handler
from sockeye.log import setup_main_logger
from sockeye.utils import acquire_gpus, get_num_gpus, log_basic_info
from sockeye.utils import check_condition, grouper
logger = setup_main_logger(__name__, file_logging=False)
def main():
params = argparse.ArgumentParser(description='Translate CLI')
arguments.add_translate_cli_args(params)
args = params.parse_args()
if args.output is not None:
global logger
logger = setup_main_logger(__name__,
console=not args.quiet,
file_logging=True,
path="%s.%s" % (args.output, C.LOG_NAME))
if args.checkpoints is not None:
check_condition(len(args.checkpoints) == len(args.models), "must provide checkpoints for each model")
log_basic_info(args)
output_handler = sockeye.output_handler.get_output_handler(args.output_type,
args.output,
args.sure_align_threshold)
with ExitStack() as exit_stack:
context = _setup_context(args, exit_stack)
models, vocab_source, vocab_target = sockeye.inference.load_models(
context,
args.max_input_len,
args.beam_size,
args.batch_size,
args.models,
args.checkpoints,
args.softmax_temperature,
args.max_output_length_num_stds,
decoder_return_logit_inputs=args.restrict_lexicon is not None,
cache_output_layer_w_b=args.restrict_lexicon is not None)
restrict_lexicon = None # type: TopKLexicon
if args.restrict_lexicon:
restrict_lexicon = TopKLexicon(vocab_source, vocab_target)
restrict_lexicon.load(args.restrict_lexicon)
translator = sockeye.inference.Translator(context,
args.ensemble_mode,
args.bucket_width,
sockeye.inference.LengthPenalty(args.length_penalty_alpha,
args.length_penalty_beta),
models,
vocab_source,
vocab_target,
restrict_lexicon)
read_and_translate(translator, output_handler, args.chunk_size, args.input)
def read_and_translate(translator: sockeye.inference.Translator, output_handler: sockeye.output_handler.OutputHandler,
chunk_size: Optional[int], source: Optional[str] = None) -> None:
"""
Reads from either a file or stdin and translates each line, calling the output_handler with the result.
:param output_handler: Handler that will write output to a stream.
:param translator: Translator that will translate each line of input.
:param chunk_size: The size of the portion to read at a time from the input.
    :param source: Path to a file which will be translated line-by-line; if None, stdin is used.
"""
source_data = sys.stdin if source is None else sockeye.data_io.smart_open(source)
batch_size = translator.batch_size
if chunk_size is None:
if translator.batch_size == 1:
            # No batching, therefore there is no need to read segments in chunks.
chunk_size = C.CHUNK_SIZE_NO_BATCHING
else:
# Get a constant number of batches per call to Translator.translate.
chunk_size = C.CHUNK_SIZE_PER_BATCH_SEGMENT * translator.batch_size
else:
if chunk_size < translator.batch_size:
logger.warning("You specified a chunk size (%d) smaller than the batch size (%d). This will lead to "
"a degregation of translation speed. Consider choosing a larger chunk size." % (chunk_size,
batch_size))
logger.info("Translating...")
total_time, total_lines = 0.0, 0
for chunk in grouper(source_data, chunk_size):
chunk_time = translate(output_handler, chunk, translator, total_lines)
total_lines += len(chunk)
total_time += chunk_time
if total_lines != 0:
logger.info("Processed %d lines in %d batches. Total time: %.4f, sec/sent: %.4f, sent/sec: %.4f",
total_lines, ceil(total_lines / batch_size), total_time,
total_time / total_lines, total_lines / total_time)
else:
logger.info("Processed 0 lines.")
def translate(output_handler: sockeye.output_handler.OutputHandler, source_data: Iterable[str],
translator: sockeye.inference.Translator, chunk_id: int = 0) -> float:
"""
Translates each line from source_data, calling output handler after translating a batch.
:param output_handler: A handler that will be called once with the output of each translation.
    :param source_data: An iterable of source sentences that will be translated.
:param translator: The translator that will be used for each line of input.
:param chunk_id: Global id of the chunk.
:return: Total time taken.
"""
tic = time.time()
trans_inputs = [translator.make_input(i, line) for i, line in enumerate(source_data, chunk_id + 1)]
trans_outputs = translator.translate(trans_inputs)
total_time = time.time() - tic
batch_time = total_time / len(trans_inputs)
for trans_input, trans_output in zip(trans_inputs, trans_outputs):
output_handler.handle(trans_input, trans_output, batch_time)
return total_time
def _setup_context(args, exit_stack):
if args.use_cpu:
context = mx.cpu()
else:
num_gpus = get_num_gpus()
check_condition(num_gpus >= 1,
"No GPUs found, consider running on the CPU with --use-cpu "
"(note: check depends on nvidia-smi and this could also mean that the nvidia-smi "
"binary isn't on the path).")
check_condition(len(args.device_ids) == 1, "cannot run on multiple devices for now")
gpu_id = args.device_ids[0]
if args.disable_device_locking:
if gpu_id < 0:
# without locking and a negative device id we just take the first device
gpu_id = 0
else:
gpu_ids = exit_stack.enter_context(acquire_gpus([gpu_id], lock_dir=args.lock_dir))
gpu_id = gpu_ids[0]
context = mx.gpu(gpu_id)
return context
if __name__ == '__main__':
main()
| [
"sockeye.inference.Translator",
"sockeye.output_handler.OutputHandler",
"Optional[int]",
"sockeye.output_handler.OutputHandler",
"Iterable[str]",
"sockeye.inference.Translator"
] | [
3597,
3643,
3716,
5768,
5819,
5866
] | [
3625,
3679,
3729,
5804,
5832,
5894
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/utils.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
A set of utility methods.
"""
import errno
import gzip
import itertools
import logging
import os
import random
import shutil
import subprocess
import sys
import time
from contextlib import contextmanager, ExitStack
from typing import Mapping, Any, List, Iterator, Iterable, Set, Tuple, Dict, Optional, Union, IO
import fcntl
import mxnet as mx
import numpy as np
import sockeye.constants as C
from sockeye import __version__
from sockeye.log import log_sockeye_version, log_mxnet_version
logger = logging.getLogger(__name__)
class SockeyeError(Exception):
pass
def check_version(version: str):
"""
Checks given version against code version and determines compatibility.
Throws if versions are incompatible.
:param version: Given version.
"""
code_version = parse_version(__version__)
given_version = parse_version(version)
check_condition(code_version[0] == given_version[0],
"Given release version (%s) does not match release code version (%s)" % (version, __version__))
check_condition(code_version[1] == given_version[1],
"Given major version (%s) does not match major code version (%s)" % (version, __version__))
def load_version(fname: str) -> str:
"""
Loads version from file.
:param fname: Name of file to load version from.
:return: Version string.
"""
if not os.path.exists(fname):
logger.warning("No version file found. Defaulting to 1.0.3")
return "1.0.3"
with open(fname) as inp:
return inp.read().strip()
def parse_version(version_string: str) -> Tuple[str, str, str]:
"""
Parse version string into release, major, minor version.
:param version_string: Version string.
:return: Tuple of strings.
"""
release, major, minor = version_string.split(".", 2)
return release, major, minor
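# Illustrative example (added for clarity): the version string is split on
# the first two dots, and check_version above only requires the release and
# major fields to match.
#
# >>> parse_version("1.10.2")
# ('1', '10', '2')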
def log_basic_info(args) -> None:
"""
Log basic information like version number, arguments, etc.
:param args: Arguments as returned by argparse.
"""
log_sockeye_version(logger)
log_mxnet_version(logger)
logger.info("Command: %s", " ".join(sys.argv))
logger.info("Arguments: %s", args)
def seedRNGs(seed: int) -> None:
"""
Seed the random number generators (Python, Numpy and MXNet)
:param seed: The random seed.
"""
np.random.seed(seed)
random.seed(seed)
mx.random.seed(seed)
def check_condition(condition: bool, error_message: str):
"""
Check the condition and if it is not met, exit with the given error message
and error_code, similar to assertions.
:param condition: Condition to check.
:param error_message: Error message to show to the user.
"""
if not condition:
raise SockeyeError(error_message)
def save_graph(symbol: mx.sym.Symbol, filename: str, hide_weights: bool = True):
"""
Dumps computation graph visualization to .pdf and .dot file.
:param symbol: The symbol representing the computation graph.
:param filename: The filename to save the graphic to.
:param hide_weights: If true the weights will not be shown.
"""
dot = mx.viz.plot_network(symbol, hide_weights=hide_weights)
dot.render(filename=filename)
def compute_lengths(sequence_data: mx.sym.Symbol) -> mx.sym.Symbol:
"""
    Computes sequence lengths of PAD_ID-padded data in sequence_data.
:param sequence_data: Input data. Shape: (batch_size, seq_len).
:return: Length data. Shape: (batch_size,).
"""
return mx.sym.sum(mx.sym.broadcast_not_equal(sequence_data, mx.sym.zeros((1,))), axis=1)
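# Illustrative NDArray sketch (the function above operates on symbols): the
# length of each PAD_ID-padded row is the count of its non-zero entries.
#
# >>> data = mx.nd.array([[3, 5, 2, 0, 0],
# ...                     [7, 0, 0, 0, 0]])
# >>> mx.nd.sum(data != 0, axis=1)
# [3. 1.]
# <NDArray 2 @cpu(0)>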
def save_params(arg_params: Mapping[str, mx.nd.NDArray], fname: str,
aux_params: Optional[Mapping[str, mx.nd.NDArray]] = None):
"""
Saves the parameters to a file.
:param arg_params: Mapping from parameter names to the actual parameters.
:param fname: The file name to store the parameters in.
:param aux_params: Optional mapping from parameter names to the auxiliary parameters.
"""
save_dict = {('arg:%s' % k): v.as_in_context(mx.cpu()) for k, v in arg_params.items()}
if aux_params is not None:
save_dict.update({('aux:%s' % k): v.as_in_context(mx.cpu()) for k, v in aux_params.items()})
mx.nd.save(fname, save_dict)
def load_params(fname: str) -> Tuple[Dict[str, mx.nd.NDArray], Dict[str, mx.nd.NDArray]]:
"""
Loads parameters from a file.
:param fname: The file containing the parameters.
:return: Mapping from parameter names to the actual parameters for both the arg parameters and the aux parameters.
"""
save_dict = mx.nd.load(fname)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return arg_params, aux_params
class Accuracy(mx.metric.EvalMetric):
"""
Calculates accuracy. Taken from MXNet and adapted to work with batch-major labels
(reshapes (batch_size, time) -> (batch_size * time).
Also allows defining an ignore_label/pad symbol
"""
def __init__(self,
name='accuracy',
output_names=None,
label_names=None,
ignore_label=None):
super(Accuracy, self).__init__(name=name,
output_names=output_names,
label_names=label_names,
ignore_label=ignore_label)
self.ignore_label = ignore_label
def update(self, labels, preds):
mx.metric.check_label_shapes(labels, preds)
for label, pred_label in zip(labels, preds):
if pred_label.shape != label.shape:
pred_label = mx.nd.argmax_channel(pred_label)
pred_label = pred_label.asnumpy().astype('int32')
label = mx.nd.reshape(label, shape=(pred_label.size,)).asnumpy().astype('int32')
mx.metric.check_label_shapes(label, pred_label)
if self.ignore_label is not None:
correct = ((pred_label.flat == label.flat) * (label.flat != self.ignore_label)).sum()
ignore = (label.flat == self.ignore_label).sum()
n = pred_label.size - ignore
else:
correct = (pred_label.flat == label.flat).sum()
n = pred_label.size
self.sum_metric += correct
self.num_inst += n
class OnlineMeanAndVariance:
def __init__(self) -> None:
self._count = 0
self._mean = 0.
self._M2 = 0.
def update(self, value: Union[float, int]) -> None:
self._count += 1
delta = value - self._mean
self._mean += delta / self._count
delta2 = value - self._mean
self._M2 += delta * delta2
@property
def count(self) -> int:
return self._count
@property
def mean(self) -> float:
return self._mean
@property
def variance(self) -> float:
if self._count < 2:
return float('nan')
else:
return self._M2 / self._count
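# Illustrative usage (added for clarity, not part of the original module):
# this is Welford's online algorithm; note that variance divides by count,
# i.e. it is the population variance.
#
# >>> stats = OnlineMeanAndVariance()
# >>> for v in (2.0, 4.0, 6.0):
# ...     stats.update(v)
# >>> stats.mean      # -> 4.0
# >>> stats.variance  # -> 8/3 = 2.66...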
def smallest_k(matrix: np.ndarray, k: int,
only_first_row: bool = False) -> Tuple[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Find the smallest elements in a numpy matrix.
:param matrix: Any matrix.
:param k: The number of smallest elements to return.
:param only_first_row: If true the search is constrained to the first row of the matrix.
:return: The row indices, column indices and values of the k smallest items in matrix.
"""
if only_first_row:
flatten = matrix[:1, :].flatten()
else:
flatten = matrix.flatten()
# args are the indices in flatten of the k smallest elements
args = np.argpartition(flatten, k)[:k]
# args are the indices in flatten of the sorted k smallest elements
args = args[np.argsort(flatten[args])]
# flatten[args] are the values for args
return np.unravel_index(args, matrix.shape), flatten[args]
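# Illustrative example (added for clarity):
#
# >>> m = np.array([[9, 2],
# ...               [1, 8]])
# >>> smallest_k(m, 2)
# ((array([1, 0]), array([0, 1])), array([1, 2]))
#
# i.e. the smallest value 1 sits at (row 1, col 0) and the second smallest
# value 2 at (row 0, col 1).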
def smallest_k_mx(matrix: mx.nd.NDArray, k: int,
only_first_row: bool = False) -> Tuple[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Find the smallest elements in a NDarray.
:param matrix: Any matrix.
:param k: The number of smallest elements to return.
:param only_first_row: If True the search is constrained to the first row of the matrix.
:return: The row indices, column indices and values of the k smallest items in matrix.
"""
if only_first_row:
matrix = mx.nd.reshape(matrix[0], shape=(1, -1))
# pylint: disable=unbalanced-tuple-unpacking
values, indices = mx.nd.topk(matrix, axis=None, k=k, ret_typ='both', is_ascend=True)
return np.unravel_index(indices.astype(np.int32).asnumpy(), matrix.shape), values
def chunks(some_list: List, n: int) -> Iterable[List]:
"""Yield successive n-sized chunks from l."""
for i in range(0, len(some_list), n):
yield some_list[i:i + n]
def get_tokens(line: str) -> Iterator[str]:
"""
Yields tokens from input string.
:param line: Input string.
:return: Iterator over tokens.
"""
for token in line.rstrip().split():
if len(token) > 0:
yield token
def smart_open(filename: str, mode: str = "rt", ftype: str = "auto", errors: str = 'replace'):
"""
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open
:param errors: Encoding error handling during reading. Defaults to 'replace'
:return: File descriptor
"""
if ftype == 'gzip' or ftype == 'gz' or (ftype == 'auto' and filename.endswith(".gz")):
return gzip.open(filename, mode=mode, encoding='utf-8', errors=errors)
else:
return open(filename, mode=mode, encoding='utf-8', errors=errors)
def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
"""
Uses matplotlib for creating a visualization of the attention matrix.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param filename: The file to which the attention visualization will be written to.
"""
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
assert attention_matrix.shape[0] == len(target_tokens)
plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
plt.xlabel("target")
plt.ylabel("source")
plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
plt.gca().set_xticklabels(target_tokens, rotation='vertical')
plt.gca().set_yticklabels(source_tokens)
plt.tight_layout()
plt.savefig(filename)
logger.info("Saved alignment visualization to " + filename)
def print_attention_text(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str],
threshold: float):
"""
Prints the attention matrix to standard out.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param threshold: The threshold for including an alignment link in the result.
"""
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for i, f_i in enumerate(source_tokens): # type: ignore
sys.stdout.write(" |")
for j in range(len(target_tokens)):
align_prob = attention_matrix[j, i]
if align_prob > threshold:
sys.stdout.write("(*)")
elif align_prob > 0.4:
sys.stdout.write("(?)")
else:
sys.stdout.write(" ")
sys.stdout.write(" | %s\n" % f_i)
sys.stdout.write(" ")
for _ in target_tokens:
sys.stdout.write("---")
sys.stdout.write("\n")
for k in range(max(map(len, target_tokens))):
sys.stdout.write(" ")
for word in target_tokens:
letter = word[k] if len(word) > k else " "
sys.stdout.write(" %s " % letter)
sys.stdout.write("\n")
sys.stdout.write("\n")
def get_alignments(attention_matrix: np.ndarray, threshold: float = .9) -> Iterator[Tuple[int, int]]:
"""
Yields hard alignments from an attention_matrix (target_length, source_length)
given a threshold.
:param attention_matrix: The attention matrix.
:param threshold: The threshold for including an alignment link in the result.
    :return: Generator yielding alignment links as (src_idx, trg_idx) tuples, e.g. (0, 0), (0, 1), (2, 1)...
"""
for src_idx in range(attention_matrix.shape[1]):
for trg_idx in range(attention_matrix.shape[0]):
if attention_matrix[trg_idx, src_idx] > threshold:
yield (src_idx, trg_idx)
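# Illustrative example (added for clarity): with the default threshold of
# 0.9, only near-peaked attention cells become hard alignment links.
#
# >>> att = np.array([[0.95, 0.05],   # target 0 attends to source 0
# ...                 [0.10, 0.92]])  # target 1 attends to source 1
# >>> list(get_alignments(att))
# [(0, 0), (1, 1)]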
def average_arrays(arrays: List[mx.nd.NDArray]) -> mx.nd.NDArray:
"""
Take a list of arrays of the same shape and take the element wise average.
:param arrays: A list of NDArrays with the same shape that will be averaged.
:return: The average of the NDArrays in the same context as arrays[0].
"""
if len(arrays) == 1:
return arrays[0]
check_condition(all(arrays[0].shape == a.shape for a in arrays), "nd array shapes do not match")
new_array = mx.nd.zeros(arrays[0].shape, dtype=arrays[0].dtype, ctx=arrays[0].context)
for a in arrays:
new_array += a.as_in_context(new_array.context)
new_array /= len(arrays)
return new_array
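# Illustrative example (added for clarity; this is the primitive behind
# checkpoint averaging):
#
# >>> a = mx.nd.array([1., 3.])
# >>> b = mx.nd.array([3., 5.])
# >>> average_arrays([a, b]).asnumpy()
# array([2., 4.], dtype=float32)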
def get_num_gpus() -> int:
"""
Gets the number of GPUs available on the host (depends on nvidia-smi).
:return: The number of GPUs on the system.
"""
if shutil.which("nvidia-smi") is None:
logger.warning("Couldn't find nvidia-smi, therefore we assume no GPUs are available.")
return 0
sp = subprocess.Popen(['nvidia-smi', '-L'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_str = sp.communicate()[0].decode("utf-8")
num_gpus = len(out_str.rstrip("\n").split("\n"))
return num_gpus
def get_gpu_memory_usage(ctx: List[mx.context.Context]) -> Optional[Dict[int, Tuple[int, int]]]:
"""
Returns used and total memory for GPUs identified by the given context list.
:param ctx: List of MXNet context devices.
:return: Dictionary of device id mapping to a tuple of (memory used, memory total).
"""
if isinstance(ctx, mx.context.Context):
ctx = [ctx]
ctx = [c for c in ctx if c.device_type == 'gpu']
if not ctx:
return None
if shutil.which("nvidia-smi") is None:
logger.warning("Couldn't find nvidia-smi, therefore we assume no GPUs are available.")
return {}
ids = [str(c.device_id) for c in ctx]
query = "--query-gpu=index,memory.used,memory.total"
format = "--format=csv,noheader,nounits"
sp = subprocess.Popen(['nvidia-smi', query, format, "-i", ",".join(ids)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = sp.communicate()[0].decode("utf-8").rstrip().split("\n")
memory_data = {}
for line in result:
gpu_id, mem_used, mem_total = line.split(",")
memory_data[int(gpu_id)] = (int(mem_used), int(mem_total))
return memory_data
def log_gpu_memory_usage(memory_data: Dict[int, Tuple[int, int]]):
log_str = " ".join("GPU %d: %d/%d MB (%.2f%%)" % (k, v[0], v[1], v[0] * 100.0/v[1]) for k, v in memory_data.items())
logger.info(log_str)
def expand_requested_device_ids(requested_device_ids: List[int]) -> List[int]:
"""
Transform a list of device id requests to concrete device ids. For example on a host with 8 GPUs when requesting
[-4, 3, 5] you will get [0, 1, 2, 3, 4, 5]. Namely you will get device 3 and 5, as well as 3 other available
device ids (starting to fill up from low to high device ids).
:param requested_device_ids: The requested device ids, each number is either negative indicating the number of GPUs
that will be allocated, or positive indicating we want to acquire a specific device id.
:return: A list of device ids.
"""
num_gpus_available = get_num_gpus()
return _expand_requested_device_ids(requested_device_ids, num_gpus_available)
def _expand_requested_device_ids(requested_device_ids: List[int], num_gpus_available: int) -> List[int]:
if num_gpus_available == 0:
raise RuntimeError("Can not acquire GPU, as no GPUs were found on this machine.")
num_arbitrary_device_ids = 0
device_ids = []
for device_id in requested_device_ids:
if device_id < 0:
num_gpus = -device_id
num_arbitrary_device_ids += num_gpus
else:
device_ids.append(device_id)
num_gpus_requested = len(device_ids) + num_arbitrary_device_ids
if num_gpus_requested > num_gpus_available:
raise ValueError("Requested %d GPUs, but only %d are available." % (num_gpus_requested, num_gpus_available))
remaining_device_ids = set(range(num_gpus_available)) - set(device_ids)
logger.info("Attempting to acquire %d GPUs of %d GPUs.", num_gpus_requested, num_gpus_available)
return device_ids + list(remaining_device_ids)[:num_arbitrary_device_ids]
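# Illustrative example (added for clarity) on a hypothetical host with 8
# GPUs: requesting [-4, 3, 5] pins devices 3 and 5 and fills four arbitrary
# slots from the remaining ids.
#
# >>> _expand_requested_device_ids([-4, 3, 5], num_gpus_available=8)
# [3, 5, 0, 1, 2, 4]  # order of the arbitrary ids is an implementation detail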
@contextmanager
def acquire_gpus(requested_device_ids: List[int], lock_dir: str = "/tmp",
retry_wait_min: int = 10, retry_wait_rand: int = 60,
num_gpus_available: Optional[int]=None):
"""
Acquire a number of GPUs in a transactional way. This method should be used inside a `with` statement.
Will try to acquire all the requested number of GPUs. If currently
not enough GPUs are available all locks will be released and we wait until we retry. Will retry until enough
GPUs become available.
:param requested_device_ids: The requested device ids, each number is either negative indicating the number of GPUs
that will be allocated, or positive indicating we want to acquire a specific device id.
:param lock_dir: The directory for storing the lock file.
:param retry_wait_min: The minimum number of seconds to wait between retries.
:param retry_wait_rand: Randomly add between 0 and `retry_wait_rand` seconds to the wait time.
:param num_gpus_available: The number of GPUs available, if None we will call get_num_gpus().
:return: yields a list of GPU ids.
"""
if num_gpus_available is None:
num_gpus_available = get_num_gpus()
if num_gpus_available == 0:
raise RuntimeError("Can not acquire GPU, as no GPUs were found on this machine.")
if not os.path.exists(lock_dir):
raise IOError("Lock directory %s does not exist." % lock_dir)
if not os.access(lock_dir, os.W_OK):
raise IOError("Lock directory %s is not writeable." % lock_dir)
# split the device ids into the specific ids requested and count up the number of arbitrary ids we want
# e.g. device_ids = [-3, 2, 5, 7, -5] means we want to acquire device 2, 5 and 7 plus 8 other devices.
specific_device_ids = set() # type: Set[int]
num_arbitrary_device_ids = 0
for device_id in requested_device_ids:
if device_id < 0:
num_gpus = -device_id
num_arbitrary_device_ids += num_gpus
else:
if device_id in specific_device_ids:
raise ValueError("Requested GPU %d twice." % device_id)
specific_device_ids.add(device_id)
# make sure we have enough GPUs available
num_gpus_requested = len(specific_device_ids) + num_arbitrary_device_ids
if num_gpus_requested > num_gpus_available:
raise ValueError("Requested %d GPUs, but only %d are available." % (num_gpus_requested, num_gpus_available))
logger.info("Attempting to acquire %d GPUs of %d GPUs. The requested devices are: %s",
num_gpus_requested, num_gpus_available, str(requested_device_ids))
# note: it's important to first allocate the specific device ids and then the others to not deadlock ourselves.
# for specific device ids we just have the device id itself as a candidate
candidates_to_request = [[device_id] for device_id in specific_device_ids]
# for the arbitrary device ids we take all remaining device ids as a list of candidates
remaining_device_ids = [device_id for device_id in range(num_gpus_available)
if device_id not in specific_device_ids]
candidates_to_request += [remaining_device_ids for _ in range(num_arbitrary_device_ids)]
while True:
with ExitStack() as exit_stack:
acquired_gpus = [] # type: List[int]
any_failed = False
for candidates in candidates_to_request:
gpu_id = exit_stack.enter_context(GpuFileLock(candidates=candidates, lock_dir=lock_dir)) # type: ignore
if gpu_id is not None:
acquired_gpus.append(gpu_id)
else:
if len(candidates) == 1:
logger.info("Could not acquire GPU %d. It's currently locked.", candidates[0])
any_failed = True
break
if not any_failed:
try:
yield acquired_gpus
except:
raise
return
# couldn't acquire all GPUs, let's wait and try again later
# randomize so that multiple processes starting at the same time don't retry at a similar point in time
if retry_wait_rand > 0:
retry_wait_actual = retry_wait_min + random.randint(0, retry_wait_rand)
else:
retry_wait_actual = retry_wait_min
logger.info("Not enough GPUs available will try again in %ss." % retry_wait_actual)
time.sleep(retry_wait_actual)
class GpuFileLock:
"""
Acquires a single GPU by locking a file (therefore this assumes that everyone using GPUs calls this method and
shares the lock directory). Sets target to a GPU id or None if none is available.
:param candidates: List of candidate device ids to try to acquire.
:param lock_dir: The directory for storing the lock file.
"""
def __init__(self, candidates: List[int], lock_dir: str) -> None:
self.candidates = candidates
self.lock_dir = lock_dir
self.lock_file = None # type: Optional[IO[Any]]
        self.lockfile_path = None  # type: Optional[str]
self.gpu_id = None # type: Optional[int]
self._acquired_lock = False
def __enter__(self) -> Optional[int]:
for gpu_id in self.candidates:
lockfile_path = os.path.join(self.lock_dir, "sockeye.gpu%d.lock" % gpu_id)
lock_file = open(lockfile_path, 'w')
try:
# exclusive non-blocking lock
fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
# got the lock, let's write our PID into it:
lock_file.write("%d\n" % os.getpid())
lock_file.flush()
self._acquired_lock = True
self.gpu_id = gpu_id
self.lock_file = lock_file
self.lockfile_path = lockfile_path
logger.info("Acquired GPU %d." % gpu_id)
return gpu_id
except IOError as e:
# raise on unrelated IOErrors
if e.errno != errno.EAGAIN:
logger.error("Failed acquiring GPU lock.", exc_info=True)
raise
else:
logger.debug("GPU %d is currently locked.", gpu_id)
return None
def __exit__(self, exc_type, exc_val, exc_tb):
if self.gpu_id is not None:
logger.info("Releasing GPU %d.", self.gpu_id)
if self.lock_file is not None:
if self._acquired_lock:
fcntl.flock(self.lock_file, fcntl.LOCK_UN)
self.lock_file.close()
os.remove(self.lockfile_path)
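# Illustrative usage sketch (added for clarity, not part of the original
# module); the lock directory below is an assumption:
#
# with GpuFileLock(candidates=[0, 1], lock_dir="/tmp") as gpu_id:
#     if gpu_id is not None:  # None means all candidates were locked
#         ctx = mx.gpu(gpu_id)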
def read_metrics_file(path: str) -> List[Dict[str, Any]]:
"""
    Reads a metrics file line by line and returns a list of key-value mappings.
    :param path: File to read metric values from.
    :return: List of dictionaries mapping metric names (e.g. perplexity-train) to values.
"""
metrics = []
with open(path) as fin:
for i, line in enumerate(fin, 1):
fields = line.strip().split('\t')
checkpoint = int(fields[0])
check_condition(i == checkpoint,
"Line (%d) and loaded checkpoint (%d) do not align." % (i, checkpoint))
metric = dict()
for field in fields[1:]:
key, value = field.split("=", 1)
metric[key] = float(value)
metrics.append(metric)
return metrics
def write_metrics_file(metrics: List[Dict[str, Any]], path: str):
"""
Write metrics data to tab-separated file.
:param metrics: metrics data.
:param path: Path to write to.
"""
with open(path, 'w') as metrics_out:
for checkpoint, metric_dict in enumerate(metrics, 1):
metrics_str = "\t".join(["%s=%.6f" % (name, value) for name, value in sorted(metric_dict.items())])
metrics_out.write("%d\t%s\n" % (checkpoint, metrics_str))
def get_validation_metric_points(model_path: str, metric: str):
"""
Returns tuples of value and checkpoint for given metric from metrics file at model_path.
:param model_path: Model path containing .metrics file.
:param metric: Metric values to extract.
:return: List of tuples (value, checkpoint).
"""
metrics_path = os.path.join(model_path, C.METRICS_NAME)
data = read_metrics_file(metrics_path)
return [(d['%s-val' % metric], cp) for cp, d in enumerate(data, 1)]
class PrintValue(mx.operator.CustomOp):
"""
Custom operator that takes a symbol, prints its value to stdout and
propagates the value unchanged. Useful for debugging.
Use it as:
my_sym = mx.sym.Custom(op_type="PrintValue", data=my_sym, print_name="My symbol")
Additionally you can use the optional arguments 'use_logger=True' for using
the system logger and 'print_grad=True' for printing information about the
gradient (out_grad, i.e. "upper part" of the graph).
"""
def __init__(self, print_name, print_grad: str, use_logger: str) -> None:
super().__init__()
self.print_name = print_name
# Note that all the parameters are serialized as strings
self.print_grad = (print_grad == "True")
self.use_logger = (use_logger == "True")
def __print_nd__(self, nd: mx.nd.array, label: str):
intro = "%s %s - shape %s" % (label, self.print_name, str(nd.shape))
if self.use_logger:
logger.info(intro)
logger.info(str(nd.asnumpy()))
else:
print(">>>>> ", intro)
print(nd.asnumpy())
def forward(self, is_train, req, in_data, out_data, aux):
self.__print_nd__(in_data[0], "Symbol")
self.assign(out_data[0], req[0], in_data[0])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
if self.print_grad:
self.__print_nd__(out_grad[0], "Grad")
self.assign(in_grad[0], req[0], out_grad[0])
@mx.operator.register("PrintValue")
class PrintValueProp(mx.operator.CustomOpProp):
def __init__(self, print_name: str, print_grad: bool = False, use_logger: bool = False) -> None:
super().__init__(need_top_grad=True)
self.print_name = print_name
self.print_grad = print_grad
self.use_logger = use_logger
def list_arguments(self):
return ["data"]
def list_outputs(self):
return ["output"]
def infer_shape(self, in_shape):
return in_shape, in_shape, []
def infer_type(self, in_type):
return in_type, in_type, []
def create_operator(self, ctx, shapes, dtypes):
return PrintValue(self.print_name,
print_grad=self.print_grad,
use_logger=self.use_logger)
def grouper(iterable: Iterable, size: int) -> Iterable:
"""
Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
    :param iterable: A sequence of inputs.
    :param size: The maximum chunk size.
:return: Sequence of chunks.
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, size))
if not chunk:
return
yield chunk
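# Illustrative example (added for clarity): unlike the itertools "grouper"
# recipe, this variant does not pad the last chunk.
#
# >>> list(grouper("abcde", 2))
# [['a', 'b'], ['c', 'd'], ['e']]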
| [
"str",
"str",
"str",
"int",
"bool",
"str",
"mx.sym.Symbol",
"str",
"mx.sym.Symbol",
"Mapping[str, mx.nd.NDArray]",
"str",
"str",
"Union[float, int]",
"np.ndarray",
"int",
"mx.nd.NDArray",
"int",
"List",
"int",
"str",
"str",
"np.ndarray",
"List[str]",
"List[str]",
"str",
"np.ndarray",
"List[str]",
"List[str]",
"float",
"np.ndarray",
"List[mx.nd.NDArray]",
"List[mx.context.Context]",
"Dict[int, Tuple[int, int]]",
"List[int]",
"List[int]",
"int",
"List[int]",
"List[int]",
"str",
"str",
"List[Dict[str, Any]]",
"str",
"str",
"str",
"str",
"str",
"mx.nd.array",
"str",
"str",
"Iterable",
"int"
] | [
1169,
1801,
2167,
2780,
3014,
3035,
3373,
3398,
3837,
4196,
4232,
4874,
7240,
7770,
7785,
8696,
8714,
9484,
9493,
9665,
9926,
10846,
10873,
10899,
10920,
11940,
11967,
11993,
12040,
13329,
13973,
15207,
16411,
16642,
17407,
17438,
18385,
23302,
23323,
25100,
25930,
25958,
26428,
26441,
27439,
27456,
27729,
27749,
28505,
29213,
29229
] | [
1172,
1804,
2170,
2783,
3018,
3038,
3386,
3401,
3850,
4223,
4235,
4877,
7257,
7780,
7788,
8709,
8717,
9488,
9496,
9668,
9929,
10856,
10882,
10908,
10923,
11950,
11976,
12002,
12045,
13339,
13992,
15231,
16437,
16651,
17416,
17441,
18394,
23311,
23326,
25103,
25950,
25961,
26431,
26444,
27442,
27459,
27740,
27752,
28508,
29221,
29232
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/sockeye/vocab.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import json
import logging
import os
import pickle
from collections import Counter
from contextlib import ExitStack
from itertools import chain, islice
from typing import Dict, Iterable, List, Mapping, Optional, Tuple
from . import utils
from . import constants as C
from . import log
logger = logging.getLogger(__name__)
Vocab = Dict[str, int]
InverseVocab = Dict[int, str]
def build_from_paths(paths: List[str], num_words: int = 50000, min_count: int = 1) -> Vocab:
"""
    Creates vocabulary from paths to files in sentence-per-line format. A sentence is just a whitespace delimited
list of tokens. Note that special symbols like the beginning of sentence (BOS) symbol will be added to the
vocabulary.
:param paths: List of paths to files with one sentence per line.
:param num_words: Maximum number of words in the vocabulary.
:param min_count: Minimum occurrences of words to be included in the vocabulary.
:return: Word-to-id mapping.
"""
with ExitStack() as stack:
logger.info("Building vocabulary from dataset(s): %s", paths)
files = (stack.enter_context(utils.smart_open(path)) for path in paths)
return build_vocab(chain(*files), num_words, min_count)
def build_vocab(data: Iterable[str], num_words: int = 50000, min_count: int = 1) -> Vocab:
"""
Creates a vocabulary mapping from words to ids. Increasing integer ids are assigned by word frequency,
using lexical sorting as a tie breaker. The only exception to this are special symbols such as the padding symbol
(PAD).
:param data: Sequence of sentences containing whitespace delimited tokens.
:param num_words: Maximum number of words in the vocabulary.
:param min_count: Minimum occurrences of words to be included in the vocabulary.
:return: Word-to-id mapping.
"""
vocab_symbols_set = set(C.VOCAB_SYMBOLS)
raw_vocab = Counter(token for line in data for token in utils.get_tokens(line)
if token not in vocab_symbols_set)
    # Words with the same count are ordered reverse-alphabetically.
    # Not an issue since we only care about consistency.
pruned_vocab = sorted(((c, w) for w, c in raw_vocab.items() if c >= min_count), reverse=True)
vocab = islice((w for c, w in pruned_vocab), num_words)
word_to_id = {word: idx for idx, word in enumerate(chain(C.VOCAB_SYMBOLS, vocab))}
logger.info("Vocabulary: types: %d/%d/%d/%d (initial/min_pruned/max_pruned/+special) " +
"[min_frequency=%d, max_num_types=%d]",
len(raw_vocab), len(pruned_vocab), len(word_to_id) - len(C.VOCAB_SYMBOLS),
len(word_to_id), min_count, num_words)
# Important: pad symbol becomes index 0
assert word_to_id[C.PAD_SYMBOL] == C.PAD_ID
return word_to_id
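# Illustrative toy example (added for clarity; the exact ids and surface
# forms of the special symbols depend on C.VOCAB_SYMBOLS, assumed here to be
# the four symbols <pad>, <unk>, <s>, </s> with PAD first):
#
# >>> build_vocab(["a b b", "b c"])
# {'<pad>': 0, '<unk>': 1, '<s>': 2, '</s>': 3, 'b': 4, 'c': 5, 'a': 6}
#
# 'b' (count 3) precedes 'c' and 'a' (count 1 each), which are tie-broken
# reverse-alphabetically.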
def vocab_to_pickle(vocab: Mapping, path: str):
"""
Saves vocabulary in pickle format.
:param vocab: Vocabulary mapping.
:param path: Output file path.
"""
with open(path, 'wb') as out:
pickle.dump(vocab, out)
logger.info('Vocabulary saved to "%s"', path)
def vocab_to_json(vocab: Mapping, path: str):
"""
Saves vocabulary in human-readable json.
:param vocab: Vocabulary mapping.
:param path: Output file path.
"""
with open(path, "w", encoding=C.VOCAB_ENCODING) as out:
json.dump(vocab, out, indent=4, ensure_ascii=False)
logger.info('Vocabulary saved to "%s"', path)
def vocab_from_json_or_pickle(path) -> Vocab:
"""
Try loading the json version of the vocab and fall back to pickle for backwards compatibility.
:param path: Path to vocab without the json suffix. If it exists the `path` + '.json' will be loaded as a JSON
object and otherwise `path` is loaded as a pickle object.
:return: The loaded vocabulary.
"""
if os.path.exists(path + C.JSON_SUFFIX):
return vocab_from_json(path + C.JSON_SUFFIX)
else:
return vocab_from_pickle(path)
def vocab_from_pickle(path: str) -> Vocab:
"""
    Loads vocabulary from a pickle file.
:param path: Path to pickle file containing the vocabulary.
:return: The loaded vocabulary.
"""
with open(path, 'rb') as inp:
vocab = pickle.load(inp)
logger.info('Vocabulary (%d words) loaded from "%s"', len(vocab), path)
return vocab
def vocab_from_json(path: str, encoding: str = C.VOCAB_ENCODING) -> Vocab:
"""
    Loads vocabulary from a JSON file.
:param path: Path to json file containing the vocabulary.
:return: The loaded vocabulary.
"""
with open(path, encoding=encoding) as inp:
vocab = json.load(inp)
logger.info('Vocabulary (%d words) loaded from "%s"', len(vocab), path)
return vocab
def load_or_create_vocab(data: str, vocab_path: Optional[str],
num_words: int, word_min_count: int):
return build_from_paths(paths=[data],
num_words=num_words,
min_count=word_min_count) if vocab_path is None else vocab_from_json(vocab_path)
def load_or_create_vocabs(source: str, target: str, source_vocab_path: Optional[str], target_vocab_path: Optional[str],
shared_vocab: bool,
num_words_source: int, word_min_count_source: int,
num_words_target: int, word_min_count_target: int) -> Tuple[Vocab, Vocab]:
if shared_vocab:
if source_vocab_path and target_vocab_path:
vocab_source = vocab_from_json(source_vocab_path)
vocab_target = vocab_from_json(target_vocab_path)
utils.check_condition(are_identical(vocab_source, vocab_target),
"Shared vocabulary requires identical source and target vocabularies. "
"The vocabularies in %s and %s are not identical." % (source_vocab_path,
target_vocab_path))
elif source_vocab_path is None and target_vocab_path is None:
utils.check_condition(num_words_source == num_words_target,
"A shared vocabulary requires the number of source and target words to be the same.")
utils.check_condition(word_min_count_source == word_min_count_target,
"A shared vocabulary requires the minimum word count for source and target "
"to be the same.")
vocab_source = vocab_target = build_from_paths(paths=[source, target],
num_words=num_words_source,
min_count=word_min_count_source)
else:
vocab_path = source_vocab_path if source_vocab_path is not None else target_vocab_path
logger.info("Using %s as a shared source/target vocabulary." % vocab_path)
vocab_source = vocab_target = vocab_from_json(vocab_path)
else:
vocab_source = load_or_create_vocab(source, source_vocab_path, num_words_source, word_min_count_source)
vocab_target = load_or_create_vocab(target, target_vocab_path, num_words_target, word_min_count_target)
return vocab_source, vocab_target
def reverse_vocab(vocab: Mapping) -> InverseVocab:
"""
Returns value-to-key mapping from key-to-value-mapping.
:param vocab: Key to value mapping.
:return: A mapping from values to keys.
"""
return {v: k for k, v in vocab.items()}
def are_identical(*vocabs: Vocab):
assert len(vocabs) > 0, "At least one vocabulary needed."
return all(set(vocab.items()) == set(vocabs[0].items()) for vocab in vocabs)
def main():
from . import arguments
params = argparse.ArgumentParser(description='CLI to build source and target vocab(s).')
arguments.add_build_vocab_args(params)
args = params.parse_args()
num_words, num_words_other = args.num_words
utils.check_condition(num_words == num_words_other,
"Vocabulary CLI only allows a common value for --num-words")
word_min_count, word_min_count_other = args.word_min_count
utils.check_condition(word_min_count == word_min_count_other,
"Vocabulary CLI only allows a common value for --word-min-count")
global logger
logger = log.setup_main_logger("build_vocab", file_logging=True, console=True,
path="%s.%s" % (args.output, C.LOG_NAME))
vocab = build_from_paths(args.inputs, num_words=num_words, min_count=word_min_count)
logger.info("Vocabulary size: %d ", len(vocab))
vocab_to_json(vocab, args.output + C.JSON_SUFFIX)
if __name__ == "__main__":
main()
| [
"List[str]",
"Iterable[str]",
"Mapping",
"str",
"Mapping",
"str",
"str",
"str",
"str",
"Optional[str]",
"int",
"int",
"str",
"str",
"Optional[str]",
"Optional[str]",
"bool",
"int",
"int",
"int",
"int",
"Mapping",
"Vocab"
] | [
991,
1836,
3429,
3444,
3726,
3741,
4614,
4981,
5394,
5411,
5462,
5483,
5725,
5738,
5762,
5796,
5851,
5901,
5929,
5978,
6006,
7969,
8229
] | [
1000,
1849,
3436,
3447,
3733,
3744,
4617,
4984,
5397,
5424,
5465,
5486,
5728,
5741,
5775,
5809,
5855,
5904,
5932,
5981,
6009,
7976,
8234
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/common.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import os
import random
import sys
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import Optional, Tuple
from unittest.mock import patch
import mxnet as mx
import numpy as np
import sockeye.average
import sockeye.constants as C
import sockeye.evaluate
import sockeye.lexicon
import sockeye.prepare_data
import sockeye.train
import sockeye.translate
import sockeye.utils
from sockeye.chrf import corpus_chrf
from sockeye.evaluate import raw_corpus_bleu
logger = logging.getLogger(__name__)
def gaussian_vector(shape, return_symbol=False):
"""
Generates random normal tensors (diagonal covariance)
:param shape: shape of the tensor.
    :param return_symbol: True if the result should be a Symbol, False if it should be a Numpy array.
:return: A gaussian tensor.
"""
return mx.sym.random_normal(shape=shape) if return_symbol else np.random.normal(size=shape)
def integer_vector(shape, max_value, return_symbol=False):
"""
Generates a random positive integer tensor
:param shape: shape of the tensor.
:param max_value: maximum integer value.
    :param return_symbol: True if the result should be a Symbol, False if it should be a Numpy array.
:return: A random integer tensor.
"""
return mx.sym.round(mx.sym.random_uniform(shape=shape) * max_value) if return_symbol \
else np.round(np.random.uniform(size=shape) * max_value)
def uniform_vector(shape, min_value=0, max_value=1, return_symbol=False):
"""
Generates a uniformly random tensor
:param shape: shape of the tensor
:param min_value: minimum possible value
:param max_value: maximum possible value (exclusive)
:param return_symbol: True if the result should be a mx.sym.Symbol, False if it should be a Numpy array
    :return: A uniformly random tensor.
"""
return mx.sym.random_uniform(low=min_value, high=max_value, shape=shape) if return_symbol \
else np.random.uniform(low=min_value, high=max_value, size=shape)
def generate_random_sentence(vocab_size, max_len):
"""
Generates a random "sentence" as a list of integers.
:param vocab_size: Number of words in the "vocabulary". Note that due to
the inclusion of special words (BOS, EOS, UNK) this does *not*
correspond to the maximum possible value.
:param max_len: maximum sentence length.
"""
length = random.randint(1, max_len)
# Due to the special words, the actual words start at index 3 and go up to vocab_size+2
return [random.randint(3, vocab_size + 2) for _ in range(length)]
_DIGITS = "0123456789"
def generate_digits_file(source_path: str,
target_path: str,
line_count: int = 100,
line_length: int = 9,
sort_target: bool = False,
line_count_empty: int = 0,
seed=13):
assert line_count_empty <= line_count
random_gen = random.Random(seed)
with open(source_path, "w") as source_out, open(target_path, "w") as target_out:
all_digits = []
for _ in range(line_count - line_count_empty):
digits = [random_gen.choice(_DIGITS) for _ in range(random_gen.randint(1, line_length))]
all_digits.append(digits)
for _ in range(line_count_empty):
all_digits.append([])
random_gen.shuffle(all_digits)
for digits in all_digits:
print(" ".join(digits), file=source_out)
if sort_target:
digits.sort()
print(" ".join(digits), file=target_out)
def generate_fast_align_lex(lex_path: str):
"""
Generate a fast_align format lex table for digits.
:param lex_path: Path to write lex table.
"""
with open(lex_path, "w") as lex_out:
for digit in _DIGITS:
print("{0}\t{0}\t0".format(digit), file=lex_out)
_LEXICON_PARAMS_COMMON = "-i {input} -m {model} -k 1 -o {json} {quiet}"
@contextmanager
def tmp_digits_dataset(prefix: str,
train_line_count: int, train_max_length: int,
dev_line_count: int, dev_max_length: int,
test_line_count: int, test_line_count_empty: int, test_max_length: int,
sort_target: bool = False,
seed_train: int = 13, seed_dev: int = 13):
with TemporaryDirectory(prefix=prefix) as work_dir:
# Simple digits files for train/dev data
train_source_path = os.path.join(work_dir, "train.src")
train_target_path = os.path.join(work_dir, "train.tgt")
dev_source_path = os.path.join(work_dir, "dev.src")
dev_target_path = os.path.join(work_dir, "dev.tgt")
test_source_path = os.path.join(work_dir, "test.src")
test_target_path = os.path.join(work_dir, "test.tgt")
generate_digits_file(train_source_path, train_target_path, train_line_count,
train_max_length, sort_target=sort_target, seed=seed_train)
generate_digits_file(dev_source_path, dev_target_path, dev_line_count, dev_max_length, sort_target=sort_target,
seed=seed_dev)
generate_digits_file(test_source_path, test_target_path, test_line_count, test_max_length,
line_count_empty=test_line_count_empty, sort_target=sort_target, seed=seed_dev)
data = {'work_dir': work_dir,
'source': train_source_path,
'target': train_target_path,
'validation_source': dev_source_path,
'validation_target': dev_target_path,
'test_source': test_source_path,
'test_target': test_target_path}
yield data
_TRAIN_PARAMS_COMMON = "--use-cpu --max-seq-len {max_len} --source {train_source} --target {train_target}" \
" --validation-source {dev_source} --validation-target {dev_target} --output {model} {quiet}"
_PREPARE_DATA_COMMON = " --max-seq-len {max_len} --source {train_source} --target {train_target}" \
" --output {output} {quiet}"
_TRAIN_PARAMS_PREPARED_DATA_COMMON = "--use-cpu --max-seq-len {max_len} --prepared-data {prepared_data}" \
" --validation-source {dev_source} --validation-target {dev_target} " \
"--output {model} {quiet}"
_TRANSLATE_PARAMS_COMMON = "--use-cpu --models {model} --input {input} --output {output} {quiet}"
_TRANSLATE_PARAMS_RESTRICT = "--restrict-lexicon {json}"
_EVAL_PARAMS_COMMON = "--hypotheses {hypotheses} --references {references} --metrics {metrics} {quiet}"
def run_train_translate(train_params: str,
translate_params: str,
translate_params_equiv: Optional[str],
train_source_path: str,
train_target_path: str,
dev_source_path: str,
dev_target_path: str,
test_source_path: str,
test_target_path: str,
use_prepared_data: bool = False,
max_seq_len: int = 10,
restrict_lexicon: bool = False,
work_dir: Optional[str] = None,
quiet: bool = False) -> Tuple[float, float, float, float]:
"""
    Train a model and translate a test set. Report validation perplexity and BLEU.
:param train_params: Command line args for model training.
:param translate_params: First command line args for translation.
:param translate_params_equiv: Second command line args for translation. Should produce the same outputs
:param train_source_path: Path to the source file.
:param train_target_path: Path to the target file.
:param dev_source_path: Path to the development source file.
:param dev_target_path: Path to the development target file.
:param test_source_path: Path to the test source file.
:param test_target_path: Path to the test target file.
:param use_prepared_data: Whether to use the prepared data functionality.
:param max_seq_len: The maximum sequence length.
:param restrict_lexicon: Additional translation run with top-k lexicon-based vocabulary restriction.
:param work_dir: The directory to store the model and other outputs in.
:param quiet: Suppress the console output of training and decoding.
:return: A tuple containing perplexity, bleu scores for standard and reduced vocab decoding, chrf score.
"""
if quiet:
quiet_arg = "--quiet"
else:
quiet_arg = ""
with TemporaryDirectory(dir=work_dir, prefix="test_train_translate.") as work_dir:
# Optionally create prepared data directory
if use_prepared_data:
prepared_data_path = os.path.join(work_dir, "prepared_data")
params = "{} {}".format(sockeye.prepare_data.__file__,
_PREPARE_DATA_COMMON.format(train_source=train_source_path,
train_target=train_target_path,
output=prepared_data_path,
max_len=max_seq_len,
quiet=quiet_arg))
logger.info("Creating prepared data folder.")
with patch.object(sys, "argv", params.split()):
sockeye.prepare_data.main()
# Train model
model_path = os.path.join(work_dir, "model")
params = "{} {} {}".format(sockeye.train.__file__,
_TRAIN_PARAMS_PREPARED_DATA_COMMON.format(prepared_data=prepared_data_path,
dev_source=dev_source_path,
dev_target=dev_target_path,
model=model_path,
max_len=max_seq_len,
quiet=quiet_arg),
train_params)
logger.info("Starting training with parameters %s.", train_params)
with patch.object(sys, "argv", params.split()):
sockeye.train.main()
else:
# Train model
model_path = os.path.join(work_dir, "model")
params = "{} {} {}".format(sockeye.train.__file__,
_TRAIN_PARAMS_COMMON.format(train_source=train_source_path,
train_target=train_target_path,
dev_source=dev_source_path,
dev_target=dev_target_path,
model=model_path,
max_len=max_seq_len,
quiet=quiet_arg),
train_params)
logger.info("Starting training with parameters %s.", train_params)
with patch.object(sys, "argv", params.split()):
sockeye.train.main()
logger.info("Translating with parameters %s.", translate_params)
# Translate corpus with the 1st params
out_path = os.path.join(work_dir, "out.txt")
params = "{} {} {}".format(sockeye.translate.__file__,
_TRANSLATE_PARAMS_COMMON.format(model=model_path,
input=test_source_path,
output=out_path,
quiet=quiet_arg),
translate_params)
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# Translate corpus with the 2nd params
if translate_params_equiv is not None:
out_path_equiv = os.path.join(work_dir, "out_equiv.txt")
params = "{} {} {}".format(sockeye.translate.__file__,
_TRANSLATE_PARAMS_COMMON.format(model=model_path,
input=test_source_path,
output=out_path_equiv,
quiet=quiet_arg),
translate_params_equiv)
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# read-in both outputs, ensure they are the same
with open(out_path, 'rt') as f:
lines = f.readlines()
with open(out_path_equiv, 'rt') as f:
lines_equiv = f.readlines()
assert all(a == b for a, b in zip(lines, lines_equiv))
# Test restrict-lexicon
out_restrict_path = os.path.join(work_dir, "out-restrict.txt")
if restrict_lexicon:
# fast_align lex table
lex_path = os.path.join(work_dir, "lex")
generate_fast_align_lex(lex_path)
# Top-K JSON
json_path = os.path.join(work_dir, "json")
params = "{} {}".format(sockeye.lexicon.__file__,
_LEXICON_PARAMS_COMMON.format(input=lex_path,
model=model_path,
json=json_path,
quiet=quiet_arg))
with patch.object(sys, "argv", params.split()):
sockeye.lexicon.main()
# Translate corpus with restrict-lexicon
params = "{} {} {} {}".format(sockeye.translate.__file__,
_TRANSLATE_PARAMS_COMMON.format(model=model_path,
input=test_source_path,
output=out_restrict_path,
quiet=quiet_arg),
translate_params,
_TRANSLATE_PARAMS_RESTRICT.format(json=json_path))
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# test averaging
points = sockeye.average.find_checkpoints(model_path=model_path,
size=1,
strategy='best',
metric=C.PERPLEXITY)
assert len(points) > 0
averaged_params = sockeye.average.average(points)
assert averaged_params
# get best validation perplexity
metrics = sockeye.utils.read_metrics_file(path=os.path.join(model_path, C.METRICS_NAME))
perplexity = min(m[C.PERPLEXITY + '-val'] for m in metrics)
hypotheses = open(out_path, "r").readlines()
references = open(test_target_path, "r").readlines()
assert len(hypotheses) == len(references)
# compute metrics
bleu = raw_corpus_bleu(hypotheses=hypotheses, references=references, offset=0.01)
chrf = corpus_chrf(hypotheses=hypotheses, references=references)
bleu_restrict = None
if restrict_lexicon:
bleu_restrict = raw_corpus_bleu(hypotheses=hypotheses, references=references, offset=0.01)
# Run BLEU cli
eval_params = "{} {} ".format(sockeye.evaluate.__file__,
_EVAL_PARAMS_COMMON.format(hypotheses=out_path,
references=test_target_path,
metrics="bleu chrf",
quiet=quiet_arg), )
with patch.object(sys, "argv", eval_params.split()):
sockeye.evaluate.main()
return perplexity, bleu, bleu_restrict, chrf
| [
"str",
"str",
"str",
"str",
"int",
"int",
"int",
"int",
"int",
"int",
"int",
"str",
"str",
"Optional[str]",
"str",
"str",
"str",
"str",
"str",
"str"
] | [
3249,
3292,
4266,
4645,
4691,
4714,
4758,
4779,
4824,
4852,
4874,
7335,
7382,
7435,
7493,
7541,
7587,
7633,
7680,
7727
] | [
3252,
3295,
4269,
4648,
4694,
4717,
4761,
4782,
4827,
4855,
4877,
7338,
7385,
7448,
7496,
7544,
7590,
7636,
7683,
7730
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/integration/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/integration/test_seq_copy_int.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pytest
from test.common import run_train_translate, tmp_digits_dataset
_TRAIN_LINE_COUNT = 100
_DEV_LINE_COUNT = 10
_TEST_LINE_COUNT = 10
_TEST_LINE_COUNT_EMPTY = 2
_LINE_MAX_LENGTH = 9
_TEST_MAX_LENGTH = 20
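# Each entry is a (train_params, translate_params, restrict_lexicon) triple
# consumed by the parametrized test_seq_copy below.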
ENCODER_DECODER_SETTINGS = [
# "Vanilla" LSTM encoder-decoder with attention
("--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 16 --num-embed 8 --rnn-attention-type mlp"
" --rnn-attention-num-hidden 16 --batch-size 8 --loss cross-entropy --optimized-metric perplexity --max-updates 10"
" --checkpoint-frequency 10 --optimizer adam --initial-learning-rate 0.01",
"--beam-size 2",
True),
# "Kitchen sink" LSTM encoder-decoder with attention
("--encoder rnn --num-layers 4:2 --rnn-cell-type lstm --rnn-num-hidden 16"
" --rnn-residual-connections"
" --num-embed 16 --rnn-attention-type coverage --rnn-attention-num-hidden 16 --weight-tying "
"--rnn-attention-use-prev-word --rnn-context-gating --layer-normalization --batch-size 8 "
"--loss cross-entropy --label-smoothing 0.1 --loss-normalization-type batch --optimized-metric perplexity"
" --max-updates 10 --checkpoint-frequency 10 --optimizer adam --initial-learning-rate 0.01"
" --rnn-dropout-inputs 0.5:0.1 --rnn-dropout-states 0.5:0.1 --embed-dropout 0.1 --rnn-decoder-hidden-dropout 0.01"
" --rnn-decoder-state-init avg --rnn-encoder-reverse-input --rnn-dropout-recurrent 0.1:0.0"
" --learning-rate-decay-param-reset --weight-normalization",
"--beam-size 2",
False),
# Convolutional embedding encoder + LSTM encoder-decoder with attention
("--encoder rnn-with-conv-embed --conv-embed-max-filter-width 3 --conv-embed-num-filters 4:4:8"
" --conv-embed-pool-stride 2 --conv-embed-num-highway-layers 1 --num-layers 1 --rnn-cell-type lstm"
" --rnn-num-hidden 16 --num-embed 8 --rnn-attention-num-hidden 16 --batch-size 8 --loss cross-entropy"
" --optimized-metric perplexity --max-updates 10 --checkpoint-frequency 10 --optimizer adam"
" --initial-learning-rate 0.01",
"--beam-size 2",
False),
# Transformer encoder, GRU decoder, mhdot attention
("--encoder transformer --num-layers 2:1 --rnn-cell-type gru --rnn-num-hidden 16 --num-embed 8:16"
" --transformer-attention-heads 2 --transformer-model-size 8"
" --transformer-feed-forward-num-hidden 32 --transformer-activation-type gelu"
" --rnn-attention-type mhdot --rnn-attention-mhdot-heads 4 --rnn-attention-num-hidden 16 --batch-size 8 "
" --max-updates 10 --checkpoint-frequency 10 --optimizer adam --initial-learning-rate 0.01"
" --weight-init-xavier-factor-type avg --weight-init-scale 3.0 --embed-weight-init normal",
"--beam-size 2",
False),
# LSTM encoder, Transformer decoder
("--encoder rnn --decoder transformer --num-layers 2:2 --rnn-cell-type lstm --rnn-num-hidden 16 --num-embed 16"
" --transformer-attention-heads 2 --transformer-model-size 16"
" --transformer-feed-forward-num-hidden 32 --transformer-activation-type swish1"
" --batch-size 8 --max-updates 10"
" --checkpoint-frequency 10 --optimizer adam --initial-learning-rate 0.01",
"--beam-size 3",
False),
# Full transformer
("--encoder transformer --decoder transformer"
" --num-layers 3 --transformer-attention-heads 2 --transformer-model-size 16 --num-embed 16"
" --transformer-feed-forward-num-hidden 32"
" --transformer-dropout-prepost 0.1 --transformer-preprocess n --transformer-postprocess dr"
" --weight-tying --weight-tying-type src_trg_softmax"
" --batch-size 8 --max-updates 10"
" --checkpoint-frequency 10 --optimizer adam --initial-learning-rate 0.01",
"--beam-size 2",
True),
# 3-layer cnn
("--encoder cnn --decoder cnn "
" --batch-size 16 --num-layers 3 --max-updates 10 --checkpoint-frequency 10"
" --cnn-num-hidden 32 --cnn-positional-embedding-type fixed"
" --optimizer adam --initial-learning-rate 0.001",
"--beam-size 2",
True)]
@pytest.mark.parametrize("train_params, translate_params, restrict_lexicon", ENCODER_DECODER_SETTINGS)
def test_seq_copy(train_params: str, translate_params: str, restrict_lexicon: bool):
"""Task: copy short sequences of digits"""
with tmp_digits_dataset("test_seq_copy", _TRAIN_LINE_COUNT, _LINE_MAX_LENGTH,
_DEV_LINE_COUNT, _LINE_MAX_LENGTH,
_TEST_LINE_COUNT, _TEST_LINE_COUNT_EMPTY, _TEST_MAX_LENGTH) as data:
# Test model configuration, including the output equivalence of batch and no-batch decoding
translate_params_batch = translate_params + " --batch-size 2"
# Ignore return values (perplexity and BLEU) for integration test
run_train_translate(train_params,
translate_params,
translate_params_batch,
data['source'],
data['target'],
data['validation_source'],
data['validation_target'],
data['test_source'],
data['test_target'],
max_seq_len=_LINE_MAX_LENGTH + 1,
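                            # (+1 presumably leaves room for the EOS symbol appended to each sequence)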
restrict_lexicon=restrict_lexicon,
work_dir=data['work_dir'])
| [
"str",
"str",
"bool"
] | [
4748,
4771,
4794
] | [
4751,
4774,
4798
] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/system/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/system/test_seq_copy_sys.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import pytest
from test.common import tmp_digits_dataset, run_train_translate
logger = logging.getLogger(__name__)
_TRAIN_LINE_COUNT = 10000
_DEV_LINE_COUNT = 100
_LINE_MAX_LENGTH = 10
_TEST_LINE_COUNT = 110
_TEST_LINE_COUNT_EMPTY = 10
_TEST_MAX_LENGTH = 11
_SEED_TRAIN = 13
_SEED_DEV = 17
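# Each tuple: (name, train_params, translate_params, use_prepared_data, perplexity_thresh, bleu_thresh)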
@pytest.mark.parametrize("name, train_params, translate_params, use_prepared_data, perplexity_thresh, bleu_thresh", [
("Copy:lstm:lstm",
"--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32 --rnn-attention-type mlp"
" --rnn-attention-num-hidden 32 --batch-size 16 --loss cross-entropy --optimized-metric perplexity"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001"
" --rnn-dropout-states 0.0:0.1 --embed-dropout 0.1:0.0 --max-updates 4000 --weight-normalization"
" --gradient-clipping-type norm --gradient-clipping-threshold 10",
"--beam-size 5 ",
True,
1.02,
0.99),
("Copy:chunking",
"--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32 --rnn-attention-type mlp"
" --rnn-attention-num-hidden 32 --batch-size 16 --loss cross-entropy --optimized-metric perplexity"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001"
" --rnn-dropout-states 0.0:0.1 --embed-dropout 0.1:0.0 --max-updates 5000",
"--beam-size 5 --max-input-len 4",
False,
1.01,
0.99),
("Copy:word-based-batching",
"--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32 --rnn-attention-type mlp"
" --rnn-attention-num-hidden 32 --batch-size 80 --batch-type word --loss cross-entropy "
" --optimized-metric perplexity --max-updates 5000 --checkpoint-frequency 1000 --optimizer adam "
" --initial-learning-rate 0.001 --rnn-dropout-states 0.0:0.1 --embed-dropout 0.1:0.0 --layer-normalization",
"--beam-size 5",
True,
1.01,
0.99),
("Copy:transformer:lstm",
"--encoder transformer --num-layers 2:1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32"
" --rnn-attention-type mhdot --rnn-attention-num-hidden 32 --batch-size 16 --rnn-attention-mhdot-heads 1"
" --loss cross-entropy --optimized-metric perplexity --max-updates 6000"
" --transformer-attention-heads 4 --transformer-model-size 32"
" --transformer-feed-forward-num-hidden 64 --transformer-activation-type gelu"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 5",
False,
1.01,
0.99),
("Copy:lstm:transformer",
"--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32"
" --decoder transformer --batch-size 16"
" --loss cross-entropy --optimized-metric perplexity --max-updates 3000"
" --transformer-attention-heads 4 --transformer-model-size 32"
" --transformer-feed-forward-num-hidden 64 --transformer-activation-type swish1"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 5",
True,
1.01,
0.98),
("Copy:transformer:transformer",
"--encoder transformer --decoder transformer"
" --batch-size 16 --max-updates 4000"
" --num-layers 2 --transformer-attention-heads 4 --transformer-model-size 32"
" --transformer-feed-forward-num-hidden 64 --num-embed 32"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 1",
False,
1.01,
0.99),
("Copy:cnn:cnn",
"--encoder cnn --decoder cnn "
" --batch-size 16 --num-layers 3 --max-updates 3000"
" --cnn-num-hidden 32 --cnn-positional-embedding-type fixed --cnn-project-qkv "
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 1",
True,
1.02,
0.98)
])
def test_seq_copy(name, train_params, translate_params, use_prepared_data, perplexity_thresh, bleu_thresh):
"""Task: copy short sequences of digits"""
with tmp_digits_dataset("test_seq_copy.", _TRAIN_LINE_COUNT, _LINE_MAX_LENGTH, _DEV_LINE_COUNT,
_LINE_MAX_LENGTH, _TEST_LINE_COUNT, _TEST_LINE_COUNT_EMPTY, _TEST_MAX_LENGTH,
seed_train=_SEED_TRAIN, seed_dev=_SEED_DEV) as data:
# Test model configuration
perplexity, bleu, bleu_restrict, chrf = run_train_translate(train_params,
translate_params,
None, # no second set of parameters
data['source'],
data['target'],
data['validation_source'],
data['validation_target'],
data['test_source'],
data['test_target'],
use_prepared_data=use_prepared_data,
max_seq_len=_LINE_MAX_LENGTH + 1,
restrict_lexicon=True,
work_dir=data['work_dir'])
logger.info("test: %s", name)
logger.info("perplexity=%f, bleu=%f, bleu_restrict=%f chrf=%f", perplexity, bleu, bleu_restrict, chrf)
assert perplexity <= perplexity_thresh
assert bleu >= bleu_thresh
assert bleu_restrict >= bleu_thresh
@pytest.mark.parametrize("name, train_params, translate_params, use_prepared_data, perplexity_thresh, bleu_thresh", [
("Sort:lstm",
"--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32 --rnn-attention-type mlp"
" --rnn-attention-num-hidden 32 --batch-size 16 --loss cross-entropy --optimized-metric perplexity"
" --max-updates 5000 --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 5",
True,
1.04,
0.98),
("Sort:word-based-batching",
"--encoder rnn --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32 --rnn-attention-type mlp"
" --rnn-attention-num-hidden 32 --batch-size 80 --batch-type word --loss cross-entropy"
" --optimized-metric perplexity --max-updates 5000 --checkpoint-frequency 1000 --optimizer adam "
" --initial-learning-rate 0.001 --rnn-dropout-states 0.0:0.1 --embed-dropout 0.1:0.0",
"--beam-size 5",
False,
1.01,
0.99),
("Sort:transformer:lstm",
"--encoder transformer --num-layers 1 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32"
" --rnn-attention-type mhdot --rnn-attention-num-hidden 32 --batch-size 16 --rnn-attention-mhdot-heads 2"
" --loss cross-entropy --optimized-metric perplexity --max-updates 5000"
" --transformer-attention-heads 4 --transformer-model-size 32"
" --transformer-feed-forward-num-hidden 64 --transformer-activation-type gelu"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 5",
True,
1.02,
0.99),
("Sort:lstm:transformer",
"--encoder rnn --num-layers 1:2 --rnn-cell-type lstm --rnn-num-hidden 64 --num-embed 32"
" --decoder transformer --batch-size 16 --transformer-model-size 32"
" --loss cross-entropy --optimized-metric perplexity --max-updates 7000"
" --transformer-attention-heads 4"
" --transformer-feed-forward-num-hidden 64 --transformer-activation-type swish1"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 5",
False,
1.02,
0.99),
("Sort:transformer",
"--encoder transformer --decoder transformer"
" --batch-size 16 --max-updates 5000"
" --num-layers 2 --transformer-attention-heads 4 --transformer-model-size 32 --num-embed 32"
" --transformer-feed-forward-num-hidden 64"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 1",
True,
1.02,
0.99),
("Sort:cnn",
"--encoder cnn --decoder cnn "
" --batch-size 16 --num-layers 3 --max-updates 5000"
" --cnn-num-hidden 32 --cnn-positional-embedding-type fixed"
" --checkpoint-frequency 1000 --optimizer adam --initial-learning-rate 0.001",
"--beam-size 1",
False,
1.07,
0.96)
])
def test_seq_sort(name, train_params, translate_params, use_prepared_data, perplexity_thresh, bleu_thresh):
"""Task: sort short sequences of digits"""
with tmp_digits_dataset("test_seq_sort.", _TRAIN_LINE_COUNT, _LINE_MAX_LENGTH, _DEV_LINE_COUNT, _LINE_MAX_LENGTH,
_TEST_LINE_COUNT, _TEST_LINE_COUNT_EMPTY, _TEST_MAX_LENGTH,
sort_target=True, seed_train=_SEED_TRAIN, seed_dev=_SEED_DEV) as data:
# Test model configuration
perplexity, bleu, bleu_restrict, chrf = run_train_translate(train_params,
translate_params,
None, # no second set of parameters
data['source'],
data['target'],
data['validation_source'],
data['validation_target'],
data['test_source'],
data['test_target'],
use_prepared_data=use_prepared_data,
max_seq_len=_LINE_MAX_LENGTH + 1,
restrict_lexicon=True,
work_dir=data['work_dir'])
logger.info("test: %s", name)
logger.info("perplexity=%f, bleu=%f, bleu_restrict=%f chrf=%f", perplexity, bleu, bleu_restrict, chrf)
assert perplexity <= perplexity_thresh
assert bleu >= bleu_thresh
assert bleu_restrict >= bleu_thresh
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/__init__.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_arguments.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import pytest
import os
import sockeye.arguments as arguments
import sockeye.constants as C
from itertools import zip_longest
# Note: while --prepared-data and --source/--target are mutually exclusive, this is not enforced at the CLI level.
@pytest.mark.parametrize("test_params, expected_params", [
# mandatory parameters
('--source test_src --target test_tgt --prepared-data prep_data '
'--validation-source test_validation_src --validation-target test_validation_tgt '
'--output test_output',
dict(source='test_src', target='test_tgt',
prepared_data='prep_data',
validation_source='test_validation_src', validation_target='test_validation_tgt',
output='test_output', overwrite_output=False,
          source_vocab=None, target_vocab=None, shared_vocab=False, num_words=(50000, 50000), word_min_count=(1, 1),
no_bucketing=False, bucket_width=10, max_seq_len=(100, 100),
monitor_pattern=None, monitor_stat_func='mx_default', use_tensorboard=False)),
# short parameters
('-s test_src -t test_tgt -d prep_data '
'-vs test_validation_src -vt test_validation_tgt '
'-o test_output',
dict(source='test_src', target='test_tgt',
prepared_data='prep_data',
validation_source='test_validation_src', validation_target='test_validation_tgt',
output='test_output', overwrite_output=False,
          source_vocab=None, target_vocab=None, shared_vocab=False, num_words=(50000, 50000), word_min_count=(1, 1),
no_bucketing=False, bucket_width=10, max_seq_len=(100, 100),
monitor_pattern=None, monitor_stat_func='mx_default', use_tensorboard=False))
])
def test_io_args(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_training_io_args)
@pytest.mark.parametrize("test_params, expected_params", [
('', dict(quiet=False)),
])
def test_logging_args(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_logging_args)
@pytest.mark.parametrize("test_params, expected_params", [
('', dict(device_ids=[-1], use_cpu=False, disable_device_locking=False, lock_dir='/tmp')),
('--device-ids 1 2 3 --use-cpu --disable-device-locking --lock-dir test_dir',
dict(device_ids=[1, 2, 3], use_cpu=True, disable_device_locking=True, lock_dir='test_dir'))
])
def test_device_args(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_device_args)
@pytest.mark.parametrize("test_params, expected_params", [
('', dict(params=None,
allow_missing_params=False,
num_layers=(1, 1),
num_embed=(512, 512),
rnn_attention_type='mlp',
rnn_attention_num_hidden=None,
rnn_attention_coverage_type='count',
rnn_attention_coverage_num_hidden=1,
weight_tying=False,
weight_tying_type="trg_softmax",
rnn_attention_mhdot_heads=None,
transformer_attention_heads=8,
transformer_feed_forward_num_hidden=2048,
transformer_activation_type=C.RELU,
transformer_model_size=512,
transformer_positional_embedding_type="fixed",
transformer_preprocess=('', ''),
transformer_postprocess=('drn', 'drn'),
rnn_attention_use_prev_word=False,
rnn_decoder_state_init="last",
rnn_encoder_reverse_input=False,
rnn_context_gating=False,
rnn_cell_type=C.LSTM_TYPE,
rnn_num_hidden=1024,
rnn_residual_connections=False,
rnn_first_residual_layer=2,
cnn_activation_type='glu',
cnn_kernel_width=(3, 5),
cnn_num_hidden=512,
cnn_positional_embedding_type="learned",
cnn_project_qkv=False,
layer_normalization=False,
weight_normalization=False,
encoder=C.RNN_NAME,
conv_embed_max_filter_width=8,
decoder=C.RNN_NAME,
conv_embed_output_dim=None,
conv_embed_num_filters=(200, 200, 250, 250, 300, 300, 300, 300),
conv_embed_num_highway_layers=4,
conv_embed_pool_stride=5,
conv_embed_add_positional_encodings=False,
rnn_attention_in_upper_layers=False))])
def test_model_parameters(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_model_parameters)
@pytest.mark.parametrize("test_params, expected_params", [
('', dict(batch_size=64,
batch_type="sentence",
fill_up='replicate',
loss=C.CROSS_ENTROPY,
label_smoothing=0.0,
loss_normalization_type='valid',
metrics=[C.PERPLEXITY],
optimized_metric=C.PERPLEXITY,
max_updates=None,
checkpoint_frequency=1000,
max_num_checkpoint_not_improved=8,
embed_dropout=(.0, .0),
transformer_dropout_attention=0.0,
transformer_dropout_act=0.0,
transformer_dropout_prepost=0.0,
conv_embed_dropout=0.0,
optimizer='adam',
optimizer_params=None,
kvstore='device',
gradient_compression_type=None,
gradient_compression_threshold=0.5,
min_num_epochs=None,
max_num_epochs=None,
initial_learning_rate=0.0003,
weight_decay=0.0,
momentum=None,
gradient_clipping_threshold=1.0,
gradient_clipping_type='abs',
learning_rate_scheduler_type='plateau-reduce',
learning_rate_reduce_factor=0.5,
learning_rate_reduce_num_not_improved=3,
learning_rate_half_life=10,
learning_rate_warmup=0,
learning_rate_schedule=None,
learning_rate_decay_param_reset=False,
learning_rate_decay_optimizer_states_reset='off',
weight_init='xavier',
weight_init_scale=2.34,
weight_init_xavier_rand_type='uniform',
weight_init_xavier_factor_type='in',
embed_weight_init='default',
rnn_dropout_inputs=(.0, .0),
rnn_dropout_states=(.0, .0),
rnn_dropout_recurrent=(.0, .0),
rnn_decoder_hidden_dropout=.0,
cnn_hidden_dropout=0.0,
rnn_forget_bias=0.0,
rnn_h2h_init=C.RNN_INIT_ORTHOGONAL,
decode_and_evaluate=0,
decode_and_evaluate_use_cpu=False,
decode_and_evaluate_device_id=None,
seed=13,
keep_last_params=-1)),
])
def test_training_arg(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_training_args)
@pytest.mark.parametrize("test_params, expected_params", [
('-m model', dict(input=None,
output=None,
checkpoints=None,
models=['model'],
beam_size=5,
batch_size=1,
chunk_size=None,
ensemble_mode='linear',
bucket_width=10,
max_input_len=None,
restrict_lexicon=None,
softmax_temperature=None,
output_type='translation',
sure_align_threshold=0.9,
max_output_length_num_stds=2,
length_penalty_alpha=1.0,
length_penalty_beta=0.0)),
])
def test_inference_args(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_inference_args)
# Make sure that the parameter names and default values used in the tutorials do not change without the tutorials
# being updated accordingly.
@pytest.mark.parametrize("test_params, expected_params, expected_params_present", [
# seqcopy tutorial
('-s train.source '
'-t train.target '
'-vs dev.source '
'-vt dev.target '
'--num-embed 32 '
'--rnn-num-hidden 64 '
'--rnn-attention-type dot '
'--use-cpu '
'--metrics perplexity accuracy '
'--max-num-checkpoint-not-improved 3 '
'-o seqcopy_model',
dict(source="train.source",
target="train.target",
validation_source="dev.source",
validation_target="dev.target",
num_embed=(32, 32),
rnn_num_hidden=64,
use_cpu=True,
metrics=['perplexity', 'accuracy'],
max_num_checkpoint_not_improved=3,
output="seqcopy_model",
          # The tutorial text mentions that we train an RNN model:
encoder="rnn",
decoder="rnn"),
# Additionally we mention the checkpoint_frequency
['checkpoint_frequency']),
# WMT tutorial
('-s corpus.tc.BPE.de '
'-t corpus.tc.BPE.en '
'-vs newstest2016.tc.BPE.de '
'-vt newstest2016.tc.BPE.en '
'--num-embed 256 '
'--rnn-num-hidden 512 '
'--rnn-attention-type dot '
'--max-seq-len 60 '
'--decode-and-evaluate 500 '
'--use-tensorboard '
'--use-cpu '
     '-o wmt_model',
dict(
source="corpus.tc.BPE.de",
target="corpus.tc.BPE.en",
validation_source="newstest2016.tc.BPE.de",
validation_target="newstest2016.tc.BPE.en",
num_embed=(256, 256),
rnn_num_hidden=512,
rnn_attention_type='dot',
max_seq_len=(60, 60),
decode_and_evaluate=500,
use_tensorboard=True,
use_cpu=True,
# Arguments mentioned in the text, should be renamed in the tutorial if they change:
rnn_cell_type="lstm",
encoder="rnn",
decoder="rnn",
optimizer="adam"),
["num_layers",
"rnn_residual_connections",
"batch_size",
"learning_rate_schedule",
"optimized_metric",
"decode_and_evaluate",
"seed"])
])
def test_tutorial_train_args(test_params, expected_params, expected_params_present):
_test_args_subset(test_params, expected_params, expected_params_present, arguments.add_train_cli_args)
@pytest.mark.parametrize("test_params, expected_params, expected_params_present", [
# seqcopy tutorial
('-m seqcopy_model '
'--use-cpu',
dict(models=["seqcopy_model"],
use_cpu=True),
[]),
# WMT tutorial
('-m wmt_model wmt_model_seed2 '
'--use-cpu '
'--output-type align_plot',
dict(models=["wmt_model", "wmt_model_seed2"],
use_cpu=True,
output_type="align_plot"),
# Other parameters mentioned in the WMT tutorial
["beam_size",
"softmax_temperature",
"length_penalty_alpha"]),
])
def test_tutorial_translate_args(test_params, expected_params, expected_params_present):
_test_args_subset(test_params, expected_params, expected_params_present, arguments.add_translate_cli_args)
@pytest.mark.parametrize("test_params, expected_params, expected_params_present", [
# WMT tutorial
('-o wmt_model_avg/param.best wmt_model',
dict(inputs=["wmt_model"],
output="wmt_model_avg/param.best"),
[]),
])
def test_tutorial_averaging_args(test_params, expected_params, expected_params_present):
_test_args_subset(test_params, expected_params, expected_params_present, arguments.add_average_args)
@pytest.mark.parametrize("test_params, expected_params", [
('--source test_src --target test_tgt --output prepared_data ',
dict(source='test_src', target='test_tgt',
source_vocab=None,
target_vocab=None,
shared_vocab=False,
num_words=(50000, 50000),
          word_min_count=(1, 1),
no_bucketing=False,
bucket_width=10,
max_seq_len=(100, 100),
min_num_shards=1,
num_samples_per_shard=1000000,
seed=13,
output='prepared_data'
))
])
def test_prepare_data_cli_args(test_params, expected_params):
_test_args(test_params, expected_params, arguments.add_prepare_data_cli_args)
def _create_argument_values_that_must_be_files_or_dirs(params):
"""
Loop over test_params and create temporary files for training/validation sources/targets.
"""
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
args = [iter(iterable)] * n
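        # e.g. grouper("ABCDEF", 2) -> ("A", "B"), ("C", "D"), ("E", "F")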
        return zip_longest(*args, fillvalue=fillvalue)
params = params.split()
regular_files_params = {'-vs', '-vt', '-t', '-s', '--source', '--target',
'--validation-source', '--validation-target'}
folder_params = {'--prepared-data', '-d'}
to_unlink = set()
for arg, val in grouper(params, 2):
if arg in regular_files_params and not os.path.isfile(val):
open(val, 'w').close()
to_unlink.add(val)
if arg in folder_params:
os.mkdir(val)
to_unlink.add(val)
return to_unlink
def _delete_argument_values_that_must_be_files_or_dirs(to_unlink):
"""
Close and delete previously created files or directories.
"""
for name in to_unlink:
if os.path.isfile(name):
os.unlink(name)
else:
os.rmdir(name)
def _test_args(test_params, expected_params, args_func):
test_parser = argparse.ArgumentParser()
args_func(test_parser)
created = _create_argument_values_that_must_be_files_or_dirs(test_params)
try:
parsed_params = test_parser.parse_args(test_params.split())
finally:
_delete_argument_values_that_must_be_files_or_dirs(created)
assert dict(vars(parsed_params)) == expected_params
def _test_args_subset(test_params, expected_params, expected_params_present, args_func):
"""
Only checks the subset of the parameters given in `expected_params`.
:param test_params: A string of test parameters.
:param expected_params: A dict of parameters to test for the exact value.
    :param expected_params_present: A list of parameter names to test for presence.
:param args_func: The function correctly setting up the parameters for ArgumentParser.
"""
test_parser = argparse.ArgumentParser()
args_func(test_parser)
created = _create_argument_values_that_must_be_files_or_dirs(test_params)
    try:
        parsed_params = dict(vars(test_parser.parse_args(test_params.split())))
    finally:
        _delete_argument_values_that_must_be_files_or_dirs(created)
parsed_params_subset = {k: v for k, v in parsed_params.items() if k in expected_params}
assert parsed_params_subset == expected_params
for expected_param_present in expected_params_present:
assert expected_param_present in parsed_params, "Expected param %s to be present." % expected_param_present
| [] | [] | [] |
archives/zz1559152814_my-notebook.zip | paper:sockeye亚马逊翻译模型(2017业内最佳)/sockeye-master/test/unit/test_attention.py | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
import sockeye.rnn_attention
import sockeye.constants as C
import sockeye.coverage
from test.common import gaussian_vector, integer_vector
attention_types = [C.ATT_BILINEAR, C.ATT_DOT, C.ATT_DOT_SCALED, C.ATT_LOC, C.ATT_MLP]
@pytest.mark.parametrize("attention_type", attention_types)
def test_attention(attention_type,
batch_size=1,
encoder_num_hidden=2,
decoder_num_hidden=2):
# source: (batch_size, seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size,)
source_length = mx.sym.Variable("source_length")
source_seq_len = 3
config_attention = sockeye.rnn_attention.AttentionConfig(type=attention_type,
num_hidden=2,
input_previous_word=False,
source_num_hidden=2,
query_num_hidden=2,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
attention_state = attention_func(attention_input, attention_state)
sym = mx.sym.Group([attention_state.context, attention_state.probs])
executor = sym.simple_bind(ctx=mx.cpu(),
source=(batch_size, source_seq_len, encoder_num_hidden),
source_length=(batch_size,),
decoder_state=(batch_size, decoder_num_hidden))
# TODO: test for other inputs (that are not equal at each source position)
executor.arg_dict["source"][:] = np.asarray([[[1., 2.], [1., 2.], [3., 4.]]])
executor.arg_dict["source_length"][:] = np.asarray([2.0])
executor.arg_dict["decoder_state"][:] = np.asarray([[5, 6]])
exec_output = executor.forward()
context_result = exec_output[0].asnumpy()
attention_prob_result = exec_output[1].asnumpy()
# expecting uniform attention_weights of 0.5: 0.5 * seq1 + 0.5 * seq2
assert np.isclose(context_result, np.asarray([[1., 2.]])).all()
# equal attention to first two and no attention to third
assert np.isclose(attention_prob_result, np.asarray([[0.5, 0.5, 0.]])).all()
coverage_cases = [("gru", 10), ("tanh", 4), ("count", 1), ("sigmoid", 1), ("relu", 30)]
@pytest.mark.parametrize("attention_coverage_type,attention_coverage_num_hidden", coverage_cases)
def test_coverage_attention(attention_coverage_type,
attention_coverage_num_hidden,
batch_size=3,
encoder_num_hidden=2,
decoder_num_hidden=2):
# source: (batch_size, seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size, )
source_length = mx.sym.Variable("source_length")
source_seq_len = 10
config_coverage = sockeye.coverage.CoverageConfig(type=attention_coverage_type,
num_hidden=attention_coverage_num_hidden,
layer_normalization=False)
config_attention = sockeye.rnn_attention.AttentionConfig(type="coverage",
num_hidden=5,
input_previous_word=False,
source_num_hidden=encoder_num_hidden,
query_num_hidden=decoder_num_hidden,
layer_normalization=False,
config_coverage=config_coverage)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
attention_state = attention_func(attention_input, attention_state)
sym = mx.sym.Group([attention_state.context, attention_state.probs, attention_state.dynamic_source])
source_shape = (batch_size, source_seq_len, encoder_num_hidden)
source_length_shape = (batch_size,)
decoder_state_shape = (batch_size, decoder_num_hidden)
executor = sym.simple_bind(ctx=mx.cpu(),
source=source_shape,
source_length=source_length_shape,
decoder_state=decoder_state_shape)
source_length_vector = integer_vector(shape=source_length_shape, max_value=source_seq_len)
executor.arg_dict["source"][:] = gaussian_vector(shape=source_shape)
executor.arg_dict["source_length"][:] = source_length_vector
executor.arg_dict["decoder_state"][:] = gaussian_vector(shape=decoder_state_shape)
exec_output = executor.forward()
context_result = exec_output[0].asnumpy()
attention_prob_result = exec_output[1].asnumpy()
dynamic_source_result = exec_output[2].asnumpy()
expected_probs = (1 / source_length_vector).reshape((batch_size, 1))
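    # The attention parameters are never explicitly initialized here, so all
    # scores are equal and the masked softmax should be uniform (1/length) over
    # the unpadded positions of each row, which the last assertion checks.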
assert context_result.shape == (batch_size, encoder_num_hidden)
assert attention_prob_result.shape == (batch_size, source_seq_len)
assert dynamic_source_result.shape == (batch_size, source_seq_len, attention_coverage_num_hidden)
assert (np.sum(np.isclose(attention_prob_result, expected_probs), axis=1) == source_length_vector).all()
def test_last_state_attention(batch_size=1,
encoder_num_hidden=2):
"""
EncoderLastStateAttention is a bit different from other attention mechanisms as it doesn't take a query argument
and doesn't return a probability distribution over the inputs (aka alignment).
"""
# source: (batch_size, seq_len, encoder_num_hidden)
source = mx.sym.Variable("source")
# source_length: (batch_size,)
source_length = mx.sym.Variable("source_length")
source_seq_len = 3
config_attention = sockeye.rnn_attention.AttentionConfig(type="fixed",
num_hidden=0,
input_previous_word=False,
source_num_hidden=2,
query_num_hidden=2,
layer_normalization=False,
config_coverage=None)
attention = sockeye.rnn_attention.get_attention(config_attention, max_seq_len=source_seq_len)
attention_state = attention.get_initial_state(source_length, source_seq_len)
attention_func = attention.on(source, source_length, source_seq_len)
attention_input = attention.make_input(0, mx.sym.Variable("word_vec_prev"), mx.sym.Variable("decoder_state"))
attention_state = attention_func(attention_input, attention_state)
sym = mx.sym.Group([attention_state.context, attention_state.probs])
executor = sym.simple_bind(ctx=mx.cpu(),
source=(batch_size, source_seq_len, encoder_num_hidden),
source_length=(batch_size,))
# TODO: test for other inputs (that are not equal at each source position)
executor.arg_dict["source"][:] = np.asarray([[[1., 2.], [1., 2.], [3., 4.]]])
executor.arg_dict["source_length"][:] = np.asarray([2.0])
exec_output = executor.forward()
context_result = exec_output[0].asnumpy()
attention_prob_result = exec_output[1].asnumpy()
# expecting attention on last state based on source_length
assert np.isclose(context_result, np.asarray([[1., 2.]])).all()
assert np.isclose(attention_prob_result, np.asarray([[0., 1.0, 0.]])).all()
def test_get_context_and_attention_probs():
source = mx.sym.Variable('source')
source_length = mx.sym.Variable('source_length')
attention_scores = mx.sym.Variable('scores')
context, att_probs = sockeye.rnn_attention.get_context_and_attention_probs(source, source_length, attention_scores)
sym = mx.sym.Group([context, att_probs])
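    # exactly the three symbolic variables above (source, source_length, scores)
    # should remain as free arguments of the grouped symbol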
assert len(sym.list_arguments()) == 3
batch_size, seq_len, num_hidden = 32, 50, 100
# data
source_nd = mx.nd.random_normal(shape=(batch_size, seq_len, num_hidden))
source_length_np = np.random.randint(1, seq_len+1, (batch_size,))
source_length_nd = mx.nd.array(source_length_np)
scores_nd = mx.nd.zeros((batch_size, seq_len, 1))
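    # zero scores make the masked softmax uniform: each row puts 1/source_length
    # on its first source_length positions and exactly 0 on the padded rest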
in_shapes, out_shapes, _ = sym.infer_shape(source=source_nd.shape,
source_length=source_length_nd.shape,
scores=scores_nd.shape)
assert in_shapes == [(batch_size, seq_len, num_hidden), (batch_size, seq_len, 1), (batch_size,)]
assert out_shapes == [(batch_size, num_hidden), (batch_size, seq_len)]
context, probs = sym.eval(source=source_nd,
source_length=source_length_nd,
scores=scores_nd)
expected_probs = (1. / source_length_nd).reshape((batch_size, 1)).asnumpy()
assert (np.sum(np.isclose(probs.asnumpy(), expected_probs), axis=1) == source_length_np).all()
| [] | [] | [] |