| column | type |
|---|---|
| repo | stringclasses (43 values) |
| docfile_name | stringlengths (7-40) |
| doc_type | stringclasses (11 values) |
| intent | stringlengths (8-128) |
| license | stringclasses (3 values) |
| path_to_docfile | stringlengths (29-116) |
| relevant_code_files | sequencelengths (0-12) |
| relevant_code_dir | stringlengths (0-54) |
| target_text | stringlengths (339-44.2k) |
| relevant_code_context | stringlengths (1.12k-23.2M) |

repo: ynput__OpenPype
docfile_name: assignments_and_allocations.rst
doc_type: Tutorial / Subdoc
intent: Working with assignments and allocations
license: MIT License
path_to_docfile: ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/assignments_and_allocations.rst
relevant_code_files: ["ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"]
relevant_code_dir: (empty)

target_text:

Working with assignments and allocations

The API exposes assignments and allocations relationships on objects in
the project hierarchy. You can use these to retrieve the allocated or
assigned resources, which can be either groups or users.
Allocations can be used to allocate users or groups to a project team,
while assignments are more explicit and are used to assign users to
tasks. Both assignments and allocations are modelled as Appointment
objects, with a type attribute indicating the type of the appointment.
The following example retrieves all users that are part of the project team::
    # Retrieve a project
    project = session.query('Project').first()

    # Set to hold all users part of the project team
    project_team = set()

    # Add all allocated groups and users
    for allocation in project['allocations']:

        # Resource may be either a group or a user
        resource = allocation['resource']

        # If the resource is a group, add its members
        if isinstance(resource, session.types['Group']):
            for membership in resource['memberships']:
                user = membership['user']
                project_team.add(user)

        # The resource is a user, add it.
        else:
            user = resource
            project_team.add(user)
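Both allocations and assignments can also be queried directly as Appointment
entities. The following is a sketch (an addition, assuming the project
retrieved above and the same context_id and type attributes used elsewhere in
this guide) that lists the project's allocations::

    allocations = session.query(
        'Appointment where context_id is "{0}" and type is "allocation"'.format(
            project['id']
        )
    )

    for appointment in allocations:
        print appointment['resource'], appointment['type']
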
The next example shows how to assign the current user to a task::

    # Retrieve a task and the current user
    task = session.query('Task').first()
    current_user = session.query(
        u'User where username is "{0}"'.format(session.api_user)
    ).one()

    # Create a new Appointment of type assignment.
    session.create('Appointment', {
        'context': task,
        'resource': current_user,
        'type': 'assignment'
    })

    # Finally, persist the new assignment
    session.commit()
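An assignment can be removed again by deleting the corresponding Appointment.
The following is a sketch, assuming the assignment created above and the same
query syntax used in the other examples::

    appointment = session.query(
        'Appointment where context_id is "{0}" and type is "assignment" '
        'and resource.username is "{1}"'.format(task['id'], session.api_user)
    ).first()

    if appointment is not None:
        session.delete(appointment)
        session.commit()
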
To list all users assigned to a task, see the following example::

    task = session.query('Task').first()

    users = session.query(
        'select first_name, last_name from User '
        'where assignments any (context_id = "{0}")'.format(task['id'])
    )

    for user in users:
        print user['first_name'], user['last_name']
To list the current user's assigned tasks, see the example below::

    assigned_tasks = session.query(
        'select link from Task '
        'where assignments any (resource.username = "{0}")'.format(session.api_user)
    )

    for task in assigned_tasks:
        print u' / '.join(item['name'] for item in task['link'])
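Groups can be added to a project team in the same way by creating an
Appointment of type 'allocation'. The following is a sketch that assumes the
project retrieved in the first example and a hypothetical group name::

    group = session.query('Group where name is "Animation"').first()

    if group is not None:
        session.create('Appointment', {
            'context': project,
            'resource': group,
            'type': 'allocation'
        })
        session.commit()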
relevant_code_context:

# :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
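# Usage sketch (an addition, not part of the original source): with
# FTRACK_SERVER, FTRACK_API_KEY and FTRACK_API_USER set in the environment,
# a session can be constructed without arguments and used as a context
# manager, for example:
#
#     import ftrack_api
#     with ftrack_api.Session() as session:
#         print session.server_url
#
# Credentials may also be passed explicitly via the server_url, api_key and
# api_user arguments documented above.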
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
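# Usage sketch (an addition, not from the original source), assuming
# 'api_key' is a reset type supported by the connected server:
#
#     user = session.query(
#         'User where username is "{0}"'.format(session.api_user)
#     ).one()
#     new_api_key = session.reset_remote('api_key', entity=user)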
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
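# Usage sketch (an addition, not from the original source); the entity id is
# hypothetical and get() returns None when no matching entity exists:
#
#     task = session.get('Task', 'HYPOTHETICAL-TASK-ID')
#     if task is None:
#         print 'Task not found.'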
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
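# Usage sketch (an addition, not from the original source):
#
#     # Default projections are added when no select clause is given.
#     projects = session.query('Project').all()
#
#     # Explicit select with criteria; results are fetched lazily as the
#     # QueryResult is iterated.
#     for user in session.query('select first_name from User'):
#         print user['first_name']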
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
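# Usage sketch (an addition, not from the original source):
#
#     user = session.create('User', {'username': 'martin'})
#     try:
#         session.commit()
#     except Exception:
#         # Revert the session to a known good state after the failed
#         # commit, then re-raise.
#         session.rollback()
#         raise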
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
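# Usage sketch (an addition, not from the original source):
#
#     task = session.query('Task').first()
#     # Encode only locally modified attribute values, the same strategy
#     # used by Session.call when sending batches to the server.
#     payload = session.encode(
#         [task], entity_attribute_strategy='modified_only'
#     )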
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper that returns locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
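# Usage sketch (an addition, not from the original source; the paths and the
# version relation passed in data are hypothetical):
#
#     version = session.query('AssetVersion').first()
#
#     # Single file component, added to an automatically picked location.
#     session.create_component(
#         '/path/to/plate.mov', data={'version_id': version['id']}
#     )
#
#     # Sequence component using the clique sequence format described above.
#     session.create_component(
#         '/path/to/file.%04d.ext [1-5, 7, 8, 10-20]',
#         data={'version_id': version['id']}
#     )
#     session.commit()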
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
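Example (a minimal sketch; assumes *component* is an existing component
in the session)::

    availability = session.get_component_availability(component)
    for location_id, percentage in availability.items():
        print location_id, percentage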
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option is given).
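Example (a minimal sketch; assumes *task* is an existing Task entity)::

    url = session.get_widget_url('info', entity=task, theme='light')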
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type, and the job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component that can be used as a thumbnail will always be
generated if possible.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
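Example (an illustrative sketch; assumes *version* is an existing
AssetVersion in the session)::

    job = session.encode_media(
        '/path/to/preview.mov', version_id=version['id']
    )

The returned Job can then be polled to determine when encoding has
finished.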
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
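Example (a sketch; assumes *component* exists and *data* holds the raw
file bytes to upload)::

    import base64
    import hashlib

    checksum = base64.b64encode(hashlib.md5(data).digest())
    metadata = session.get_upload_metadata(
        component['id'], 'image.jpg', len(data), checksum
    )

The returned mapping describes the URL and headers to use for the
subsequent HTTP PUT request.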
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | custom_attribute.rst | Tutorial / Subdoc | Using custom attributes | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/custom_attribute.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Using custom attributes
Custom attributes can be written and read from entities using the
custom_attributes property.
The custom_attributes property provides a similar interface to a
dictionary.
Keys can be printed using the keys method:
>>> task['custom_attributes'].keys()
[u'my_text_field']
or access keys and values as items:
>>> print task['custom_attributes'].items()
[(u'my_text_field', u'some text')]
Read existing custom attribute values:
>>> print task['custom_attributes']['my_text_field']
'some text'
Updating a custom attribute can also be done similarly to a dictionary:
task['custom_attributes']['my_text_field'] = 'foo'
To query for tasks with a custom attribute, my_text_field, you can use
the key from the configuration:
for task in session.query(
'Task where custom_attributes any '
'(key is "my_text_field" and value is "bar")'
):
print task['name']
Limitations
Expression attributes
Expression attributes are not yet supported and the reported value will
always be the non-evaluated expression.
Hierarchical attributes
Hierarchical attributes are not yet fully supported in the API.
Hierarchical attributes support both read and write, but when read the
value is not resolved from the hierarchy; instead the raw value set
directly on the entity is returned:
# The hierarchical attribute `my_attribute` is set on Shot but this will not
# be reflected on the children. Instead the raw value is returned.
print shot['custom_attributes']['my_attribute']
'foo'
print task['custom_attributes']['my_attribute']
None
To work around this limitation, it is possible to use the legacy api for
hierarchical attributes or to manually query the parents for values and
use the first value that is set, as sketched below.
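The following is one possible sketch of the manual approach. It assumes
that the attribute key is configured at each level and that every entity
in the chain exposes a parent relation:

value = None
entity = task
while entity is not None and value is None:
    value = entity['custom_attributes'].get('my_attribute')
    entity = entity.get('parent')

print value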
Validation
Custom attributes are validated on the ftrack server before being persisted.
The validation will check that the type of the data is correct for the
custom attribute.
- number - int or float
- text - str or unicode
- enumerator - list
- boolean - bool
- date - datetime.datetime or datetime.date
If the value set is not valid, a ftrack_api.exception.ServerError is
raised with debug information:
shot['custom_attributes']['fstart'] = 'test'
Traceback (most recent call last):
...
ftrack_api.exception.ServerError: Server reported error:
ValidationError(Custom attribute value for "fstart" must be of type number.
Got "test" of type <type 'unicode'>)
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server url used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars are supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
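Example (a minimal sketch)::

    for project in session.query(
        'Project where status is active', page_size=100
    ):
        print project['full_name']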
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
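Example (a minimal sketch)::

    users = session.query('select id from User').all()
    session.populate(users, 'first_name, last_name')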
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
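Example (a minimal sketch)::

    try:
        session.commit()
    except ftrack_api.exception.ServerError:
        session.rollback()
        raise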
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
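# Illustrative sketch (not from the original module; `update_data` is a
# hypothetical variable):
#
#     # Encode only locally modified attributes, mirroring how commit()
#     # builds payloads for Session.call.
#     payload = session.encode(
#         update_data, entity_attribute_strategy='modified_only'
#     )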
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
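# Illustrative sketch (not from the original module):
#
#     location = session.pick_location()
#     if location is None:
#         # No accessible location is configured for this session.
#         pass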
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
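# Illustrative sketch (not from the original module; the path and
# component name are hypothetical):
#
#     component = session.create_component(
#         '/path/to/render.%04d.exr [1-100]',
#         data={'name': 'main'},
#         location='auto'
#     )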
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
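# Illustrative sketch (not from the original module; assumes `task` is an
# existing entity instance):
#
#     url = session.get_widget_url('info', entity=task, theme='dark')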
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type, and the job data
contains information about the encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component that can be used as a thumbnail will always be
generated if possible.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the component's details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
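# Illustrative sketch (not from the original module). It assumes the
# returned mapping exposes 'url' and 'headers' entries, mirroring how the
# server accessor consumes this call; verify against the actual response.
#
#     metadata = session.get_upload_metadata(
#         component_id, 'data.mov', file_size
#     )
#     response = requests.put(
#         metadata['url'], data=file_object, headers=metadata['headers']
#     )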
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | encode_media.rst | Tutorial / Subdoc | Encoding media | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/encode_media.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Encoding media
Media such as images and video can be encoded by the ftrack server to
allow playing it in the ftrack web interface. Media can be encoded using
ftrack_api.session.Session.encode_media which accepts a path to a file
or an existing component in the ftrack.server location.
Here is an example of how to encode a video and read the output:
job = session.encode_media('/PATH/TO/MEDIA')
job_data = json.loads(job['data'])
print 'Source component id', job_data['source_component_id']
print 'Keeping original component', job_data['keep_original']
for output in job_data['output']:
print u'Output component - id: {0}, format: {1}'.format(
output['component_id'], output['format']
)
You can also call the corresponding helper method on an asset version,
ftrack_api.entity.asset_version.AssetVersion.encode_media, to have the
encoded components automatically associated with the version:
job = asset_version.encode_media('/PATH/TO/MEDIA')
It is also possible to get the URL to an encoded component once the job
has finished:
job = session.encode_media('/PATH/TO/MEDIA')
# Wait for job to finish.
location = session.query('Location where name is "ftrack.server"').one()
for component in job['job_components']:
print location.get_url(component)
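The "wait for job to finish" step above is only sketched as a comment. A
minimal polling sketch is shown below; it assumes the Job entity exposes a
status attribute that eventually becomes 'done' or 'failed', which may
differ depending on your server setup:
import time
while True:
    # Re-query so the latest remote values are merged into the cached job.
    job = session.query(
        'Job where id is "{0}"'.format(job['id'])
    ).one()
    if job['status'] in ('done', 'failed'):
        break
    time.sleep(1)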
Media can also be an existing component in another location. Before
encoding it, the component needs to be added to the ftrack.server
location:
location = session.query('Location where name is "ftrack.server"').one()
location.add_component(component)
session.commit()
job = session.encode_media(component)
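encode_media also accepts optional version_id and keep_original arguments
(see Session.encode_media in the accompanying source). As a sketch,
assuming asset_version is an existing AssetVersion entity, the encoded
components can be associated with that version and the source media kept:
job = session.encode_media(
    '/PATH/TO/MEDIA',
    version_id=asset_version['id'],
    keep_original=True
)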
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
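# Illustrative construction sketch (not from the original module; the
# values are placeholders):
#
#     session = ftrack_api.Session(
#         server_url='https://<company>.ftrackapp.com',
#         api_key='<api key>',
#         api_user='<username>'
#     )
#
# Omitting these arguments falls back to the FTRACK_SERVER, FTRACK_API_KEY
# and FTRACK_API_USER environment variables described above.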
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server url used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependant
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependant on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
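# Illustrative sketch (not from the original module; the id value is a
# placeholder):
#
#     task = session.get('Task', '<task id>')
#     if task is None:
#         # No matching entity exists on the server.
#         pass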
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
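# Illustrative sketch (not from the original module; the projection and
# criteria are examples only):
#
#     tasks = session.query(
#         'select name, status.name from Task '
#         'where project.name is "my_project"',
#         page_size=100
#     )
#     for task in tasks:
#         print task['name']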
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
iterate over an entity's properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
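# Usage sketch (illustrative only, assuming an existing `session`; the
# username is a placeholder): persist recorded operations with commit()
# and revert to a known good state with rollback() if the commit fails.
#
#     user = session.create('User', {'username': 'reviewer'})
#     try:
#         session.commit()
#     except Exception:
#         session.rollback()
#         raise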
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
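# Plugin sketch (illustrative only; the location name and prefix are
# placeholders): a discovered plugin can react to the
# 'ftrack.api.session.configure-location' event published above to attach
# an accessor and structure to a custom location.
#
#     def configure_locations(event):
#         session = event['data']['session']
#         location = session.query(
#             'Location where name is "studio.local"'
#         ).first()
#         if location is not None:
#             location.accessor = ftrack_api.accessor.disk.DiskAccessor(
#                 prefix='/studio/projects'
#             )
#             location.structure = ftrack_api.structure.origin.OriginStructure()
#             location.priority = 50
#
#     def register(session):
#         session.event_hub.subscribe(
#             'topic=ftrack.api.session.configure-location',
#             configure_locations
#         )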
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
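# Usage sketch (illustrative only, assuming an existing `session`):
# serialise an entity using only its locally modified attribute values.
#
#     task = session.query('Task').first()
#     task['name'] = 'Renamed task'
#     payload = session.encode(task, entity_attribute_strategy='modified_only')
#     # `payload` is now a JSON string holding the entity reference plus
#     # only the attributes changed locally.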
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper that returns locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
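# Usage sketch (illustrative only, assuming an existing `session`): pick
# the highest priority accessible location, optionally restricted to
# locations where a particular component is available.
#
#     location = session.pick_location()
#     component = session.query('FileComponent').first()
#     component_location = session.pick_location(component)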
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
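# Usage sketch (illustrative only; paths and component names are
# placeholders): create a single file component and a sequence component.
#
#     component = session.create_component(
#         '/path/to/render.mov', data={'name': 'main'}
#     )
#     sequence = session.create_component(
#         '/path/to/plate.%04d.exr [1001-1100]', data={'name': 'plate'}
#     )
#     session.commit()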
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
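# Usage sketch (illustrative only, assuming existing `session` and
# `component` objects): report where a component is available.
#
#     availability = session.get_component_availability(component)
#     for location_id, percentage in availability.items():
#         print location_id, percentage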
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
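# Usage sketch (illustrative only, assuming an existing `session`): build
# an authenticated URL for the 'info' widget of a task.
#
#     task = session.query('Task').first()
#     url = session.get_widget_url('info', entity=task, theme='dark')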
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to already be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
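# Usage sketch (illustrative only; the file path is a placeholder):
# submit a local movie for encoding and inspect the resulting job data.
#
#     job = session.encode_media('/path/to/local/file.mov')
#     job_data = json.loads(job['data'])
#     print job_data['source_component_id']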
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
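# Usage sketch (illustrative only, assuming existing `session`,
# `component` and `file_path` objects, and assuming the returned mapping
# exposes 'url' and 'headers' keys): request upload metadata and upload
# the data with an HTTP PUT.
#
#     import base64
#
#     with open(file_path, 'rb') as file_object:
#         payload = file_object.read()
#     checksum = base64.b64encode(hashlib.md5(payload).digest())
#     metadata = session.get_upload_metadata(
#         component_id=component['id'],
#         file_name=os.path.basename(file_path),
#         file_size=len(payload),
#         checksum=checksum
#     )
#     response = requests.put(
#         metadata['url'], data=payload, headers=metadata['headers']
#     )
#     response.raise_for_status()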
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | web_review.rst | Tutorial / Subdoc | Publishing for web review | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/web_review.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Publishing for web review
Follow the example/encode_media example if you want to upload and encode
media using ftrack.
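For reference, a minimal sketch of that approach could look like the
following, using the Session.encode_media helper (the file path and version
id are placeholders):
import json
job = session.encode_media('/path/to/local/file.mov', version_id=version_id)
# The job data describes the encoded output components.
job_data = json.loads(job['data'])
print job_data['output']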
If you already have a file encoded in the correct format and want to
bypass the built-in encoding in ftrack, you can create the component
manually and add it to the ftrack.server location:
# Retrieve or create version.
version = session.get('AssetVersion', 'SOME-ID')
server_location = session.query('Location where name is "ftrack.server"').one()
filepath = '/path/to/local/file.mp4'
component = version.create_component(
path=filepath,
data={
'name': 'ftrackreview-mp4'
},
location=server_location
)
# Metadata needs to contain *frameIn*, *frameOut* and *frameRate*.
component['metadata']['ftr_meta'] = json.dumps({
'frameIn': 0,
'frameOut': 150,
'frameRate': 25
})
component.session.commit()
To publish an image for review the steps are similar:
# Retrieve or create version.
version = session.query('AssetVersion', 'SOME-ID')
server_location = session.query('Location where name is "ftrack.server"').one()
filepath = '/path/to/image.jpg'
component = version.create_component(
path=filepath,
data={
'name': 'ftrackreview-image'
},
location=server_location
)
# Metadata needs to contain *format*.
component['metadata']['ftr_meta'] = json.dumps({
'format': 'image'
})
component.session.commit()
Here is a list of component names and how they should be used:
- ftrackreview-image (Images reviewable in the browser)
- ftrackreview-mp4 (H.264/mp4 video reviewable in browser)
- ftrackreview-webm (WebM video reviewable in browser)
Note
Make sure to use the pre-defined component names and set the ftr_meta on
the components or review will not work.
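To verify that the reviewable component has been registered in the
ftrack.server location, its availability can be checked (a sketch using the
get_component_availability helper):
availability = session.get_component_availability(
    component, locations=[server_location]
)
print availability[server_location['id']]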
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version!= 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependant
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependant on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username': 'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username': 'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username': 'martin', 'email': '[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key]!= target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
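# Illustrative usage sketch (entity type and filter are examples only):
#
#     tasks = session.query(
#         'Task where status.name is "In Progress"'
#     ).all()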
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
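# Illustrative usage sketch (projection names are examples only): fetch
# several attributes for a batch of entities in one query.
#
#     tasks = session.query('Task').all()
#     session.populate(tasks, 'name, parent.name')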
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
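# Illustrative usage sketch: serialise only locally modified attribute
# values, mirroring what commit() sends to the server.
#
#     payload = session.encode(
#         entity, entity_attribute_strategy='modified_only'
#     )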
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
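# Illustrative usage sketch (path, version and location are hypothetical):
#
#     component = session.create_component(
#         '/path/to/plate.%04d.exr [1001-1005]',
#         data={'version_id': version['id']},
#         location='auto'
#     )
#     session.commit()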
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
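# Illustrative usage sketch (path is hypothetical); the encoded output
# components are described in job['data'] once the job completes:
#
#     job = session.encode_media('/path/to/preview.mov')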
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
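# Illustrative usage sketch (values are hypothetical); the returned mapping
# is expected to contain the target URL and headers for an HTTP PUT upload:
#
#     metadata = session.get_upload_metadata(
#         component_id=component['id'],
#         file_name='plate.exr',
#         file_size=1024
#     )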
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances.
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | sync_ldap_users.rst | Tutorial / Subdoc | Sync users with LDAP | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/sync_ldap_users.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Sync users with LDAP
If ftrack is configured to connect to LDAP, you may trigger a
synchronization through the API using
ftrack_api.session.Session.call:
result = session.call([
dict(
action='delayed_job',
job_type='SYNC_USERS_LDAP'
)
])
job = result[0]['data']
You will get an ftrack_api.entity.job.Job instance back, which can be used
to check whether the job succeeded:
if job.get('status') == 'failed':
# The job failed; get the error.
logging.error(job.get('data'))
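The job is processed asynchronously on the server, so its status must be
polled before the result can be inspected. A minimal polling sketch, assuming
the status eventually becomes either 'done' or 'failed' and that re-querying
refreshes the cached job:

    import time

    # Poll until the server reports a terminal status.
    while job.get('status') not in ('done', 'failed'):
        time.sleep(1)
        job = session.query(
            u'Job where id is "{0}"'.format(job['id'])
        ).one()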
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
        '''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
        if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
        are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
        # Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
        passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
            'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
            expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
                if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
        if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
                entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
            expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
        '''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
        overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
            query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
                    conditions.append('({0})'.format(' and '.join(condition)))
                query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
        if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
                'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
            :meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
            'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
                    elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
                    elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
        the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
        with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
        '''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
        location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
            if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
            if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
            is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
        '''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
        '''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
        issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
        '''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
        '''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
                    'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
        *invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
        *invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
                    'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
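# Illustrative usage sketch (not part of the original module): these context
# managers are normally obtained through the session helpers, for example:
#
#     with session.auto_populating(False):
#         print entity['name']
#
#     with session.operation_recording(False):
#         entity['name'] = 'change_not_recorded'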
|
ynput__OpenPype | publishing.rst | Tutorial / Subdoc | Publishing versions | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/publishing.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Publishing versions
To learn more about publishing and the concepts around it, read
the ftrack article about publishing.
To publish an asset, you first need to get the context where the asset
should be published:
# Get a task from a given id.
task = session.get('Task', '423ac382-e61d-4802-8914-dce20c92b740')
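If the task id is not known up front, the same context can be retrieved with a
query instead; a small sketch, assuming hypothetical project and task names:
# Alternatively, look the task up by name (hypothetical names shown).
task = session.query(
    'Task where name is "Animation" and project.name is "my_project"'
).first()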
And the parent of the task, which the asset will be published on:
asset_parent = task['parent']
Then we create an asset and a version on the asset:
asset_type = session.query('AssetType where name is "Geometry"').one()
asset = session.create('Asset', {
'name': 'My asset',
'type': asset_type,
'parent': asset_parent
})
asset_version = session.create('AssetVersion', {
'asset': asset,
'task': task
})
Note
The task is not used as the parent of the asset; instead, the task is
linked directly to the AssetVersion.
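Because the link lives on the version, the versions already published against a
task can be listed with a query; a sketch assuming the standard AssetVersion
schema:
# List versions already published against the task.
for version in session.query(
    'AssetVersion where task_id is "{0}"'.format(task['id'])
):
    print version['asset']['name'], version['version']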
Then, once we have a version, we can create the components on it:
asset_version.create_component(
'/path/to/a/file.mov', location='auto'
)
asset_version.create_component(
'/path/to/a/another-file.mov', location='auto'
)
session.commit()
Each call will automatically create a new component and add it to the
location that has been configured with the highest priority.
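To see which location 'auto' would resolve to before committing, the session
can be asked directly; a minimal sketch using the session's pick_location
helper:
# Inspect which location would be picked automatically.
location = session.pick_location()
if location:
    print location['name']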
Components can also be named and added to a custom location like this:
location = session.query('Location where name is "my-location"')
asset_version.create_component(
'/path/to/a/file.mov',
data={
'name': 'foobar'
},
location=location
)
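File sequences can be published in the same way. create_component also accepts
a clique style sequence expression of the form '{head}{padding}{tail}
[{ranges}]', in which case a SequenceComponent with its member components is
created automatically; a sketch with a hypothetical render path:
asset_version.create_component(
    '/path/to/render.%04d.exr [1-24]', location='auto'
)
session.commit()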
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server url used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependant
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependant on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to returns locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
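# Usage sketch (illustrative comment, not part of the original source):
# check where a component is available, assuming ``component`` is an existing
# component entity retrieved via this session.
#
#     availability = session.get_component_availability(component)
#     for location_id, percentage in availability.items():
#         print location_id, percentage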
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server and return a `ftrack.entity.job.Job`.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
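# Usage sketch (illustrative comment, not part of the original source):
# request an authenticated URL for the 'info' widget, assuming ``task`` is an
# existing Task entity.
#
#     url = session.get_widget_url('info', entity=task, theme='dark')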
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component that can be used as a thumbnail will always be
generated if possible.
If *media* is a file path, a new source component will be created and
added to the ftrack server location, and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it is assumed to already be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
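# Usage sketch (illustrative comment, not part of the original source):
# encode a local media file; the path shown is hypothetical.
#
#     job = session.encode_media('/path/to/media.mov')
#     # job['data'] holds the JSON encoded information described above.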
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance.
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send invitations to the provided *users*.
*users* is a list of User instances.
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | security_roles.rst | Tutorial / Subdoc | Working with user security roles | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/security_roles.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Working with user security roles
The API exposes SecurityRole and UserSecurityRole that can be used to
specify who should have access to certain data on different projects.
List all available security roles like this:
security_roles = session.query(
'select name from SecurityRole where type is "PROJECT"'
)
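For example, the names of the returned roles can be listed like this:
for security_role in security_roles:
    print security_role['name']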
Note
We only query for project roles since those are the ones we can add to a
user for certain projects. Other types include API and ASSIGNED. The API
type can only be added to global API keys, which is currently not
supported via the API, and the ASSIGNED type only applies to assigned tasks.
To get all security roles for a user, we can either use relations like
this:
for user_security_role in user['user_security_roles']:
if user_security_role['is_all_projects']:
result_string = 'all projects'
else:
result_string = ', '.join(
[project['full_name'] for project in user_security_role['projects']]
)
print 'User has security role "{0}" which is valid on {1}.'.format(
user_security_role['security_role']['name'],
result_string
)
or query them directly like this:
user_security_roles = session.query(
'UserSecurityRole where user.username is "{0}"'.format(session.api_user)
).all()
User security roles can also be added to a user for all projects like
this:
project_manager_role = session.query(
'SecurityRole where name is "Project Manager"'
).one()
session.create('UserSecurityRole', {
'is_all_projects': True,
'user': user,
'security_role': project_manager_role
})
session.commit()
or for certain projects only like this:
projects = session.query(
'Project where full_name is "project1" or full_name is "project2"'
).all()[:]
session.create('UserSecurityRole', {
'user': user,
'security_role': project_manager_role,
'projects': projects
})
session.commit()
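A user security role can be revoked again by deleting the corresponding
UserSecurityRole entity. A minimal sketch, assuming user_security_role is one
of the instances retrieved as shown above:
session.delete(user_security_role)
session.commit()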
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
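# Usage sketch (illustrative comment, not part of the original source):
# a session can be used as a context manager so that it is closed
# automatically, assuming FTRACK_SERVER, FTRACK_API_KEY and FTRACK_API_USER
# are set in the environment.
#
#     with ftrack_api.Session() as session:
#         print session.server_url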
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
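# Usage sketch (illustrative comment, not part of the original source):
# retrieve an entity by primary key; the identifier shown is hypothetical.
#
#     task = session.get('Task', 'adb4ad6c-7679-11e2-8df2-f23c91df25eb')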
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
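# Usage sketch (illustrative comment, not part of the original source):
# issue a query and iterate the result, which only executes a remote call on
# access.
#
#     projects = session.query('Project').all()
#     for project in projects:
#         print project['full_name']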
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
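# Usage sketch (illustrative comment, not part of the original source):
# revert the session to a known good state when a commit fails.
#
#     try:
#         session.commit()
#     except Exception:
#         session.rollback()
#         raise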
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
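# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session``):
#
#     result = session.call([{'action': 'query_server_information'}])
#     server_version = result[0].get('version')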
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
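# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session``):
#
#     location = session.pick_location()
#     if location is None:
#         raise RuntimeError('No accessible location configured.')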
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
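# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session`` and that the
# referenced files exist on disk; paths are placeholders):
#
#     # Single file component, added to the highest priority location.
#     component = session.create_component('/path/to/plate.mov')
#
#     # Sequence component using the documented sequence syntax.
#     sequence = session.create_component(
#         '/path/to/file.%04d.ext [1-5, 7, 8, 10-20]', data={'name': 'main'}
#     )
#     session.commit()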
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option is given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
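# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session`` and a previously
# retrieved ``task`` entity):
#
#     url = session.get_widget_url('tasks', entity=task, theme='dark')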
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
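# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session``; the path is a
# placeholder):
#
#     job = session.encode_media('/path/to/media.mov')
#     # The returned Job is linked to its output components via the
#     # job_components relation, as described in the docstring above.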
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
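# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session`` and a previously
# created ``component``; file name and size are placeholders):
#
#     metadata = session.get_upload_metadata(
#         component_id=component['id'],
#         file_name='plate.mov',
#         file_size=1024
#     )
#     # ``metadata`` contains the URL and headers to use for an HTTP PUT.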
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | list.rst | Tutorial / Subdoc | Using lists | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/list.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Using lists
Lists can be used to create a collection of asset versions or of objects
such as tasks. A list could hold items that should be sent to a client,
items to be included in today's review session, or items that belong
together in a way that differs from the project hierarchy.
There are two types of lists: one for asset versions and one for other
objects such as tasks.
To create a list, use Session.create:
user = session.query('User').first()  # Get a user from ftrack.
project = session.query('Project').first()  # Get a project from ftrack.
list_category = session.query('ListCategory').first()  # Get a list category from ftrack.
asset_version_list = session.create('AssetVersionList', {
'owner': user,
'project': project,
'category': list_category
})
task_list = session.create('TypedContextList', {
'owner': user,
'project': project,
'category': list_category
})
Then add items to the list like this:
asset_version_list['items'].append(asset_version)
task_list['items'].append(task)
And remove items from the list like this:
asset_version_list['items'].remove(asset_version)
task_list['items'].remove(task)
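As with other changes made through the API, the lists and their items are
only persisted once the session is committed (a minimal sketch, assuming the
entities created above):
session.commit()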
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server url used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
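# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session``):
#
#     user = session.create('User', {'username': 'martin'})
#     session.commit()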
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
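# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session`` and a known
# ``task_id`` primary key value):
#
#     task = session.get('Task', task_id)
#     if task is None:
#         raise ValueError('No task found for the given id.')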
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
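# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session``; the username is a
# placeholder):
#
#     users = session.query('User where username is "martin"')
#     user = users.first()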
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
', '.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
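# Example (editor's sketch, not part of the original source; assumes an
# already constructed Session instance named ``session`` and a list of
# previously retrieved ``users``):
#
#     session.populate(users, 'username, email')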
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
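    # NOTE: The following usage sketch is illustrative and not part of the
    # original source. Assuming an existing `session` with at least one Task
    # on the server, a typical commit cycle looks like:
    #
    #     task = session.query('Task').first()
    #     task['name'] = 'Updated name'
    #     session.commit()  # Sends the recorded update to the server in one batch.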
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
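    # NOTE: Illustrative sketch, not part of the original source. rollback()
    # is typically paired with a failed commit():
    #
    #     try:
    #         session.commit()
    #     except Exception:
    #         session.rollback()  # Discard pending operations and local changes.
    #         raise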
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
        if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
                'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
            :meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
            'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
                    elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
                    elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
        the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
        with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
        '''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
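    # NOTE: Illustrative sketch, not part of the original source. Picking a
    # location for a previously retrieved component (variable names are
    # assumptions):
    #
    #     location = session.pick_location()           # Highest priority accessible location.
    #     location = session.pick_location(component)  # Highest priority location holding component.
    #     if location is not None:
    #         print(location['name'])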
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
        location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
            if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
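    # NOTE: Illustrative sketch, not part of the original source. The path and
    # component name below are placeholders:
    #
    #     component = session.create_component(
    #         '/path/to/render.%04d.exr [1-24]',
    #         data={'name': 'main'},
    #         location='auto'
    #     )
    #     session.commit()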
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
            if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
            is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
        '''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
        The job will encode *media* based on the file type. The job data
        contains information about the encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
        issued. If *media* is a FileComponent, it will be assumed to already be
        available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
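    # NOTE: Illustrative sketch, not part of the original source. The media
    # path is a placeholder:
    #
    #     job = session.encode_media('/path/to/preview.mov')
    #     # Poll job['status'] until it reaches 'done' (or 'failed'); the job
    #     # data then describes the generated components.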
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
        '''Send an invitation to the provided *user*.
        *user* is a User instance.
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
        '''Send an invitation to each of the provided *users*.
        *users* is a list of User instances.
'''
operations = []
for user in users:
operations.append(
{
'action':'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
        *invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
        *invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action':'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | review_session.rst | Tutorial / Subdoc | Using review sessions | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/review_session.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Using review sessions
Client review sessions can either be queried manually or retrieved from a
project instance.

    review_sessions = session.query(
        'ReviewSession where name is "Weekly review"'
    )

    project_review_sessions = project['review_sessions']
To create a new review session on a specific project, use Session.create.

    review_session = session.create('ReviewSession', {
        'name': 'Weekly review',
        'description': 'See updates from last week.',
        'project': project
    })
To add objects to a review session, create them using Session.create and
reference a review session and an asset version.

    review_session_object = session.create('ReviewSessionObject', {
        'name': 'Compositing',
        'description': 'Fixed shadows.',
        'version': 'Version 3',
        'review_session': review_session,
        'asset_version': asset_version
    })

To list all objects in a review session:

    review_session_objects = review_session['review_session_objects']
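The returned objects can be iterated over like any other collection. As a
minimal sketch, printing the name of each object in the review session:

    for review_session_object in review_session_objects:
        print(review_session_object['name'])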
Listing and adding collaborators to a review session can be done using
Session.create and the review_session_invitees relation on a review
session.

    invitee = session.create('ReviewSessionInvitee', {
        'name': 'John Doe',
        'email': '[email protected]',
        'review_session': review_session
    })

    session.commit()

    invitees = review_session['review_session_invitees']

To remove a collaborator, simply delete the object using Session.delete.

    session.delete(invitee)
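As with other changes made through the API, the removal is not persisted
until the session is committed:

    session.commit()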
To send out an invite email to a single collaborator, use
Session.send_review_session_invite.

    session.send_review_session_invite(invitee)

Multiple invitees can have emails sent to them in one batch using
Session.send_review_session_invites.

    session.send_review_session_invites(a_list_of_invitees)
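The following minimal sketch combines the steps above. It assumes that
project and asset_version have already been retrieved, and the invitee
details are placeholders:

    review_session = session.create('ReviewSession', {
        'name': 'Weekly review',
        'project': project
    })
    review_session_object = session.create('ReviewSessionObject', {
        'name': 'Compositing',
        'review_session': review_session,
        'asset_version': asset_version
    })
    invitee = session.create('ReviewSessionInvitee', {
        'name': 'Jane Doe',
        'email': '[email protected]',
        'review_session': review_session
    })
    session.commit()
    session.send_review_session_invite(invitee)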
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
        '''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
        if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependant
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependant on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action':'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
            expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
                if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
        if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
                entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
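    # NOTE: Illustrative sketch, not part of the original source. The id value
    # is a placeholder:
    #
    #     task = session.get('Task', 'some-task-id')
    #     if task is None:
    #         # No matching entity in the cache or on the server.
    #         pass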
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
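# Illustrative usage sketch for ``query`` (assumes an already constructed
# ``session``). The returned QueryResult is evaluated lazily on iteration or
# via first()/one()/all():
#
#     projects = session.query('Project where status is active')
#     for project in projects:
#         print project['full_name']
#
#     # Project attributes up front to reduce server round trips.
#     tasks = session.query(
#         'select name, parent.name from Task '
#         'where project.id is "{0}"'.format(project['id'])
#     ).all()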
def _query(self, expression):
'''Execute *expression* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursivly.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
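# Illustrative usage sketch for ``populate`` (assumes an already constructed
# ``session`` and a hypothetical project name). Batch-fetch attributes for
# locally held entities instead of triggering one auto-populate query per
# attribute access:
#
#     tasks = session.query('Task where project.name is "my_project"').all()
#     session.populate(tasks, 'name, status.name')
#     with session.auto_populating(False):
#         for task in tasks:
#             print task['name'], task['status']['name']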
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
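# Illustrative usage sketch for ``encode`` (assumes an already constructed
# ``session``). The strategy controls which attribute values end up in the
# JSON payload:
#
#     task = session.query('Task').first()
#     # Only locally modified attributes, as used when committing.
#     print session.encode(task, entity_attribute_strategy='modified_only')
#     # All currently set attributes without extra server round trips.
#     print session.encode(task, entity_attribute_strategy='set_only')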
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
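# Illustrative usage sketch for ``create_component`` (paths here are
# hypothetical). A single file and an image sequence, registered in an
# automatically picked location:
#
#     component = session.create_component(
#         '/path/to/plate.mov', data={'name': 'main'}
#     )
#     sequence = session.create_component(
#         '/path/to/plate.%04d.exr [1001-1100]', data={'name': 'exr'}
#     )
#     session.commit()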
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
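# Illustrative usage sketch for ``encode_media`` (the path and ``version``
# entity are hypothetical):
#
#     job = session.encode_media(
#         '/path/to/preview.mov', version_id=version['id']
#     )
#     # ``job['data']`` is expected to hold the JSON document described in the
#     # docstring above, listing the output components being generated.
#     print job['data']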
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | timer.rst | Tutorial / Subdoc | Using timers | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/timer.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Using timers
Timers can be used to track how much time has been spent working on
something.
To start a timer for a user:
user = # Get a user from ftrack.
task = # Get a task from ftrack.
user.start_timer(task)
A timer has now been created for that user and should show up in the
ftrack web UI.
To stop the currently running timer for a user and create a timelog from
it:
user = # Get a user from ftrack.
timelog = user.stop_timer()
Note
Starting a timer when a timer is already running will raise an
exception. Use the force parameter to automatically stop the running
timer first.
user.start_timer(task, force=True)
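A minimal end-to-end sketch, assuming a session has already been created, that at least one task exists, and that the created timelog exposes a duration attribute:
import ftrack_api
session = ftrack_api.Session()
user = session.query(
    u'User where username is {0}'.format(session.api_user)
).one()
task = session.query('Task').first()
# Start a timer, stopping any running timer first.
user.start_timer(task, force=True)
# ... time passes while work is done ...
# Stop the timer, which creates and returns a timelog.
timelog = user.stop_timer()
print timelog['duration']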
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependant
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependant on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
            'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
            expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
                if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
        if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
                entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
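    # Illustrative usage sketch (added for this document, not part of the
    # original source): `get` checks the configured cache first and only
    # queries the server on a miss. The id below is a hypothetical value.
    #
    #     task = session.get('Task', 'e2dc0524-b576-11d3-9612-080027331d74')
    #     if task is None:
    #         print 'No task found with that id.'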
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
            expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
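    # Illustrative usage sketch (not part of the original source): issuing a
    # query with an explicit projection and iterating the deferred result.
    # The attribute names and filter value below are example assumptions.
    #
    #     tasks = session.query(
    #         'select name, status.name from Task where project.name is "demo"'
    #     )
    #     for task in tasks:
    #         print task['name']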
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
        '''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
        overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
                # iterate over an entity's properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
            query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
                    conditions.append('({0})'.format(' and '.join(condition)))
                query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
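    # Illustrative usage sketch (not part of the original source): batch
    # populating attributes for several entities in a single server call.
    # The projection string below is an example assumption.
    #
    #     tasks = session.query('Task').all()
    #     session.populate(tasks, 'name, parent')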
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
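    # Illustrative usage sketch (not part of the original source): a typical
    # commit guarded by rollback so the session returns to a known good state
    # if persisting the recorded operations fails.
    #
    #     try:
    #         session.commit()
    #     except Exception:
    #         session.rollback()
    #         raise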
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
        if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
                'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
            :meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
            'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
                    elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
                    elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up it's primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up it's primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
        '''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
        location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
            if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
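    # Illustrative usage sketch (not part of the original source): registering
    # an image sequence as a component. The path is a placeholder and
    # `version` is assumed to be an existing AssetVersion entity.
    #
    #     component = session.create_component(
    #         '/path/to/render.%04d.exr [1-24]',
    #         data={'version_id': version['id']},
    #         location='auto'
    #     )
    #     session.commit()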
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
        See public method :meth:`Session.create_component` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
            if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
            is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
        '''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
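    # Illustrative usage sketch (not part of the original source): requesting
    # an authenticated widget URL. `task` is assumed to be an existing Task
    # entity retrieved elsewhere.
    #
    #     url = session.get_widget_url('info', entity=task, theme='dark')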
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
        issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
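    # Illustrative usage sketch (not part of the original source): encoding a
    # local movie file for web review. The path is a placeholder; the call
    # returns a Job entity that can be polled for progress.
    #
    #     job = session.encode_media('/path/to/preview.mov')
    #     print job['status']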
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
        '''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
        '''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
                    'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
        *invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
        *invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
                    'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | metadata.rst | Tutorial / Subdoc | Using metadata | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/example/metadata.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Using metadata
Key/value metadata can be written to entities using the metadata
property and can also be used to query entities.
The metadata property has an interface similar to a dictionary, and keys
can be listed using the keys method:
>>> print new_sequence['metadata'].keys()
['frame_padding', 'focal_length']
or items:
>>> print new_sequence['metadata'].items()
[('frame_padding', '4'), ('focal_length', '70')]
Read existing metadata:
>>> print new_sequence['metadata']['frame_padding']
'4'
Setting metadata can be done in a few ways, where the latter form will
replace any existing metadata:
new_sequence['metadata']['frame_padding'] = '5'
new_sequence['metadata'] = {
'frame_padding': '4'
}
Entities can also be queried using metadata:
session.query(
'Sequence where metadata any (key is "frame_padding" and value is "4")'
)
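As a minimal end-to-end sketch (assuming new_sequence is an existing, committed Sequence entity), metadata can be written, persisted with a commit and then used to find the entity again:
new_sequence['metadata']['frame_padding'] = '4'
session.commit()
matching_sequence = session.query(
    'Sequence where metadata any (key is "frame_padding" and value is "4")'
).first()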
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username': 'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username': 'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username': 'martin', 'email': '[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
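# Usage sketch: restrict matching to 'username' so 'email' is treated as
# data to update on an existing user rather than as part of its identity
# (values follow the docstring example above).
# user = session.ensure(
#     'User',
#     {'username': 'martin', 'email': '[email protected]'},
#     identifying_keys=['username']
# )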
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
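# Usage sketch (the id value is illustrative): check the cache first,
# fall back to a server query and handle the not-found case.
# task = session.get('Task', '11111111-2222-3333-4444-555555555555')
# if task is None:
#     print 'No matching task found.'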
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
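# Usage sketch: without an explicit 'select', default projections are
# added locally; the returned QueryResult only issues the remote call
# when accessed, with page_size capping each page.
# for project in session.query('Project', page_size=100):
#     print project['id']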
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
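# Usage sketch for Session.populate (attribute names are assumptions):
# fetch several attributes for a batch of entities with one server call
# instead of triggering auto-population per attribute access.
# tasks = session.query('select id from Task').all()
# session.populate(tasks, 'name, status')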
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
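# Usage sketch: Session.call uses the 'modified_only' strategy when
# committing changes; other strategies can help when inspecting state
# (payload is a hypothetical list of entities to serialise).
# json_text = session.encode(payload, entity_attribute_strategy='set_only')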
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify locations as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
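# Usage sketch (paths are hypothetical): create a single file component
# and a sequence component, letting the default 'auto' location pick a
# suitable accessible location.
# component = session.create_component(
#     '/hypothetical/render.mov', data={'name': 'main'}
# )
# sequence = session.create_component(
#     '/hypothetical/render.%04d.exr [1-100]'
# )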
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
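# Usage sketch: each returned mapping pairs a location id with a
# percentage, e.g. {'<location-id>': 100.0} for a fully available
# component (the id value is illustrative).
# availability = session.get_component_availability(component)
# for location_id, percentage in availability.items():
#     print location_id, percentage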
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
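Illustrative usage (a sketch; the queried task is just an example
entity)::
    task = session.query('Task').first()
    url = session.get_widget_url('info', entity=task, theme='dark')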
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
If possible, an image component that can be used as a thumbnail will
always be generated.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
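Illustrative usage (a sketch; the path is a placeholder)::
    job = session.encode_media('/path/to/media.mov')
    # Inspect job['data'] once the job has finished to discover the
    # resulting component ids.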
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the component's details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
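Illustrative usage (a sketch; assumes *component* is an existing
FileComponent and the name and size values are placeholders)::
    metadata = session.get_upload_metadata(
        component_id=component['id'],
        file_name='plate.exr',
        file_size=1024
    )
    # metadata contains the signed URL and headers for the HTTP PUT.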
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances.
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | working_with_entities.rst | Directory summarization | Working with entities | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/working_with_entities.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Working with entities
Entity <ftrack_api.entity.base.Entity> instances are Python dict-like
objects whose keys correspond to attributes for that type in the system.
They may also provide helper methods to perform common operations such
as replying to a note:
note = session.query('Note').first()
print note.keys()
print note['content']
note['content'] = 'A different message!'
reply = note.create_reply(...)
Attributes
Each entity instance is typed according to its underlying entity type on
the server and configured with appropriate attributes. For example, a
task will be represented by a Task class and have corresponding
attributes. You can
customise entity classes <working_with_entities/entity_types> to alter
attribute access or provide your own helper methods.
To see the available attribute names on an entity use the
~ftrack_api.entity.base.Entity.keys method on the instance:
>>> task = session.query('Task').first()
>>> print task.keys()
['id', 'name', ...]
If you need more information about the type of attribute, examine the
attributes property on the corresponding class:
>>> for attribute in type(task).attributes:
... print attribute
<ftrack_api.attribute.ScalarAttribute(id) object at 66701296>
<ftrack_api.attribute.ScalarAttribute(name) object at 66702192>
<ftrack_api.attribute.ReferenceAttribute(status) object at 66701240>
<ftrack_api.attribute.CollectionAttribute(timelogs) object at 66701184>
<ftrack_api.attribute.KeyValueMappedCollectionAttribute(metadata) object at 66701632>
...
Notice that there are different types of attribute such as
~ftrack_api.attribute.ScalarAttribute for plain values or
~ftrack_api.attribute.ReferenceAttribute for relationships. These
different types are reflected in the behaviour on the entity instance
when accessing a particular attribute by key:
>>> # Scalar
>>> print task['name']
'model'
>>> task['name'] = 'comp'

>>> # Single reference
>>> print task['status']
<Status(e610b180-4e64-11e1-a500-f23c91df25eb)>
>>> new_status = session.query('Status').first()
>>> task['status'] = new_status

>>> # Collection
>>> print task['timelogs']
<ftrack_api.collection.Collection object at 0x00000000040D95C0>
>>> print task['timelogs'][:]
[<dynamic ftrack Timelog object 72322240>, ...]
>>> new_timelog = session.create('Timelog', {...})
>>> task['timelogs'].append(new_timelog)
Bi-directional relationships
Some attributes refer to different sides of a bi-directional
relationship. In the current version of the API bi-directional updates
are not propagated automatically to the other side of the relationship.
For example, setting a parent will not update the parent entity's
children collection locally. There are plans to support this behaviour
better in the future. For now, after commit,
populate <working_with_entities/populating> the reverse side attribute
manually.
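As a rough sketch (assuming a task whose new parent is a shot), that manual
repopulation could look like this:
    task = session.query('Task').first()
    shot = session.query('Shot').first()
    task['parent'] = shot
    session.commit()
    # The shot's 'children' collection is not refreshed automatically;
    # repopulate it to see the new child locally.
    session.populate(shot, 'children')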
Creating entities
In order to create a new instance of an entity call Session.create
passing in the entity type to create and any initial attribute values:
new_user = session.create('User', {'username': 'martin'})
If there are any default values that can be set client side then they
will be applied at this point. Typically this will be the unique entity
key:
>>> print new_user['id']
170f02a4-6656-4f15-a5cb-c4dd77ce0540
At this point no information has been sent to the server. However, you
are free to continue updating <working_with_entities/updating> this
object locally until you are ready to persist the changes by calling
Session.commit.
If you are wondering about what would happen if you accessed an unset
attribute on a newly created entity, go ahead and give it a go:
>>> print new_user['first_name']
NOT_SET
The session knows that it is a newly created entity that has not yet
been persisted so it doesn't try to fetch any attributes on access even
when session.auto_populate is turned on.
Updating entities
Updating an entity is as simple as modifying the values for specific
keys on the dict-like instance and calling Session.commit when ready.
The entity to update can either be a new entity or a retrieved entity:
task = session.query('Task').first()
task['bid'] = 8
Remember that, for existing entities, accessing an attribute will load
it from the server automatically. If you are interested in just setting
values without first fetching them from the server, turn auto-population
<understanding_sessions/auto_population> off temporarily:
>>> with session.auto_populating(False):
... task = session.query('Task').first()
... task['bid'] = 8
Server side reset of entity attributes or settings
Some entities support resetting of attributes, for example to reset a
user's API key:
session.reset_remote(
'api_key', entity=session.query('User where username is "test_user"').one()
)
Note
Currently the only attribute possible to reset is 'api_key' on the user
entity type.
Deleting entities
To delete an entity you need an instance of the entity in your session
(either from having created one or retrieving one). Then call
Session.delete on the entity and Session.commit when ready:
task_to_delete = session.query('Task').first()
session.delete(task_to_delete)
...
session.commit()
Note
Even though the entity is deleted, you will still have access to the
local instance and any local data stored on that instance whilst that
instance remains in memory.
Keep in mind that some deletions, when propagated to the server, will
cause other entities to be deleted also, so you don't have to worry
about deleting an entire hierarchy manually. For example, deleting a
Task will also delete all Notes on that task.
Populating entities
When an entity is retrieved via Session.query or Session.get it will
have some attributes prepopulated. The rest are dynamically loaded when
they are accessed. If you need to access many attributes it can be more
efficient to request all those attributes be loaded in one go. One way
to do this is to use projections <querying/projections> in queries.
However, if you have entities that have been passed to you from
elsewhere you don't have control over the query that was issued to get
those entities. In this case you can populate those entities in
one go using Session.populate which works exactly like projections
<querying/projections> in queries do, but operating against known
entities:
>>> users = session.query('User')
>>> session.populate(users, 'first_name, last_name')
>>> with session.auto_populating(False): # Turn off for example purpose.
... for user in users:
... print 'Name: {0}'.format(user['first_name'])
... print 'Email: {0}'.format(user['email'])
Name: Martin
Email: NOT_SET
...
Note
You can populate a single or many entities in one call so long as they
are all the same entity type.
Entity states
Operations on entities are recorded in the session
<understanding_sessions/unit_of_work> as they happen. At any time you
can inspect an entity to determine its current state from those pending
operations.
To do this, use ftrack_api.inspection.state:
>>> import ftrack_api.inspection
>>> new_user = session.create('User', {})
>>> print ftrack_api.inspection.state(new_user)
CREATED
>>> existing_user = session.query('User').first()
>>> print ftrack_api.inspection.state(existing_user)
NOT_SET
>>> existing_user['email'] = '[email protected]'
>>> print ftrack_api.inspection.state(existing_user)
MODIFIED
>>> session.delete(new_user)
>>> print ftrack_api.inspection.state(new_user)
DELETED
Customising entity types
Each type of entity in the system is represented in the Python client by
a dedicated class. However, because the types of entities can vary these
classes are built on demand using schema information retrieved from the
server.
Many of the default classes provide additional helper methods which are
mixed into the generated class at runtime when a session is started.
In some cases it can be useful to tailor the custom classes to your own
pipeline workflows. Perhaps you want to add more helper functions,
change attribute access rules or even providing a layer of backwards
compatibility for existing code. The Python client was built with this
in mind and makes such customisations as easy as possible.
When a Session is constructed it fetches schema details from the
connected server and then calls an Entity factory
<ftrack_api.entity.factory.Factory> to create classes from those
schemas. It does this by emitting a synchronous event,
ftrack.api.session.construct-entity-type, for each schema and expecting
a class object to be returned.
In the default setup, a construct_entity_type.py
<../resource/plugin/construct_entity_type.py> plugin is placed on the
FTRACK_EVENT_PLUGIN_PATH. This plugin will register a trivial subclass
of ftrack_api.entity.factory.StandardFactory to create the classes in
response to the construct event. The simplest way to get started is to
edit this default plugin as required.
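For orientation, a minimal plugin of this kind might look roughly like the
following (a sketch, not a verbatim copy of the bundled resource):
    import ftrack_api.entity.factory
    class Factory(ftrack_api.entity.factory.StandardFactory):
        '''Entity class factory.'''
    def register(session):
        '''Register plugin with *session*.'''
        factory = Factory()
        def construct_entity_type(event):
            '''Return class to represent entity type specified by *event*.'''
            schema = event['data']['schema']
            return factory.create(schema)
        session.event_hub.subscribe(
            'topic=ftrack.api.session.construct-entity-type',
            construct_entity_type
        )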
understanding_sessions/plugins
Default projections
When a query <querying> is issued without any projections
<querying/projections>, the session will automatically add default
projections according to the type of the entity.
For example, the following shows that for a User, only id is fetched by
default when no projections added to the query:
>>> user = session.query('User').first()
>>> with session.auto_populating(False): # For demonstration purpose only.
... print user.items()
[
(u'id', u'59f0963a-15e2-11e1-a5f1-0019bb4983d8')
(u'username', Symbol(NOT_SET)),
(u'first_name', Symbol(NOT_SET)),
...
]
Note
These default projections are also used when you access a relationship
attribute using the dictionary key syntax.
If you want to default to fetching username for a User as well then you
can change the default_projections in your class factory plugin:
class Factory(ftrack_api.entity.factory.StandardFactory):
'''Entity class factory.'''
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.'''
cls = super(Factory, self).create(schema, bases=bases)
# Further customise cls before returning.
if schema['id'] == 'User':
cls.default_projections = ['id', 'username']
return cls
Now a projection-less query will also query username by default:
Note
You will need to start a new session to pick up the change you made:
session = ftrack_api.Session()
>>> user = session.query('User').first()
>>> with session.auto_populating(False): # For demonstration purpose only.
... print user.items()
[
(u'id', u'59f0963a-15e2-11e1-a5f1-0019bb4983d8')
(u'username', u'martin'),
(u'first_name', Symbol(NOT_SET)),
...
]
Note that if any specific projections are applied in a query, those
override the default projections entirely. This allows you to also
reduce the data loaded on demand:
>>> session = ftrack_api.Session() # Start new session to avoid cache.
>>> user = session.query('select id from User').first()
>>> with session.auto_populating(False): # For demonstration purpose only.
... print user.items()
[
(u'id', u'59f0963a-15e2-11e1-a5f1-0019bb4983d8')
(u'username', Symbol(NOT_SET)),
(u'first_name', Symbol(NOT_SET)),
...
]
Helper methods
If you want to add additional helper methods to the constructed classes
to better support your pipeline logic, then you can simply patch the
created classes in your factory, much like with changing the default
projections:
def get_full_name(self):
'''Return full name for user.'''
return '{0} {1}'.format(self['first_name'], self['last_name']).strip()
class Factory(ftrack_api.entity.factory.StandardFactory):
'''Entity class factory.'''
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.'''
cls = super(Factory, self).create(schema, bases=bases)
# Further customise cls before returning.
if schema['id'] == 'User':
cls.get_full_name = get_full_name
return cls
Now you have a new helper method get_full_name on your User entities:
>>> session = ftrack_api.Session() # New session to pick up changes.
>>> user = session.query('User').first()
>>> print user.get_full_name()
Martin Pengelly-Phillips
If you'd rather not patch the existing classes, or perhaps have a lot of
helpers to mix in, you can instead inject your own class as the base
class. The only requirement is that it has the base
~ftrack_api.entity.base.Entity class in its ancestor classes:
import ftrack_api.entity.base
class CustomUser(ftrack_api.entity.base.Entity):
'''Represent user.'''
def get_full_name(self):
'''Return full name for user.'''
return '{0} {1}'.format(self['first_name'], self['last_name']).strip()
class Factory(ftrack_api.entity.factory.StandardFactory):
'''Entity class factory.'''
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.'''
# Alter base class for constructed class.
if bases is None:
bases = [ftrack_api.entity.base.Entity]
if schema['id'] == 'User':
bases = [CustomUser]
cls = super(Factory, self).create(schema, bases=bases)
return cls
The resulting effect is the same:
>>> session = ftrack_api.Session() # New session to pick up changes.
>>> user = session.query('User').first()
>>> print user.get_full_name()
Martin Pengelly-Phillips
Note
Your custom class is not the leaf class which will still be a
dynamically generated class. Instead your custom class becomes the base
for the leaf class:
>>> print type(user).__mro__
(<dynamic ftrack class 'User'>, <dynamic ftrack class 'CustomUser'>, ...)
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
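Illustrative usage (a sketch; using the session as a context manager
calls this automatically on exit)::
    with ftrack_api.Session() as session:
        pass  # Work with the session here.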
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
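Illustrative usage (a sketch; the id below is a placeholder)::
    task = session.get('Task', 'a-task-id')
    if task is not None:
        print task['name']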
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
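Illustrative usage (a sketch; the status name is only an example)::
    for task in session.query('Task where status.name is "In Progress"'):
        print task['name']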
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
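# Usage sketch (not part of the original module; assumes a Session named
# ``session`` and at least one queryable FileComponent exist):
#
#     component = session.query('FileComponent').first()
#     best_location = session.pick_location()  # Highest priority accessible.
#     component_location = session.pick_location(component)
#     per_component = session.pick_locations([component])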
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
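# Usage sketch for ``create_component`` (paths and component names below
# are placeholders chosen for illustration, not values from this module):
#
#     # Single file component added to the highest priority location.
#     component = session.create_component(
#         '/data/plate.exr', data={'name': 'main'}
#     )
#
#     # Image sequence registered as a SequenceComponent with members.
#     sequence = session.create_component(
#         '/data/plate.%04d.exr [1-24]', data={'name': 'main'}
#     )
#     session.commit()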
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
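# Usage sketch for ``get_widget_url`` (the task query is an assumption
# used purely for illustration):
#
#     task = session.query('Task').first()
#     url = session.get_widget_url('info', entity=task, theme='dark')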
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
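# Usage sketch for ``encode_media`` (the file path is a placeholder):
#
#     job = session.encode_media('/data/review.mov')
#     # job['data'] describes the output components once the job finishes.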
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
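# Usage sketch for ``get_upload_metadata`` (a committed component and the
# 'url' / 'headers' keys in the returned mapping are assumptions based on
# the docstring above; file name, size and path are placeholders):
#
#     metadata = session.get_upload_metadata(
#         component_id=component['id'],
#         file_name='plate.exr',
#         file_size=1024
#     )
#     with open('/data/plate.exr', 'rb') as stream:
#         requests.put(
#             metadata['url'], data=stream, headers=metadata['headers']
#         )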
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance.
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send invitations to the provided *users*.
*users* is a list of User instances.
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | tutorial.rst | Tutorial | A quick dive into using the API | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/tutorial.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Tutorial
This tutorial provides a quick dive into using the API and the broad
stroke concepts involved.
First make sure the ftrack Python API is installed <installing>.
Then start a Python session and import the ftrack API:
>>> import ftrack_api
The API uses sessions <understanding_sessions> to manage communication
with an ftrack server. Create a session that connects to your ftrack
server (changing the passed values as appropriate):
>>> session = ftrack_api.Session(
... server_url='https://mycompany.ftrackapp.com',
... api_key='7545384e-a653-11e1-a82c-f22c11dd25eq',
... api_user='martin'
... )
Note
A session can use environment variables
<understanding_sessions/connection> to configure itself.
Now print a list of the available entity types retrieved from the
server:
>>> print session.types.keys()
[u'TypedContext', u'ObjectType', u'Priority', u'Project', u'Sequence',
u'Shot', u'Task', u'Status', u'Type', u'Timelog', u'User']
Now the list of possible entity types is known, query <querying> the
server to retrieve entities of a particular type by using the
Session.query method:
>>> projects = session.query('Project')
Each project retrieved will be an entity <working_with_entities>
instance that behaves much like a standard Python dictionary. For
example, to find out the available keys for an entity, call the
~ftrack_api.entity.Entity.keys method:
>>> print projects[0].keys()
[u'status', u'is_global', u'name', u'end_date', u'context_type',
u'id', u'full_name', u'root', u'start_date']
Now, iterate over the retrieved entities and print each one's name:
>>> for project in projects:
... print project['name']
test
client_review
tdb
man_test
ftrack
bunny
Note
Many attributes for retrieved entities are loaded on demand when the
attribute is first accessed. Doing this lots of times in a script can be
inefficient, so it is worth using projections <querying/projections> in
queries or pre-populating <working_with_entities/populating> entities
where appropriate. You can also customise default projections
<working_with_entities/entity_types/default_projections> to help others
pre-load common attributes.
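For example, a projection fetches attributes as part of the query itself,
and Session.populate can batch-load attributes onto entities already
retrieved. A minimal sketch, reusing attribute names listed above:
>>> projects = session.query(
...     'select name, status from Project'
... ).all()
>>> session.populate(projects, 'full_name, end_date')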
To narrow a search, add criteria <querying/criteria> to the query:
>>> active_projects = session.query('Project where status is active')
Combine criteria for more powerful queries:
>>> import arrow
>>>
>>> active_projects_ending_before_next_week = session.query(
... 'Project where status is active and end_date before "{0}"'
... .format(arrow.now().replace(weeks=+1))
... )
Some attributes on an entity will refer to another entity or collection
of entities, such as children on a Project being a collection of Context
entities that have the project as their parent:
>>> project = session.query('Project').first()
>>> print project['children']
<ftrack_api.collection.Collection object at 0x00000000045B1438>
And on each Context there is a corresponding parent attribute which is a
link back to the parent:
>>> child = project['children'][0]
>>> print child['parent'] is project
True
These relationships can also be used in the criteria for a query:
>>> results = session.query(
... 'Context where parent.name like "te%"'
... )
To create new entities in the system use Session.create:
>>> new_sequence = session.create('Sequence', {
... 'name': 'Starlord Reveal'
... })
The created entity is not yet persisted to the server, but it is still
possible to modify it.
>>> new_sequence['description'] = 'First hero character reveal.'
The sequence also needs a parent. This can be done in one of two ways:
- Set the parent attribute on the sequence:
>>> new_sequence['parent'] = project
- Add the sequence to a parent's children attribute:
>>> project['children'].append(new_sequence)
When ready, persist to the server using Session.commit:
>>> session.commit()
When finished with a Session, it is important to ~Session.close it in
order to release resources and properly unsubscribe any registered event
listeners. It is also possible to use the session as a context manager
in order to have it closed automatically after use:
>>> with ftrack_api.Session() as session:
... print session.query('User').first()
<User(0154901c-eaf9-11e5-b165-00505681ec7a)>
>>> print session.closed
True
Once a Session is closed, any operations that attempt to use the closed
connection to the ftrack server will fail:
>>> session.query('Project').first()
ConnectionClosedError: Connection closed.
Continue to the next section to start learning more about the API in
greater depth or jump over to the usage examples <example> if you prefer
to learn by example.
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependant
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependant on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username': 'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username': 'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username': 'martin', 'email': '[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
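# Example usage (illustrative sketch): an explicit 'select' avoids later
# auto-population round trips for the projected attributes:
#
#     tasks = session.query('select name from Task').all()
#
# Without a leading 'select', the entity type's default projections are
# added automatically as described above.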
def _query(self, expression):
'''Execute *expression* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entity's properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the component's registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
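# Example usage (illustrative sketch; the path and component name below are
# placeholders):
#
#     component = session.create_component(
#         '/path/to/sequence.%04d.exr [1-5]',
#         data={'name': 'main'},
#         location='auto'
#     )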
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component that can be used as a thumbnail will always be generated
if possible.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behaviour.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
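# Example usage (illustrative sketch; the path below is a placeholder):
#
#     job = session.encode_media('/path/to/media.mov')
#
# The returned Job can then be monitored, for example by re-querying it
# periodically, to find out when encoding has finished.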
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | understanding_sessions.rst | Module doc | Understanding sessions | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/understanding_sessions.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Understanding sessions
All communication with an ftrack server takes place through a Session.
This allows more opportunity for configuring the connection, plugins
etc. and also makes it possible to connect to multiple ftrack servers
from within the same Python process.
Connection
A session can be manually configured at runtime to connect to a server
with certain credentials:
>>> session = ftrack_api.Session(
... server_url='https://mycompany.ftrackapp.com',
... api_key='7545384e-a653-11e1-a82c-f22c11dd25eq',
... api_user='martin'
... )
Alternatively, a session can use the following environment variables to
configure itself:
- FTRACK_SERVER
- FTRACK_API_USER
- FTRACK_API_KEY
When using environment variables, no server connection arguments need to
be passed manually:
>>> session = ftrack_api.Session()
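As a minimal sketch, the same placeholder values shown above could also be
set from Python before the session is constructed:
>>> import os
>>> os.environ['FTRACK_SERVER'] = 'https://mycompany.ftrackapp.com'
>>> os.environ['FTRACK_API_USER'] = 'martin'
>>> os.environ['FTRACK_API_KEY'] = '7545384e-a653-11e1-a82c-f22c11dd25eq'
>>> session = ftrack_api.Session()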
Unit of work
Each session follows the unit of work pattern. This means that many of
the operations performed using a session will happen locally and only be
persisted to the server at certain times, notably when calling
Session.commit. This approach helps optimise calls to the server and
also group related logic together in a transaction:
user = session.create('User', {})
user['username'] = 'martin'
other_user = session.create('User', {'username': 'bjorn'})
other_user['email'] = '[email protected]'
Behind the scenes a series of operations
<ftrack_api.operation.Operation> are recorded reflecting the changes
made. You can take a peek at these operations if desired by examining
the Session.recorded_operations property:
>>> for operation in session.recorded_operations:
... print operation
<ftrack_api.operation.CreateEntityOperation object at 0x0000000003EC49B0>
<ftrack_api.operation.UpdateEntityOperation object at 0x0000000003E16898>
<ftrack_api.operation.CreateEntityOperation object at 0x0000000003E16240>
<ftrack_api.operation.UpdateEntityOperation object at 0x0000000003E16128>
Calling Session.commit persists all recorded operations to the server
and clears the operation log:
session.commit()
Note
The commit call will optimise operations to be as efficient as possible
without breaking logical ordering. For example, a create followed by
updates on the same entity will be compressed into a single create.
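As an illustrative sketch of that optimisation (the attribute values here are
arbitrary examples):
user = session.create('User', {'username': 'jane'})
user['email'] = '[email protected]'
# On commit, the create and the subsequent update are collapsed into a
# single create payload before being sent to the server.
session.commit()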
Queries are special and always issued on demand. As a result, a query
may return unexpected results if the relevant local changes have not yet
been sent to the server:
>>> user = session.create('User', {'username': 'some_unique_username'})
>>> query = 'User where username is "{0}"'.format(user['username'])
>>> print len(session.query(query))
0
>>> session.commit()
>>> print len(session.query(query))
1
Where possible, query results are merged in with existing data
transparently with any local changes preserved:
>>> user = session.query('User').first()
>>> user['email'] = '[email protected]' # Not yet committed to server.
>>> retrieved = session.query(
... 'User where id is "{0}"'.format(user['id'])
... ).one()
>>> print retrieved['email'] # Displays locally set value.
'[email protected]'
>>> print retrieved is user
True
This is possible due to the smart caching layer in the session.
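For example, Session.get consults the configured cache before falling back to
a server query, so retrieving an entity you have already fetched is typically
served locally (a sketch, assuming a user has been queried as above):
user = session.query('User').first()
# Retrieval by primary key can be satisfied from the session cache where
# possible, avoiding another server round trip.
same_user = session.get('User', user['id'])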
Auto-population
Another important concept in a session is that of auto-population. By
default a session is configured to auto-populate missing attribute
values on access. This means that the first time you access an attribute
on an entity instance a query will be sent to the server to fetch the
value:
user = session.query('User').first()
# The next command will issue a request to the server to fetch the
# 'username' value on demand as this is the first time it is accessed.
print user['username']
Once a value has been retrieved it is cached <caching> locally in the
session and accessing it again will not issue more server calls:
# On second access no server call is made.
print user['username']
You can control the auto population behaviour of a session by either
changing the Session.auto_populate attribute on a session or using the
provided context helper Session.auto_populating to temporarily change
the setting. When turned off you may see a special
~ftrack_api.symbol.NOT_SET symbol that represents a value has not yet
been fetched:
>>> with session.auto_populating(False):
... print user['email']
NOT_SET
Whilst convenient for simple scripts, making many requests to the server
for each attribute can slow execution of a script. To support
optimisation the API includes methods for batch fetching attributes.
Read about them in querying/projections and
working_with_entities/populating.
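As a brief sketch of both approaches (the projected attributes are just
examples):
users = session.query('User').all()
# Batch fetch attributes on entities already in hand.
session.populate(users, 'username, email')
# Or request projections up front so the query returns them populated.
users = session.query('select username, email from User').all()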
Entity types
When a session has successfully connected to the server it will
automatically download schema information and create appropriate classes
<working_with_entities/entity_types> for use. This is important as
different servers can support different entity types and configurations.
This information is readily available and useful if you need to check
that the entity types you expect are present. Here's how to print a list
of all entity types registered for use in the current API session:
>>> print session.types.keys()
[u'Task', u'Shot', u'TypedContext', u'Sequence', u'Priority',
u'Status', u'Project', u'User', u'Type', u'ObjectType']
Each entity type is backed by a customisable class
<working_with_entities/entity_types> that further describes the entity
type and the attributes that are available.
Hint
If you need to use an isinstance check, always go through the session as
the classes are built dynamically:
>>> isinstance(entity, session.types['Project'])
Configuring plugins
Plugins are used by the API to extend it with new functionality, such as
locations <location> or adding convenience methods to
understanding_sessions/entity_types. In addition to new API
functionality, event plugins may also be used for event processing by
listening to ftrack update events <handling_events> or adding custom
functionality to ftrack by registering actions <action>.
When starting a new Session either pass the plugins_paths to search
explicitly or rely on the environment variable FTRACK_EVENT_PLUGIN_PATH.
As each session is independent of others, you can configure plugins per
session.
The paths will be searched for plugins <plugin>, Python files which
expose a register function. These functions will be evaluated and can be
used to extend the API with new functionality, such as locations or
actions.
If you do not specify any override then the session will attempt to
discover and use the default plugins.
Plugins are discovered using ftrack_api.plugin.discover with the session
instance passed as the sole positional argument. Most plugins should
take the form of a mount function that then subscribes to specific
events
<handling_events> on the session:
def configure_locations(event):
'''Configure locations for session.'''
session = event['data']['session']
# Find location(s) and customise instances.
def register(session):
'''Register plugin with *session*.'''
session.event_hub.subscribe(
'topic=ftrack.api.session.configure-location',
configure_locations
)
Additional keyword arguments can be passed as plugin_arguments to the
Session on instantiation. These are passed to the plugin register
function if its signature supports them:
# a_plugin.py
def register(session, reticulate_splines=False):
'''Register plugin with *session*.'''
...
# main.py
session = ftrack_api.Session(
plugin_arguments={
'reticulate_splines': True,
'some_other_argument': 42
}
)
Lists of events which you can subscribe to in your plugins are available
both for synchronous events published by the Python API <event_list> and
asynchronous events published by the server <ftrack:developing/events/list>.
Quick setup
1. Create a directory where plugins will be stored. Place any plugins
you want loaded automatically in an API session here.
2. Configure the FTRACK_EVENT_PLUGIN_PATH to point to the directory.
Detailed setup
Start out by creating a directory on your machine where you will store
your plugins. Download example_plugin.py </resource/example_plugin.py>
and place it in the directory.
Open up a terminal window and ensure that the plugin is picked up when
instantiating the session with plugin_paths set manually:
>>> # Set up basic logging
>>> import logging
>>> logging.basicConfig()
>>> plugin_logger = logging.getLogger('com.example.example-plugin')
>>> plugin_logger.setLevel(logging.DEBUG)
>>>
>>> # Configure the API, loading plugins in the specified paths.
>>> import ftrack_api
>>> plugin_paths = ['/path/to/plugins']
>>> session = ftrack_api.Session(plugin_paths=plugin_paths)
If everything is working as expected, you should see the following in
the output:
DEBUG:com.example.example-plugin:Plugin registered
Instead of specifying the plugin paths when instantiating the session,
you can also specify the FTRACK_EVENT_PLUGIN_PATH to point to the
directory. To specify multiple directories, use the path separator for
your operating system.
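For example, a small sketch of configuring the variable from Python for the
current process (the second path is a placeholder):
>>> import os
>>> os.environ['FTRACK_EVENT_PLUGIN_PATH'] = os.pathsep.join([
...     '/path/to/plugins',
...     '/path/to/more/plugins'
... ])
>>> session = ftrack_api.Session()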
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server URL used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version!= 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the operation upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action':'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression,'and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key]!= target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key)!= len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type,'and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
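    # Illustrative usage only (not executed as part of the module); the id
    # below is a placeholder value:
    #
    #     task = session.get('Task', 'REPLACE-WITH-A-REAL-TASK-ID')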
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression ='select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
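    # Illustrative usage only (not executed as part of the module); the query
    # expression below is an example and assumes a standard Task schema:
    #
    #     tasks = session.query('Task where status.name is "In Progress"')
    #     for task in tasks:
    #         print task['name']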
def _query(self, expression):
'''Execute *expression* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entity's properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query ='select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format('and '.join(condition)))
query = '{0} where {1}'.format(query,'or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
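    # Illustrative usage only (not executed as part of the module); the
    # attribute and value below are examples:
    #
    #     user = session.query('User').first()
    #     user['email'] = '[email protected]'
    #     session.commit()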
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash!= server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching.'
'Local: {0!r}!= Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all','set_only','modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy =='set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy =='modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
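    # Illustrative usage only (not executed as part of the module); the
    # sequence path below is a placeholder:
    #
    #     component = session.create_component(
    #         '/path/to/plate.%04d.exr [1001-1100]',
    #         data={'name': 'main'},
    #         location='auto'
    #     )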
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container ='members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type and job data contains
information about encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
An image component will always be generated if possible that can be used
as a thumbnail.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to already be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
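# Illustrative usage sketch (not part of the original module); the path is a
# placeholder and job['data'] is assumed to be a JSON encoded string.
#
#   job = session.encode_media('/path/to/preview.mov')
#   job_data = json.loads(job['data'])
#   print job_data['output']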
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
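# Illustrative upload sketch (not part of the original module); the 'url' and
# 'headers' keys of the returned mapping are assumptions based on the
# docstring above.
#
#   metadata = session.get_upload_metadata(
#       component['id'], 'clip.mov', os.path.getsize('/path/to/clip.mov')
#   )
#   with open('/path/to/clip.mov', 'rb') as data:
#       requests.put(metadata['url'], data=data, headers=metadata['headers'])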
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send invitations to the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
ynput__OpenPype | querying.rst | Subdoc to file | Querying | MIT License | ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/doc/querying.rst | [
"ynput__OpenPype/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py"
] | Querying
The API provides a simple, but powerful query language in addition to
iterating directly over entity attributes. Using queries can often
substantially speed up your code as well as reduce the amount of code
written.
A query is issued using Session.query and returns a list of matching
entities. The query always has a single target entity type that the
query is built against. This means that you cannot currently retrieve
back a list of different entity types in one query, though using
projections (covered under 'Optimising using projections' below) does allow
retrieving related entities of a different type in one go.
The syntax for a query is:
select <projections> from <entity type> where <criteria>
However, both the selection of projections and criteria are optional.
This means the most basic query is just to fetch all entities of a
particular type, such as all projects in the system:
projects = session.query('Project')
A query always returns an ftrack_api.query.QueryResult instance that
acts like a list with some special behaviour. The main special behaviour
is that the actual query to the server is not issued until you iterate
or index into the query results:
for project in projects:
print project['name']
You can also explicitly call ftrack_api.query.QueryResult.all on the
result set:
projects = session.query('Project').all()
Note
This behaviour exists in order to make way for efficient paging and
other optimisations in future.
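If you only need a single entity, the result also exposes first and one
helpers, and Session.query accepts a page_size argument (visible in the
bundled session code) that controls how many records are fetched per request.
A short illustrative sketch:
# Return the first match, or None if nothing matches.
project = session.query('Project').first()
# Return exactly one match, raising an error for zero or multiple matches.
project = session.query('Project where name is "my_project"').one()
# Use smaller pages when iterating over a large result set.
for task in session.query('Task', page_size=100):
    print task['name']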
Using criteria to narrow results
Often you will have some idea of the entities you want to retrieve. In
this case you can optimise your code by not fetching more data than you
need. To do this, add criteria to your query:
projects = session.query('Project where status is active')
Each criterion follows the form:
<attribute> <operator> <value>
You can inspect the entity type or instance to find out which attributes
are available to filter on for a particular entity type. The list of
operators that can be applied, and the types of values they expect, is
given later on under 'Supported operators'.
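As a short illustrative sketch (the exact listing depends on your server
schema), the registered attribute names can be listed from the entity class
held in session.types:
print session.types['Task'].attributes.keys()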
Combining criteria
Multiple criteria can be applied in a single expression by joining them
with either and or or:
projects = session.query(
'Project where status is active and name like "%thrones"'
)
You can use parentheses to control the precedence when compound criteria
are used (by default, and takes precedence):
projects = session.query(
'Project where status is active and '
'(name like "%thrones" or full_name like "%thrones")'
)
Filtering on relationships
Filtering on relationships is also intuitively supported. Simply follow
the relationship using a dotted notation:
tasks_in_project = session.query(
'Task where project.id is "{0}"'.format(project['id'])
)
This works even for multiple strides across relationships (though do
note that excessive strides can affect performance):
tasks_completed_in_project = session.query(
'Task where project.id is "{0}" and '
'status.type.name is "Done"'
.format(project['id'])
)
The same works for collections (where each entity in the collection is
compared against the subsequent condition):
import arrow
tasks_with_time_logged_today = session.query(
'Task where timelogs.start >= "{0}"'.format(arrow.now().floor('day'))
)
In the above query, each Task that has at least one Timelog with a start
time at or after the start of today is returned.
When filtering on relationships, the conjunctions has and any can be
used to specify how the criteria should be applied. This becomes
important when querying using multiple conditions on collection
relationships. The relationship condition takes the following form:
<not?> <relationship> <has|any> (<criteria>)
For optimal performance has should be used for scalar relationships when
multiple conditions are involved. For example, to find notes by a
specific author when only name is known:
notes_written_by_jane_doe = session.query(
'Note where author has (first_name is "Jane" and last_name is "Doe")'
)
This query could be written without has, giving the same results:
notes_written_by_jane_doe = session.query(
'Note where author.first_name is "Jane" and author.last_name is "Doe"'
)
any should be used for collection relationships. For example, to find
all projects that have at least one metadata instance that has
key=some_key and value=some_value the query would be:
projects_where_some_key_is_some_value = session.query(
'Project where metadata any (key=some_key and value=some_value)'
)
If the query were written without any, a project with one metadata entry
matching the key and a different entry matching the value would also match.
any can also be used to query for empty relationship collections:
users_without_timelogs = session.query(
'User where not timelogs any ()'
)
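The criteria inside the parentheses can be as rich as in the has example
above. For instance, to find users with time logged on or after a given date
(an illustrative sketch reusing the timelogs relation shown earlier):
users_with_recent_timelogs = session.query(
    'User where timelogs any (start >= "2015-06-01")'
)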
Supported operators
This is the list of currently supported operators:
Operators                 Description                    Example
------------------------  -----------------------------  ---------------------------------------------------------
=   is                    Exactly equal.                 name is "martin"
!=  is_not                Not exactly equal.             name is_not "martin"
>   after, greater_than   Greater than, exclusive.       start after "2015-06-01"
<   before, less_than     Less than, exclusive.          end before "2015-06-01"
>=                        Greater than, inclusive.       bid >= 10
<=                        Less than, inclusive.          bid <= 10
in                        One of.                        status.type.name in ("In Progress", "Done")
not_in                    Not one of.                    status.name not_in ("Omitted", "On Hold")
like                      Matches pattern.               name like "%thrones"
not_like                  Does not match pattern.        name not_like "%thrones"
has                       Test scalar relationship.      author has (first_name is "Jane" and last_name is "Doe")
any                       Test collection relationship.  metadata any (key=some_key and value=some_value)
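Several of these operators can be combined in a single expression. The
following sketch is purely illustrative, using the same attributes as the
examples above:
tasks = session.query(
    'Task where status.type.name in ("In Progress", "Done") '
    'and name like "%model%" and bid >= 10'
)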
Optimising using projections
In the understanding_sessions documentation we mentioned auto-population
of attribute values on access.
This meant that when iterating over a lot of entities and attributes a
large number of queries were being sent to the server. Ultimately, this
can cause your code to run slowly:
>>> projects = session.query('Project')
>>> for project in projects:
... print(
... # Multiple queries issued here for each attribute accessed for
... # each project in the loop!
... '{project[full_name]} - {project[status][name]}'
... .format(project=project)
... )
Fortunately, there is an easy way to optimise. If you know what
attributes you are interested in ahead of time you can include them in
your query string as projections in order to fetch them in one go:
>>> projects = session.query(
... 'select full_name, status.name from Project'
... )
>>> for project in projects:
... print(
... # No additional queries issued here as the values were already
... # loaded by the above query!
... '{project[full_name]} - {project[status][name]}'
... .format(project=project)
... )
Notice how this works for related entities as well. In the example
above, we also fetched the name of each Status entity attached to a
project in the same query, which meant that no further queries had to be
issued when accessing those nested attributes.
Note
There are no arbitrary limits to the number (or depth) of projections,
but do be aware that excessive projections can ultimately result in poor
performance also. As always, it is about choosing the right tool for the
job.
You can also customise the default projections used for each entity type
when none are specified in the query string (see
working_with_entities/entity_types/default_projections).
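As a closing illustrative sketch (assuming the default projections are plain
lists on the entity type classes, as suggested by the bundled session code),
they can be adjusted before issuing queries that omit an explicit select:
session.types['Project'].default_projections = ['id', 'full_name', 'status.name']
# The projections above are now applied automatically.
projects = session.query('Project')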
| # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import appdirs
import threading
import atexit
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub is True:
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = False
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = appdirs.user_cache_dir()
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
'''Return server url used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username':'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username':'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username':'martin', 'email':'[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, 'and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, 'and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
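# Illustrative usage sketch (not part of the original module); the id is a
# placeholder.
#
#   task = session.get('Task', 'TASK_ID')
#   if task is None:
#       print 'No matching task found.'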
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
'''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
conditions.append('({0})'.format('and '.join(condition)))
query = '{0} where {1}'.format(query, 'or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
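# Illustrative usage sketch (not part of the original module); the attribute
# and value are examples only.
#
#   user = session.query('User').first()
#   user['email'] = '[email protected]'
#   session.commit()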
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
:meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
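For example, an illustrative call (``task`` stands for any entity previously
retrieved through this session) that encodes only locally modified
attributes::
payload = session.encode(task, entity_attribute_strategy='modified_only')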
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
def _get_locations(self, filter_inaccessible=True):
'''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
'''Create a new component from *path* with additional *data*
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
location to add the component to if one is available. To not add to any
location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
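A minimal usage sketch (the path and component name below are illustrative
placeholders, not part of the API)::
component = session.create_component(
'/path/to/file.%04d.ext [1-5]',
data={'name': 'main'}
)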
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
'''Return size from *path*'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
'''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
(defaulting to 'dark' if an invalid option given).
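For example, an illustrative request (``task`` stands for a task entity
retrieved elsewhere)::
url = session.get_widget_url('tasks', entity=task, theme='light')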
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
'''Return a new Job that encode *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
The job will encode *media* based on the file type, and the job data will
contain information about the encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
If possible, an image component that can be used as a thumbnail will
always be generated.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
issued. If *media* is a FileComponent, it will be assumed to be
available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
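A minimal sketch (the path below is an illustrative placeholder)::
job = session.encode_media('/path/to/media.mov')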
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
*file_name* and *file_size* should match the components details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
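A hypothetical upload sketch, assuming the returned mapping exposes 'url'
and 'headers' entries (verify against the actual server response);
``component``, ``file_size`` and ``file_contents`` are placeholders::
metadata = session.get_upload_metadata(
component['id'], 'data.bin', file_size
)
response = requests.put(
metadata['url'], data=file_contents, headers=metadata['headers']
)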
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
def send_user_invite(self, user):
'''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
'''Send invitations to the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action': 'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
*invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
*invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data) |
|
westpa__westpa | ploterr.rst | Manual | Ploterr command | MIT License | westpa__westpa/doc/documentation/cli/ploterr.rst | [
"westpa__westpa/src/westpa/cli/tools/ploterr.py"
] | ploterr
usage:
ploterr [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
{help,d.kinetics,d.probs,rw.probs,rw.kinetics,generic} ...
Plots error ranges for weighted ensemble datasets.
Command-line options
optional arguments:
-h, --help show this help message and exit
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
supported input formats:
{help,d.kinetics,d.probs,rw.probs,rw.kinetics,generic}
help print help for this command or individual subcommands
d.kinetics output of w_direct kinetics
d.probs output of w_direct probs
rw.probs output of w_reweight probs
rw.kinetics output of w_reweight kinetics
generic arbitrary HDF5 file and dataset
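Examples
The following invocations are illustrative sketches; the file names simply
follow each subcommand's defaults (for example, ``direct.h5`` for ``w_direct``
output), so substitute your own analysis files as needed. Plot rate and flux
evolution confidence intervals from ``w_direct kinetics`` output, then plot an
arbitrary expectation/CI dataset from an HDF5 file:
ploterr d.kinetics -i direct.h5
ploterr generic 'kinavg.h5/rate_evolution[:,0,1]' -o rate_0_1.pdf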
| import logging
import os
import re
import h5py
import numpy as np
from westpa.tools import WESTMasterCommand, WESTSubcommand, ProgressIndicatorComponent, Plotter
from westpa.core import h5io
if os.environ.get('DISPLAY') is not None:
from matplotlib import pyplot
log = logging.getLogger('ploterrs')
class CommonPloterrs(WESTSubcommand):
def __init__(self, parent):
super().__init__(parent)
self.progress = ProgressIndicatorComponent()
self.xscale = None
self.yscale = None
self.xrange = None
self.yrange = None
self.xlabel = None
self.ylabel = None
self.title = None
self.plot_options_group = None
def add_args(self, parser):
self.progress.add_args(parser)
pogroup = self.plot_options_group = parser.add_argument_group('plot options')
pogroup.add_argument(
'--xscale',
choices=['linear', 'log', 'symlog'],
default='linear',
help='''Use "linear", "log", or "symlog" scaling for the x axis.
(Default: %(default)s).''',
)
pogroup.add_argument(
'--yscale',
choices=['linear', 'log', 'symlog'],
default='linear',
help='''Use "linear", "log", or "symlog" scaling for the y axis.
(Default: %(default)s).''',
)
pogroup.add_argument(
'--xrange',
help='''Restrict X range to XRANGE, which must be formatted as "xmin,xmax".
(Default: determined by input data.)''',
)
pogroup.add_argument(
'--yrange',
help='''Restrict Y range to YRANGE, which must be formatted as "ymin,ymax".
(Default: determined by input data.)''',
)
pogroup.add_argument('--xlabel', help='''Use XLABEL for the x-axis label. (Default: varies.)''')
pogroup.add_argument('--ylabel', help='''Use YLABEL for the y-axis label. (Default: varies.)''')
pogroup.add_argument('--title', help='''Use TITLE for the plot title. (Default: varies.)''')
pogroup.add_argument('--terminal', '-t', dest='plotting', action='store_true', help='''Plot output in terminal.''')
def process_args(self, args):
self.progress.process_args(args)
if args.xrange:
self.xrange = self.parse_range(args.xrange)
if args.yrange:
self.yrange = self.parse_range(args.yrange)
self.xscale = args.xscale
self.yscale = args.yscale
self.xlabel = args.xlabel or 'Iteration'
self.ylabel = args.ylabel
self.title = args.title
if args.plotting or os.environ.get('DISPLAY') is None:
self.interface = 'text'
else:
self.interface = 'matplotlib'
def parse_range(self, rangespec):
try:
(lbt, ubt) = rangespec.split(',')
return float(lbt), float(ubt)
except (ValueError, TypeError) as e:
raise ValueError('invalid range specification {!r}: {!s}'.format(rangespec, e))
def do_plot(self, data, output_filename, title=None, x_range=None, y_range=None, x_label=None, y_label=None):
if not output_filename:
return
title = title or self.title
x_range = x_range or self.xrange
y_range = y_range or self.yrange
x_label = x_label or self.xlabel
y_label = y_label or self.ylabel
iters = data['iter_stop'] - 1
pyplot.figure()
pyplot.plot(iters, data['expected'], color='black')
pyplot.plot(iters, data['ci_lbound'], color='gray')
pyplot.plot(iters, data['ci_ubound'], color='gray')
pyplot.gca().set_xscale(self.xscale)
pyplot.gca().set_yscale(self.yscale)
if title:
pyplot.title(title)
if x_range is not None:
pyplot.xlim(x_range)
if y_range is not None:
pyplot.ylim(y_range)
if x_label:
pyplot.xlabel(x_label)
if y_label:
pyplot.ylabel(y_label)
pyplot.savefig(output_filename)
class GenericIntervalSubcommand(CommonPloterrs):
description = '''\
Plots generic expectation/CI data. A path to the HDF5 file and the dataset
within it must be provided. This path takes the form **FILENAME/PATH[SLICE]**.
If the dataset is not a vector (one dimensional) then a slice must be provided.
For example, to access the state 0 to state 1 rate evolution calculated by
``w_kinavg``, one would use ``kinavg.h5/rate_evolution[:,0,1]``.
-----------------------------------------------------------------------------
Command-line arguments
-----------------------------------------------------------------------------
'''
subcommand = 'generic'
help_text = 'arbitrary HDF5 file and dataset'
def __init__(self, parent):
super().__init__(parent)
self.h5file = None
self.h5dset = None
self.dset_slice = None
self.output_filename = None
def add_args(self, parser):
iogroup = parser.add_argument_group('input/output options')
iogroup.add_argument(
'-o',
'--output',
default='errbars.pdf',
help='''Write plot to OUTPUT (default: %(default)s), whose format will
be determined by filename extension.''',
)
iogroup.add_argument(
'dsspec',
help='''Use data located at DSSPEC, which must be formatted as
FILENAME/PATH[SLICE]. FILENAME is the HDF5 file to read, PATH is the
HDF5 path to the dataset, and SLICE, if provided, must be the Numpy-style
slice (including brackets) which selects a vector of data of the
appropriate type.''',
)
def process_args(self, args):
self.output_filename = args.output
(pathname, slicestr) = re.search(r'([^[]+)(\[[^\]]+\])?$', args.dsspec).groups()
if slicestr:
sl = eval('np.index_exp' + slicestr)
else:
sl = np.index_exp[...]
self.h5file, self.h5dset = h5io.resolve_filepath(pathname, mode='r')
self.dset_slice = sl
def load_and_validate_data(self):
reqd_fields = set(['iter_start', 'iter_stop', 'expected', 'ci_lbound', 'ci_ubound'])
self.progress.indicator.new_operation('loading data')
data = self.h5dset[self.dset_slice]
if data.ndim != 1:
raise TypeError('dataset to be plotted must be 1-dimensional')
try:
fieldnames = set(data.dtype.fields.keys())
except AttributeError:
raise TypeError('dataset has inappropriate type')
else:
if len(fieldnames & reqd_fields) < len(reqd_fields):
raise TypeError('dataset does not contain correct fields')
return data
def go(self):
with self.progress.indicator:
data = self.load_and_validate_data()
self.progress.indicator.new_operation('plotting')
self.do_plot(data, self.output_filename)
class DirectKinetics(CommonPloterrs):
subcommand = 'd.kinetics'
help_text = 'output of w_direct kinetics'
input_filename = 'direct.h5'
flux_output_filename = 'flux_evolution_d_{state_label}.pdf'
rate_output_filename = 'rate_evolution_d_{istate_label}_{fstate_label}.pdf'
description = '''\
Plot evolution of state-to-state rates and total flux into states as generated
by ``w_{direct/reweight} kinetics`` (when used with the ``--evolution-mode``
option). Plots are generated for all rates/fluxes calculated. Output filenames
require (and plot titles and axis labels support) substitution based on which
flux/rate is being plotted:
istate_label, fstate_label
*(String, for rates)* Names of the initial and final states, as originally
given to ``w_assign``.
istate_index, fstate_index
*(Integer, for rates)* Indices of initial and final states.
state_label
*(String, for fluxes)* Name of state
state_index
*(Integer, for fluxes)* Index of state
'''
def __init__(self, parent):
super().__init__(parent)
self.kinavg_file = None
self.dset_slice = None
self.rate_output_pattern = None
self.flux_output_pattern = None
self.state_labels = None
def add_args(self, parser):
iogroup = parser.add_argument_group('input/output')
iogroup.add_argument(
'-i', '--input', default=self.input_filename, help='''Read kinetics results from INPUT (default: %(default)s).'''
)
iogroup.add_argument(
'--rate-output',
default=self.rate_output_filename,
help='''Filename pattern for rate evolution output. See above for valid
field names. (Default: %(default)r).''',
)
iogroup.add_argument(
'--flux-output',
default=self.flux_output_filename,
help='''Filename pattern for flux evolution output. See above for valid
field names. (Default: %(default)r).''',
)
def process_args(self, args):
self.kinavg_file = h5py.File(args.input, 'r')
self.state_labels = list(self.kinavg_file['state_labels'][...])
self.rate_output_pattern = args.rate_output
self.flux_output_pattern = args.flux_output
def plot_flux(self, istate):
label = self.state_labels[istate]
data = self.kinavg_file['target_flux_evolution'][:, istate]
if (data['iter_start'] == 0).all():
# No data
return
subdict = dict(state_label=label, state_index=istate)
output_filename = self.flux_output_pattern.format(**subdict) if self.flux_output_pattern else None
title = self.title if self.title is not None else 'Flux into state "{state_label}"'
title = title.format(**subdict)
x_label = self.xlabel.format(**subdict) if self.xlabel else None
y_label = self.ylabel if self.ylabel is not None else r'Flux $(\tau^{{-1}})$'
y_label = y_label.format(**subdict)
self.do_plot(data, output_filename, title, x_label=x_label, y_label=y_label)
def plot_rate(self, istate, jstate):
ilabel = self.state_labels[istate]
jlabel = self.state_labels[jstate]
data = self.kinavg_file['rate_evolution'][:, istate, jstate]
if (data['iter_start'] == 0).all():
# No data
return
subdict = dict(istate_label=ilabel, istate_index=istate, fstate_label=jlabel, fstate_index=jstate)
output_filename = self.rate_output_pattern.format(**subdict) if self.rate_output_pattern else None
title = self.title if self.title is not None else 'Rate from state "{istate_label}" to state "{fstate_label}"'
title = title.format(**subdict)
x_label = self.xlabel.format(**subdict) if self.xlabel else None
y_label = self.ylabel if self.ylabel is not None else r'Rate $(\tau^{{-1}})$'
y_label = y_label.format(**subdict)
self.do_plot(data, output_filename, title, x_label=x_label, y_label=y_label)
def go(self):
pi = self.progress.indicator
nstates = len(self.state_labels)
if self.interface == 'matplotlib':
with pi:
# if --evolution-mode wasn't specified, neither of these exist:
if 'target_flux_evolution' in self.kinavg_file:
pi.new_operation('plotting fluxes', nstates)
for istate in range(nstates):
self.plot_flux(istate)
pi.progress += 1
# if --evolution-mode wasn't specified, we won't get this either
if 'rate_evolution' in self.kinavg_file:
pi.new_operation('plotting rates', nstates * nstates)
for istate in range(nstates):
for jstate in range(nstates):
self.plot_rate(istate, jstate)
pi.progress += 1
else:
print('rate evolution not available')
else:
plotter = Plotter(self.kinavg_file, 'rate_evolution', iteration=-1, interface='text')
for istate in range(nstates):
for jstate in range(nstates):
if istate != jstate:
plotter.plot(istate, jstate)
plotter = Plotter(self.kinavg_file, 'conditional_flux_evolution', iteration=-1, interface='text')
for istate in range(nstates):
for jstate in range(nstates):
if istate != jstate:
plotter.plot(istate, jstate)
class DirectStateprobs(CommonPloterrs):
subcommand = 'd.probs'
help_text = 'output of w_direct probs'
input_filename = 'direct.h5'
pop_output_filename = 'pop_evolution_d_{state_label}.pdf'
color_output_filename = 'color_evolution_d_{state_label}.pdf'
description = '''\
Plot evolution of macrostate populations and associated uncertainties. Plots
are generated for all states calculated. Output filenames require (and plot
titles and axis labels support) substitution based on which state is being
plotted:
state_label
*(String, for fluxes)* Name of state
state_index
*(Integer, for fluxes)* Index of state
'''
def __init__(self, parent):
super().__init__(parent)
self.stateprobs_file = None
self.dset_slice = None
self.rate_output_pattern = None
self.flux_output_pattern = None
self.state_labels = None
def add_args(self, parser):
iogroup = parser.add_argument_group('input/output')
iogroup.add_argument(
'-i', '--input', default=self.input_filename, help='''Read w_kinavg results from INPUT (default: %(default)s).'''
)
iogroup.add_argument(
'--population-output',
default=self.pop_output_filename,
help='''Filename pattern for population evolution output. See above for valid
field names. (Default: %(default)r).''',
)
iogroup.add_argument(
'--color-output',
default=self.color_output_filename,
help='''Filename pattern for ensemble evolution output. See above for valid
field names. (Default: %(default)r).''',
)
def process_args(self, args):
self.stateprobs_file = h5py.File(args.input, 'r')
self.state_labels = list(self.stateprobs_file['state_labels'][...])
self.pop_output_pattern = args.population_output
self.color_output_pattern = args.color_output
def plot_pop(self, istate):
label = self.state_labels[istate]
data = self.stateprobs_file['state_pop_evolution'][:, istate]
if (data['iter_start'] == 0).all():
# No data
return
subdict = dict(state_label=label, state_index=istate)
output_filename = self.pop_output_pattern.format(**subdict) if self.pop_output_pattern else None
title = self.title if self.title is not None else 'Population in state "{state_label}"'
title = title.format(**subdict)
x_label = self.xlabel.format(**subdict) if self.xlabel else None
y_label = self.ylabel if self.ylabel is not None else r'Population'
y_label = y_label.format(**subdict)
self.do_plot(data, output_filename, title, x_label=x_label, y_label=y_label)
def plot_color(self, istate):
label = self.state_labels[istate]
data = self.stateprobs_file['color_prob_evolution'][:, istate]
if (data['iter_start'] == 0).all():
# No data
return
subdict = dict(state_label=label, state_index=istate)
output_filename = self.color_output_pattern.format(**subdict) if self.color_output_pattern else None
title = self.title if self.title is not None else 'Population in ensemble "{state_label}"'
title = title.format(**subdict)
x_label = self.xlabel.format(**subdict) if self.xlabel else None
y_label = self.ylabel if self.ylabel is not None else r'Population'
y_label = y_label.format(**subdict)
self.do_plot(data, output_filename, title, x_label=x_label, y_label=y_label)
def go(self):
pi = self.progress.indicator
nstates = len(self.state_labels)
if self.interface == 'matplotlib':
with pi:
if 'state_pop_evolution' in self.stateprobs_file:
pi.new_operation('plotting populations', nstates)
for istate in range(nstates):
self.plot_pop(istate)
pi.progress += 1
if 'color_prob_evolution' in self.stateprobs_file:
pi.new_operation('plotting ensemble populations', nstates)
for istate in range(nstates):
self.plot_color(istate)
pi.progress += 1
else:
print('population evolution not available')
else:
plotter = Plotter(self.stateprobs_file, 'state_pop_evolution', iteration=-1, interface='text')
for istate in range(nstates):
plotter.plot(istate)
plotter = Plotter(self.stateprobs_file, 'color_prob_evolution', iteration=-1, interface='text')
for istate in range(nstates):
plotter.plot(istate)
class ReweightStateprobs(DirectStateprobs):
subcommand = 'rw.probs'
help_text = 'output of w_reweight probs'
input_filename = 'reweight.h5'
pop_output_filename = 'pop_evolution_rw_{state_label}.pdf'
color_output_filename = 'color_evolution_rw_{state_label}.pdf'
class ReweightKinetics(DirectKinetics):
subcommand = 'rw.kinetics'
help_text = 'output of w_reweight kinetics'
input_filename = 'reweight.h5'
flux_output_filename = 'flux_evolution_rw_{state_label}.pdf'
rate_output_filename = 'rate_evolution_rw_{istate_label}_{fstate_label}.pdf'
class PloterrsTool(WESTMasterCommand):
prog = 'ploterrs'
subcommands = [DirectKinetics, DirectStateprobs, ReweightStateprobs, ReweightKinetics, GenericIntervalSubcommand]
subparsers_title = 'supported input formats'
description = '''\
Plots error ranges for weighted ensemble datasets.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def entry_point():
PloterrsTool().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | plothist.rst | Manual | Plothist command | MIT License | westpa__westpa/doc/documentation/cli/plothist.rst | [
"westpa__westpa/src/westpa/cli/tools/plothist.py"
] | plothist
Use the plothist tool to plot the results of w_pdist. This tool uses an
hdf5 file as its input (i.e. the output of another analysis tool), and
outputs a pdf image.
The plothist tool operates in one of three (mutually exclusive) plotting
modes:
- evolution: Plots the relevant data as a time evolution over
specified number of simulation iterations
- average: Plots the relevant data as a time average over a specified
number of iterations
- instant: Plots the relevant data for a single specified iteration
Overview
The basic usage, independent of plotting mode, is as follows:
usage:
| ``plothist [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]``
| `` {instant,average,evolution} input ...``
Note that the user must specify a plotting mode (i.e. 'instant',
'average', or 'evolution') and an input file, input.
Therefore, this tool is always called as:
plothist mode input_file [other options]
'instant' mode
usage:
| ``plothist instant [-h] input [-o PLOT_OUTPUT]``
| `` [--hdf5-output HDF5_OUTPUT] [--text-output TEXT_OUTPUT]``
| `` [--title TITLE] [--range RANGE] [--linear | --energy | --log10]``
| `` [--iter N_ITER] ``
| `` [DIMENSION] [ADDTLDIM]``
'average' mode
usage:
| ``plothist average [-h] input [-o PLOT_OUTPUT]``
| `` [--hdf5-output HDF5_OUTPUT] [--text-output TEXT_OUTPUT]``
| `` [--title TITLE] [--range RANGE] [--linear | --energy | --log10]``
| `` [--first-iter N_ITER] [--last-iter N_ITER] ``
| `` [DIMENSION] [ADDTLDIM]``
'evolution' mode
usage:
| ``plothist evolution [-h] input [-o PLOT_OUTPUT]``
| `` [--hdf5-output HDF5_OUTPUT]``
| `` [--title TITLE] [--range RANGE] [--linear | --energy | --log10]``
| `` [--first-iter N_ITER] [--last-iter N_ITER]``
| `` [--step-iter STEP] ``
| `` [DIMENSION]``
Command-Line Options
See the command-line tool index <command_line_tool_index> for more
information on the general options.
Unless specified (as a Note in the command-line option description), the
command-line options below are shared for all three plotting modes
Input/output options
No matter the mode, an input hdf5 file must be specified. There are
three possible outputs that are mode or user-specified: A text file, an
hdf5 file, and a pdf image.
Specifying input file
*``input``*
Specify the input hdf5 file *input*. This is the output file from a
previous analysis tool (e.g. 'pdist.h5').
Output plot pdf file
``-o plot_output, --plot-output plot_output``
Specify the name of the pdf plot image output (Default: 'hist.pdf').
Note: You can suppress plotting entirely by specifying an empty
string as plot_output (i.e. -o '' or --plot-output '')
Additional output options
Note: plothist provides additional, optional arguments to output the
data points used to construct the plot:
``--hdf5-output hdf5_output``
Output the plot data to an hdf5 file named 'hdf5_output' (Default: No
hdf5 output file)
``--text-output text_output``
Output the plot data as a text file named 'text_output' (Default: No
text output file). Note: This option is only available for 1
dimensional histogram plots (that is, 'average' and 'instant' modes
only)
Plotting options
The following options allow the user to specify a plot title, the type
of plot (i.e. energy or probability distribution), whether to apply a
log transformation to the data, and the range of data values to include.
``--title title``
Optionally specify a title, ``title``, for the plot (Default: No
title)
``--range 'LB,UB'``
Optionally specify the data range to be plotted as "LB, UB" (e.g.
' --range "-1, 10" ' - note that the quotation marks are necessary
if specifying a negative bound). For 1 dimensional histograms, the
range affects the y axis. For 2 dimensional plots (e.g. evolution
plot with 1 dimensional progress coordinate), it corresponds to the
range of the color bar
Mutually exclusive plotting options
The following three options determine how the plotted data is
represented (Default: '--energy')
``--energy ``
Plots the probability distribution on an inverted natural log scale
(i.e. -ln[P(x)] ), corresponding to the free energy (Default)
``--linear ``
Plots the probability distribution function as a linear scale
``--log10 ``
Plots the (base-10) logarithm of the probability distribution
Iteration selection options
Depending on plotting mode, you can select either a range or a single
iteration to plot.
``'instant'`` mode only:
``--iter n_iter``
Plot the distribution for iteration ``n_iter`` (Default: Last
completed iteration)
``'average'`` and ``'evolution'`` modes only:
``--first-iter first_iter``
Begin averaging or plotting at iteration ``first_iter`` (Default: 1)
``--last-iter last_iter``
Average or plot up to and including ``last_iter`` (Default: Last
completed iteration)
``'evolution'`` mode only:
``--step-iter n_step``
Average every ``n_step`` iterations together when plotting in
'evolution' mode (Default: 1 - i.e. plot each iteration)
Specifying progress coordinate dimension
For progress coordinates with dimensions greater than 1, you can specify
the dimension of the progress coordinate to use, the range of progress
coordinate values to include, and the progress coordinate axis label
with a single positional argument:
``dimension ``
Specify 'dimension' as 'int[:[LB,UB]:label]', where 'int' specifies
the dimension (starting at 0), and, optionally, 'LB,UB' specifies
the lower and upper range bounds, and/or 'label' specifies the axis
label (Default: int = 0, full range, default label is 'dimension
int'; e.g. 'dimension 0')
For 'average' and 'instant' modes, you can plot two dimensions at once
using a color map if this positional argument is specified:
``addtl_dimension ``
Specify the other dimension to include as 'addtl_dimension'
Examples
These examples assume the input file is created using w_pdist and is
named 'pdist.h5'
Basic plotting
Plot the energy ( -ln(P(x)) ) for the last iteration
plothist instant pdist.h5
Plot the evolution of the log10 of the probability distribution over all
iterations
plothist evolution pdist.h5 --log10
Plot the average linear probability distribution over all iterations
plothist average pdist.h5 --linear
Specifying progress coordinate
Plot the average probability distribution as the energy over the entire
range of the progress coordinate, labeling the x-axis 'pcoord'
plothist average pdist.h5 0::pcoord
Same as above, but only plot the energies for progress coordinate values
between 0 and 10
plothist average pdist.h5 '0:0,10:pcoord'
(Note: the quotes are needed if specifying a range that includes a
negative bound)
(For a simulation that uses at least 2 progress coordinates) plot the
probability distribution for the 5th iteration, representing the first
two progress coordinates as a heatmap
plothist instant pdist.h5 0 1 --iter 5 --linear
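Restrict the averaging window (the iteration numbers below are placeholders
for whatever range suits your simulation)
plothist average pdist.h5 --first-iter 10 --last-iter 50 --linear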
| import logging
import os
import re
import h5py
import numpy as np
import matplotlib
from matplotlib import pyplot
from matplotlib.image import NonUniformImage
from westpa.tools import WESTMasterCommand, WESTSubcommand
from westpa.core import h5io, textio
from westpa.fasthist import normhistnd
from westpa.core.extloader import get_object
log = logging.getLogger('plothist')
# Suppress divide-by-zero in log
np.seterr(divide='ignore', invalid='ignore')
def sum_except_along(array, axes):
'''Reduce the given array by addition over all axes except those listed in the scalar or
iterable ``axes``'''
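# Example (illustrative): for an array of shape (2, 3, 4),
# sum_except_along(array, 1) sums over axes 0 and 2 and returns a
# vector of length 3.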
try:
iter(axes)
except TypeError:
axes = [axes]
kept = set(axes)
summed = list(set(range(array.ndim)) - kept)
# Reorder axes so that the kept axes are first, and in the order they
# were given
array = np.transpose(array, list(axes) + summed).copy()
# Now, the last len(summed) axes are summed over
for _ in range(len(summed)):
array = np.add.reduce(array, axis=-1)
return array
class PlotHistBase(WESTSubcommand):
def __init__(self, parent):
super().__init__(parent)
self.input_arg_group = None
self.output_arg_group = None
self.input_h5 = None
self.opmode = None
self.plotscale = None
self.enerzero = None
self.plotrange = None
self.plottitle = None
self.postprocess_function = None
self.plot_contour = None
# Iteration range for average/evolution
self.avail_iter_start = None
self.avail_iter_stop = None
self.avail_iter_step = None
self.iter_start = None
self.iter_stop = None
self.iter_step = None
# Iteration for single point
self.n_iter = None
# An array of dicts describing what dimensions to work with and
# what their ranges should be for the plots.
self.dimensions = []
self.plot_output_filename = None
self.text_output_filename = None
self.hdf5_output_filename = None
def add_args(self, parser):
igroup = self.input_arg_group = parser.add_argument_group('input options')
igroup.add_argument('input', help='HDF5 file containing histogram data')
igroup.add_argument(
'firstdim',
nargs='?',
metavar='DIMENSION',
help='''Plot for the given DIMENSION, specified as INT[:[LB,UB]:LABEL], where
INT is a zero-based integer identifying the dimension in the histogram,
LB and UB are lower and upper bounds for plotting, and LABEL is the label for
the plot axis. (Default: dimension 0, full range.)''',
)
ogroup = self.output_arg_group = parser.add_argument_group('output options')
ogroup.add_argument(
'-o',
'--output',
'--plot-output',
dest='plot_output',
default='hist.pdf',
metavar='PLOT_OUTPUT',
help='''Store plot as PLOT_OUTPUT. This may be set to an empty string
(e.g. --plot-output='') to suppress plotting entirely. The output
format is determined by filename extension (and thus defaults to PDF).
Default: "%(default)s".''',
)
ogroup.add_argument('--hdf5-output', help='''Store plot data in the HDF5 file HDF5_OUTPUT.''')
ogroup.add_argument(
'--plot-contour',
dest='plot_contour',
action='store_const',
const=True,
default=False,
help='''Determines whether or not to superimpose a contour plot over the heatmap for 2D objects.''',
)
pgroup = parser.add_argument_group('plot options')
pmgroup = pgroup.add_mutually_exclusive_group()
pgroup.add_argument('--title', dest='title', help='Include TITLE as the top-of-graph title')
pmgroup.add_argument(
'--linear', dest='plotscale', action='store_const', const='linear', help='Plot the histogram on a linear scale.'
)
pmgroup.add_argument(
'--energy',
dest='plotscale',
action='store_const',
const='energy',
help='Plot the histogram on an inverted natural log scale, corresponding to (free) energy (default).',
)
pmgroup.add_argument(
'--zero-energy',
dest='enerzero',
metavar='E',
default='min',
help='Set the zero of energy to E, which may be a scalar, "min" or "max"',
)
pmgroup.add_argument(
'--log10', dest='plotscale', action='store_const', const='log10', help='Plot the histogram on a base-10 log scale.'
)
pgroup.add_argument(
'--range',
help='''Plot histogram ordinates over the given RANGE, specified as "LB,UB",
where LB and UB are the lower and upper bounds, respectively. For 1-D plots,
this is the Y axis. For 2-D plots, this is the colorbar axis.
(Default: full range.)''',
)
pgroup.add_argument(
'--postprocess-function',
help='''Names a function (as in module.function) that will be called just prior
to saving the plot. The function will be called as ``postprocess(hist, midpoints, binbounds)``
where ``hist`` is the histogram that was plotted, ``midpoints`` is the bin midpoints for
each dimension, and ``binbounds`` is the bin boundaries for each dimension for 2-D plots,
or None otherwise. The plot must be modified in place using the pyplot stateful interface.''',
)
parser.set_defaults(plotscale='energy')
def process_args(self, args):
self.plotscale = args.plotscale
self.input_h5 = h5py.File(args.input, 'r')
self.plot_output_filename = args.plot_output
self.hdf5_output_filename = args.hdf5_output
self.plot_contour = args.plot_contour
if args.title:
self.plottitle = args.title
if args.range:
self.plotrange = self.parse_range(args.range)
if args.firstdim:
self.dimensions.append(self.parse_dimspec(args.firstdim))
if not args.firstdim:
self.dimensions.append({'idim': 0, 'label': 'dimension 0'})
if args.enerzero:
lenerzero = args.enerzero.lower()
if lenerzero not in ('min', 'max'):
try:
self.enerzero = float(args.enerzero)
except ValueError:
raise ValueError('invalid energy zero point {!r}'.format(args.enerzero))
else:
self.enerzero = lenerzero
else:
self.enerzero = 'min'
self.avail_iter_start, self.avail_iter_stop = h5io.get_iter_range(self.input_h5['histograms'])
try:
self.avail_iter_step = h5io.get_iter_step(self.input_h5['histograms'])
except KeyError:
self.avail_iter_step = 1
log.info(
'HDF5 file {!r} contains data for iterations {} -- {} with a step of {}'.format(
args.input, self.avail_iter_start, self.avail_iter_stop, self.avail_iter_step
)
)
if args.postprocess_function:
self.postprocess_function = get_object(args.postprocess_function, path=['.'])
def parse_dimspec(self, dimspec):
dimdata = {}
match = re.match(r'([0-9]+)(?::(?:([^,]+),([^:,]+))?(?::(.*))?)?', dimspec)
if not match:
raise ValueError('invalid dimension specification {!r}'.format(dimspec))
(idim_txt, lb_txt, ub_txt, label) = match.groups()
try:
dimdata['idim'] = int(idim_txt)
if lb_txt:
dimdata['lb'] = float(lb_txt)
if ub_txt:
dimdata['ub'] = float(ub_txt)
if label:
dimdata['label'] = label
else:
dimdata['label'] = 'dimension {}'.format(dimdata['idim'])
except ValueError as e:
raise ValueError('invalid dimension specification {!r}: {!r}'.format(dimspec, e))
return dimdata
def parse_range(self, rangespec):
try:
(lbt, ubt) = rangespec.split(',')
return float(lbt), float(ubt)
except (ValueError, TypeError) as e:
raise ValueError('invalid range specification {!r}: {!r}'.format(rangespec, e))
def _ener_zero(self, hist):
hist = -np.log(hist)
if self.enerzero == 'min':
np.subtract(hist, hist.min(), out=hist, casting="unsafe")
elif self.enerzero == 'max':
np.subtract(hist, hist.max(), out=hist, casting="unsafe")
else:
np.subtract(hist, self.enerzero, out=hist, casting="unsafe")
return hist
class PlotSupports2D(PlotHistBase):
def __init__(self, parent):
super().__init__(parent)
def add_args(self, parser):
self.input_arg_group.add_argument(
'seconddim',
nargs='?',
metavar='ADDTLDIM',
help='''For instantaneous/average plots, plot along the given additional
dimension, producing a color map.''',
)
self.output_arg_group.add_argument(
'--text-output',
help='''Store plot data in a text format at TEXT_OUTPUT. This option is
only valid for 1-D histograms. (Default: no text output.)''',
)
def process_args(self, args):
self.text_output_filename = args.text_output
if args.seconddim is not None:
self.dimensions.append(self.parse_dimspec(args.seconddim))
def _do_1d_output(self, hist, idim, midpoints):
enehist = self._ener_zero(hist)
log10hist = np.log10(hist)
if self.hdf5_output_filename:
with h5py.File(self.hdf5_output_filename, 'w') as output_h5:
h5io.stamp_creator_data(output_h5)
output_h5.attrs['source_data'] = os.path.abspath(self.input_h5.filename)
output_h5.attrs['source_dimension'] = idim
output_h5['midpoints'] = midpoints
output_h5['histogram'] = hist
if self.text_output_filename:
with textio.NumericTextOutputFormatter(self.text_output_filename) as output_file:
output_file.write_header('source data: {} dimension {}'.format(os.path.abspath(self.input_h5.filename), idim))
output_file.write_header('column 0: midpoint of bin')
output_file.write_header('column 1: probability in bin')
output_file.write_header('column 2: -ln P')
output_file.write_header('column 3: log10 P')
np.savetxt(output_file, np.column_stack([midpoints, hist, enehist, log10hist]))
if self.plot_output_filename:
if self.plotscale == 'energy':
plothist = enehist
label = r'$-\ln\,P(x)$'
elif self.plotscale == 'log10':
plothist = log10hist
label = r'$\log_{10}\ P(x)$'
else:
plothist = hist
label = r'$P(x)$'
pyplot.figure()
pyplot.plot(midpoints, plothist)
pyplot.xlim(self.dimensions[0].get('lb'), self.dimensions[0].get('ub'))
if self.plotrange:
pyplot.ylim(*self.plotrange)
pyplot.xlabel(self.dimensions[0]['label'])
pyplot.ylabel(label)
if self.plottitle:
pyplot.title(self.plottitle)
if self.postprocess_function:
self.postprocess_function(plothist, midpoints, None)
pyplot.savefig(self.plot_output_filename)
def _do_2d_output(self, hist, idims, midpoints, binbounds):
enehist = self._ener_zero(hist)
log10hist = np.log10(hist)
if self.hdf5_output_filename:
with h5py.File(self.hdf5_output_filename, 'w') as output_h5:
h5io.stamp_creator_data(output_h5)
output_h5.attrs['source_data'] = os.path.abspath(self.input_h5.filename)
output_h5.attrs['source_dimensions'] = np.array(idims, np.min_scalar_type(max(idims)))
output_h5.attrs['source_dimension_labels'] = np.array([dim['label'] for dim in self.dimensions])
for idim in idims:
output_h5['midpoints_{}'.format(idim)] = midpoints[idim]
output_h5['histogram'] = hist
if self.plot_output_filename:
if self.plotscale == 'energy':
plothist = enehist
label = r'$-\ln\,P(x)$'
elif self.plotscale == 'log10':
plothist = log10hist
label = r'$\log_{10}\ P(\vec{x})$'
else:
plothist = hist
plothist[~np.isfinite(plothist)] = np.nan
label = r'$P(\vec{x})$'
try:
vmin, vmax = self.plotrange
except TypeError:
vmin, vmax = None, None
pyplot.figure()
# Transpose input so that axis 0 is displayed as x and axis 1 is displayed as y
# pyplot.imshow(plothist.T, interpolation='nearest', aspect='auto',
# extent=(midpoints[0][0], midpoints[0][-1], midpoints[1][0], midpoints[1][-1]),
# origin='lower', vmin=vmin, vmax=vmax)
# The following reproduces the former calls to imshow and colorbar
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
ax = pyplot.gca()
nui = NonUniformImage(
ax, extent=(midpoints[0][0], midpoints[0][-1], midpoints[1][0], midpoints[1][-1]), origin='lower', norm=norm
)
nui.set_data(midpoints[0], midpoints[1], plothist.T)
ax.add_image(nui)
ax.set_xlim(midpoints[0][0], midpoints[0][-1])
ax.set_ylim(midpoints[1][0], midpoints[1][-1])
cb = pyplot.colorbar(nui)
cb.set_label(label)
pyplot.xlabel(self.dimensions[0]['label'])
pyplot.xlim(self.dimensions[0].get('lb'), self.dimensions[0].get('ub'))
pyplot.ylabel(self.dimensions[1]['label'])
pyplot.ylim(self.dimensions[1].get('lb'), self.dimensions[1].get('ub'))
if self.plottitle:
pyplot.title(self.plottitle)
if self.postprocess_function:
self.postprocess_function(plothist, midpoints, binbounds)
if self.plot_contour:
pyplot.contour(midpoints[0], midpoints[1], plothist.T)
pyplot.savefig(self.plot_output_filename)
class InstantPlotHist(PlotSupports2D):
subcommand = 'instant'
help_text = 'plot probability distribution for a single WE iteration'
description = '''\
Plot a probability distribution for a single WE iteration. The probability
distribution must have been previously extracted with ``w_pdist`` (or, at
least, must be compatible with the output format of ``w_pdist``; see
``w_pdist --help`` for more information).
'''
def add_args(self, parser):
self.input_arg_group.add_argument(
'--iter',
metavar='N_ITER',
dest='n_iter',
type=int,
help='''Plot distribution for iteration N_ITER
(default: last completed iteration).''',
)
def process_args(self, args):
if args.n_iter:
self.n_iter = min(args.n_iter, self.avail_iter_stop - 1)
else:
self.n_iter = self.avail_iter_stop - 1
def do_instant_plot_1d(self):
'''Plot the histogram for iteration self.n_iter'''
idim = self.dimensions[0]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter = np.searchsorted(n_iters, self.n_iter)
binbounds = self.input_h5['binbounds_{}'.format(idim)][...]
midpoints = self.input_h5['midpoints_{}'.format(idim)][...]
hist = self.input_h5['histograms'][iiter]
# Average over other dimensions
hist = sum_except_along(hist, idim)
normhistnd(hist, [binbounds])
self._do_1d_output(hist, idim, midpoints)
def do_instant_plot_2d(self):
'''Plot the histogram for iteration self.n_iter'''
idim0 = self.dimensions[0]['idim']
idim1 = self.dimensions[1]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter = np.searchsorted(n_iters, self.n_iter)
binbounds_0 = self.input_h5['binbounds_{}'.format(idim0)][...]
midpoints_0 = self.input_h5['midpoints_{}'.format(idim0)][...]
binbounds_1 = self.input_h5['binbounds_{}'.format(idim1)][...]
midpoints_1 = self.input_h5['midpoints_{}'.format(idim1)][...]
hist = self.input_h5['histograms'][iiter]
# Average over other dimensions
hist = sum_except_along(hist, [idim0, idim1])
normhistnd(hist, [binbounds_0, binbounds_1])
self._do_2d_output(hist, [idim0, idim1], [midpoints_0, midpoints_1], [binbounds_0, binbounds_1])
def go(self):
if len(self.dimensions) == 2:
self.do_instant_plot_2d()
else:
self.do_instant_plot_1d()
class AveragePlotHist(PlotSupports2D):
subcommand = 'average'
help_text = 'plot average of a probability distribution over a WE simulation'
description = '''\
Plot a probability distribution averaged over multiple iterations. The
probability distribution must have been previously extracted with ``w_pdist``
(or, at least, must be compatible with the output format of ``w_pdist``; see
``w_pdist --help`` for more information).
'''
def add_args(self, parser):
igroup = self.input_arg_group
igroup.add_argument(
'--first-iter',
dest='first_iter',
type=int,
metavar='N_ITER',
default=1,
help='''Begin averaging at iteration N_ITER (default: %(default)d).''',
)
igroup.add_argument(
'--last-iter',
dest='last_iter',
type=int,
metavar='N_ITER',
help='''Conclude averaging with N_ITER, inclusive (default: last completed iteration).''',
)
def process_args(self, args):
if args.first_iter:
self.iter_start = max(args.first_iter, self.avail_iter_start)
else:
self.iter_start = self.avail_iter_start
if args.last_iter:
self.iter_stop = min(args.last_iter + 1, self.avail_iter_stop)
else:
self.iter_stop = self.avail_iter_stop
def do_average_plot_1d(self):
'''Plot the average histogram for iterations self.iter_start to self.iter_stop'''
idim = self.dimensions[0]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter_start = np.searchsorted(n_iters, self.iter_start)
iiter_stop = np.searchsorted(n_iters, self.iter_stop)
binbounds = self.input_h5['binbounds_{}'.format(idim)][...]
midpoints = self.input_h5['midpoints_{}'.format(idim)][...]
# hist = self.input_h5['histograms'][iiter_start:iiter_stop]
for iiter in range(iiter_start, iiter_stop):
iter_hist = sum_except_along(self.input_h5['histograms'][iiter], idim)
if iiter == iiter_start:
hist = iter_hist
else:
hist += iter_hist
del iter_hist
normhistnd(hist, [binbounds])
self._do_1d_output(hist, idim, midpoints)
def do_average_plot_2d(self):
'''Plot the histogram for iteration self.n_iter'''
idim0 = self.dimensions[0]['idim']
idim1 = self.dimensions[1]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter_start = np.searchsorted(n_iters, self.iter_start)
iiter_stop = np.searchsorted(n_iters, self.iter_stop)
binbounds_0 = self.input_h5['binbounds_{}'.format(idim0)][...]
midpoints_0 = self.input_h5['midpoints_{}'.format(idim0)][...]
binbounds_1 = self.input_h5['binbounds_{}'.format(idim1)][...]
midpoints_1 = self.input_h5['midpoints_{}'.format(idim1)][...]
for iiter in range(iiter_start, iiter_stop):
iter_hist = sum_except_along(self.input_h5['histograms'][iiter], [idim0, idim1])
if iiter == iiter_start:
hist = iter_hist
else:
hist += iter_hist
normhistnd(hist, [binbounds_0, binbounds_1])
self._do_2d_output(hist, [idim0, idim1], [midpoints_0, midpoints_1], [binbounds_0, binbounds_1])
def go(self):
if len(self.dimensions) == 2:
self.do_average_plot_2d()
else:
self.do_average_plot_1d()
class EvolutionPlotHist(PlotHistBase):
subcommand = 'evolution'
help_text = 'plot evolution of a probability distribution over the course of a WE simulation'
description = '''\
Plot a probability distribution as it evolves over iterations. The
probability distribution must have been previously extracted with ``w_pdist``
(or, at least, must be compatible with the output format of ``w_pdist``; see
``w_pdist --help`` for more information).
'''
def add_args(self, parser):
igroup = self.input_arg_group
igroup.add_argument(
'--first-iter',
dest='first_iter',
type=int,
metavar='N_ITER',
default=1,
help='''Begin analysis at iteration N_ITER (default: %(default)d).''',
)
igroup.add_argument(
'--last-iter',
dest='last_iter',
type=int,
metavar='N_ITER',
help='''Conclude analysis with N_ITER, inclusive (default: last completed iteration).''',
)
igroup.add_argument(
'--step-iter', dest='step_iter', type=int, metavar='STEP', help='''Average in blocks of STEP iterations.'''
)
def process_args(self, args):
if args.first_iter:
self.iter_start = max(args.first_iter, self.avail_iter_start)
else:
self.iter_start = self.avail_iter_start
if args.last_iter:
self.iter_stop = min(args.last_iter + 1, self.avail_iter_stop)
else:
self.iter_stop = self.avail_iter_stop
if args.step_iter:
self.iter_step = max(args.step_iter, self.avail_iter_step)
else:
self.iter_step = self.avail_iter_step
log.info('using data for iterations {} -- {} with a step of {}'.format(self.iter_start, self.iter_stop, self.iter_step))
def go(self):
'''Plot the evolution of the histogram for iterations self.iter_start to self.iter_stop'''
idim = self.dimensions[0]['idim']
n_iters = self.input_h5['n_iter'][...]
iiter_start = np.searchsorted(n_iters, self.iter_start)
iiter_stop = np.searchsorted(n_iters, self.iter_stop)
binbounds = self.input_h5['binbounds_{}'.format(idim)][...]
midpoints = self.input_h5['midpoints_{}'.format(idim)][...]
hists_ds = self.input_h5['histograms']
itercount = self.iter_stop - self.iter_start
# We always round down, so that we don't have a dangling partial block at the end
nblocks = itercount // self.iter_step
block_iters = np.empty((nblocks, 2), dtype=n_iters.dtype)
blocked_hists = np.zeros((nblocks, hists_ds.shape[1 + idim]), dtype=hists_ds.dtype)
for iblock, istart in enumerate(range(iiter_start, iiter_start + nblocks * self.iter_step, self.iter_step)):
istop = min(istart + self.iter_step, iiter_stop)
histslice = hists_ds[istart:istop]
# Sum over time
histslice = np.add.reduce(histslice, axis=0)
# Sum over other dimensions
blocked_hists[iblock] = sum_except_along(histslice, idim)
# Normalize
normhistnd(blocked_hists[iblock], [binbounds])
block_iters[iblock, 0] = n_iters[istart]
block_iters[iblock, 1] = n_iters[istop - 1] + 1
# enehists = -np.log(blocked_hists)
enehists = self._ener_zero(blocked_hists)
log10hists = np.log10(blocked_hists)
if self.hdf5_output_filename:
with h5py.File(self.hdf5_output_filename, 'w') as output_h5:
h5io.stamp_creator_data(output_h5)
output_h5.attrs['source_data'] = os.path.abspath(self.input_h5.filename)
output_h5.attrs['source_dimension'] = idim
output_h5['midpoints'] = midpoints
output_h5['histograms'] = blocked_hists
output_h5['n_iter'] = block_iters
if self.plot_output_filename:
if self.plotscale == 'energy':
plothist = enehists
label = r'$-\ln\,P(x)$'
elif self.plotscale == 'log10':
plothist = log10hists
label = r'$\log_{10}\ P(x)$'
else:
plothist = blocked_hists
label = r'$P(x)$'
try:
vmin, vmax = self.plotrange
except TypeError:
vmin, vmax = None, None
pyplot.figure()
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
ax = pyplot.gca()
nui = NonUniformImage(
ax, extent=(midpoints[0], midpoints[-1], block_iters[0, -1], block_iters[-1, -1]), origin='lower', norm=norm
)
# not sure why plothist works but plothist.T doesn't, and the opposite is true
# for _do_2d_output
nui.set_data(midpoints, block_iters[:, -1], plothist)
ax.add_image(nui)
ax.set_xlim(midpoints[0], midpoints[-1])
ax.set_ylim(block_iters[0, -1], block_iters[-1, -1])
cb = pyplot.colorbar(nui)
cb.set_label(label)
pyplot.xlabel(self.dimensions[0]['label'])
pyplot.xlim(self.dimensions[0].get('lb'), self.dimensions[0].get('ub'))
pyplot.ylabel('WE Iteration')
if self.plottitle:
pyplot.title(self.plottitle)
if self.postprocess_function:
self.postprocess_function(plothist, midpoints, binbounds)
pyplot.savefig(self.plot_output_filename)
class PlotHistTool(WESTMasterCommand):
prog = 'plothist'
subparsers_title = 'plotting modes'
subcommands = [InstantPlotHist, AveragePlotHist, EvolutionPlotHist]
description = '''\
Plot probability density functions (histograms) generated by w_pdist or other
programs conforming to the same output format. This program operates in one of
three modes:
instant
Plot 1-D and 2-D histograms for an individual iteration. See
``plothist instant --help`` for more information.
average
Plot 1-D and 2-D histograms, averaged over several iterations. See
``plothist average --help`` for more information.
evolution
Plot the time evolution 1-D histograms as waterfall (heat map) plots.
See ``plothist evolution --help`` for more information.
This program takes the output of ``w_pdist`` as input (see ``w_pdist --help``
for more information), and can generate any kind of graphical output that
matplotlib supports.
------------------------------------------------------------------------------
Command-line options
------------------------------------------------------------------------------
'''
def entry_point():
PlotHistTool().main()
if __name__ == '__main__':
entry_point() |
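The --postprocess-function hook described in the help text above receives the plotted histogram, the bin midpoints, and (for 2-D plots) the bin boundaries, and must modify the current figure in place through the pyplot stateful interface. A minimal sketch follows; the module and function names, the invocation in the comment, and the annotated barrier position are illustrative assumptions, not part of the tool itself:

# my_postprocess.py -- illustrative; e.g. plothist average pdist.h5 0 --postprocess-function my_postprocess.annotate
from matplotlib import pyplot

def annotate(hist, midpoints, binbounds):
    # hist: the histogram that was plotted; midpoints: bin midpoints per dimension;
    # binbounds: bin boundaries per dimension for 2-D plots, otherwise None
    pyplot.axvline(x=1.0, color='black', linestyle='--', linewidth=0.75)  # assumed barrier position
    pyplot.grid(True, alpha=0.3)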
|
westpa__westpa | w_assign.rst | Manual | w_assign command | MIT License | westpa__westpa/doc/documentation/cli/w_assign.rst | [
"westpa__westpa/src/westpa/cli/tools/w_assign.py"
] | w_assign
w_assign uses simulation output to assign walkers to user-specified bins
and macrostates. These assignments are required for some other
simulation tools, namely w_kinetics and w_kinavg.
w_assign supports parallelization (see general work manager options for
more on command line options to specify a work manager).
Overview
Usage:
w_assign [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[-W WEST_H5FILE] [-o OUTPUT]
[--bins-from-system | --bins-from-expr BINS_FROM_EXPR | --bins-from-function BINS_FROM_FUNCTION]
[-p MODULE.FUNCTION]
[--states STATEDEF [STATEDEF ...] | --states-from-file STATEFILE | --states-from-function STATEFUNC]
[--wm-work-manager WORK_MANAGER] [--wm-n-workers N_WORKERS]
[--wm-zmq-mode MODE] [--wm-zmq-info INFO_FILE]
[--wm-zmq-task-endpoint TASK_ENDPOINT]
[--wm-zmq-result-endpoint RESULT_ENDPOINT]
[--wm-zmq-announce-endpoint ANNOUNCE_ENDPOINT]
[--wm-zmq-listen-endpoint ANNOUNCE_ENDPOINT]
[--wm-zmq-heartbeat-interval INTERVAL]
[--wm-zmq-task-timeout TIMEOUT]
[--wm-zmq-client-comm-mode MODE]
Command-Line Options
See the general command-line tool reference for more information on the
general options.
Input/output Options
-W, --west-data /path/to/file
Read simulation result data from file *file*. (**Default:** The
*hdf5* file specified in the configuration file, by default
**west.h5**)
-o, --output /path/to/file
Write assignment results to file *outfile*. (**Default:** *hdf5*
file **assign.h5**)
Binning Options
Specify how bins are to be assigned to the dataset:
--bins-from-system
Use binning scheme specified by the system driver; system driver can be
found in the west configuration file, by default named **west.cfg**
(**Default binning**)
--bins-from-expr bin_expr
Use the binning scheme specified in *``bin_expr``*, which takes the form of a
Python list of lists, where each inner list gives the bin boundaries for a
given dimension (for example, "[[0,1,2,4,inf],[-inf,0,inf]]" specifies bin
boundaries for a two-dimensional progress coordinate). Note that this option
accepts the special symbol 'inf' for floating-point infinity.
--bins-from-function bin_func
Construct bins by calling the external function *``bin_func``*.
*``bin_func``* should be formatted as '[PATH:]module.function', where the
function 'function' in module 'module' will be used.
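As a minimal sketch of such a function (the module and function names are illustrative; it is assumed that WESTPA's RectilinearBinMapper is importable from westpa.core.binning and that the function is called with no arguments):

# mybins.py -- illustrative only; supply as --bins-from-function mybins.construct_bins
from westpa.core.binning import RectilinearBinMapper

def construct_bins():
    # One list of boundaries per progress-coordinate dimension
    boundaries = [
        [0.0, 1.0, 2.0, 4.0, float('inf')],
        [-float('inf'), 0.0, float('inf')],
    ]
    return RectilinearBinMapper(boundaries)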
Macrostate Options
You can optionally specify how user-defined macrostates are assigned. Note
that macrostates must be assigned in order to use the subsequent analysis
tools, namely w_kinetics and w_kinavg:
--states statedef [statedef ...]
Specify a macrostate for a single bin as *``statedef``*, formatted
as a coordinate tuple where each coordinate specifies the bin to
which it belongs, for instance:
'[1.0, 2.0]' assigns a macrostate corresponding to the bin that
contains the (two-dimensional) progress coordinates 1.0 and 2.0.
Note that a macrostate label can optionally be specified; for
instance, 'bound:[1.0, 2.0]' assigns the macrostate named 'bound' to
the bin containing the given coordinates. Note
that multiple assignments can be specified with this command, but
only one macrostate per bin is possible - if you wish to specify
multiple bins in a single macrostate, use the
*``--states-from-file``* option.
--states-from-file statefile
Read macrostate assignments from the *yaml* file *``statefile``*. This
option allows you to assign multiple bins to a single macrostate.
The following example shows the contents of *``statefile``* that
specify two macrostates, bound and unbound, over multiple bins with
a two-dimensional progress coordinate:
---
states:
- label: unbound
coords:
- [9.0, 1.0]
- [9.0, 2.0]
- label: bound
coords:
- [0.1, 0.0]
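The same states can also be defined programmatically and supplied with the *``--states-from-function``* option. As described in the w_assign source, the function is called with the bin mapper and must return a list of dictionaries with 'label' and 'coords' entries. The following sketch (illustrative module and function names) reproduces the YAML example above:

# mystates.py -- illustrative; supply as --states-from-function mystates.gen_states
def gen_states(mapper):
    # 'coords' is a 2-D array-like: one row per coordinate tuple, and every
    # bin containing one of these points is mapped to the state.
    return [
        {'label': 'unbound', 'coords': [[9.0, 1.0], [9.0, 2.0]]},
        {'label': 'bound', 'coords': [[0.1, 0.0]]},
    ]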
Specifying Progress Coordinate
By default, progress coordinate information for each iteration is taken
from the pcoord dataset in the specified input file (which, by default, is
west.h5). Optionally, you can specify a function to construct the
progress coordinate for each iteration - this may be useful to
consolidate data from several sources or otherwise preprocess the
progress coordinate data:
--construct-pcoord module.function, -p module.function
Use the function *module.function* to construct the progress
coordinate for each iteration. This will be called once per
iteration as *function(n_iter, iter_group)* and should return an
array indexable as [seg_id][timepoint][dimension]. The
**default** function returns the 'pcoord' dataset for that iteration
(i.e., the function executes ``return iter_group['pcoord'][...]``).
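A minimal sketch of such a function is shown below. It appends an auxiliary dataset to the stored progress coordinate; the module, function, and dataset names ('auxdata/rmsd') are assumptions for illustration only:

# my_pcoord.py -- illustrative; supply as -p my_pcoord.build_pcoord
import numpy as np

def build_pcoord(n_iter, iter_group):
    # Must return an array indexable as [seg_id][timepoint][dimension]
    pcoord = iter_group['pcoord'][...]
    rmsd = iter_group['auxdata/rmsd'][...]  # assumed auxiliary dataset
    if rmsd.ndim == 2:
        # promote (segments, timepoints) to (segments, timepoints, 1)
        rmsd = rmsd[:, :, np.newaxis]
    return np.concatenate([pcoord, rmsd], axis=2)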
| import logging
import math
import os
import numpy as np
from numpy import index_exp
from westpa.core.data_manager import seg_id_dtype, weight_dtype
from westpa.core.binning import index_dtype, assign_and_label, accumulate_labeled_populations
from westpa.tools import WESTParallelTool, WESTDataReader, WESTDSSynthesizer, BinMappingComponent, ProgressIndicatorComponent
import westpa
from westpa.core import h5io
from westpa.core.h5io import WESTPAH5File
from westpa.core.extloader import get_object
log = logging.getLogger('w_assign')
# Changes to keep it alive...
def parse_pcoord_value(pc_str):
namespace = {'math': math, 'numpy': np, 'np': np, 'inf': float('inf')}
arr = np.array(eval(pc_str, namespace))
if arr.ndim == 0:
arr.shape = (1, 1)
elif arr.ndim == 1:
arr.shape = (1,) + arr.shape
else:
raise ValueError('too many dimensions')
return arr
def _assign_label_pop(
n_iter, lb, ub, mapper, nstates, state_map, last_labels, parent_id_dsspec, weight_dsspec, pcoord_dsspec, subsample
):
nbins = len(state_map) - 1
parent_ids = parent_id_dsspec.get_iter_data(n_iter, index_exp[lb:ub])
weights = weight_dsspec.get_iter_data(n_iter, index_exp[lb:ub])
pcoords = pcoord_dsspec.get_iter_data(n_iter, index_exp[lb:ub])
assignments, trajlabels, statelabels = assign_and_label(
lb, ub, parent_ids, mapper.assign, nstates, state_map, last_labels, pcoords, subsample
)
pops = np.zeros((nstates + 1, nbins + 1), weight_dtype)
accumulate_labeled_populations(weights, assignments, trajlabels, pops)
return (assignments, trajlabels, pops, lb, ub, statelabels)
class WAssign(WESTParallelTool):
prog = 'w_assign'
description = '''\
Assign walkers to bins, producing a file (by default named "assign.h5")
which can be used in subsequent analysis.
For consistency in subsequent analysis operations, the entire dataset
must be assigned, even if only a subset of the data will be used. This
ensures that analyses that rely on tracing trajectories always know the
originating bin of each trajectory.
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is provided either by a user-specified function
(--construct-dataset) or a list of "data set specifications" (--dsspecs).
If neither is provided, the progress coordinate dataset ''pcoord'' is used.
To use a custom function to extract or calculate data whose probability
distribution will be calculated, specify the function in standard Python
MODULE.FUNCTION syntax as the argument to --construct-dataset. This function
will be called as function(n_iter,iter_group), where n_iter is the iteration
whose data are being considered and iter_group is the corresponding group
in the main WEST HDF5 file (west.h5). The function must return data which can
be indexed as [segment][timepoint][dimension].
To use a list of data set specifications, specify --dsspecs and then list the
desired datasets one-by-one (space-separated in most shells). These data set
specifications are formatted as NAME[,file=FILENAME,slice=SLICE], which will
use the dataset called NAME in the HDF5 file FILENAME (defaulting to the main
WEST HDF5 file west.h5), and slice it with the Python slice expression SLICE
(as in [0:2] to select the first two elements of the first axis of the
dataset). The ``slice`` option is most useful for selecting one column (or
more) from a multi-column dataset, such as arises when using a progress
coordinate of multiple dimensions.
-----------------------------------------------------------------------------
Specifying macrostates
-----------------------------------------------------------------------------
Optionally, kinetic macrostates may be defined in terms of sets of bins.
Each trajectory will be labeled with the kinetic macrostate it was most
recently in at each timepoint, for use in subsequent kinetic analysis.
This is required for all kinetics analysis (w_kintrace and w_kinmat).
There are three ways to specify macrostates:
1. States corresponding to single bins may be identified on the command
line using the --states option, which takes multiple arguments, one for
each state (separated by spaces in most shells). Each state is specified
as a coordinate tuple, with an optional label prepended, as in
``bound:1.0`` or ``unbound:(2.5,2.5)``. Unlabeled states are named
``stateN``, where N is the (zero-based) position in the list of states
supplied to --states.
2. States corresponding to multiple bins may use a YAML input file specified
with --states-from-file. This file defines a list of states, each with a
name and a list of coordinate tuples; bins containing these coordinates
will be mapped to the containing state. For instance, the following
file::
---
states:
- label: unbound
coords:
- [9.0, 1.0]
- [9.0, 2.0]
- label: bound
coords:
- [0.1, 0.0]
produces two macrostates: the first state is called "unbound" and
consists of bins containing the (2-dimensional) progress coordinate
values (9.0, 1.0) and (9.0, 2.0); the second state is called "bound"
and consists of the single bin containing the point (0.1, 0.0).
3. Arbitrary state definitions may be supplied by a user-defined function,
specified as --states-from-function=MODULE.FUNCTION. This function is
called with the bin mapper as an argument (``function(mapper)``) and must
return a list of dictionaries, one per state. Each dictionary must contain
a vector of coordinate tuples with key "coords"; the bins into which each
of these tuples falls define the state. An optional name for the state
(with key "label") may also be provided.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "assign.h5") contains the following
attributes and datasets:
``nbins`` attribute
*(Integer)* Number of valid bins. Bin assignments range from 0 to
*nbins*-1, inclusive.
``nstates`` attribute
*(Integer)* Number of valid macrostates (may be zero if no such states are
specified). Trajectory ensemble assignments range from 0 to *nstates*-1,
inclusive, when states are defined.
``/assignments`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint assignments (bin indices).
``/npts`` [iteration]
*(Integer)* Number of timepoints in each iteration.
``/nsegs`` [iteration]
*(Integer)* Number of segments in each iteration.
``/labeled_populations`` [iterations][state][bin]
*(Floating-point)* Per-iteration and -timepoint bin populations, labeled
by most recently visited macrostate. The last state entry (*nstates-1*)
corresponds to trajectories initiated outside of a defined macrostate.
``/bin_labels`` [bin]
*(String)* Text labels of bins.
When macrostate assignments are given, the following additional datasets are
present:
``/trajlabels`` [iteration][segment][timepoint]
*(Integer)* Per-segment and -timepoint trajectory labels, indicating the
macrostate which each trajectory last visited.
``/state_labels`` [state]
*(String)* Labels of states.
``/state_map`` [bin]
*(Integer)* Mapping of bin index to the macrostate containing that bin.
An entry will contain *nbins+1* if that bin does not fall into a
macrostate.
Datasets indexed by state and bin contain one more entry than the number of
valid states or bins. For *N* bins, axes indexed by bin are of size *N+1*, and
entry *N* (0-based indexing) corresponds to a walker outside of the defined bin
space (which will cause most mappers to raise an error). More importantly, for
*M* states (including the case *M=0* where no states are specified), axes
indexed by state are of size *M+1* and entry *M* refers to trajectories
initiated in a region not corresponding to a defined macrostate.
Thus, ``labeled_populations[:,:,:].sum(axis=1)[:,:-1]`` gives overall per-bin
populations, for all defined bins and
``labeled_populations[:,:,:].sum(axis=2)[:,:-1]`` gives overall
per-trajectory-ensemble populations for all defined states.
-----------------------------------------------------------------------------
Parallelization
-----------------------------------------------------------------------------
This tool supports parallelized binning, including reading/calculating input
data.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
# Parallel processing by default (this is not actually necessary, but it is
# informative!)
self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
self.data_reader = WESTDataReader()
self.dssynth = WESTDSSynthesizer(default_dsname='pcoord')
self.binning = BinMappingComponent()
self.progress = ProgressIndicatorComponent()
self.output_file = None
self.output_filename = None
self.states = []
self.subsample = False
def add_args(self, parser):
self.data_reader.add_args(parser)
self.binning.add_args(parser)
self.dssynth.add_args(parser)
sgroup = parser.add_argument_group('macrostate definitions').add_mutually_exclusive_group()
sgroup.add_argument(
'--states',
nargs='+',
metavar='STATEDEF',
help='''Single-bin kinetic macrostate, specified by a coordinate tuple (e.g. '1.0' or '[1.0,1.0]'),
optionally labeled (e.g. 'bound:[1.0,1.0]'). States corresponding to multiple bins
must be specified with --states-from-file.''',
)
sgroup.add_argument(
'--states-from-file',
metavar='STATEFILE',
help='''Load kinetic macrostates from the YAML file STATEFILE. See description
above for the appropriate structure.''',
)
sgroup.add_argument(
'--states-from-function',
metavar='STATEFUNC',
help='''Load kinetic macrostates from the function STATEFUNC, specified as
module_name.func_name. This function is called with the bin mapper as an argument,
and must return a list of dictionaries {'label': state_label, 'coords': 2d_array_like}
one for each macrostate; the 'coords' entry must contain enough rows to identify all bins
in the macrostate.''',
)
agroup = parser.add_argument_group('other options')
agroup.add_argument(
'-o', '--output', dest='output', default='assign.h5', help='''Store results in OUTPUT (default: %(default)s).'''
)
agroup.add_argument(
'--subsample',
dest='subsample',
action='store_const',
const=True,
help='''Determines whether or not the data should be subsampled.
This is rather useful for analysing steady state simulations.''',
)
agroup.add_argument(
'--config-from-file',
dest='config_from_file',
action='store_true',
help='''Load bins/macrostates from a scheme specified in west.cfg.''',
)
agroup.add_argument('--scheme-name', dest='scheme', help='''Name of scheme specified in west.cfg.''')
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
# Necessary to open the file to get the current iteration
# if we want to use the mapper in the file
self.data_reader.open(mode='r+')
self.n_iter = self.data_reader.current_iteration
# If we decide to use this option for iteration selection:
# getattr(args,'bins_from_h5file',None) or self.data_reader.current_iteration
with self.data_reader:
self.dssynth.h5filename = self.data_reader.we_h5filename
self.dssynth.process_args(args)
if args.config_from_file is False:
self.binning.set_we_h5file_info(self.n_iter, self.data_reader)
self.binning.process_args(args)
self.output_filename = args.output
if args.config_from_file:
if not args.scheme:
raise ValueError('A scheme must be specified.')
else:
self.load_config_from_west(args.scheme)
elif args.states:
self.parse_cmdline_states(args.states)
elif args.states_from_file:
self.load_state_file(args.states_from_file)
elif args.states_from_function:
self.load_states_from_function(get_object(args.states_from_function, path=['.']))
if self.states and len(self.states) < 2:
raise ValueError('zero, two, or more macrostates are required')
# self.output_file = WESTPAH5File(args.output, 'w', creating_program=True)
log.debug('state list: {!r}'.format(self.states))
self.subsample = args.subsample if args.subsample is not None else False
def parse_cmdline_states(self, state_strings):
states = []
for istring, state_string in enumerate(state_strings):
try:
(label, coord_str) = state_string.split(':')
except ValueError:
label ='state{}'.format(istring)
coord_str = state_string
coord = parse_pcoord_value(coord_str)
states.append({'label': label, 'coords': coord})
self.states = states
def load_config_from_west(self, scheme):
try:
config = westpa.rc.config['west']['analysis']
except Exception:
raise ValueError('There is no configuration file specified.')
ystates = config['analysis_schemes'][scheme]['states']
self.states_from_dict(ystates)
try:
self.subsample = config['subsample']
except Exception:
pass
from westpa.core._rc import bins_from_yaml_dict
self.binning.mapper = bins_from_yaml_dict(config['analysis_schemes'][scheme]['bins'][0])
path = os.path.join(os.getcwd(), config['directory'], scheme)
try:
os.mkdir(config['directory'])
os.mkdir(path)
except Exception:
pass
self.output_filename = os.path.join(path, 'assign.h5')
def load_state_file(self, state_filename):
import yaml
ydict = yaml.load(open(state_filename, 'rt'), Loader=yaml.Loader)
ystates = ydict['states']
self.states_from_dict(ystates)
def states_from_dict(self, ystates):
states = []
for istate, ystate in enumerate(ystates):
state = {}
state['label'] = ystate.get('label','state{}'.format(istate))
# coords can be:
# - a scalar, in which case it is one bin, 1-D
# - a single list, which is rejected as ambiguous
# - a list of lists, which is a list of coordinate tuples
coords = np.array(ystate['coords'])
if coords.ndim == 0:
coords.shape = (1, 1)
elif coords.ndim == 1:
raise ValueError(
'list {!r} is ambiguous (list of 1-d coordinates, or single multi-d coordinate?)'.format(ystate['coords'])
)
elif coords.ndim > 2:
raise ValueError('coordinates must be 2-D')
state['coords'] = coords
states.append(state)
self.states = states
def load_states_from_function(self, statefunc):
states = statefunc(self.binning.mapper)
for istate, state in enumerate(states):
state.setdefault('label','state{}'.format(istate))
try:
state['coords'] = np.array(state['coords'])
except KeyError:
raise ValueError('state function {!r} returned a state {!r} without coordinates'.format(statefunc, state))
self.states = states
log.debug('loaded states: {!r}'.format(self.states))
def assign_iteration(self, n_iter, nstates, nbins, state_map, last_labels):
'''Method to encapsulate the segment slicing (into n_worker slices) and parallel job submission
Submits job(s), waits on completion, splices them back together
Returns: assignments, trajlabels, pops for this iteration'''
futures = []
iter_group = self.data_reader.get_iter_group(n_iter)
nsegs, npts = iter_group['pcoord'].shape[:2]
n_workers = self.work_manager.n_workers or 1
assignments = np.empty((nsegs, npts), dtype=index_dtype)
trajlabels = np.empty((nsegs, npts), dtype=index_dtype)
statelabels = np.empty((nsegs, npts), dtype=index_dtype)
pops = np.zeros((nstates + 1, nbins + 1), dtype=weight_dtype)
# Submit jobs to work manager
blocksize = nsegs // n_workers
if nsegs % n_workers > 0:
blocksize += 1
def task_gen():
if __debug__:
checkset = set()
for lb in range(0, nsegs, blocksize):
ub = min(nsegs, lb + blocksize)
if __debug__:
checkset.update(set(range(lb, ub)))
args = ()
kwargs = dict(
n_iter=n_iter,
lb=lb,
ub=ub,
mapper=self.binning.mapper,
nstates=nstates,
state_map=state_map,
last_labels=last_labels,
parent_id_dsspec=self.data_reader.parent_id_dsspec,
weight_dsspec=self.data_reader.weight_dsspec,
pcoord_dsspec=self.dssynth.dsspec,
subsample=self.subsample,
)
yield (_assign_label_pop, args, kwargs)
# futures.append(self.work_manager.submit(_assign_label_pop,
# kwargs=)
if __debug__:
assert checkset == set(range(nsegs)),'segments missing: {}'.format(set(range(nsegs)) - checkset)
# for future in self.work_manager.as_completed(futures):
for future in self.work_manager.submit_as_completed(task_gen(), queue_size=self.max_queue_len):
assign_slice, traj_slice, slice_pops, lb, ub, state_slice = future.get_result(discard=True)
assignments[lb:ub, :] = assign_slice
trajlabels[lb:ub, :] = traj_slice
statelabels[lb:ub, :] = state_slice
pops += slice_pops
del assign_slice, traj_slice, slice_pops, state_slice
del futures
return (assignments, trajlabels, pops, statelabels)
def go(self):
assert self.data_reader.parent_id_dsspec._h5file is None
assert self.data_reader.weight_dsspec._h5file is None
if hasattr(self.dssynth.dsspec, '_h5file'):
assert self.dssynth.dsspec._h5file is None
pi = self.progress.indicator
pi.operation = 'Initializing'
with pi, self.data_reader, WESTPAH5File(self.output_filename, 'w', creating_program=True) as self.output_file:
assign = self.binning.mapper.assign
# We always assign the entire simulation, so that no trajectory appears to start
# in a transition region that doesn't get initialized in one.
iter_start = 1
iter_stop = self.data_reader.current_iteration
h5io.stamp_iter_range(self.output_file, iter_start, iter_stop)
nbins = self.binning.mapper.nbins
self.output_file.attrs['nbins'] = nbins
state_map = np.empty((self.binning.mapper.nbins + 1,), index_dtype)
state_map[:] = 0 # state_id == nstates => unknown state
# Recursive mappers produce a generator rather than a list of labels
# so consume the entire generator into a list
labels = [np.string_(label) for label in self.binning.mapper.labels]
self.output_file.create_dataset('bin_labels', data=labels, compression=9)
if self.states:
nstates = len(self.states)
state_map[:] = nstates # state_id == nstates => unknown state
state_labels = [np.string_(state['label']) for state in self.states]
for istate, sdict in enumerate(self.states):
assert state_labels[istate] == np.string_(sdict['label']) # sanity check
state_assignments = assign(sdict['coords'])
for assignment in state_assignments:
state_map[assignment] = istate
self.output_file.create_dataset('state_map', data=state_map, compression=9, shuffle=True)
self.output_file['state_labels'] = state_labels # + ['(unknown)']
else:
nstates = 0
self.output_file.attrs['nstates'] = nstates
# Stamp if this has been subsampled.
self.output_file.attrs['subsampled'] = self.subsample
iter_count = iter_stop - iter_start
nsegs = np.empty((iter_count,), seg_id_dtype)
npts = np.empty((iter_count,), seg_id_dtype)
# scan for largest number of segments and largest number of points
pi.new_operation('Scanning for segment and point counts', iter_stop - iter_start)
for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
iter_group = self.data_reader.get_iter_group(n_iter)
nsegs[iiter], npts[iiter] = iter_group['pcoord'].shape[0:2]
pi.progress += 1
del iter_group
pi.new_operation('Preparing output')
# create datasets
self.output_file.create_dataset('nsegs', data=nsegs, shuffle=True, compression=9)
self.output_file.create_dataset('npts', data=npts, shuffle=True, compression=9)
max_nsegs = nsegs.max()
max_npts = npts.max()
assignments_shape = (iter_count, max_nsegs, max_npts)
assignments_dtype = np.min_scalar_type(nbins)
assignments_ds = self.output_file.create_dataset(
'assignments',
dtype=assignments_dtype,
shape=assignments_shape,
compression=4,
shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, assignments_dtype),
fillvalue=nbins,
)
if self.states:
trajlabel_dtype = np.min_scalar_type(nstates)
trajlabels_ds = self.output_file.create_dataset(
'trajlabels',
dtype=trajlabel_dtype,
shape=assignments_shape,
compression=4,
shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
fillvalue=nstates,
)
statelabels_ds = self.output_file.create_dataset(
'statelabels',
dtype=trajlabel_dtype,
shape=assignments_shape,
compression=4,
shuffle=True,
chunks=h5io.calc_chunksize(assignments_shape, trajlabel_dtype),
fillvalue=nstates,
)
pops_shape = (iter_count, nstates + 1, nbins + 1)
pops_ds = self.output_file.create_dataset(
'labeled_populations',
dtype=weight_dtype,
shape=pops_shape,
compression=4,
shuffle=True,
chunks=h5io.calc_chunksize(pops_shape, weight_dtype),
)
h5io.label_axes(pops_ds, [np.string_(i) for i in ['iteration','state', 'bin']])
pi.new_operation('Assigning to bins', iter_stop - iter_start)
last_labels = None # mapping of seg_id to last macrostate inhabited
for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
# get iteration info in this block
if iiter == 0:
last_labels = np.empty((nsegs[iiter],), index_dtype)
last_labels[:] = nstates # unknown state
# Slices this iteration into n_workers groups of segments, submits them to wm, splices results back together
assignments, trajlabels, pops, statelabels = self.assign_iteration(n_iter, nstates, nbins, state_map, last_labels)
# Do stuff with this iteration's results
last_labels = trajlabels[:, -1].copy()
assignments_ds[iiter, 0 : nsegs[iiter], 0 : npts[iiter]] = assignments
pops_ds[iiter] = pops
if self.states:
trajlabels_ds[iiter, 0 : nsegs[iiter], 0 : npts[iiter]] = trajlabels
statelabels_ds[iiter, 0 : nsegs[iiter], 0 : npts[iiter]] = statelabels
pi.progress += 1
del assignments, trajlabels, pops, statelabels
for dsname in 'assignments', 'npts', 'nsegs', 'labeled_populations','statelabels':
h5io.stamp_iter_range(self.output_file[dsname], iter_start, iter_stop)
def entry_point():
WAssign().main()
if __name__ == '__main__':
entry_point() |
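As a usage sketch grounded in the docstring above, the labeled populations stored in the output file can be reduced to plain per-bin and per-state populations as follows (the default output file name assign.h5 is assumed):

# Illustrative post-hoc inspection of w_assign output
import h5py

with h5py.File('assign.h5', 'r') as f:
    labeled_pops = f['labeled_populations'][...]  # [iteration][state][bin]
    # Sum over states, then drop the trailing "outside defined bin space" entry
    per_bin = labeled_pops.sum(axis=1)[:, :-1]
    # Sum over bins, then drop the trailing "no defined macrostate yet" entry
    per_state = labeled_pops.sum(axis=2)[:, :-1]
    print('per-bin populations:', per_bin.shape)      # (iterations, nbins)
    print('per-state populations:', per_state.shape)  # (iterations, nstates)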
|
westpa__westpa | w_bins.rst | Manual | w_bins command | MIT License | westpa__westpa/doc/documentation/cli/w_bins.rst | [
"westpa__westpa/src/westpa/cli/tools/w_bins.py"
] | w_bins
w_bins deals with binning modification and statistics
Overview
Usage:
w_bins [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[-W WEST_H5FILE]
{info,rebin} ...
Display information and statistics about binning in a WEST simulation,
or modify the binning for the current iteration of a WEST simulation.
Command-Line Options
See the general command-line tool reference for more information on the
general options.
Options Under 'info'
Usage:
w_bins info [-h] [-n N_ITER] [--detail]
[--bins-from-system | --bins-from-expr BINS_FROM_EXPR | --bins-from-function BINS_FROM_FUNCTION | --bins-from-file]
Positional options:
info
Display information about binning.
Options for 'info':
-n N_ITER, --n-iter N_ITER
Consider initial points of segment N_ITER (default: current
iteration).
--detail
Display detailed per-bin information in addition to summary
information.
Binning options for 'info':
--bins-from-system
Bins are constructed by the system driver specified in the WEST
configuration file (default where stored bin definitions not
available).
--bins-from-expr BINS_FROM_EXPR, --binbounds BINS_FROM_EXPR
Construct bins on a rectilinear grid according to the given BINEXPR.
This must be a list of lists of bin boundaries (one list of bin
boundaries for each dimension of the progress coordinate), formatted
as a Python expression. E.g. "[[0,1,2,4,inf],[-inf,0,inf]]". The
numpy module and the special symbol "inf" (for floating-point
infinity) are available for use within BINEXPR.
--bins-from-function BINS_FROM_FUNCTION, --binfunc BINS_FROM_FUNCTION
Supply an external function which, when called, returns a properly
constructed bin mapper which will then be used for bin assignments.
This should be formatted as "[PATH:]MODULE.FUNC", where the function
FUNC in module MODULE will be used; the optional PATH will be
prepended to the module search path when loading MODULE.
--bins-from-file
Load bin specification from the data file being examined (default
where stored bin definitions available).
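To make the rectilinear-grid construction concrete, the BINEXPR from the example above is equivalent to the following boundary lists; the sketch assumes WESTPA's RectilinearBinMapper, which is the mapper such an expression is used to build:

# Equivalent of --binbounds "[[0,1,2,4,inf],[-inf,0,inf]]" (illustrative)
import numpy as np
from westpa.core.binning import RectilinearBinMapper

boundaries = [[0, 1, 2, 4, np.inf], [-np.inf, 0, np.inf]]
mapper = RectilinearBinMapper(boundaries)
# 4 bins along the first dimension x 2 along the second = 8 bins in total
print(mapper.nbins)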
Options Under 'rebin'
Usage:
w_bins rebin [-h] [--confirm] [--detail]
[--bins-from-system | --bins-from-expr BINS_FROM_EXPR | --bins-from-function BINS_FROM_FUNCTION]
[--target-counts TARGET_COUNTS | --target-counts-from FILENAME]
Positional option:
rebin
Rebuild current iteration with new binning.
Options for 'rebin':
--confirm
Commit the revised iteration to HDF5; without this option, the
effects of the new binning are only calculated and printed.
--detail
Display detailed per-bin information in addition to summary
information.
Binning options for 'rebin':
Same as the binning options for 'info'.
Bin target count options for 'rebin':
--target-counts TARGET_COUNTS
Use TARGET_COUNTS instead of stored or system driver target counts.
TARGET_COUNTS is a comma-separated list of integers. As a special
case, a single integer is acceptable, in which case the same target
count is used for all bins.
--target-counts-from FILENAME
Read target counts from the text file FILENAME instead of using
stored or system driver target counts. FILENAME must contain a list
of integers, separated by arbitrary whitespace (including newlines).
Input Options
-W WEST_H5FILE, --west_data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file
specified in west.cfg).
| import logging
import sys
import numpy as np
from westpa.tools import WESTTool, WESTDataReader, BinMappingComponent
import westpa
from westpa.tools.binning import write_bin_info
log = logging.getLogger('w_bins')
class WBinTool(WESTTool):
prog = 'w_bins'
description = '''\
Display information and statistics about binning in a WEST simulation, or
modify the binning for the current iteration of a WEST simulation.
-------------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
self.subcommand = None
self.data_reader = WESTDataReader()
self.binning = BinMappingComponent()
self.args = None
self.n_iter = None
# Interface for command-line tools
def add_args(self, parser):
self.data_reader.add_args(parser)
subparsers = parser.add_subparsers(help='available commands')
info_parser = subparsers.add_parser('info', help='Display information about binning.')
info_parser.add_argument(
'-n', '--n-iter', type=int, help='''Consider initial points of segment N_ITER (default: current iteration).'''
)
info_parser.add_argument(
'--detail',
action='store_true',
help='''Display detailed per-bin information in addition to summary
information.''',
)
self.binning.add_args(info_parser)
info_parser.set_defaults(func=self.cmd_info)
rebin_parser = subparsers.add_parser('rebin', help='Rebuild current iteration with new binning.')
rebin_parser.add_argument(
'--confirm',
action='store_true',
help='''Commit the revised iteration to HDF5; without this option, the effects of the
new binning are only calculated and printed.''',
)
rebin_parser.add_argument(
'--detail',
action='store_true',
help='''Display detailed per-bin information in addition to summary
information.''',
)
rebin_parser.add_argument(
'-n', '--n-iter', type=int, help='''Consider initial points of segment N_ITER (default: current iteration).'''
)
self.binning.add_args(rebin_parser, suppress=['--bins-from-file'])
self.binning.add_target_count_args(rebin_parser)
rebin_parser.set_defaults(func=self.cmd_rebin)
def process_args(self, args):
self.data_reader.process_args(args)
self.data_reader.open(mode='r+')
self.n_iter = getattr(args, 'n_iter', None) or self.data_reader.current_iteration
# we cannot read bin information during rebins
# interesting note: '==' is required here; 'is' fails
if args.func == self.cmd_rebin:
self.binning.target_counts_required = True
else:
self.binning.set_we_h5file_info(self.n_iter, self.data_reader)
self.binning.process_args(args)
self.args = args
self.subcommand = args.func
def go(self):
self.subcommand()
def cmd_info(self):
mapper = self.binning.mapper
# Get target states and their assignments
target_states = self.data_reader.get_target_states(self.n_iter)
n_target_states = len(target_states)
iter_group = self.data_reader.get_iter_group(self.n_iter)
# bin initial pcoords for iteration n_iter
initial_pcoords = iter_group['pcoord'][:, 0, :]
assignments = mapper.assign(initial_pcoords)
del initial_pcoords
print('Bin information for iteration {:d}'.format(self.n_iter))
# Get bin counts and weights
weights = iter_group['seg_index']['weight']
write_bin_info(mapper, assignments, weights, n_target_states, detailed=self.args.detail)
def cmd_rebin(self):
mapper = self.binning.mapper
assert mapper is not None
if self.n_iter == 1:
sys.stderr.write('rebin is not supported for the first iteration; reinitialize with w_init instead\n')
sys.exit(1)
n_target_states = len(self.data_reader.get_target_states(self.n_iter))
we_driver = westpa.rc.get_we_driver()
data_manager = self.data_reader.data_manager
segments = data_manager.get_segments(self.n_iter, load_pcoords=True)
last_iter_segments = data_manager.get_segments(self.n_iter - 1, load_pcoords=False)
# Bin on this iteration's initial points
# We don't have to worry about recycling because we are binning on
# initial points rather than final points, so recycling has already
# occurred for this iteration.
# We do need initial states, in case we merge a newly-created walker out of existence
# avail_initial_states = {state.state_id: state
# for state in data_manager.get_unused_initial_states(n_iter = self.n_iter)}
avail_initial_states = data_manager.get_unused_initial_states(n_iter=self.n_iter)
used_initial_states = data_manager.get_segment_initial_states(segments)
we_driver.new_iteration(
initial_states=avail_initial_states, bin_mapper=mapper, bin_target_counts=self.binning.bin_target_counts
)
we_driver.used_initial_states = {state.state_id: state for state in used_initial_states}
we_driver.assign(segments, initializing=True)
we_driver.rebin_current(parent_segments=last_iter_segments)
weights = np.array([segment.weight for segment in we_driver.next_iter_segments])
assignments = np.fromiter(we_driver.next_iter_assignments, dtype=int, count=len(weights))
write_bin_info(mapper, assignments, weights, n_target_states, detailed=self.args.detail)
if self.args.confirm:
data_manager.prepare_iteration(self.n_iter, list(we_driver.next_iter_segments))
# manually update endpoint statuses only
endpoint_types = sorted([(segment.seg_id, segment.endpoint_type) for segment in last_iter_segments])
last_iter_group = data_manager.get_iter_group(self.n_iter - 1)
last_iter_index = last_iter_group['seg_index'][...]
last_iter_index['endpoint_type'] = [pair[1] for pair in endpoint_types]
last_iter_group['seg_index'][...] = last_iter_index
data_manager.save_iter_binning(
self.n_iter, self.binning.mapper_hash, self.binning.mapper_pickle, we_driver.bin_target_counts
)
data_manager.update_initial_states(we_driver.all_initial_states)
data_manager.flush_backing()
def entry_point():
WBinTool().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_crawl.rst | Manual | w_crawl command | MIT License | westpa__westpa/doc/documentation/cli/w_crawl.rst | [
"westpa__westpa/src/westpa/cli/tools/w_crawl.py"
] | w_crawl
usage:
w_crawl [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--max-queue-length MAX_QUEUE_LENGTH] [-W WEST_H5FILE] [--first-iter N_ITER]
[--last-iter N_ITER] [-c CRAWLER_INSTANCE]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
task_callable
Crawl a weighted ensemble dataset, executing a function for each
iteration. This can be used for postprocessing of trajectories, cleanup
of datasets, or anything else that can be expressed as "do X for
iteration N, then do something with the result". Tasks are parallelized
by iteration, and no guarantees are made about evaluation order.
Command-line options
optional arguments:
-h, --help show this help message and exit
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
parallelization options:
--max-queue-length MAX_QUEUE_LENGTH
Maximum number of tasks that can be queued. Useful to limit RAM use for tasks
that have very large requests/response. Default: no limit.
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
iteration range:
--first-iter N_ITER Begin analysis at iteration N_ITER (default: 1).
--last-iter N_ITER Conclude analysis with N_ITER, inclusive (default: last completed iteration).
task options:
-c CRAWLER_INSTANCE, --crawler-instance CRAWLER_INSTANCE
Use CRAWLER_INSTANCE (specified as module.instance) as an instance of
WESTPACrawler to coordinate the calculation. Required only if initialization,
finalization, or task result processing is required.
task_callable Run TASK_CALLABLE (specified as module.function) on each iteration. Required.
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
use the given work manager for parallel task distribution. Available work
managers are ('serial', 'threads', 'processes', 'zmq'); default is 'serial'
--n-workers N_WORKERS
Use up to N_WORKERS on this host, for work managers which support this option.
Use 0 for a dedicated server. (Ignored by work managers which do not support
this option.)
options for ZeroMQ ("zmq") work manager (master or node):
--zmq-mode MODE Operate as a master (server) or a node (workers/client). "server" is a
deprecated synonym for "master" and "client" is a deprecated synonym for
"node".
--zmq-comm-mode COMM_MODE
Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets for
communication within a node. IPC (the default) may be more efficient but is not
available on (exceptionally rare) systems without node-local storage (e.g.
/tmp); on such systems, TCP may be used instead.
--zmq-write-host-info INFO_FILE
Store hostname and port information needed to connect to this instance in
INFO_FILE. This allows the master and nodes assisting in coordinating the
communication of other nodes to choose ports randomly. Downstream nodes read
this file with --zmq-read-host-info and know how to connect.
--zmq-read-host-info INFO_FILE
Read hostname and port information needed to connect to the master (or other
coordinating node) from INFO_FILE. This allows the master and nodes assisting
in coordinating the communication of other nodes to choose ports randomly,
writing that information with --zmq-write-host-info for this instance to read.
--zmq-upstream-rr-endpoint ENDPOINT
ZeroMQ endpoint to which to send request/response (task and result) traffic
toward the master.
--zmq-upstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to receive announcement (heartbeat and shutdown
notification) traffic from the master.
--zmq-downstream-rr-endpoint ENDPOINT
ZeroMQ endpoint on which to listen for request/response (task and result)
traffic from subsidiary workers.
--zmq-downstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to send announcement (heartbeat and shutdown
notification) traffic toward workers.
--zmq-master-heartbeat MASTER_HEARTBEAT
Every MASTER_HEARTBEAT seconds, the master announces its presence to workers.
--zmq-worker-heartbeat WORKER_HEARTBEAT
Every WORKER_HEARTBEAT seconds, workers announce their presence to the master.
--zmq-timeout-factor FACTOR
Scaling factor for heartbeat timeouts. If the master doesn't hear from a worker
in WORKER_HEARTBEAT*FACTOR, the worker is assumed to have crashed. If a worker
doesn't hear from the master in MASTER_HEARTBEAT*FACTOR seconds, the master is
assumed to have crashed. Both cases result in shutdown.
--zmq-startup-timeout STARTUP_TIMEOUT
Amount of time (in seconds) to wait for communication between the master and at
least one worker. This may need to be changed on very large, heavily-loaded
computer systems that start all processes simultaneously.
--zmq-shutdown-timeout SHUTDOWN_TIMEOUT
Amount of time (in seconds) to wait for workers to shut down.
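A minimal sketch of the two user-supplied pieces follows: a per-iteration task function and an optional crawler instance that gathers the results. The module name, the quantity computed, and the output file are illustrative assumptions, not part of w_crawl itself:

# my_crawl.py -- illustrative; run as:  w_crawl my_crawl.calc_mean_pcoord -c my_crawl.crawler
import numpy as np
from westpa.cli.tools.w_crawl import WESTPACrawler

def calc_mean_pcoord(n_iter, iter_group):
    # Executed once per iteration (possibly on a remote worker); return something picklable
    return iter_group['pcoord'][...].mean(axis=(0, 1))

class MeanPcoordCrawler(WESTPACrawler):
    def initialize(self, iter_start, iter_stop):
        self.results = {}

    def process_iter_result(self, n_iter, result):
        self.results[n_iter] = result

    def finalize(self):
        # One row per iteration: n_iter followed by the mean progress coordinate
        rows = [[n_iter] + list(vals) for n_iter, vals in sorted(self.results.items())]
        np.savetxt('mean_pcoord.dat', np.array(rows))

crawler = MeanPcoordCrawler()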
| import logging
from westpa.tools import WESTParallelTool, WESTDataReader, IterRangeSelection, ProgressIndicatorComponent
import westpa
from westpa.core.extloader import get_object
log = logging.getLogger('w_crawl')
class WESTPACrawler:
'''Base class for general crawling execution. This class
only exists on the master.'''
def initialize(self, iter_start, iter_stop):
'''Initialize this crawling process.'''
pass
def finalize(self):
'''Finalize this crawling process.'''
pass
def process_iter_result(self, n_iter, result):
'''Process the result of a per-iteration task.'''
pass
def _remote_task(n_iter, taskfn):
data_manager = westpa.rc.get_data_manager() # gaahhh...globals
data_manager.open_backing(mode='r')
return n_iter, taskfn(n_iter, data_manager.get_iter_group(n_iter))
class WCrawl(WESTParallelTool):
prog = 'w_crawl'
description = '''\
Crawl a weighted ensemble dataset, executing a function for each iteration.
This can be used for postprocessing of trajectories, cleanup of datasets,
or anything else that can be expressed as "do X for iteration N, then do
something with the result". Tasks are parallelized by iteration, and
no guarantees are made about evaluation order.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
# These are used throughout
self.progress = ProgressIndicatorComponent()
self.data_reader = WESTDataReader()
self.iter_range = IterRangeSelection(self.data_reader)
self.crawler = None
self.task_callable = None
def add_args(self, parser):
self.data_reader.add_args(parser)
self.iter_range.add_args(parser)
tgroup = parser.add_argument_group('task options')
tgroup.add_argument(
'-c',
'--crawler-instance',
help='''Use CRAWLER_INSTANCE (specified as module.instance) as an instance of
WESTPACrawler to coordinate the calculation. Required only if initialization,
finalization, or task result processing is required.''',
)
tgroup.add_argument(
'task_callable',
help='''Run TASK_CALLABLE (specified as module.function) on each iteration.
Required.''',
)
self.progress.add_args(parser)
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
with self.data_reader:
self.iter_range.process_args(args)
self.task_callable = get_object(args.task_callable, path=['.'])
if args.crawler_instance is not None:
self.crawler = get_object(args.crawler_instance, path=['.'])
else:
self.crawler = WESTPACrawler()
def go(self):
iter_start = self.iter_range.iter_start
iter_stop = self.iter_range.iter_stop
iter_count = iter_stop - iter_start
self.data_reader.open('r')
pi = self.progress.indicator
with pi:
pi.operation = 'Initializing'
self.crawler.initialize(iter_start, iter_stop)
try:
pi.new_operation('Dispatching tasks & processing results', iter_count)
task_gen = ((_remote_task, (n_iter, self.task_callable), {}) for n_iter in range(iter_start, iter_stop))
for future in self.work_manager.submit_as_completed(task_gen, self.max_queue_len):
n_iter, result = future.get_result(discard=True)
if self.crawler is not None:
self.crawler.process_iter_result(n_iter, result)
pi.progress += 1
finally:
pi.new_operation('Finalizing')
self.crawler.finalize()
def entry_point():
WCrawl().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_direct.rst | Manual | w_direct command | MIT License | westpa__westpa/doc/documentation/cli/w_direct.rst | [
"westpa__westpa/src/westpa/cli/tools/w_direct.py"
] | w_direct
usage:
w_direct [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--max-queue-length MAX_QUEUE_LENGTH]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
{help,init,average,kinetics,probs,all} ...
optional arguments:
-h, --help show this help message and exit
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
parallelization options:
--max-queue-length MAX_QUEUE_LENGTH
Maximum number of tasks that can be queued. Useful to limit RAM use for tasks that
have very large requests/response. Default: no limit.
direct kinetics analysis schemes:
{help,init,average,kinetics,probs,all}
help print help for this command or individual subcommands
init calculate state-to-state kinetics by tracing trajectories
average Averages and returns fluxes, rates, and color/state populations.
kinetics Generates rate and flux values from a WESTPA simulation via tracing.
probs Calculates color and state probabilities via tracing.
all Runs the full suite, including the tracing of events.
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
use the given work manager for parallel task distribution. Available work managers
are ('serial', 'threads', 'processes', 'zmq'); default is 'serial'
--n-workers N_WORKERS
Use up to N_WORKERS on this host, for work managers which support this option. Use
0 for a dedicated server. (Ignored by work managers which do not support this
option.)
options for ZeroMQ ("zmq") work manager (master or node):
--zmq-mode MODE Operate as a master (server) or a node (workers/client). "server" is a deprecated
synonym for "master" and "client" is a deprecated synonym for "node".
--zmq-comm-mode COMM_MODE
Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets for
communication within a node. IPC (the default) may be more efficient but is not
available on (exceptionally rare) systems without node-local storage (e.g. /tmp);
on such systems, TCP may be used instead.
--zmq-write-host-info INFO_FILE
Store hostname and port information needed to connect to this instance in
INFO_FILE. This allows the master and nodes assisting in coordinating the
communication of other nodes to choose ports randomly. Downstream nodes read this
file with --zmq-read-host-info and know how to connect.
--zmq-read-host-info INFO_FILE
Read hostname and port information needed to connect to the master (or other
coordinating node) from INFO_FILE. This allows the master and nodes assisting in
coordinating the communication of other nodes to choose ports randomly, writing
that information with --zmq-write-host-info for this instance to read.
--zmq-upstream-rr-endpoint ENDPOINT
ZeroMQ endpoint to which to send request/response (task and result) traffic toward
the master.
--zmq-upstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to receive announcement (heartbeat and shutdown
notification) traffic from the master.
--zmq-downstream-rr-endpoint ENDPOINT
ZeroMQ endpoint on which to listen for request/response (task and result) traffic
from subsidiary workers.
--zmq-downstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to send announcement (heartbeat and shutdown
notification) traffic toward workers.
--zmq-master-heartbeat MASTER_HEARTBEAT
Every MASTER_HEARTBEAT seconds, the master announces its presence to workers.
--zmq-worker-heartbeat WORKER_HEARTBEAT
Every WORKER_HEARTBEAT seconds, workers announce their presence to the master.
--zmq-timeout-factor FACTOR
Scaling factor for heartbeat timeouts. If the master doesn't hear from a worker in
WORKER_HEARTBEAT*FACTOR, the worker is assumed to have crashed. If a worker
doesn't hear from the master in MASTER_HEARTBEAT*FACTOR seconds, the master is
assumed to have crashed. Both cases result in shutdown.
--zmq-startup-timeout STARTUP_TIMEOUT
Amount of time (in seconds) to wait for communication between the master and at
least one worker. This may need to be changed on very large, heavily-loaded
computer systems that start all processes simultaneously.
--zmq-shutdown-timeout SHUTDOWN_TIMEOUT
Amount of time (in seconds) to wait for workers to shut down.
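As a usage sketch, the averaged results written by ``w_direct kinetics`` can be inspected with h5py; the dataset names and record fields below follow the subcommand descriptions included with the tool's source (reproduced after this manual), while the file name and state indices are illustrative:
import h5py

with h5py.File('direct.h5', 'r') as f:
    # /avg_rates is a [state, state] structured dataset whose fields include
    # 'expected', 'ci_lbound' and 'ci_ubound' (all in units of 1/tau).
    avg_rates = f['avg_rates'][:]
    print('rate 0->1:', avg_rates['expected'][0, 1],
          'CI:', avg_rates['ci_lbound'][0, 1], avg_rates['ci_ubound'][0, 1])

    # /rate_evolution is [window, state, state]; each record carries the window
    # boundaries ('iter_start' inclusive, 'iter_stop' exclusive).
    evol = f['rate_evolution'][:]
    for row in evol[:, 0, 1]:
        print(int(row['iter_stop']) - 1, float(row['expected']))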
| import logging
import numpy as np
from westpa.core.data_manager import weight_dtype
from westpa.tools import WESTMasterCommand, WESTParallelTool
from westpa.core import h5io
from westpa.core.kinetics import sequence_macro_flux_to_rate, WKinetics
from westpa.tools.kinetics_tool import WESTKineticsBase, AverageCommands
from westpa.mclib import mcbs_ci_correl, _1D_simple_eval_block, _2D_simple_eval_block
# From w_stateprobs
from westpa.core.binning import accumulate_state_populations_from_labeled
log = logging.getLogger('w_direct')
# This block is responsible for submitting a set of calculations to be bootstrapped over for a particular type of calculation.
# A property which wishes to be calculated should adhere to this format.
def _rate_eval_block(iblock, start, stop, nstates, data_input, name, mcbs_alpha, mcbs_nsets, mcbs_acalpha, do_correl, mcbs_enable):
# Our rate estimator is a little more complex, so we've defined a custom evaluation block for it,
# instead of just using the block evaluators that we've imported.
results = []
for istate in range(nstates):
for jstate in range(nstates):
if istate == jstate:
continue
kwargs = {'istate': istate, 'jstate': jstate}
# Why are we sending in the total population dataset, instead of a sliced one?
# It's a requirement of our estimator; we need to pull from any given i to j state in order to properly normalize
# and avoid i to j rate constants which are affected by a third state k.
# That is, we need the populations for both i and j, and it's easier to just send in the entire dataset.
dataset = {'dataset': data_input['dataset'][:, istate, jstate], 'pops': data_input['pops']}
ci_res = mcbs_ci_correl(
dataset,
estimator=sequence_macro_flux_to_rate,
alpha=mcbs_alpha,
n_sets=mcbs_nsets,
autocorrel_alpha=mcbs_acalpha,
subsample=np.mean,
do_correl=do_correl,
mcbs_enable=mcbs_enable,
estimator_kwargs=kwargs,
)
results.append((name, iblock, istate, jstate, (start, stop) + ci_res))
return results
# The old w_kinetics
class DKinetics(WESTKineticsBase, WKinetics):
subcommand = 'init'
default_kinetics_file = 'direct.h5'
default_output_file = 'direct.h5'
help_text = 'calculate state-to-state kinetics by tracing trajectories'
description = '''\
Calculate state-to-state rates and transition event durations by tracing
trajectories.
A bin assignment file (usually "assign.h5") including trajectory labeling
is required (see "w_assign --help" for information on generating this file).
This subcommand for w_direct is used as input for all other w_direct
subcommands, which will convert the flux data in the output file into
average rates/fluxes/populations with confidence intervals.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "direct.h5") contains the
following datasets:
``/conditional_fluxes`` [iteration][state][state]
*(Floating-point)* Macrostate-to-macrostate fluxes. These are **not**
normalized by the population of the initial macrostate.
``/conditional_arrivals`` [iteration][stateA][stateB]
*(Integer)* Number of trajectories arriving at state *stateB* in a given
iteration, given that they departed from *stateA*.
``/total_fluxes`` [iteration][state]
*(Floating-point)* Total flux into a given macrostate.
``/arrivals`` [iteration][state]
*(Integer)* Number of trajectories arriving at a given state in a given
iteration, regardless of where they originated.
``/duration_count`` [iteration]
*(Integer)* The number of event durations recorded in each iteration.
``/durations`` [iteration][event duration]
*(Structured -- see below)* Event durations for transition events ending
during a given iteration. These are stored as follows:
istate
*(Integer)* Initial state of transition event.
fstate
*(Integer)* Final state of transition event.
duration
*(Floating-point)* Duration of transition, in units of tau.
weight
*(Floating-point)* Weight of trajectory at end of transition, **not**
normalized by initial state population.
Because state-to-state fluxes stored in this file are not normalized by
initial macrostate population, they cannot be used as rates without further
processing. The ``w_direct kinetics`` command is used to perform this normalization
while taking statistical fluctuation and correlation into account. See
``w_direct kinetics --help`` for more information. Target fluxes (total flux
into a given state) require no such normalization.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self, parent):
super().__init__(parent)
def open_files(self):
self.output_file = h5io.WESTPAH5File(self.output_filename, 'a', creating_program=True)
h5io.stamp_creator_data(self.output_file)
self.assignments_file = h5io.WESTPAH5File(self.assignments_filename, 'r') #, driver='core', backing_store=False)
if not self.iter_range.check_data_iter_range_least(self.assignments_file):
raise ValueError('assignments data do not span the requested iterations')
def go(self):
pi = self.progress.indicator
with pi:
self.w_kinetics()
# The old w_kinavg
class DKinAvg(AverageCommands):
subcommand = 'kinetics'
help_text = 'Generates rate and flux values from a WESTPA simulation via tracing.'
default_kinetics_file = 'direct.h5'
description = '''\
Calculate average rates/fluxes and associated errors from weighted ensemble
data. Bin assignments (usually "assign.h5") and kinetics data (usually
"direct.h5") data files must have been previously generated (see
"w_assign --help" and "w_direct init --help" for information on
generating these files).
The evolution of all datasets may be calculated, with or without confidence
intervals.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, usually "direct.h5") contains the following
dataset:
/avg_rates [state,state]
(Structured -- see below) State-to-state rates based on entire window of
iterations selected.
/avg_total_fluxes [state]
(Structured -- see below) Total fluxes into each state based on entire
window of iterations selected.
/avg_conditional_fluxes [state,state]
(Structured -- see below) State-to-state fluxes based on entire window of
iterations selected.
If --evolution-mode is specified, then the following additional datasets are
available:
/rate_evolution [window][state][state]
(Structured -- see below). State-to-state rates based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
/target_flux_evolution [window,state]
(Structured -- see below). Total flux into a given macro state based on
windows of iterations of varying width, as in /rate_evolution.
/conditional_flux_evolution [window,state,state]
(Structured -- see below). State-to-state fluxes based on windows of
varying width, as in /rate_evolution.
The structure of these datasets is as follows:
iter_start
(Integer) Iteration at which the averaging window begins (inclusive).
iter_stop
(Integer) Iteration at which the averaging window ends (exclusive).
expected
(Floating-point) Expected (mean) value of the observable as evaluated within
this window, in units of inverse tau.
ci_lbound
(Floating-point) Lower bound of the confidence interval of the observable
within this window, in units of inverse tau.
ci_ubound
(Floating-point) Upper bound of the confidence interval of the observable
within this window, in units of inverse tau.
stderr
(Floating-point) The standard error of the mean of the observable
within this window, in units of inverse tau.
corr_len
(Integer) Correlation length of the observable within this window, in units
of tau.
Each of these datasets is also stamped with a number of attributes:
mcbs_alpha
(Floating-point) Alpha value of confidence intervals. (For example,
*alpha=0.05* corresponds to a 95% confidence interval.)
mcbs_nsets
(Integer) Number of bootstrap data sets used in generating confidence
intervals.
mcbs_acalpha
(Floating-point) Alpha value for determining correlation lengths.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def w_kinavg(self):
pi = self.progress.indicator
# pi = None
# We're initializing the various datasets...
self.open_files()
self.open_assignments()
# Obviously, this is for the conditional and total fluxes. This is really all we need to sort for this.
cond_fluxes = h5io.IterBlockedDataset(self.kinetics_file['conditional_fluxes'])
cond_fluxes.cache_data()
total_fluxes = h5io.IterBlockedDataset(self.kinetics_file['total_fluxes'])
# This is necessary for both color and state populations...
#... but we also need this for the kinetics calculations.
pops = h5io.IterBlockedDataset(self.assignments_file['labeled_populations'])
pops.cache_data()
pops.data = pops.data.sum(axis=2)
submit_kwargs = dict(
pi=pi, nstates=self.nstates, start_iter=self.start_iter, stop_iter=self.stop_iter, step_iter=self.step_iter
)
# Calculate averages for the simulation, then report, if necessary.
submit_kwargs['dataset'] = {'dataset': cond_fluxes, 'pops': pops}
avg_rates = self.run_calculation(
eval_block=_rate_eval_block, name='Rate Evolution', dim=2, do_averages=True, **submit_kwargs
)
self.output_file.replace_dataset('avg_rates', data=avg_rates[1])
submit_kwargs['dataset'] = {'dataset': cond_fluxes}
avg_conditional_fluxes = self.run_calculation(
eval_block=_2D_simple_eval_block, name='Conditional Flux Evolution', dim=2, do_averages=True, **submit_kwargs
)
self.output_file.replace_dataset('avg_conditional_fluxes', data=avg_conditional_fluxes[1])
submit_kwargs['dataset'] = {'dataset': total_fluxes}
avg_total_fluxes = self.run_calculation(
eval_block=_1D_simple_eval_block, name='Target Flux Evolution', dim=1, do_averages=True, **submit_kwargs
)
self.output_file.replace_dataset('avg_total_fluxes', data=avg_total_fluxes[1])
# Now, print them!
# We've returned an average, but it still exists in a timeslice format. So we need to return the 'last' value.
if self.display_averages:
self.print_averages(avg_total_fluxes[1], '\nfluxes into macrostates:', dim=1)
self.print_averages(avg_conditional_fluxes[1], '\nfluxes from state to state:', dim=2)
self.print_averages(avg_rates[1], '\nrates from state to state:', dim=2)
# Do a bootstrap evolution.
submit_kwargs['dataset'] = {'dataset': cond_fluxes, 'pops': pops}
rate_evol = self.run_calculation(eval_block=_rate_eval_block, name='Rate Evolution', dim=2, **submit_kwargs)
self.output_file.replace_dataset('rate_evolution', data=rate_evol, shuffle=True, compression=9)
submit_kwargs['dataset'] = {'dataset': cond_fluxes}
rate_evol = self.run_calculation(
eval_block=_2D_simple_eval_block, name='Conditional Flux Evolution', dim=2, **submit_kwargs
)
self.output_file.replace_dataset('conditional_flux_evolution', data=rate_evol, shuffle=True, compression=9)
submit_kwargs['dataset'] = {'dataset': total_fluxes}
rate_evol = self.run_calculation(eval_block=_1D_simple_eval_block, name='Target Flux Evolution', dim=1, **submit_kwargs)
self.output_file.replace_dataset('target_flux_evolution', data=rate_evol, shuffle=True, compression=9)
def go(self):
pi = self.progress.indicator
with pi:
self.w_kinavg()
# The old w_stateprobs
class DStateProbs(AverageCommands):
subcommand = 'probs'
help_text = 'Calculates color and state probabilities via tracing.'
default_kinetics_file = 'direct.h5'
description = '''\
Calculate average populations and associated errors in state populations from
weighted ensemble data. Bin assignments, including macrostate definitions,
are required. (See "w_assign --help" for more information).
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, usually "direct.h5") contains the following
dataset:
/avg_state_probs [state]
(Structured -- see below) Population of each state across entire
range specified.
/avg_color_probs [state]
(Structured -- see below) Population of each ensemble across entire
range specified.
If --evolution-mode is specified, then the following additional datasets are
available:
/state_pop_evolution [window][state]
(Structured -- see below). State populations based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
/color_prob_evolution [window][state]
(Structured -- see below). Ensemble populations based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
The structure of these datasets is as follows:
iter_start
(Integer) Iteration at which the averaging window begins (inclusive).
iter_stop
(Integer) Iteration at which the averaging window ends (exclusive).
expected
(Floating-point) Expected (mean) value of the observable as evaluated within
this window, in units of inverse tau.
ci_lbound
(Floating-point) Lower bound of the confidence interval of the observable
within this window, in units of inverse tau.
ci_ubound
(Floating-point) Upper bound of the confidence interval of the observable
within this window, in units of inverse tau.
stderr
(Floating-point) The standard error of the mean of the observable
within this window, in units of inverse tau.
corr_len
(Integer) Correlation length of the observable within this window, in units
of tau.
Each of these datasets is also stamped with a number of attributes:
mcbs_alpha
(Floating-point) Alpha value of confidence intervals. (For example,
*alpha=0.05* corresponds to a 95% confidence interval.)
mcbs_nsets
(Integer) Number of bootstrap data sets used in generating confidence
intervals.
mcbs_acalpha
(Floating-point) Alpha value for determining correlation lengths.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def calculate_state_populations(self, pops):
#... but then this is how the state populations are done.
# This was taken, more or less, from the old w_stateprobs
iter_count = self.stop_iter - self.start_iter
all_state_pops = np.empty((iter_count, self.nstates + 1), weight_dtype)
iter_state_pops = np.empty((self.nstates + 1,), weight_dtype)
avg_state_pops = np.zeros((self.nstates + 1,), weight_dtype)
pops.cache_data(max_size='available')
state_map = self.assignments_file['state_map'][...]
try:
for iiter, n_iter in enumerate(range(self.start_iter, self.stop_iter)):
iter_state_pops.fill(0)
labeled_pops = pops.iter_entry(n_iter)
accumulate_state_populations_from_labeled(labeled_pops, state_map, iter_state_pops, check_state_map=False)
all_state_pops[iiter] = iter_state_pops
avg_state_pops += iter_state_pops
del labeled_pops
finally:
pops.drop_cache()
state_pops = h5io.IterBlockedDataset.empty_like(pops)
state_pops.data = all_state_pops
return state_pops
def w_stateprobs(self):
pi = self.progress.indicator
self.open_files()
self.open_assignments()
# So far, we definitely need this boilerplate...
# pi.new_operation('Reading data')
# This is necessary for both color and state populations...
pops = h5io.IterBlockedDataset(self.assignments_file['labeled_populations'])
state_pops = self.calculate_state_populations(pops)
# This now sorts it for the color populations
pops.cache_data()
pops.data = pops.data.sum(axis=2)
submit_kwargs = dict(
pi=pi,
nstates=self.nstates,
start_iter=self.start_iter,
stop_iter=self.stop_iter,
step_iter=self.step_iter,
eval_block=_1D_simple_eval_block,
)
# Calculate and print averages
submit_kwargs['dataset'] = {'dataset': pops}
color_evol_avg = self.run_calculation(name='Color Probability Evolution', dim=1, do_averages=True, **submit_kwargs)
self.output_file.replace_dataset('avg_color_probs', data=color_evol_avg[1], shuffle=True, compression=9)
submit_kwargs['dataset'] = {'dataset': state_pops}
state_evol_avg = self.run_calculation(name='State Probability Evolution', dim=1, do_averages=True, **submit_kwargs)
self.output_file.replace_dataset(name='avg_state_probs', data=state_evol_avg[1], shuffle=True, compression=9)
# Print!
if self.display_averages:
self.print_averages(color_evol_avg[1], '\naverage color probabilities:', dim=1)
self.print_averages(state_evol_avg[1], '\naverage state probabilities:', dim=1)
# Now, do a bootstrap evolution
submit_kwargs['dataset'] = {'dataset': pops}
pop_evol = self.run_calculation(name='Color Probability Evolution', dim=1, **submit_kwargs)
self.output_file.replace_dataset('color_prob_evolution', data=pop_evol, shuffle=True, compression=9)
submit_kwargs['dataset'] = {'dataset': state_pops}
pop_evol = self.run_calculation(name='State Probability Evolution', dim=1, **submit_kwargs)
self.output_file.replace_dataset(name='state_pop_evolution', data=pop_evol, shuffle=True, compression=9)
def go(self):
pi = self.progress.indicator
with pi:
self.w_stateprobs()
# Just a convenience class to run everything.
class DAll(DStateProbs, DKinAvg, DKinetics):
subcommand = 'all'
help_text = 'Runs the full suite, including the tracing of events.'
default_kinetics_file = 'direct.h5'
description = '''\
A convenience function to run init/kinetics/probs. Bin assignments,
including macrostate definitions, are required. (See
"w_assign --help" for more information).
For more information on the individual subcommands this subs in for, run
w_direct {init/kinetics/probs} --help.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def go(self):
# One minor issue; as this stands now, since it's inheriting from all the other classes, it needs
# a kinetics file to instantiate the other attributes. We'll need to modify how the loading works, there.
pi = self.progress.indicator
with pi:
self.w_kinetics()
self.w_kinavg()
self.w_stateprobs()
# Just a convenience class to average the observables.
class DAverage(DStateProbs, DKinAvg):
subcommand = 'average'
help_text = 'Averages and returns fluxes, rates, and color/state populations.'
default_kinetics_file = 'direct.h5'
description = '''\
A convenience function to run kinetics/probs. Bin assignments,
including macrostate definitions, are required. (See
"w_assign --help" for more information).
For more information on the individual subcommands this subs in for, run
w_direct {kinetics/probs} --help.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def go(self):
pi = self.progress.indicator
with pi:
self.w_kinavg()
self.w_stateprobs()
class WDirect(WESTMasterCommand, WESTParallelTool):
prog = 'w_direct'
# subcommands = [AvgTraceSubcommand,AvgMatrixSubcommand]
subcommands = [DKinetics, DAverage, DKinAvg, DStateProbs, DAll]
subparsers_title = 'direct kinetics analysis schemes'
def entry_point():
WDirect().main()
if __name__ == '__main__':
entry_point() |
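The raw transition-event data written by ``w_direct init`` (the /durations and /duration_count datasets documented in the init subcommand description above, and the input that w_eddist consumes) can also be examined directly. A hedged sketch, assuming entries beyond the per-iteration count are padding and that at least ten iterations exist:
import h5py

with h5py.File('direct.h5', 'r') as f:
    durations = f['durations']          # [iteration][event]: istate, fstate, duration, weight
    counts = f['duration_count'][:]     # number of recorded events per iteration
    iiter = 9                           # iteration 10 (index 9); illustrative choice
    for ev in durations[iiter][: counts[iiter]]:
        print(int(ev['istate']), int(ev['fstate']), float(ev['duration']), float(ev['weight']))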
|
westpa__westpa | w_eddist.rst | Manual | w_eddist command | MIT License | westpa__westpa/doc/documentation/cli/w_eddist.rst | [
"westpa__westpa/src/westpa/cli/tools/w_eddist.py"
] | w_eddist
usage:
w_eddist [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--max-queue-length MAX_QUEUE_LENGTH] [-b BINEXPR] [-C] [--loose] --istate ISTATE
--fstate FSTATE [--first-iter ITER_START] [--last-iter ITER_STOP] [-k KINETICS]
[-o OUTPUT] [--serial | --parallel | --work-manager WORK_MANAGER]
[--n-workers N_WORKERS] [--zmq-mode MODE] [--zmq-comm-mode COMM_MODE]
[--zmq-write-host-info INFO_FILE] [--zmq-read-host-info INFO_FILE]
[--zmq-upstream-rr-endpoint ENDPOINT] [--zmq-upstream-ann-endpoint ENDPOINT]
[--zmq-downstream-rr-endpoint ENDPOINT] [--zmq-downstream-ann-endpoint ENDPOINT]
[--zmq-master-heartbeat MASTER_HEARTBEAT] [--zmq-worker-heartbeat WORKER_HEARTBEAT]
[--zmq-timeout-factor FACTOR] [--zmq-startup-timeout STARTUP_TIMEOUT]
[--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
Calculate time-resolved transition-event duration distribution from
kinetics results
Source data
Source data is collected from the results of 'w_kinetics trace' (see
w_kinetics trace --help for more information on generating this
dataset).
Histogram binning
By default, histograms are constructed with 100 bins in each dimension.
This can be overridden by specifying -b/--bins, which accepts a number
of different kinds of arguments:
a single integer N
N uniformly spaced bins will be used in each dimension.
a sequence of integers N1,N2,... (comma-separated)
N1 uniformly spaced bins will be used for the first dimension, N2 for the
second, and so on.
a list of lists [[B11, B12, B13, ...], [B21, B22, B23, ...], ...]
The bin boundaries B11, B12, B13, ... will be used for the first dimension,
B21, B22, B23, ... for the second dimension, and so on. These bin
boundaries need not be uniformly spaced. These expressions will be
evaluated with Python's ``eval`` construct, with ``np`` available for
use [e.g. to specify bins using np.arange()].
The first two forms (integer, list of integers) will trigger a scan of
all data in each dimension in order to determine the minimum and maximum
values, which may be very expensive for large datasets. This can be
avoided by explicitly providing bin boundaries using the list-of-lists
form.
Note that these bins are NOT at all related to the bins used to drive WE
sampling.
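As an illustration (the boundary values are arbitrary), the expression given to -b/--bins is simply evaluated with ``np`` in scope, mirroring the tool's parse_binspec helper in the source below; for the one-dimensional duration histogram a list containing a single boundary set suffices:
import numpy as np

binexpr = "[list(np.arange(0.0, 50.0, 2.0))]"   # one boundary set; durations are in units of tau
namespace = {'numpy': np, 'np': np, 'inf': float('inf')}
bins = eval(binexpr, namespace)
# bins[0] -> [0.0, 2.0, 4.0, ..., 48.0], used directly as bin boundaries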
Output format
The output file produced (specified by -o/--output, defaulting to
"eddist.h5") may be fed to plothist to generate plots (or appropriately
processed text or HDF5 files) from this data. In short, the following
datasets are created:
``histograms``
Normalized histograms. The first axis corresponds to iteration, and
remaining axes correspond to dimensions of the input dataset.
``/binbounds_0``
Vector of bin boundaries for the first (index 0) dimension. Additional
datasets similarly named (/binbounds_1, /binbounds_2, ...) are created
for additional dimensions.
``/midpoints_0``
Vector of bin midpoints for the first (index 0) dimension. Additional
datasets similarly named are created for additional dimensions.
``n_iter``
Vector of iteration numbers corresponding to the stored histograms (i.e.
the first axis of the ``histograms`` dataset).
Subsequent processing
The output generated by this program (-o/--output, default "eddist.h5")
may be plotted by the plothist program. See plothist --help for more
information.
Parallelization
This tool supports parallelized binning, including reading of input
data. Parallel processing is the default. For simple cases (reading
pre-computed input data, modest numbers of segments), serial processing
(--serial) may be more efficient.
Command-line options
optional arguments:
-h, --help show this help message and exit
-b BINEXPR, --bins BINEXPR
Use BINEXPR for bins. This may be an integer, which will be used for each
dimension of the progress coordinate; a list of integers (formatted as
[n1,n2,...]) which will use n1 bins for the first dimension, n2 for the second
dimension, and so on; or a list of lists of boundaries (formatted as [[a1, a2,
...], [b1, b2, ...], ... ]), which will use [a1, a2, ...] as bin boundaries for
the first dimension, [b1, b2, ...] as bin boundaries for the second dimension,
and so on. (Default: 100 bins in each dimension.)
-C, --compress Compress histograms. May make storage of higher-dimensional histograms more
tractable, at the (possible extreme) expense of increased analysis time.
(Default: no compression.)
--loose Ignore values that do not fall within bins. (Risky, as this can make buggy bin
boundaries appear as reasonable data. Only use if you are sure of your bin
boundary specification.)
--istate ISTATE Initial state defining transition event
--fstate FSTATE Final state defining transition event
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
parallelization options:
--max-queue-length MAX_QUEUE_LENGTH
Maximum number of tasks that can be queued. Useful to limit RAM
use for tasks that have very large requests/response. Default:
no limit.
iteration range options:
--first-iter ITER_START
Iteration to begin analysis (default: 1)
--last-iter ITER_STOP
Iteration to end analysis
input/output options:
-k KINETICS, --kinetics KINETICS
Populations and transition rates (including evolution) are stored in KINETICS
(default: kintrace.h5).
-o OUTPUT, --output OUTPUT
Store results in OUTPUT (default: eddist.h5).
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
use the given work manager for parallel task distribution. Available work managers
are ('serial', 'threads', 'processes', 'zmq'); default is 'processes'
--n-workers N_WORKERS
Use up to N_WORKERS on this host, for work managers which support this option. Use 0
for a dedicated server. (Ignored by work managers which do not support this option.)
options for ZeroMQ ("zmq") work manager (master or node):
--zmq-mode MODE Operate as a master (server) or a node (workers/client). "server" is a
deprecated synonym for "master" and "client" is a deprecated synonym for
"node".
--zmq-comm-mode COMM_MODE
Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets for
communication within a node. IPC (the default) may be more efficient but is not
available on (exceptionally rare) systems without node-local storage (e.g.
/tmp); on such systems, TCP may be used instead.
--zmq-write-host-info INFO_FILE
Store hostname and port information needed to connect to this instance in
INFO_FILE. This allows the master and nodes assisting in coordinating the
communication of other nodes to choose ports randomly. Downstream nodes read
this file with --zmq-read-host-info and know how to connect.
--zmq-read-host-info INFO_FILE
Read hostname and port information needed to connect to the master (or other
coordinating node) from INFO_FILE. This allows the master and nodes assisting
in coordinating the communication of other nodes to choose ports randomly,
writing that information with --zmq-write-host-info for this instance to read.
--zmq-upstream-rr-endpoint ENDPOINT
ZeroMQ endpoint to which to send request/response (task and result) traffic
toward the master.
--zmq-upstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to receive announcement (heartbeat and shutdown
notification) traffic from the master.
--zmq-downstream-rr-endpoint ENDPOINT
ZeroMQ endpoint on which to listen for request/response (task and result)
traffic from subsidiary workers.
--zmq-downstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to send announcement (heartbeat and shutdown
notification) traffic toward workers.
--zmq-master-heartbeat MASTER_HEARTBEAT
Every MASTER_HEARTBEAT seconds, the master announces its presence to workers.
--zmq-worker-heartbeat WORKER_HEARTBEAT
Every WORKER_HEARTBEAT seconds, workers announce their presence to the master.
--zmq-timeout-factor FACTOR
Scaling factor for heartbeat timeouts. If the master doesn't hear from a worker
in WORKER_HEARTBEAT*FACTOR, the worker is assumed to have crashed. If a worker
doesn't hear from the master in MASTER_HEARTBEAT*FACTOR seconds, the master is
assumed to have crashed. Both cases result in shutdown.
--zmq-startup-timeout STARTUP_TIMEOUT
Amount of time (in seconds) to wait for communication between the master and at
least one worker. This may need to be changed on very large, heavily-loaded
computer systems that start all processes simultaneously.
--zmq-shutdown-timeout SHUTDOWN_TIMEOUT
Amount of time (in seconds) to wait for workers to shut down.
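A short sketch of reading the resulting histograms with h5py is given below. Dataset names follow the Output format section above; the mean-duration calculation assumes each stored histogram is a normalized density over the duration axis:
import h5py
import numpy as np

with h5py.File('eddist.h5', 'r') as f:
    hists = f['histograms'][:]      # one histogram per iteration
    bounds = f['binbounds_0'][:]    # duration-axis bin boundaries
    mids = f['midpoints_0'][:]      # duration-axis bin midpoints
    n_iters = f['n_iter'][:]        # iteration number for each histogram

widths = np.diff(bounds)
for n_iter, h in zip(n_iters, hists):
    mass = h * widths               # probability mass per bin (assuming density normalization)
    if mass.sum() > 0:
        print(int(n_iter), float((mids * mass).sum() / mass.sum()))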
| import logging
import h5py
import numpy as np
from westpa.tools import WESTParallelTool, ProgressIndicatorComponent
from westpa.fasthist import histnd, normhistnd
from westpa.core import h5io
log = logging.getLogger('w_eddist')
class DurationDataset:
'''A facade for the 'dsspec' dataclass that incorporates the mask into the get_iter_data method'''
def __init__(self, dataset, mask, iter_start=1):
self.dataset = dataset
self.mask = mask
self.dtype = dataset.dtype
self.iter_start = iter_start
def get_iter_data(self, n_iter):
try:
assert n_iter >= self.iter_start
dset = self.dataset[n_iter - 1][self.mask[n_iter - self.iter_start]]
except (AssertionError, IndexError):
raise ValueError("Iteration {} is not within the iteration range".format(n_iter))
nsegs = dset.shape[0]
if nsegs == 0:
return None
else:
return dset.reshape(nsegs, 1, 1)
def isiterable(x):
try:
iter(x)
except TypeError:
return False
else:
return True
def _remote_min_max(ndim, dset_dtype, n_iter, dsspec):
try:
minval = np.finfo(dset_dtype).min
maxval = np.finfo(dset_dtype).max
except ValueError:
minval = np.iinfo(dset_dtype).min
maxval = np.iinfo(dset_dtype).max
data_range = [(maxval, minval) for _i in range(ndim)]
dset = dsspec.get_iter_data(n_iter)
if dset is None:
return data_range
for idim in range(ndim):
dimdata = dset[:, :, idim]
current_min, current_max = data_range[idim]
current_min = min(current_min, dimdata.min())
current_max = max(current_max, dimdata.max())
data_range[idim] = (current_min, current_max)
del dimdata
del dset
return data_range
def _remote_bin_iter(iiter, n_iter, dsspec, wt_dsspec, initpoint, binbounds, ignore_out_of_range):
iter_hist_shape = tuple(len(bounds) - 1 for bounds in binbounds)
iter_hist = np.zeros(iter_hist_shape, dtype=np.float64)
dset = dsspec.get_iter_data(n_iter)
if dset is None:
return iiter, n_iter, iter_hist
else:
npts = dset.shape[1]
weights = wt_dsspec.get_iter_data(n_iter)[:, 0, 0]
# dset = dset[:,initpoint:,:]
for ipt in range(npts - initpoint):
histnd(dset[:, ipt, :], binbounds, weights, out=iter_hist, binbound_check=False, ignore_out_of_range=ignore_out_of_range)
del weights, dset
# normalize histogram
normhistnd(iter_hist, binbounds)
return iiter, n_iter, iter_hist
class WEDDist(WESTParallelTool):
prog = 'w_eddist'
description = '''\
Calculate time-resolved transition-event duration distribution from kinetics results
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is collected from the results of 'w_kinetics trace' (see w_kinetics trace --help for
more information on generating this dataset).
-----------------------------------------------------------------------------
Histogram binning
-----------------------------------------------------------------------------
By default, histograms are constructed with 100 bins in each dimension. This
can be overridden by specifying -b/--bins, which accepts a number of different
kinds of arguments:
a single integer N
N uniformly spaced bins will be used in each dimension.
a sequence of integers N1,N2,... (comma-separated)
N1 uniformly spaced bins will be used for the first dimension, N2 for the
second, and so on.
a list of lists [[B11, B12, B13,...], [B21, B22, B23,...],...]
The bin boundaries B11, B12, B13,... will be used for the first dimension,
B21, B22, B23,... for the second dimension, and so on. These bin
boundaries need not be uniformly spaced. These expressions will be
evaluated with Python's ``eval`` construct, with ``np`` available for
use [e.g. to specify bins using np.arange()].
The first two forms (integer, list of integers) will trigger a scan of all
data in each dimension in order to determine the minimum and maximum values,
which may be very expensive for large datasets. This can be avoided by
explicitly providing bin boundaries using the list-of-lists form.
Note that these bins are *NOT* at all related to the bins used to drive WE
sampling.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file produced (specified by -o/--output, defaulting to "pdist.h5")
may be fed to plothist to generate plots (or appropriately processed text or
HDF5 files) from this data. In short, the following datasets are created:
``histograms``
Normalized histograms. The first axis corresponds to iteration, and
remaining axes correspond to dimensions of the input dataset.
``/binbounds_0``
Vector of bin boundaries for the first (index 0) dimension. Additional
datasets similarly named (/binbounds_1, /binbounds_2,...) are created
for additional dimensions.
``/midpoints_0``
Vector of bin midpoints for the first (index 0) dimension. Additional
datasets similarly named are created for additional dimensions.
``n_iter``
Vector of iteration numbers corresponding to the stored histograms (i.e.
the first axis of the ``histograms`` dataset).
-----------------------------------------------------------------------------
Subsequent processing
-----------------------------------------------------------------------------
The output generated by this program (-o/--output, default "pdist.h5") may be
plotted by the ``plothist`` program. See ``plothist --help`` for more
information.
-----------------------------------------------------------------------------
Parallelization
-----------------------------------------------------------------------------
This tool supports parallelized binning, including reading of input data.
Parallel processing is the default. For simple cases (reading pre-computed
input data, modest numbers of segments), serial processing (--serial) may be
more efficient.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
# Parallel processing by default (this is not actually necessary, but it is
# informative!)
self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
# These are used throughout
self.progress = ProgressIndicatorComponent()
self.default_kinetics_file = 'kintrace.h5'
self.kinetics_filename = None
self.kinetics_file = None # Kinavg file
self.istate = None
self.fstate = None
# Duration and weight dsspecs
self.duration_dsspec = None
self.wt_dsspec = None
self.binspec = None
self.output_filename = None
self.output_file = None
# These are used during histogram generation only
self.iter_start = None
self.iter_stop = None
self.ndim = None
# self.ntimepoints = None
self.dset_dtype = None
self.binbounds = None # bin boundaries for each dimension
self.midpoints = None # bin midpoints for each dimension
self.data_range = None # data range for each dimension, as the pairs (min,max)
self.ignore_out_of_range = False
self.compress_output = False
def add_args(self, parser):
parser.add_argument(
'-b',
'--bins',
dest='bins',
metavar='BINEXPR',
default='100',
help='''Use BINEXPR for bins. This may be an integer, which will be used for each
dimension of the progress coordinate; a list of integers (formatted as [n1,n2,...])
which will use n1 bins for the first dimension, n2 for the second dimension, and so on;
or a list of lists of boundaries (formatted as [[a1, a2,...], [b1, b2,...],... ]), which
will use [a1, a2,...] as bin boundaries for the first dimension, [b1, b2,...] as bin boundaries
for the second dimension, and so on. (Default: 100 bins in each dimension.)''',
)
parser.add_argument(
'-C',
'--compress',
action='store_true',
help='''Compress histograms. May make storage of higher-dimensional histograms
more tractable, at the (possible extreme) expense of increased analysis time.
(Default: no compression.)''',
)
parser.add_argument(
'--loose',
dest='ignore_out_of_range',
action='store_true',
help='''Ignore values that do not fall within bins. (Risky, as this can make buggy bin
boundaries appear as reasonable data. Only use if you are
sure of your bin boundary specification.)''',
)
parser.add_argument('--istate', type=int, required=True, dest='istate', help='''Initial state defining transition event''')
parser.add_argument('--fstate', type=int, required=True, dest='fstate', help='''Final state defining transition event''')
itergroup = parser.add_argument_group('iteration range options')
itergroup.add_argument(
'--first-iter', default=1, dest='iter_start', type=int, help='''Iteration to begin analysis (default: 1)'''
)
itergroup.add_argument('--last-iter', dest='iter_stop', type=int, help='''Iteration to end analysis''')
iogroup = parser.add_argument_group('input/output options')
# self.default_kinetics_file will be picked up as a class attribute from the appropriate subclass
iogroup.add_argument(
'-k',
'--kinetics',
default=self.default_kinetics_file,
help='''Populations and transition rates (including evolution) are stored in KINETICS
(default: %(default)s).''',
)
iogroup.add_argument(
'-o', '--output', dest='output', default='eddist.h5', help='''Store results in OUTPUT (default: %(default)s).'''
)
self.progress.add_args(parser)
def process_args(self, args):
self.progress.process_args(args)
self.kinetics_filename = args.kinetics
self.istate = args.istate
self.fstate = args.fstate
self.kinetics_file = h5io.WESTPAH5File(self.kinetics_filename, 'r')
self.iter_start = args.iter_start
if args.iter_stop is None:
self.iter_stop = self.kinetics_file.attrs['iter_stop']
else:
self.iter_stop = args.iter_stop + 1
self.binspec = args.bins
self.output_filename = args.output
self.ignore_out_of_range = bool(args.ignore_out_of_range)
self.compress_output = args.compress or False
def go(self):
pi = self.progress.indicator
pi.operation = 'Initializing'
with pi:
self.duration = self.kinetics_file['durations'][self.iter_start - 1 : self.iter_stop - 1]
# Only select transition events from specified istate to fstate
mask = (self.duration['istate'] == self.istate) & (self.duration['fstate'] == self.fstate)
self.duration_dsspec = DurationDataset(self.kinetics_file['durations']['duration'], mask, self.iter_start)
self.wt_dsspec = DurationDataset(self.kinetics_file['durations']['weight'], mask, self.iter_start)
self.output_file = h5py.File(self.output_filename, 'w')
h5io.stamp_creator_data(self.output_file)
# Construct bin boundaries
self.construct_bins(self.parse_binspec(self.binspec))
for idim, (binbounds, midpoints) in enumerate(zip(self.binbounds, self.midpoints)):
self.output_file['binbounds_{}'.format(idim)] = binbounds
self.output_file['midpoints_{}'.format(idim)] = midpoints
# construct histogram
self.construct_histogram()
# Record iteration range
iter_range = np.arange(self.iter_start, self.iter_stop, 1, dtype=(np.min_scalar_type(self.iter_stop)))
self.output_file['n_iter'] = iter_range
self.output_file['histograms'].attrs['iter_start'] = self.iter_start
self.output_file['histograms'].attrs['iter_stop'] = self.iter_stop
self.output_file.close()
@staticmethod
def parse_binspec(binspec):
namespace = {'numpy': np, 'np': np, 'inf': float('inf')}
try:
binspec_compiled = eval(binspec, namespace)
except Exception as e:
raise ValueError('invalid bin specification: {!r}'.format(e))
else:
if log.isEnabledFor(logging.DEBUG):
log.debug('bin specs: {!r}'.format(binspec_compiled))
return binspec_compiled
def construct_bins(self, bins):
'''
Construct bins according to ``bins``, which may be:
1) A scalar integer (for that number of bins in each dimension)
2) A sequence of integers (specifying number of bins for each dimension)
3) A sequence of sequences of bin boundaries (specifying boundaries for each dimension)
Sets ``self.binbounds`` to a list of arrays of bin boundaries appropriate for passing to
fasthist.histnd, along with ``self.midpoints`` to the midpoints of the bins.
'''
if not isiterable(bins):
self._construct_bins_from_scalar(bins)
elif not isiterable(bins[0]):
self._construct_bins_from_int_seq(bins)
else:
self._construct_bins_from_bound_seqs(bins)
if log.isEnabledFor(logging.DEBUG):
log.debug('binbounds: {!r}'.format(self.binbounds))
def scan_data_shape(self):
if self.ndim is None:
dset = self.duration_dsspec
# self.ntimepoints = dset.shape[1]
# self.ndim = dset.shape[2]
self.ndim = 1
self.dset_dtype = dset.dtype
def scan_data_range(self):
'''Scan input data for range in each dimension. The number of dimensions is determined
from the shape of the progress coordinate as of self.iter_start.'''
self.progress.indicator.new_operation('Scanning for data range', self.iter_stop - self.iter_start)
self.scan_data_shape()
dset_dtype = self.dset_dtype
ndim = self.ndim
dsspec = self.duration_dsspec
try:
minval = np.finfo(dset_dtype).min
maxval = np.finfo(dset_dtype).max
except ValueError:
minval = np.iinfo(dset_dtype).min
maxval = np.iinfo(dset_dtype).max
data_range = self.data_range = [(maxval, minval) for _i in range(self.ndim)]
# futures = []
# for n_iter in xrange(self.iter_start, self.iter_stop):
# _remote_min_max(ndim, dset_dtype, n_iter, dsspec)
# futures.append(self.work_manager.submit(_remote_min_max, args=(ndim, dset_dtype, n_iter, dsspec)))
# for future in self.work_manager.as_completed(futures):
for future in self.work_manager.submit_as_completed(
((_remote_min_max, (ndim, dset_dtype, n_iter, dsspec), {}) for n_iter in range(self.iter_start, self.iter_stop)),
self.max_queue_len,
):
bounds = future.get_result(discard=True)
for idim in range(ndim):
current_min, current_max = data_range[idim]
current_min = min(current_min, bounds[idim][0])
current_max = max(current_max, bounds[idim][1])
data_range[idim] = (current_min, current_max)
self.progress.indicator.progress += 1
def _construct_bins_from_scalar(self, bins):
if self.data_range is None:
self.scan_data_range()
# print(self.data_range)
self.binbounds = []
self.midpoints = []
for idim in range(self.ndim):
lb, ub = self.data_range[idim]
# Advance just beyond the upper bound of the range, so that we catch
# the maximum in the histogram
ub *= 1.01
# lb -= 0.01
boundset = np.linspace(lb, ub, bins + 1)
midpoints = (boundset[:-1] + boundset[1:]) / 2.0
self.binbounds.append(boundset)
self.midpoints.append(midpoints)
def _construct_bins_from_int_seq(self, bins):
if self.data_range is None:
self.scan_data_range()
self.binbounds = []
self.midpoints = []
for idim in range(self.ndim):
lb, ub = self.data_range[idim]
# Advance just beyond the upper bound of the range, so that we catch
# the maximum in the histogram
ub *= 1.01
boundset = np.linspace(lb, ub, bins[idim] + 1)
midpoints = (boundset[:-1] + boundset[1:]) / 2.0
self.binbounds.append(boundset)
self.midpoints.append(midpoints)
def _construct_bins_from_bound_seqs(self, bins):
self.binbounds = []
self.midpoints = []
for boundset in bins:
boundset = np.asarray(boundset)
if (np.diff(boundset) <= 0).any():
raise ValueError('boundary set {!r} is not strictly monotonically increasing'.format(boundset))
self.binbounds.append(boundset)
self.midpoints.append((boundset[:-1] + boundset[1:]) / 2.0)
def construct_histogram(self):
'''Construct a histogram using bins previously constructed with ``construct_bins()``.
The time series of histogram values is stored in ``histograms``.
Each histogram in the time series is normalized.'''
self.scan_data_shape()
iter_count = self.iter_stop - self.iter_start
histograms_ds = self.output_file.create_dataset(
'histograms',
dtype=np.float64,
shape=((iter_count,) + tuple(len(bounds) - 1 for bounds in self.binbounds)),
compression=9 if self.compress_output else None,
)
binbounds = [np.require(boundset, self.dset_dtype, 'C') for boundset in self.binbounds]
self.progress.indicator.new_operation('Constructing histograms', self.iter_stop - self.iter_start)
task_gen = (
(_remote_bin_iter, (iiter, n_iter, self.duration_dsspec, self.wt_dsspec, 0, binbounds, self.ignore_out_of_range), {})
for (iiter, n_iter) in enumerate(range(self.iter_start, self.iter_stop))
)
# futures = set()
# for iiter, n_iter in enumerate(xrange(self.iter_start, self.iter_stop)):
# initpoint = 1 if iiter > 0 else 0
# futures.add(self.work_manager.submit(_remote_bin_iter,
# args=(iiter, n_iter, self.dsspec, self.wt_dsspec, initpoint, binbounds)))
# for future in self.work_manager.as_completed(futures):
# future = self.work_manager.wait_any(futures)
# for future in self.work_manager.submit_as_completed(task_gen, self.queue_size):
log.debug('max queue length: {!r}'.format(self.max_queue_len))
for future in self.work_manager.submit_as_completed(task_gen, self.max_queue_len):
iiter, n_iter, iter_hist = future.get_result(discard=True)
self.progress.indicator.progress += 1
# store histogram
histograms_ds[iiter] = iter_hist
del iter_hist, future
def entry_point():
WEDDist().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_fluxanl.rst | Manual | w_fluxanl command | MIT License | westpa__westpa/doc/documentation/cli/deprecated/w_fluxanl.rst | [
"westpa__westpa/src/westpa/cli/tools/w_fluxanl.py"
] | w_fluxanl
w_fluxanl calculates the probability flux of a weighted ensemble
simulation based on a pre-defined target state. It also calculates the
confidence interval of the average flux. Monte Carlo bootstrapping
techniques are used to account for autocorrelation between fluxes and/or
errors that are not normally distributed.
Overview
usage:
w_fluxanl [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[-W WEST_H5FILE] [-o OUTPUT]
[--first-iter N_ITER] [--last-iter N_ITER]
[-a ALPHA] [--autocorrel-alpha ACALPHA] [-N NSETS] [--evol] [--evol-step ESTEP]
Note: All command line arguments are optional for w_fluxanl.
Command-Line Options
See the general command-line tool reference for more information on the
general options.
Input/output options
These arguments allow the user to specify where to read input simulation
result data and where to output the calculated flux data.
Both input and output files are in hdf5 format.:
-W, --west-data file
Read simulation result data from file *file*. (**Default:** The
*hdf5* file specified in the configuration file)
-o, --output file
Store this tool's output in *file*. (**Default:** The *hdf5* file
**pcpdist.h5**)
Iteration range options
Specify the range of iterations over which to calculate the flux and its
confidence interval.:
--first-iter n_iter
Begin the calculation at iteration *n_iter* (**Default:** 1)
--last-iter n_iter
Calculate the flux's time evolution up to (and including) iteration
*n_iter* (**Default:** Last completed iteration)
Confidence interval and bootstrapping options
Specify alpha values of constructed confidence intervals.:
-a alpha
Calculate a (1 - *alpha*) confidence interval for the mean flux
(**Default:** 0.05)
--autocorrel-alpha ACalpha
Identify autocorrelation of fluxes at *ACalpha* significance level.
Note: Specifying an *ACalpha* level that is too small may result in
failure to find autocorrelation in noisy flux signals (**Default:**
Same level as *alpha*)
-N n_sets, --nsets n_sets
Use *n_sets* samples for bootstrapping (**Default:** Chosen based
on *alpha*)
--evol
Calculate the time evolution of flux confidence intervals
(**Warning:** computationally expensive calculation)
--evol-step estep
(if ``'--evol'`` specified) Calculate the time evolution of flux
confidence intervals for every *estep* iterations (**Default:** 1)
Examples
Calculate the time evolution flux every 5 iterations:
w_fluxanl --evol --evol-step 5
Calculate mean flux confidence intervals at 0.01 significance level and
calculate autocorrelations at 0.05 significance:
w_fluxanl --alpha 0.01 --autocorrel-alpha 0.05
Calculate the mean flux confidence intervals using a custom bootstrap
sample size of 500:
w_fluxanl --n-sets 500
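The per-iteration flux records that w_fluxanl averages can also be pulled out programmatically through the extract_fluxes helper in the tool's source (reproduced below). A minimal sketch, assuming the standard ``westpa.cli.tools.w_fluxanl`` import path and a run-time configuration that lets the global data manager locate the simulation's HDF5 file:
import westpa
from westpa.cli.tools.w_fluxanl import extract_fluxes

# Open the WEST HDF5 file through the global data manager, as the tools in
# this package do internally.
data_manager = westpa.rc.get_data_manager()
data_manager.open_backing(mode='r')

# extract_fluxes() maps each target label to a record array with columns
# 'n_iter', 'flux' and 'count'.
fluxes = extract_fluxes(data_manager=data_manager)
for target, entries in fluxes.items():
    print(target, entries['flux'].mean())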
| import h5py
import numpy as np
from scipy.signal import fftconvolve
from warnings import warn
import westpa
from westpa.core.data_manager import weight_dtype, n_iter_dtype, vstr_dtype
from westpa.core.we_driver import NewWeightEntry
from westpa.core import h5io
from westpa.tools import WESTTool, WESTDataReader, IterRangeSelection
from westpa.tools.dtypes import iter_block_ci_dtype as ci_dtype
import westpa.mclib as mclib
fluxentry_dtype = np.dtype([('n_iter', n_iter_dtype), ('flux', weight_dtype), ('count', np.uint)])
target_index_dtype = np.dtype(
[
('target_label', vstr_dtype),
('mean_flux', weight_dtype),
('mean_flux_ci_lb', weight_dtype),
('mean_flux_ci_ub', weight_dtype),
('mean_flux_correl_len', np.uintc),
]
)
def _extract_fluxes_fileversion_lt_7(iter_start, iter_stop, data_manager):
'''Extract fluxes from old format, where groups for iterations where recycling
occurs contain a 'recycling' table.'''
assert data_manager.we_h5file_version < 7
iter_count = iter_stop - iter_start
target_count = data_manager.get_iter_group(iter_start)['recycling'].shape[0]
fluxdata = np.zeros((iter_count,), dtype=fluxentry_dtype)
if data_manager.we_h5file_version < 5:
flux_field = 'weight'
else:
flux_field = 'flux'
fluxdata = {itarget: np.zeros((iter_count,), dtype=fluxentry_dtype) for itarget in range(target_count)}
for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
rdata = data_manager.get_iter_group(n_iter)['recycling']
for itarget in range(target_count):
fluxdata[itarget][iiter]['n_iter'] = n_iter
fluxdata[itarget][iiter]['flux'] = rdata[itarget][flux_field]
fluxdata[itarget][iiter]['count'] = rdata[itarget]['count']
del rdata
return fluxdata
def _extract_fluxes_fileversion_7(iter_start, iter_stop, data_manager):
'''Extract fluxes from HDF5 file version 7, where recycling information is
stored in the "new_weights" group of the iteration *following* recycling
events.'''
assert data_manager.we_h5file_version >= 7
iter_count = iter_stop - iter_start
iters = np.arange(iter_start, iter_stop, dtype=n_iter_dtype)
# for each target by name, collect the iterations, fluxes, and counts
# This is not the most foolproof way to do this, but it's good enough, and fast.
# The most correct way to do this is tracing trajectories,
# and warning if the boundary conditions change during the trace,
# but that's for another tool.
by_target = {}
for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
target_states = data_manager.get_target_states(n_iter)
try:
new_weight_index = data_manager.get_iter_group(n_iter + 1)['new_weights']['index']
except KeyError:
# no recycling data available
continue
for tstate in target_states:
try:
target_info = by_target[tstate.label]
except KeyError:
# State not seen before
target_info = by_target[tstate.label] = np.zeros((iter_count,), dtype=fluxentry_dtype)
# If the target happens not to exist in an iteration (for whatever reason),
# store a count of -1 as a sentinel
target_info['count'][:] = -1
target_info['n_iter'][:] = iters[:]
recycled_from_tstate = (new_weight_index['source_type'] == NewWeightEntry.NW_SOURCE_RECYCLED) & (
new_weight_index['target_state_id'] == tstate.state_id
)
recycle_count = recycled_from_tstate.sum()
target_info['count'][iiter] = recycle_count
if recycle_count:
# flux is in units of per tau
target_info['flux'][iiter] = new_weight_index[recycled_from_tstate]['weight'].sum()
del new_weight_index, target_states
# Find the last contiguous run where target is available
for target_label in by_target:
fluxdata = by_target[target_label]
by_target[target_label] = fluxdata[np.searchsorted(fluxdata['count'], [0])[0] :]
return by_target
def extract_fluxes(iter_start=None, iter_stop=None, data_manager=None):
'''Extract flux values from the WEST HDF5 file for iterations >= iter_start
and < iter_stop, optionally using another data manager instance instead of the
global one returned by ``westpa.rc.get_data_manager()``.
Returns a dictionary mapping target names (if available, target index otherwise)
to a 1-D array of type ``fluxentry_dtype``, which contains columns for iteration
number, flux, and count.
'''
data_manager = data_manager or westpa.rc.get_data_manager()
iter_start = iter_start or 1
iter_stop = iter_stop or data_manager.current_iteration
if data_manager.we_h5file_version < 7:
return _extract_fluxes_fileversion_lt_7(iter_start, iter_stop, data_manager)
else:
return _extract_fluxes_fileversion_7(iter_start, iter_stop, data_manager)
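# Illustrative sketch (not part of the original tool): once westpa.rc points at a
# WEST HDF5 file, extract_fluxes() can also be called from a script. The helper
# below is hypothetical; the printed summary is an arbitrary choice.
def _example_mean_fluxes(iter_start=1, iter_stop=None):
    fluxdata = extract_fluxes(iter_start, iter_stop)
    for target, entries in fluxdata.items():
        # 'entries' is a 1-D fluxentry_dtype array with 'n_iter', 'flux', and 'count'
        print(target, entries['flux'].mean())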
class WFluxanlTool(WESTTool):
prog = 'w_fluxanl'
description = '''\
Extract fluxes into pre-defined target states from WEST data,
average, and construct confidence intervals. Monte Carlo bootstrapping
is used to account for the correlated and possibly non-Gaussian statistical
error in flux measurements.
All non-graphical output (including that to the terminal and HDF5) assumes that
the propagation/resampling period ``tau`` is equal to unity; to obtain results
in familiar units, divide all fluxes and multiply all correlation lengths by
the true value of ``tau``.
'''
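# Worked unit-conversion example (illustrative, not in the original source): with a
# resampling period of tau = 10 ps, a reported flux of 1e-3 tau^-1 corresponds to
# 1e-3 / 10 = 1e-4 ps^-1, and a reported correlation length of 3 corresponds to
# 3 * 10 ps = 30 ps.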
output_format_version = 2
def __init__(self):
super().__init__()
self.data_reader = WESTDataReader()
self.iter_range = IterRangeSelection()
self.output_h5file = None
self.output_group = None
self.target_groups = {}
self.fluxdata = {}
self.alpha = None
self.autocorrel_alpha = None
self.n_sets = None
self.do_evol = False
self.evol_step = 1
def add_args(self, parser):
self.data_reader.add_args(parser)
self.iter_range.add_args(parser)
ogroup = parser.add_argument_group('output options')
ogroup.add_argument(
'-o',
'--output',
default='fluxanl.h5',
help='Store intermediate data and analysis results to OUTPUT (default: %(default)s).',
)
cgroup = parser.add_argument_group('calculation options')
cgroup.add_argument(
'--disable-bootstrap',
'-db',
dest='bootstrap',
action='store_const',
const=False,
help='''Disable the use of Monte Carlo block bootstrapping.''',
)
cgroup.add_argument(
'--disable-correl',
'-dc',
dest='correl',
action='store_const',
const=False,
help='''Disable the correlation analysis.''',
)
cgroup.add_argument(
'-a',
'--alpha',
type=float,
default=0.05,
help='''Calculate a (1-ALPHA) confidence interval on the average flux
(default: %(default)s)''',
)
cgroup.add_argument(
'--autocorrel-alpha',
type=float,
dest='acalpha',
metavar='ACALPHA',
help='''Evaluate autocorrelation of flux to (1-ACALPHA) significance.
Note that too small an ACALPHA will result in failure to detect autocorrelation
in a noisy flux signal. (Default: same as ALPHA.)''',
)
cgroup.add_argument(
'-N', '--nsets', type=int, help='''Use NSETS samples for bootstrapping (default: chosen based on ALPHA)'''
)
cgroup.add_argument(
'--evol',
action='store_true',
dest='do_evol',
help='''Calculate time evolution of flux confidence intervals (expensive).''',
)
cgroup.add_argument(
'--evol-step',
type=int,
default=1,
metavar='ESTEP',
help='''Calculate time evolution of flux confidence intervals every ESTEP
iterations (default: %(default)s)''',
)
def process_args(self, args):
self.data_reader.process_args(args)
self.data_reader.open()
self.iter_range.data_manager = self.data_reader
self.iter_range.process_args(args)
self.output_h5file = h5py.File(args.output, 'w')
self.alpha = args.alpha
# Disable the bootstrap or the correlation analysis.
self.mcbs_enable = args.bootstrap if args.bootstrap is not None else True
self.do_correl = args.correl if args.correl is not None else True
self.autocorrel_alpha = args.acalpha or self.alpha
self.n_sets = args.nsets or mclib.get_bssize(self.alpha)
self.do_evol = args.do_evol
self.evol_step = args.evol_step or 1
def calc_store_flux_data(self):
westpa.rc.pstatus(
'Calculating mean flux and confidence intervals for iterations [{},{})'.format(
self.iter_range.iter_start, self.iter_range.iter_stop
)
)
fluxdata = extract_fluxes(self.iter_range.iter_start, self.iter_range.iter_stop, self.data_reader)
# Create a group to store data in
output_group = h5io.create_hdf5_group(self.output_h5file, 'target_flux', replace=False, creating_program=self.prog)
self.output_group = output_group
output_group.attrs['version_code'] = self.output_format_version
self.iter_range.record_data_iter_range(output_group)
n_targets = len(fluxdata)
index = np.empty((len(fluxdata),), dtype=target_index_dtype)
avg_fluxdata = np.empty((n_targets,), dtype=ci_dtype)
for itarget, (target_label, target_fluxdata) in enumerate(fluxdata.items()):
# Create group and index entry
index[itarget]['target_label'] = str(target_label)
target_group = output_group.create_group('target_{}'.format(itarget))
self.target_groups[target_label] = target_group
# Store per-iteration values
target_group['n_iter'] = target_fluxdata['n_iter']
target_group['count'] = target_fluxdata['count']
target_group['flux'] = target_fluxdata['flux']
h5io.label_axes(target_group['flux'], ['n_iter'], units=['tau^-1'])
# Calculate flux autocorrelation
fluxes = target_fluxdata['flux']
mean_flux = fluxes.mean()
fmm = fluxes - mean_flux
acorr = fftconvolve(fmm, fmm[::-1])
acorr = acorr[len(acorr) // 2 :]
acorr /= acorr[0]
acorr_ds = target_group.create_dataset('flux_autocorrel', data=acorr)
h5io.label_axes(acorr_ds, ['lag'], ['tau'])
# Calculate overall averages and CIs
# avg, lb_ci, ub_ci, correl_len = mclib.mcbs_ci_correl(fluxes, np.mean, self.alpha, self.n_sets,
# autocorrel_alpha=self.autocorrel_alpha, subsample=np.mean)
avg, lb_ci, ub_ci, sterr, correl_len = mclib.mcbs_ci_correl(
{'dataset': fluxes},
estimator=(lambda stride, dataset: np.mean(dataset)),
alpha=self.alpha,
n_sets=self.n_sets,
autocorrel_alpha=self.autocorrel_alpha,
subsample=np.mean,
do_correl=self.do_correl,
mcbs_enable=self.mcbs_enable,
)
avg_fluxdata[itarget] = (self.iter_range.iter_start, self.iter_range.iter_stop, avg, lb_ci, ub_ci, sterr, correl_len)
westpa.rc.pstatus('target {!r}:'.format(target_label))
westpa.rc.pstatus(' correlation length = {} tau'.format(correl_len))
westpa.rc.pstatus(' mean flux and CI = {:e} ({:e},{:e}) tau^(-1)'.format(avg, lb_ci, ub_ci))
index[itarget]['mean_flux'] = avg
index[itarget]['mean_flux_ci_lb'] = lb_ci
index[itarget]['mean_flux_ci_ub'] = ub_ci
index[itarget]['mean_flux_correl_len'] = correl_len
# Write index and summary
index_ds = output_group.create_dataset('index', data=index)
index_ds.attrs['mcbs_alpha'] = self.alpha
index_ds.attrs['mcbs_autocorrel_alpha'] = self.autocorrel_alpha
index_ds.attrs['mcbs_n_sets'] = self.n_sets
self.fluxdata = fluxdata
self.output_h5file['avg_flux'] = avg_fluxdata
def calc_evol_flux(self):
westpa.rc.pstatus(
'Calculating cumulative evolution of flux confidence intervals every {} iteration(s)'.format(self.evol_step)
)
for itarget, (target_label, target_fluxdata) in enumerate(self.fluxdata.items()):
fluxes = target_fluxdata['flux']
target_group = self.target_groups[target_label]
iter_start = target_group['n_iter'][0]
iter_stop = target_group['n_iter'][-1]
iter_count = iter_stop - iter_start
n_blocks = iter_count // self.evol_step
if iter_count % self.evol_step > 0:
n_blocks += 1
cis = np.empty((n_blocks,), dtype=ci_dtype)
for iblock in range(n_blocks):
block_iter_stop = min(iter_start + (iblock + 1) * self.evol_step, iter_stop)
istop = min((iblock + 1) * self.evol_step, len(target_fluxdata['flux']))
fluxes = target_fluxdata['flux'][:istop]
# avg, ci_lb, ci_ub, correl_len = mclib.mcbs_ci_correl(fluxes, np.mean, self.alpha, self.n_sets,
# autocorrel_alpha = self.autocorrel_alpha,
# subsample=np.mean)
avg, ci_lb, ci_ub, sterr, correl_len = mclib.mcbs_ci_correl(
{'dataset': fluxes},
estimator=(lambda stride, dataset: np.mean(dataset)),
alpha=self.alpha,
n_sets=self.n_sets,
autocorrel_alpha=self.autocorrel_alpha,
subsample=np.mean,
do_correl=self.do_correl,
mcbs_enable=self.mcbs_enable,
)
cis[iblock]['iter_start'] = iter_start
cis[iblock]['iter_stop'] = block_iter_stop
cis[iblock]['expected'], cis[iblock]['ci_lbound'], cis[iblock]['ci_ubound'] = avg, ci_lb, ci_ub
cis[iblock]['corr_len'] = correl_len
cis[iblock]['sterr'] = sterr
del fluxes
cis_ds = target_group.create_dataset('flux_evolution', data=cis)
cis_ds.attrs['iter_step'] = self.evol_step
cis_ds.attrs['mcbs_alpha'] = self.alpha
cis_ds.attrs['mcbs_autocorrel_alpha'] = self.autocorrel_alpha
cis_ds.attrs['mcbs_n_sets'] = self.n_sets
def go(self):
self.calc_store_flux_data()
if self.do_evol:
self.calc_evol_flux()
def entry_point():
warn('w_fluxanl is being deprecated. Please use w_assign and w_direct instead.')
WFluxanlTool().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_fork.rst | Manual | w_fork command | MIT License | westpa__westpa/doc/documentation/cli/w_fork.rst | [
"westpa__westpa/src/westpa/cli/core/w_fork.py"
] | w_fork
usage:
w_fork [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version] [-i INPUT_H5FILE]
[-I N_ITER] [-o OUTPUT_H5FILE] [--istate-map ISTATE_MAP] [--no-headers]
Prepare a new weighted ensemble simulation from an existing one at a
particular point. A new HDF5 file is generated. In the case of
executable propagation, it is the user's responsibility to prepare the
new simulation directory appropriately, particularly making the old
simulation's restart data from the appropriate iteration available as
the new simulation's initial state data; a mapping of old simulation
segments to new simulation initial states is created, both in the new
HDF5 file and as a flat text file, to aid in this. Target states and
basis states for the new simulation are taken from those in the original
simulation.
optional arguments:
-h, --help show this help message and exit
-i INPUT_H5FILE, --input INPUT_H5FILE
Create simulation from the given INPUT_H5FILE (default: read from configuration
file).
-I N_ITER, --iteration N_ITER
Take initial distribution for new simulation from iteration N_ITER (default:
last complete iteration).
-o OUTPUT_H5FILE, --output OUTPUT_H5FILE
Save new simulation HDF5 file as OUTPUT (default: forked.h5).
--istate-map ISTATE_MAP
Write text file describing mapping of existing segments to new initial states
in ISTATE_MAP (default: istate_map.txt).
--no-headers Do not write header to ISTATE_MAP
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit | import argparse
import logging
import numpy as np
import westpa
from westpa.core.segment import Segment
from westpa.core.states import InitialState
from westpa.core.data_manager import n_iter_dtype, seg_id_dtype
log = logging.getLogger('w_fork')
def entry_point():
parser = argparse.ArgumentParser(
'w_fork',
description='''\
Prepare a new weighted ensemble simulation from an existing one at a particular
point. A new HDF5 file is generated. In the case of executable propagation,
it is the user's responsibility to prepare the new simulation directory
appropriately, particularly making the old simulation's restart data from the
appropriate iteration available as the new simulation's initial state data; a
mapping of old simulation segments to new simulation initial states is
created, both in the new HDF5 file and as a flat text file, to aid in this.
Target states and basis states for the new simulation are taken from those
in the original simulation.
''',
)
westpa.rc.add_args(parser)
parser.add_argument(
'-i',
'--input',
dest='input_h5file',
help='''Create simulation from the given INPUT_H5FILE (default: read from
configuration file).''',
)
parser.add_argument(
'-I',
'--iteration',
dest='n_iter',
type=int,
help='''Take initial distribution for new simulation from iteration N_ITER
(default: last complete iteration).''',
)
parser.add_argument(
'-o',
'--output',
dest='output_h5file',
default='forked.h5',
help='''Save new simulation HDF5 file as OUTPUT (default: %(default)s).''',
)
parser.add_argument(
'--istate-map',
default='istate_map.txt',
help='''Write text file describing mapping of existing segments to new initial
states in ISTATE_MAP (default: %(default)s).''',
)
parser.add_argument('--no-headers', action='store_true', help='''Do not write header to ISTATE_MAP''')
args = parser.parse_args()
westpa.rc.process_args(args)
# Open old HDF5 file
dm_old = westpa.rc.new_data_manager()
if args.input_h5file:
dm_old.we_h5filename = args.input_h5file
dm_old.open_backing(mode='r')
# Get iteration if necessary
n_iter = args.n_iter or dm_old.current_iteration - 1
# Create and open new HDF5 file
dm_new = westpa.rc.new_data_manager()
dm_new.we_h5filename = args.output_h5file
dm_new.prepare_backing()
dm_new.open_backing()
# Copy target states
target_states = dm_old.get_target_states(n_iter)
dm_new.save_target_states(target_states, n_iter)
# Copy basis states
basis_states = dm_old.get_basis_states(n_iter)
dm_new.create_ibstate_group(basis_states, n_iter=1)
# Transform old segments into initial states and new segments
# We produce one initial state and one corresponding
# new segment for each old segment. Further adjustment
# can be accomplished by using w_binning.
old_iter_group = dm_old.get_iter_group(n_iter)
old_index = old_iter_group['seg_index'][...]
old_pcoord_ds = old_iter_group['pcoord']
n_segments = old_pcoord_ds.shape[0]
pcoord_len = old_pcoord_ds.shape[1]
pcoord_ndim = old_pcoord_ds.shape[2]
old_final_pcoords = old_pcoord_ds[:, pcoord_len - 1, :]
istates = dm_new.create_initial_states(n_segments, n_iter=1)
segments = []
state_map_dtype = np.dtype([('old_n_iter', n_iter_dtype), ('old_seg_id', seg_id_dtype), ('new_istate_id', seg_id_dtype)])
state_map = np.empty((n_segments,), dtype=state_map_dtype)
state_map['old_n_iter'] = n_iter
for iseg, (index_row, pcoord) in enumerate(zip(old_index, old_final_pcoords)):
istate = istates[iseg]
istate.iter_created = 0
istate.iter_used = 1
istate.istate_type = InitialState.ISTATE_TYPE_RESTART
istate.istate_status = InitialState.ISTATE_STATUS_PREPARED
istate.pcoord = pcoord
segment = Segment(
n_iter=1,
seg_id=iseg,
weight=index_row['weight'],
parent_id=-(istate.state_id + 1),
wtg_parent_ids=[-(istate.state_id + 1)],
status=Segment.SEG_STATUS_PREPARED,
)
segment.pcoord = np.zeros((pcoord_len, pcoord_ndim), dtype=pcoord.dtype)
segment.pcoord[0] = pcoord
segments.append(segment)
state_map[iseg]['old_seg_id'] = iseg
state_map[iseg]['new_istate_id'] = istate.state_id
dm_new.update_initial_states(istates, n_iter=0)
dm_new.prepare_iteration(n_iter=1, segments=segments)
# Update current iteration and close both files
dm_new.current_iteration = 1
dm_new.close_backing()
dm_old.close_backing()
# Write state map
istate_map_file = open(args.istate_map, 'wt')
if not args.no_headers:
istate_map_file.write('# mapping from previous segment IDs to new initial states\n')
istate_map_file.write('# generated by w_fork\n')
istate_map_file.write('# column 0: old simulation n_iter\n')
istate_map_file.write('# column 1: old simulation seg_id\n')
istate_map_file.write('# column 2: new simulation initial state ID\n')
for row in state_map:
istate_map_file.write(
'{old_n_iter:20d} {old_seg_id:20d} {new_istate_id:20d}\n'.format(
old_n_iter=int(row['old_n_iter']), old_seg_id=int(row['old_seg_id']), new_istate_id=int(row['new_istate_id'])
)
)
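# Illustrative helper (not part of the original tool): read the istate map text
# file written above back into an integer array of
# (old_n_iter, old_seg_id, new_istate_id) rows for inspection. The default file
# name matches the tool's --istate-map default.
def _example_load_istate_map(path='istate_map.txt'):
    return np.loadtxt(path, dtype=int, ndmin=2)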
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_init.rst | Manual | w_init command | MIT License | westpa__westpa/doc/documentation/cli/w_init.rst | [
"westpa__westpa/src/westpa/cli/core/w_init.py"
] | w_init
w_init initializes the weighted ensemble simulation, creates the main
HDF5 file and prepares the first iteration.
Overview
Usage:
w_init [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--force] [--bstate-file BSTATE_FILE] [--bstate BSTATES]
[--tstate-file TSTATE_FILE] [--tstate TSTATES]
[--segs-per-state N] [--no-we] [--wm-work-manager WORK_MANAGER]
[--wm-n-workers N_WORKERS] [--wm-zmq-mode MODE]
[--wm-zmq-info INFO_FILE] [--wm-zmq-task-endpoint TASK_ENDPOINT]
[--wm-zmq-result-endpoint RESULT_ENDPOINT]
[--wm-zmq-announce-endpoint ANNOUNCE_ENDPOINT]
[--wm-zmq-heartbeat-interval INTERVAL]
[--wm-zmq-task-timeout TIMEOUT] [--wm-zmq-client-comm-mode MODE]
Initialize a new WEST simulation, creating the WEST HDF5 file and
preparing the first iteration's segments. Initial states are generated
from one or more "basis states" which are specified either in a file
specified with --bstates-from, or by one or more --bstate arguments. If
neither --bstates-from nor at least one --bstate argument is provided,
then a default basis state of probability one identified by the state ID
zero and label "basis" will be created (a warning will be printed in
this case, to remind you of this behavior, in case it is not what you
wanted). Target states for (non-equilibrium) steady-state simulations
are specified either in a file specified with --tstates-from, or by one
or more --tstate arguments. If neither --tstates-from nor at least one
--tstate argument is provided, then an equilibrium simulation (without
any sinks) will be performed.
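For example, the following invocation (the state labels and the coordinate
value are purely illustrative) defines one basis state, one target state, and
five initial segments per basis state:
w_init --bstate 'basis,1.0' --tstate 'bound,2.7' --segs-per-state 5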
Command-Line Options
See the general command-line tool reference for more information on the
general options.
State Options
--force
Overwrites any existing simulation data
--bstate BSTATES
Add the given basis state (specified as a string
'label,probability[,auxref]') to the list of basis states (after
those specified in --bstates-from, if any). This argument may be
specified more than once, in which case the given states are
appended in the order they are given on the command line.
--bstate-file BSTATE_FILE, --bstates-from BSTATE_FILE
Read basis state names, probabilities, and (optionally) data
references from BSTATE_FILE.
--tstate TSTATES
Add the given target state (specified as a string
'label,pcoord0[,pcoord1[,...]]') to the list of target states (after
those specified in the file given by --tstates-from, if any). This
argument may be specified more than once, in which case the given
states are appended in the order they appear on the command line.
--tstate-file TSTATE_FILE, --tstates-from TSTATE_FILE
Read target state names and representative progress coordinates from
TSTATE_FILE. WESTPA uses the representative progress coordinate of a target state and
converts the **entire** bin containing that progress coordinate into a
recycling sink.
--segs-per-state N
Initialize N segments from each basis state (default: 1).
--no-we, --shotgun
Do not run the weighted ensemble bin/split/merge algorithm on
newly-created segments. | import argparse
import io
import logging
import sys
import numpy as np
import westpa
from westpa.core.states import BasisState, TargetState
import westpa.work_managers as work_managers
from westpa.work_managers import make_work_manager
log = logging.getLogger('w_init')
EPS = np.finfo(np.float64).eps
def entry_point():
parser = argparse.ArgumentParser(
'w_init',
description='''\
Initialize a new WEST simulation, creating the WEST HDF5 file and preparing the first
iteration's segments.
Initial states are generated from one or more "basis states" which are specified either in a file specified with --bstates-from, or
by one or more "--bstate" arguments. If neither --bstates-from nor at least one "--bstate" argument is provided, then a default
basis state of probability one identified by the state ID zero and label "basis" will be created (a warning will be printed in this
case, to remind you of this behavior, in case it is not what you wanted).
Target states for (non-equilibrium) steady-state simulations are specified either in a file specified with --tstates-from, or
by one or more --tstate arguments. If neither --tstates-from nor at least one --tstate argument is provided, then an equilibrium
simulation (without any sinks) will be performed.
''',
)
westpa.rc.add_args(parser)
parser.add_argument('--force', dest='force', action='store_true', help='Overwrite any existing simulation data')
parser.add_argument(
'--bstate-file',
'--bstates-from',
metavar='BSTATE_FILE',
help='Read basis state names, probabilities, and (optionally) data references from BSTATE_FILE.',
)
parser.add_argument(
'--bstate',
action='append',
dest='bstates',
help='''Add the given basis state (specified as a string 'label,probability[,auxref]')
to the list of basis states (after those specified in --bstates-from, if any). This argument
may be specified more than once, in which case the given states are appended in the order
they are given on the command line.''',
)
parser.add_argument(
'--tstate-file',
'--tstates-from',
metavar='TSTATE_FILE',
help='''Read target state names and representative progress coordinates from TSTATE_FILE. WESTPA uses the
representative progress coordinate of a target state and converts the **entire** bin
containing that progress coordinate into a recycling sink.''',
)
parser.add_argument(
'--tstate',
action='append',
dest='tstates',
help='''Add the given target state (specified as a string 'label,pcoord0[,pcoord1[,...]]') to the
list of target states (after those specified in the file given by --tstates-from, if any).
This argument may be specified more than once, in which case the given states are appended
in the order they appear on the command line.''',
)
parser.add_argument(
'--sstate-file',
'--sstates-from',
metavar='SSTATE_FILE',
help='Read start state names, probabilities, and (optionally) data references from SSTATE_FILE.',
)
parser.add_argument(
'--sstate',
action='append',
dest='sstates',
help='''Add the given start state (specified as a string 'label,probability[,auxref]')
to the list of start states (after those specified in --sstates-from, if any). This argument
may be specified more than once, in which case the given states are appended in the order
they are given on the command line.''',
)
parser.add_argument(
'--segs-per-state',
type=int,
metavar='N',
default=1,
help='''Initialize N segments from each basis state (default: %(default)s).''',
)
parser.add_argument(
'--no-we',
'--shotgun',
dest='shotgun',
action='store_true',
help='''Do not run the weighted ensemble bin/split/merge algorithm on newly-created segments.''',
)
# TODO: Does this belong here or not? I like that it's parsing arguments, which is the purpose of entry_point.
# I don't necessarily like that it's setting state across different parts of the program.
work_managers.environment.add_wm_args(parser)
args = parser.parse_args()
westpa.rc.process_args(args)
work_managers.environment.process_wm_args(args)
initialize(
args.tstates,
args.tstate_file,
args.bstates,
args.bstate_file,
args.sstates,
args.sstate_file,
args.segs_per_state,
args.shotgun,
)
def initialize(tstates, tstate_file, bstates, bstate_file, sstates=None, sstate_file=None, segs_per_state=1, shotgun=False):
"""
Initialize a WESTPA simulation.
tstates : list of str
tstate_file : str
bstates : list of str
bstate_file : str
sstates : list of str
sstate_file : str
segs_per_state : int
shotgun : bool
"""
westpa.rc.work_manager = work_manager = make_work_manager()
system = westpa.rc.get_system_driver()
sim_manager = westpa.rc.get_sim_manager()
data_manager = westpa.rc.get_data_manager()
data_manager.system = system
with work_manager:
if work_manager.is_master:
# Process target states
target_states = []
if tstate_file:
target_states.extend(TargetState.states_from_file(tstate_file, system.pcoord_dtype))
if tstates:
tstates_strio = io.StringIO('\n'.join(tstates).replace(',', ' '))
target_states.extend(TargetState.states_from_file(tstates_strio, system.pcoord_dtype))
del tstates_strio
# Process basis states
basis_states = []
if bstate_file:
basis_states.extend(BasisState.states_from_file(bstate_file))
if bstates:
for bstate_str in bstates:
fields = bstate_str.split(',')
label = fields[0]
probability = float(fields[1])
try:
auxref = fields[2]
except IndexError:
auxref = None
basis_states.append(BasisState(label=label, probability=probability, auxref=auxref))
# Process the list of start states, creating a BasisState from each
start_states = []
if sstate_file:
start_states.extend(BasisState.states_from_file(sstate_file))
if sstates:
for sstate_str in sstates:
fields = sstate_str.split(',')
label = fields[0]
probability = float(fields[1])
try:
auxref = fields[2]
except IndexError:
auxref = None
start_states.append(BasisState(label=label, probability=probability, auxref=auxref))
if not basis_states:
log.error('At least one basis state is required')
sys.exit(3)
# Check that the total probability of basis states adds to one
bstate_prob, sstate_prob = (
sum(bstate.probability for bstate in basis_states),
sum(sstate.probability for sstate in start_states),
)
# tprob = sum(bstate.probability for bstate in basis_states)
tprob = bstate_prob + sstate_prob
if abs(1.0 - tprob) > len(basis_states) * EPS:
pscale = 1 / tprob
log.warning(
'Basis state probabilities do not add to unity (basis: {:.2f}, start states: {:.2f}); rescaling by {:g}. If using start states, some rescaling is normal.'.format(
bstate_prob, sstate_prob, pscale
)
)
for bstate in basis_states:
bstate.probability *= pscale
for sstate in start_states:
sstate.probability *= pscale
# Prepare simulation
sim_manager.initialize_simulation(
basis_states=basis_states,
target_states=target_states,
start_states=start_states,
segs_per_state=segs_per_state,
suppress_we=shotgun,
)
else:
work_manager.run()
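# Illustrative sketch (not part of w_init): the same initialization can be driven
# from a script once westpa.rc has processed a run-time configuration. The state
# labels and values below are hypothetical and follow the 'label,probability' and
# 'label,pcoord0[,pcoord1[,...]]' string formats accepted by --bstate and --tstate.
def _example_initialize_from_script():
    initialize(
        tstates=['bound,2.7'],
        tstate_file=None,
        bstates=['basis,1.0'],
        bstate_file=None,
        segs_per_state=5,
        shotgun=False,
    )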
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_ipa.rst | Manual | w_ipa command | MIT License | westpa__westpa/doc/documentation/cli/w_ipa.rst | [
"westpa__westpa/src/westpa/cli/tools/w_ipa.py"
] | w_ipa
w_ipa is a (beta) WESTPA tool that automates analysis using analysis
schemes and enables interactive analysis of WESTPA simulation data. The
tool can do a variety of different types of analysis, including the
following:
* Calculate fluxes and rate constants
* Adjust and use alternate state definitions
* Trace trajectory segments, including statistical weights, position along
the progress coordinate, and other auxiliary data
* Plot all of the above in the terminal!
If you are using w_ipa for automated kinetics analysis, keep in mind that
w_ipa runs w_assign and w_direct using the scheme designated in your
west.cfg file. For more diverse kinetics analysis options, consider running
w_assign and w_direct manually. This can be useful if you'd like to use
auxiliary coordinates that aren't your progress coordinate, in one or two
dimensions.
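As a brief illustration (the walker seg_id below is arbitrary), an interactive
session with the w object the tool provides might look like:
w.iteration = 50      # move to iteration 50
w.bin_labels          # list the bins of the current analysis scheme
trace = w.trace(0)    # trace walker 0 back through its history (can be slow)
trace['weights']      # statistical weights along the traced path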
usage:
w_ipa [-h] [-r RCFILE] [--quiet] [--verbose] [--version] [--max-queue-length MAX_QUEUE_LENGTH]
[-W WEST_H5FILE] [--analysis-only] [--reanalyze] [--ignore-hash] [--debug] [--terminal]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
optional arguments:
-h, --help show this help message and exit
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default:
west.cfg)
--quiet emit only essential information
--verbose emit extra information
--version show program's version number and exit
parallelization options:
--max-queue-length MAX_QUEUE_LENGTH
Maximum number of tasks that can be queued. Useful to limit RAM use for tasks that
have very large requests/response. Default: no limit.
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5
file specified in west.cfg).
runtime options:
--analysis-only, -ao Use this flag to run the analysis and return to the terminal.
--reanalyze, -ra Use this flag to delete the existing files and reanalyze.
--ignore-hash, -ih Ignore hash and don't regenerate files.
--debug, -d Debug output largely intended for development.
--terminal, -t Plot output in terminal.
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
use the given work manager for parallel task distribution. Available work managers
are ('serial', 'threads', 'processes', 'zmq'); default is 'processes'
--n-workers N_WORKERS
Use up to N_WORKERS on this host, for work managers which support this option. Use
0 for a dedicated server. (Ignored by work managers which do not support this
option.)
options for ZeroMQ ("zmq") work manager (master or node):
--zmq-mode MODE Operate as a master (server) or a node (workers/client). "server" is a deprecated
synonym for "master" and "client" is a deprecated synonym for "node".
--zmq-comm-mode COMM_MODE
Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets for
communication within a node. IPC (the default) may be more efficient but is not
available on (exceptionally rare) systems without node-local storage (e.g. /tmp);
on such systems, TCP may be used instead.
--zmq-write-host-info INFO_FILE
Store hostname and port information needed to connect to this instance in
INFO_FILE. This allows the master and nodes assisting in coordinating the
communication of other nodes to choose ports randomly. Downstream nodes read this
file with --zmq-read-host-info and thus know where and how to connect.
--zmq-read-host-info INFO_FILE
Read hostname and port information needed to connect to the master (or other
coordinating node) from INFO_FILE. This allows the master and nodes assisting in
coordinating the communication of other nodes to choose ports randomly, writing
that information with --zmq-write-host-info for this instance to read.
--zmq-upstream-rr-endpoint ENDPOINT
ZeroMQ endpoint to which to send request/response (task and result) traffic toward
the master.
--zmq-upstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to receive announcement (heartbeat and shutdown
notification) traffic from the master.
--zmq-downstream-rr-endpoint ENDPOINT
ZeroMQ endpoint on which to listen for request/response (task and result) traffic
from subsidiary workers.
--zmq-downstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to send announcement (heartbeat and shutdown
notification) traffic toward workers.
--zmq-master-heartbeat MASTER_HEARTBEAT
Every MASTER_HEARTBEAT seconds, the master announces its presence to workers.
--zmq-worker-heartbeat WORKER_HEARTBEAT
Every WORKER_HEARTBEAT seconds, workers announce their presence to the master.
--zmq-timeout-factor FACTOR
Scaling factor for heartbeat timeouts. If the master doesn't hear from a worker in
WORKER_HEARTBEAT*FACTOR, the worker is assumed to have crashed. If a worker
doesn't hear from the master in MASTER_HEARTBEAT*FACTOR seconds, the master is
assumed to have crashed. Both cases result in shutdown.
--zmq-startup-timeout STARTUP_TIMEOUT
Amount of time (in seconds) to wait for communication between the master and at
least one worker. This may need to be changed on very large, heavily-loaded
computer systems that start all processes simultaneously.
--zmq-shutdown-timeout SHUTDOWN_TIMEOUT
Amount of time (in seconds) to wait for workers to shut down. | import base64
import codecs
import hashlib
import os
import warnings
import numpy as np
import westpa
from westpa.core import h5io
from westpa.cli.tools import w_assign, w_direct, w_reweight
from westpa.tools import WESTParallelTool, WESTDataReader, ProgressIndicatorComponent, Plotter
from westpa.tools import WIPIDataset, __get_data_for_iteration__, WIPIScheme
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=ImportWarning)
warnings.filterwarnings('ignore')
class WIPI(WESTParallelTool):
'''
Welcome to w_ipa (WESTPA Interactive Python Analysis)!
From here, you can run traces, look at weights, progress coordinates, etc.
This is considered a 'stateful' tool; that is, the data you are pulling is always pulled
from the current analysis scheme and iteration.
By default, the first analysis scheme in west.cfg is used, and you are set at iteration 1.
ALL PROPERTIES ARE ACCESSED VIA w or west
To see the current iteration, try:
w.iteration
OR
west.iteration
to set it, simply plug in a new value.
w.iteration = 100
To change/list the current analysis schemes:
w.list_schemes
w.scheme = OUTPUT FROM w.list_schemes
To see the states and bins defined in the current analysis scheme:
w.states
w.bin_labels
All information about the current iteration is available in an object called 'current':
w.current
walkers, summary, states, seg_id, weights, parents, kinavg, pcoord, bins, populations, and auxdata, if it exists.
In addition, the function w.trace(seg_id) will run a trace over a seg_id in the current iteration and return a dictionary
containing all pertinent information about that seg_id's history. It's best to store this, as the trace can be expensive.
Run help on any function or property for more information!
Happy analyzing!
'''
def __init__(self):
super().__init__()
self.data_reader = WESTDataReader()
self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
self.progress = ProgressIndicatorComponent()
self._iter = 1
self.config_required = True
self.version = "1.0B"
# Set to matplotlib if you want that. But why would you?
# Well, whatever, we'll just set it to that for now.
self.interface = 'matplotlib'
self._scheme = None
global iteration
def add_args(self, parser):
self.progress.add_args(parser)
self.data_reader.add_args(parser)
rgroup = parser.add_argument_group('runtime options')
rgroup.add_argument(
'--analysis-only',
'-ao',
dest='analysis_mode',
action='store_true',
help='''Use this flag to run the analysis and return to the terminal.''',
)
rgroup.add_argument(
'--reanalyze',
'-ra',
dest='reanalyze',
action='store_true',
help='''Use this flag to delete the existing files and reanalyze.''',
)
rgroup.add_argument(
'--ignore-hash', '-ih', dest='ignore_hash', action='store_true', help='''Ignore hash and don't regenerate files.'''
)
rgroup.add_argument(
'--debug', '-d', dest='debug_mode', action='store_true', help='''Debug output largely intended for development.'''
)
rgroup.add_argument('--terminal', '-t', dest='plotting', action='store_true', help='''Plot output in terminal.''')
# There is almost certainly a better way to handle this, but we'll sort that later.
import argparse
rgroup.add_argument('--f', '-f', dest='extra', default='blah', help=argparse.SUPPRESS)
parser.set_defaults(compression=True)
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
with self.data_reader:
self.niters = self.data_reader.current_iteration - 1
self.__config = westpa.rc.config
self.__settings = self.__config['west']['analysis']
for ischeme, scheme in enumerate(self.__settings['analysis_schemes']):
if (
self.__settings['analysis_schemes'][scheme]['enabled'] is True
or self.__settings['analysis_schemes'][scheme]['enabled'] is None
):
self.scheme = scheme
self.data_args = args
self.analysis_mode = args.analysis_mode
self.reanalyze = args.reanalyze
self.ignore_hash = args.ignore_hash
self.debug_mode = args.debug_mode
if args.plotting:
self.interface = 'text'
def hash_args(self, args, extra=None, path=None):
'''Create unique hash stamp to determine if arguments/file is different from before.'''
'''Combine with iteration to know whether or not file needs updating.'''
# Why are we not loading this functionality into the individual tools?
# While it may certainly be useful to store arguments (and we may well do that),
# it's rather complex and nasty to deal with pickling and hashing arguments through
# the various namespaces.
# In addition, it's unlikely that the functionality is desired at the individual tool level,
# since we'll always just rewrite a file when we call the function.
# return hashlib.md5(pickle.dumps([args, extra])).hexdigest()
# We don't care about the path, so we'll remove it.
# Probably a better way to do this, but who cares.
cargs = list(args)
for iarg, arg in enumerate(cargs):
if path in arg:
cargs[iarg] = arg.replace(path, '').replace('/', '')
if arg == '--disable-averages':
cargs.remove('--disable-averages')
to_hash = cargs + [extra]
# print(args)
# print(to_hash)
# print(str(to_hash).encode('base64'))
if self.debug_mode:
for iarg, arg in enumerate(to_hash):
if not isinstance(arg, list):
print('arg {num:02d} -- {arg:<20}'.format(num=iarg, arg=arg))
else:
for il, l in enumerate(arg):
print('arg {num:02d} -- {arg:<20}'.format(num=il + iarg, arg=l))
# print('args: {}'.format(to_hash))
# This SHOULD produce the same output, maybe? That would be nice, anyway.
# But we'll need to test it more.
return hashlib.md5(base64.b64encode(str(to_hash).encode())).hexdigest()
def stamp_hash(self, h5file_name, new_hash):
'''Loads a file, stamps it, and returns the opened file in read only'''
h5file = h5io.WESTPAH5File(h5file_name, 'r+')
h5file.attrs['arg_hash'] = new_hash
h5file.close()
h5file = h5io.WESTPAH5File(h5file_name, 'r')
return h5file
def analysis_structure(self):
'''
Run automatically on startup. Parses through the configuration file, and loads up all the data files from the different
analysis schematics. If they don't exist, it creates them automatically by hooking in to existing analysis routines
and going from there.
It does this by calling in the make_parser_and_process function for w_{assign,reweight,direct} using a custom built list
of args. The user can specify everything in the configuration file that would have been specified on the command line.
For instance, were one to call w_direct as follows:
w_direct --evolution cumulative --step-iter 1 --disable-correl
the west.cfg would look as follows:
west:
analysis:
w_direct:
evolution: cumulative
step_iter: 1
extra: ['disable-correl']
Alternatively, if one wishes to use the same options for both w_direct and w_reweight, the key 'w_direct' can be replaced
with 'kinetics'.
'''
# Make sure everything exists.
try:
os.mkdir(self.__settings['directory'])
except Exception:
pass
# Now, check to see whether they exist, and then load them.
self.__analysis_schemes__ = {}
# We really need to implement some sort of default behavior if an analysis scheme isn't set.
# Right now, we just crash. That isn't really graceful.
for scheme in self.__settings['analysis_schemes']:
if self.__settings['analysis_schemes'][scheme]['enabled']:
if self.work_manager.running is False:
self.work_manager.startup()
path = os.path.join(os.getcwd(), self.__settings['directory'], scheme)
# if 'postanalysis' in self.__settings['analysis_schemes'][scheme] and 'postanalysis' in self.__settings['postanalysis']:
# Should clean this up. But it uses the default global setting if a by-scheme one isn't set.
if 'postanalysis' in self.__settings:
if 'postanalysis' in self.__settings['analysis_schemes'][scheme]:
pass
else:
self.__settings['analysis_schemes'][scheme]['postanalysis'] = self.__settings['postanalysis']
try:
os.mkdir(path)
except Exception:
pass
self.__analysis_schemes__[scheme] = {}
try:
if (
self.__settings['analysis_schemes'][scheme]['postanalysis'] is True
or self.__settings['postanalysis'] is True
):
analysis_files = ['assign', 'direct', 'reweight']
else:
analysis_files = ['assign', 'direct']
except Exception:
analysis_files = ['assign', 'direct']
self.__settings['analysis_schemes'][scheme]['postanalysis'] = False
reanalyze_kinetics = False
assign_hash = None
for name in analysis_files:
arg_hash = None
if self.reanalyze is True:
reanalyze_kinetics = True
try:
os.remove(os.path.join(path, '{}.h5'.format(name)))
except Exception:
pass
else:
try:
# Try to load the hash. If we fail to load the hash or the file, we need to reload.
# if self.reanalyze == True:
# raise ValueError('Reanalyze set to true.')
self.__analysis_schemes__[scheme][name] = h5io.WESTPAH5File(
os.path.join(path, '{}.h5'.format(name)), 'r'
)
arg_hash = self.__analysis_schemes__[scheme][name].attrs['arg_hash']
if name == 'assign':
assign_hash = arg_hash
except Exception:
pass
# We shouldn't rely on this.
# self.reanalyze = True
if True:
if name == 'assign':
assign = w_assign.WAssign()
w_assign_config = {'output': os.path.join(path, '{}.h5'.format(name))}
try:
w_assign_config.update(self.__settings['w_assign'])
except Exception:
pass
try:
w_assign_config.update(self.__settings['analysis_schemes'][scheme]['w_assign'])
except Exception:
pass
args = []
for key, value in w_assign_config.items():
if key != 'extra':
args.append(str('--') + str(key).replace('_', '-'))
args.append(str(value))
# This is for stuff like disabling correlation analysis, etc.
if 'extra' in list(w_assign_config.keys()):
# We're sorting to ensure that the order doesn't matter.
for value in sorted(w_assign_config['extra']):
args.append(str('--') + str(value).replace('_', '-'))
# We're just calling the built in function.
# This is a lot cleaner than what we had in before, and far more workable.
args.append('--config-from-file')
args.append('--scheme-name')
args.append('{}'.format(scheme))
# Why are we calling this if we're not sure we're remaking the file?
# We need to load up the bin mapper and states and see if they're the same.
assign.make_parser_and_process(args=args)
import pickle
# new_hash = self.hash_args(args=args, path=path, extra=[self.niters, pickle.dumps(assign.binning.mapper), assign.states])
# We need to encode it properly to ensure that some OS specific thing doesn't kill us. Same goes for the args, ultimately.
# Mostly, we just need to ensure that we're consistent.
new_hash = self.hash_args(
args=args,
path=path,
extra=[
int(self.niters),
codecs.encode(pickle.dumps(assign.binning.mapper), "base64"),
base64.b64encode(str(assign.states).encode()),
],
)
# Let's check the hash. If the hash is the same, we don't need to reload.
if self.debug_mode is True:
print('{:<10}: old hash, new hash -- {}, {}'.format(name, arg_hash, new_hash))
if self.ignore_hash is False and (arg_hash != new_hash or self.reanalyze is True):
# If the hashes are different, or we need to reanalyze, delete the file.
try:
os.remove(os.path.join(path, '{}.h5'.format(name)))
except Exception:
pass
print('Reanalyzing file {}.h5 for scheme {}.'.format(name, scheme))
# reanalyze_kinetics = True
# We want to use the work manager we have here. Otherwise, just let the tool sort out what it needs, honestly.
assign.work_manager = self.work_manager
assign.go()
assign.data_reader.close()
# Stamp w/ hash, then reload as read only.
self.__analysis_schemes__[scheme][name] = self.stamp_hash(
os.path.join(path, '{}.h5'.format(name)), new_hash
)
del assign
# Update the assignment hash.
assign_hash = new_hash
# Since these are all contained within one tool, now, we want it to just... load everything.
if name == 'direct' or name == 'reweight':
if name == 'direct':
analysis = w_direct.WDirect()
if name == 'reweight':
analysis = w_reweight.WReweight()
analysis_config = {
'assignments': os.path.join(path, '{}.h5'.format('assign')),
'output': os.path.join(path, '{}.h5'.format(name)),
'kinetics': os.path.join(path, '{}.h5'.format(name)),
}
# Pull from general analysis options, then general SPECIFIC options for each analysis,
# then general options for that analysis scheme, then specific options for the analysis type in the scheme.
try:
analysis_config.update(self.__settings['kinetics'])
except Exception:
pass
try:
analysis_config.update(self.__settings['w_{}'.format(name)])
except Exception:
pass
try:
analysis_config.update(self.__settings['analysis_schemes'][scheme]['kinetics'])
except Exception:
pass
try:
analysis_config.update(self.__settings['analysis_schemes'][scheme]['w_{}'.format(name)])
except Exception:
pass
# We're pulling in a default set of arguments, then updating them with arguments from the west.cfg file, if appropriate, after setting the appropriate command
# Then, we call the magic function'make_parser_and_process' with the arguments we've pulled in.
# The tool has no real idea it's being called outside of its actual function, and we're good to go.
args = ['all']
for key, value in analysis_config.items():
if key != 'extra':
args.append(str('--') + str(key).replace('_', '-'))
args.append(str(value))
# This is for stuff like disabling correlation analysis, etc.
if 'extra' in list(analysis_config.keys()):
for value in sorted(analysis_config['extra']):
args.append(str('--') + str(value).replace('_', '-'))
# We want to not display the averages, so...
args.append('--disable-averages')
new_hash = self.hash_args(args=args, path=path, extra=[int(self.niters), assign_hash])
# if arg_hash!= new_hash or self.reanalyze == True or reanalyze_kinetics == True:
if self.debug_mode is True:
print('{:<10}: old hash, new hash -- {}, {}'.format(name, arg_hash, new_hash))
if self.ignore_hash is False and (arg_hash != new_hash or reanalyze_kinetics is True):
try:
os.remove(os.path.join(path, '{}.h5'.format(name)))
except Exception:
pass
print('Reanalyzing file {}.h5 for scheme {}.'.format(name, scheme))
analysis.make_parser_and_process(args=args)
# We want to hook into the existing work manager.
analysis.work_manager = self.work_manager
analysis.go()
# Open!
self.__analysis_schemes__[scheme][name] = self.stamp_hash(
os.path.join(path, '{}.h5'.format(name)), new_hash
)
del analysis
# Make sure this doesn't get too far out, here. We need to keep it alive as long as we're actually analyzing things.
# self.work_manager.shutdown()
print("")
print("Complete!")
@property
def assign(self):
return self.__analysis_schemes__[str(self.scheme)]['assign']
@property
def direct(self):
"""
The output from w_kinavg.py from the current scheme.
"""
return self.__analysis_schemes__[str(self.scheme)]['direct']
@property
def state_labels(self):
print("State labels and definitions!")
for istate, state in enumerate(self.assign['state_labels']):
print('{}: {}'.format(istate, state))
print('{}: {}'.format(istate + 1, 'Unknown'))
@property
def bin_labels(self):
print("Bin definitions! ")
for istate, state in enumerate(self.assign['bin_labels']):
print('{}: {}'.format(istate, state))
@property
def west(self):
return self.data_reader.data_manager.we_h5file
@property
def reweight(self):
if self.__settings['analysis_schemes'][str(self.scheme)]['postanalysis'] is True:
return self.__analysis_schemes__[str(self.scheme)]['reweight']
else:
value = "This sort of analysis has not been enabled."
current = {
'bin_prob_evolution': value,
'color_prob_evolution': value,
'conditional_flux_evolution': value,
'rate_evolution': value,
'state_labels': value,
'state_prob_evolution': value,
}
current.update({'bin_populations': value, 'iterations': value})
return current
@property
def scheme(self):
'''
Returns and sets what scheme is currently in use.
To see what schemes are available, run:
w.list_schemes
'''
# Let's do this a few different ways.
# We want to return things about the DIFFERENT schemes, if possible.
if self._scheme is None:
self._scheme = WIPIScheme(
scheme=self.__analysis_schemes__, name=self._schemename, parent=self, settings=self.__settings
)
# This just ensures that when we call it, it's clean.
self._scheme.name = None
return self._scheme
@scheme.setter
def scheme(self, scheme):
self._future = None
self._current = None
self._past = None
if scheme in self.__settings['analysis_schemes']:
pass
else:
for ischeme, schemename in enumerate(self.__settings['analysis_schemes']):
if ischeme == scheme:
scheme = schemename
if (
self.__settings['analysis_schemes'][scheme]['enabled'] is True
or self.__settings['analysis_schemes'][scheme]['enabled'] is None
):
self._schemename = scheme
else:
print("Scheme cannot be changed to scheme: {}; it is not enabled!".format(scheme))
@property
def list_schemes(self):
'''
Lists what schemes are configured in west.cfg file.
Schemes should be structured as follows, in west.cfg:
west:
system:
analysis:
directory: analysis
analysis_schemes:
scheme.1:
enabled: True
states:
- label: unbound
coords: [[7.0]]
- label: bound
coords: [[2.7]]
bins:
- type: RectilinearBinMapper
boundaries: [[0.0, 2.80, 7, 10000]]
'''
# print("The following schemes are available:")
# print("")
# for ischeme, scheme in enumerate(self.__settings['analysis_schemes']):
# print('{}. Scheme: {}'.format(ischeme, scheme))
# print("")
# print("Set via name, or via the index listed.")
# print("")
# print("Current scheme: {}".format(self.scheme))
self._scheme.list_schemes
@property
def iteration(self):
'''
Returns/sets the current iteration.
'''
# print("The current iteration is {}".format(self._iter))
return self._iter
@iteration.setter
def iteration(self, value):
print("Setting iteration to iter {}.".format(value))
if value <= 0:
print("Iteration must begin at 1.")
value = 1
if value > self.niters:
print("Cannot go beyond {} iterations!".format(self.niters))
print("Setting to {}".format(self.niters))
value = self.niters
# We want to trigger a rebuild on our current/past/future bits.
# The scheme should automatically reset to the proper iteration, but
# future needs to be manually triggered.
self._iter = value
self._future = None
return self._iter
@property
def current(self):
'''
The current iteration. See help for __get_data_for_iteration__
'''
return self.scheme[self.scheme.scheme].current
@property
def past(self):
'''
The previous iteration. See help for __get_data_for_iteration__
'''
return self.scheme[self.scheme.scheme].past
def trace(self, seg_id):
'''
Runs a trace on a seg_id within the current iteration, all the way back to the beginning,
returning a dictionary containing all interesting information:
seg_id, pcoord, states, bins, weights, iteration, auxdata (optional)
sorted in chronological order.
Call with a seg_id.
'''
if seg_id >= self.current.walkers:
print("Walker seg_id # {} is beyond the max count of {} walkers.".format(seg_id, self.current.walkers))
return 1
pi = self.progress.indicator
with pi:
pi.new_operation('Tracing scheme:iter:seg_id {}:{}:{}'.format(self.scheme, self.iteration, seg_id), self.iteration)
current = {'seg_id': [], 'pcoord': [], 'states': [], 'weights': [], 'iteration': [], 'bins': []}
keys = []
try:
current['auxdata'] = {}
for key in list(self.current['auxdata'].keys()):
current['auxdata'][key] = []
key = []
except Exception:
pass
for iter in reversed(list(range(1, self.iteration + 1))):
iter_group = self.data_reader.get_iter_group(iter)
current['pcoord'].append(iter_group['pcoord'][seg_id, :, :])
current['states'].append(self.assign['trajlabels'][iter - 1, seg_id, :])
current['bins'].append(self.assign['assignments'][iter - 1, seg_id, :])
current['seg_id'].append(seg_id)
current['weights'].append(iter_group['seg_index']['weight'][seg_id])
current['iteration'].append(iter)
try:
for key in keys:
current['auxdata'][key].append(iter_group['auxdata'][key][seg_id])
except Exception:
pass
seg_id = iter_group['seg_index']['parent_id'][seg_id]
if seg_id < 0:
# Necessary for steady state simulations. This means they started in that iteration.
break
pi.progress += 1
current['seg_id'] = list(reversed(current['seg_id']))
current['iteration'] = list(reversed(current['iteration']))
current['states'] = np.concatenate(np.array(list(reversed(current['states']))))
current['bins'] = np.concatenate(np.array(list(reversed(current['bins']))))
current['weights'] = np.array(list(reversed(current['weights'])))
current['pcoord'] = np.concatenate(np.array(list(reversed(current['pcoord']))))
try:
for key in keys:
current['auxdata'][key] = np.concatenate(np.array(list(reversed(current['auxdata'][key]))))
except Exception:
pass
current['state_labels'] = self.assign['state_labels']
for i in ['pcoord', 'states', 'bins', 'weights']:
current[i] = WIPIDataset(raw=current[i], key=i)
if i == 'weights':
current[i].plotter = Plotter(
np.log10(current[i].raw), str('log10 of ' + str(i)), iteration=current[i].raw.shape[0], interface=self.interface
)
else:
current[i].plotter = Plotter(current[i].raw, i, iteration=current[i].raw.shape[0], interface=self.interface)
current[i].plot = current[i].plotter.plot
return WIPIDataset(raw=current, key=seg_id)
@property
def future(self, value=None):
'''
Similar to current/past, but keyed differently and returns different datasets.
See help for Future.
'''
if self._future is None:
self._future = self.Future(raw=self.__get_children__(), key=None)
self._future.iteration = self.iteration + 1
return self._future
class Future(WIPIDataset):
# This isn't a real fancy one.
def __getitem__(self, value):
if isinstance(value, str):
print(list(self.__dict__.keys()))
try:
return self.__dict__['raw'][value]
except Exception:
print('{} is not a valid data structure.'.format(value))
elif isinstance(value, int) or isinstance(value, np.int64):
# Otherwise, we assume they're trying to index for a seg_id.
# if value < self.parent.walkers:
current = {}
current['pcoord'] = self.__dict__['raw']['pcoord'][value]
current['states'] = self.__dict__['raw']['states'][value]
current['bins'] = self.__dict__['raw']['bins'][value]
current['parents'] = self.__dict__['raw']['parents'][value]
current['seg_id'] = self.__dict__['raw']['seg_id'][value]
current['weights'] = self.__dict__['raw']['weights'][value]
try:
current['auxdata'] = {}
for key in list(self.__dict__['raw']['auxdata'].keys()):
current['auxdata'][key] = self.__dict__['raw']['auxdata'][key][value]
except Exception:
pass
current = WIPIDataset(current, 'Segment {} in Iter {}'.format(value, self.iteration))
return current
def __get_children__(self):
'''
Returns all information about the children of a given walker in the current iteration.
Used to generate and create the future object, if necessary.
'''
if self.iteration == self.niters:
print("Currently at iteration {}, which is the max. There are no children!".format(self.iteration))
return 0
iter_data = __get_data_for_iteration__(value=self.iteration + 1, parent=self)
future = {
'weights': [],
'pcoord': [],
'parents': [],
'summary': iter_data['summary'],
'seg_id': [],
'walkers': iter_data['walkers'],
'states': [],
'bins': [],
}
for seg_id in range(0, self.current.walkers):
children = np.where(iter_data['parents'] == seg_id)[0]
if len(children) == 0:
error = "No children for seg_id {}.".format(seg_id)
future['weights'].append(error)
future['pcoord'].append(error)
future['parents'].append(error)
future['seg_id'].append(error)
future['states'].append(error)
future['bins'].append(error)
else:
# Now, we're gonna put them in the thing.
value = self.iteration + 1
future['weights'].append(iter_data['weights'][children])
future['pcoord'].append(iter_data['pcoord'][...][children, :, :])
try:
aux_data = iter_data['auxdata'][...][children, :, :]
try:
future['aux_data'].append(aux_data)
except Exception:
future['aux_data'] = aux_data
except Exception:
pass
future['parents'].append(iter_data['parents'][children])
future['seg_id'].append(iter_data['seg_id'][children])
future['states'].append(self.assign['trajlabels'][value - 1, children, :])
future['bins'].append(self.assign['assignments'][value - 1, children, :])
return future
def go(self):
'''
Function automatically called by main() when launched via the command line interface.
Generally, call main, not this function.
'''
w = self
print("")
print("Welcome to w_ipa (WESTPA Interactive Python Analysis) v. {}!".format(w.version))
print("Run w.introduction for a more thorough introduction, or w.help to see a list of options.")
print("Running analysis & loading files.")
self.data_reader.open()
self.analysis_structure()
# Seems to be consistent with other tools, such as w_assign. For setting the iterations.
self.data_reader.open()
self.niters = self.data_reader.current_iteration - 1
self.iteration = self.niters
try:
print('Your current scheme, system and iteration are : {}, {}, {}'.format(w.scheme, os.getcwd(), w.iteration))
except Exception:
pass
@property
def introduction(self):
'''
Just spits out an introduction, in case someone doesn't call help.
'''
help_string = '''
Call as a dictionary item or a.attribute:
w.past, w.current, w.future:
{current}
Raw schemes can be accessed as follows:
w.scheme.{scheme_keys}
and contain mostly the same datasets associated with w.
The following give raw access to the h5 files associated with the current scheme
w.west
w.assign
w.direct
w.reweight
OTHER:
{w}
'''.format(
current=self.__format_keys__(self.current.__dir__(), split=' ', offset=12),
scheme_keys=self.__format_keys__(list(self._scheme.raw.keys())),
w=self.__format_keys__(self.__dir__(), offset=8, max_length=0, split='', prepend='w.'),
)
print(help_string)
# Just a little function to be used with the introduction.
def __format_keys__(self, keys, split='/', offset=0, max_length=80, prepend=''):
rtn = ''
run_length = 0
for key in keys:
rtn += prepend + str(key) + split
run_length += len(str(key))
if run_length >= max_length:
run_length = offset
rtn += '\n' + ' ' * offset
if rtn[-1] == split:
return rtn[:-1]
else:
return rtn
@property
def help(self):
'''Just a minor function to call help on itself. Only in here to really help someone get help.'''
help(self)
def _repr_pretty_(self, p, cycle):
self.introduction
return " "
def __dir__(self):
return_list = ['past', 'current', 'future']
# For the moment, don't expose direct, reweight, or assign, as these are scheme dependent files.
# They do exist, and always link to the current scheme, however.
return_list += ['iteration', 'niters', 'scheme', 'list_schemes', 'bin_labels', 'state_labels', 'west', 'trace']
return sorted(set(return_list))
def entry_point():
west = WIPI()
w = west
# We're gonna print some defaults.
w.main()
if w.analysis_mode is False:
from IPython import embed
import IPython
# We're using this to set magic commands.
# Mostly, we're using it to allow tab completion of objects stored in dictionaries.
try:
# Worked on MacOS. Probably just an older version.
c = IPython.Config()
except Exception:
# Seems to be necessary on Linux, and likely on newer installs.
c = IPython.terminal.ipapp.load_default_config()
c.IPCompleter.greedy = True
embed(banner1='', exit_msg='Leaving w_ipa... goodbye.', config=c)
print("")
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_kinavg.rst | Manual | w_kinavg command | MIT License | westpa__westpa/doc/documentation/cli/deprecated/w_kinavg.rst | [
"westpa__westpa/src/westpa/cli/tools/w_kinavg.py"
] | w_kinavg
WARNING: w_kinavg is being deprecated. Please use w_direct instead.
usage:
w_kinavg trace [-h] [-W WEST_H5FILE] [--first-iter N_ITER] [--last-iter N_ITER] [--step-iter STEP]
[-a ASSIGNMENTS] [-o OUTPUT] [-k KINETICS] [--disable-bootstrap] [--disable-correl]
[--alpha ALPHA] [--autocorrel-alpha ACALPHA] [--nsets NSETS]
[-e {cumulative,blocked,none}] [--window-frac WINDOW_FRAC] [--disable-averages]
Calculate average rates/fluxes and associated errors from weighted
ensemble data. Bin assignments (usually "assign.h5") and kinetics data
(usually "direct.h5") data files must have been previously generated
(see "w_assign --help" and "w_direct init --help" for information on
generating these files).
The evolution of all datasets may be calculated, with or without
confidence intervals.
Output format
The output file (-o/--output, by default "kinavg.h5") contains the
following datasets:
/avg_rates [state,state]
(Structured -- see below) State-to-state rates based on entire window of
iterations selected.
/avg_total_fluxes [state]
(Structured -- see below) Total fluxes into each state based on entire
window of iterations selected.
/avg_conditional_fluxes [state,state]
(Structured -- see below) State-to-state fluxes based on entire window of
iterations selected.
If --evolution-mode is specified, then the following additional datasets
are available:
/rate_evolution [window][state][state]
(Structured -- see below). State-to-state rates based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
/target_flux_evolution [window,state]
(Structured -- see below). Total flux into a given macro state based on
windows of iterations of varying width, as in /rate_evolution.
/conditional_flux_evolution [window,state,state]
(Structured -- see below). State-to-state fluxes based on windows of
varying width, as in /rate_evolution.
The structure of these datasets is as follows:
iter_start
(Integer) Iteration at which the averaging window begins (inclusive).
iter_stop
(Integer) Iteration at which the averaging window ends (exclusive).
expected
(Floating-point) Expected (mean) value of the observable as evaluated within
this window, in units of inverse tau.
ci_lbound
(Floating-point) Lower bound of the confidence interval of the observable
within this window, in units of inverse tau.
ci_ubound
(Floating-point) Upper bound of the confidence interval of the observable
within this window, in units of inverse tau.
stderr
(Floating-point) The standard error of the mean of the observable
within this window, in units of inverse tau.
corr_len
(Integer) Correlation length of the observable within this window, in units
of tau.
Each of these datasets is also stamped with a number of attributes:
mcbs_alpha
(Floating-point) Alpha value of confidence intervals. (For example,
*alpha=0.05* corresponds to a 95% confidence interval.)
mcbs_nsets
(Integer) Number of bootstrap data sets used in generating confidence
intervals.
mcbs_acalpha
(Floating-point) Alpha value for determining correlation lengths.
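As a concrete illustration (a minimal sketch, not part of w_kinavg itself),
the rate evolution written when --evolution-mode is enabled can be read back
with h5py. The dataset, field, and attribute names are those documented above;
the default output name kinavg.h5 and the state indices 0 and 1 are assumptions
made only for this example:

import h5py

with h5py.File('kinavg.h5', 'r') as f:
    rates = f['rate_evolution']                     # [window][state][state], structured
    print('CI alpha:', rates.attrs['mcbs_alpha'])   # attribute stamped on the dataset
    for w in range(rates.shape[0]):
        entry = rates[w, 0, 1]                      # rate from state 0 into state 1
        print('iters [{}, {}): {:.3e} ({:.3e}, {:.3e}) per tau'.format(
            entry['iter_start'], entry['iter_stop'],
            entry['expected'], entry['ci_lbound'], entry['ci_ubound']))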
Command-line options
optional arguments:
-h, --help show this help message and exit
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
iteration range:
--first-iter N_ITER Begin analysis at iteration N_ITER (default: 1).
--last-iter N_ITER Conclude analysis with N_ITER, inclusive (default: last completed iteration).
--step-iter STEP Analyze/report in blocks of STEP iterations.
input/output options:
-a ASSIGNMENTS, --assignments ASSIGNMENTS
Bin assignments and macrostate definitions are in ASSIGNMENTS (default:
assign.h5).
-o OUTPUT, --output OUTPUT
Store results in OUTPUT (default: kinavg.h5).
input/output options:
-k KINETICS, --kinetics KINETICS
Populations and transition rates are stored in KINETICS (default: kintrace.h5).
confidence interval calculation options:
--disable-bootstrap, -db
Disable the use of Monte Carlo Block Bootstrapping.
--disable-correl, -dc
Disable the correlation analysis.
--alpha ALPHA Calculate a (1-ALPHA) confidence interval (default: 0.05)
--autocorrel-alpha ACALPHA
Evaluate autocorrelation to (1-ACALPHA) significance. Note that too small an
ACALPHA will result in failure to detect autocorrelation in a noisy flux signal.
(Default: same as ALPHA.)
--nsets NSETS Use NSETS samples for bootstrapping (default: chosen based on ALPHA)
calculation options:
-e {cumulative,blocked,none}, --evolution-mode {cumulative,blocked,none}
How to calculate time evolution of rate estimates. ``cumulative`` evaluates rates
over windows starting with --start-iter and getting progressively wider to --stop-
iter by steps of --step-iter. ``blocked`` evaluates rates over windows of width
--step-iter, the first of which begins at --start-iter. ``none`` (the default)
disables calculation of the time evolution of rate estimates.
--window-frac WINDOW_FRAC
Fraction of iterations to use in each window when running in ``cumulative`` mode.
The (1 - frac) fraction of iterations will be discarded from the start of each
window.
misc options:
--disable-averages, -da
Whether or not the averages should be printed to the console (set to FALSE if flag
is used).
| from westpa.tools import WESTMasterCommand, WESTParallelTool
from westpa.cli.tools.w_direct import DKinAvg
from warnings import warn
# Just a shim to make sure everything works and is backwards compatible.
class WKinAvg(DKinAvg):
subcommand = 'trace'
help_text = 'averages and CIs for path-tracing kinetics analysis'
default_kinetics_file = 'kintrace.h5'
default_output_file = 'kinavg.h5'
class WDirect(WESTMasterCommand, WESTParallelTool):
prog = 'w_kinavg'
subcommands = [WKinAvg]
subparsers_title = 'direct kinetics analysis schemes'
description = '''\
Calculate average rates and associated errors from weighted ensemble data. Bin
assignments (usually "assignments.h5") and kinetics data (usually
"kintrace.h5" or "kinmat.h5") data files must have been previously generated
(see "w_assign --help" and "w_kinetics --help" for information on generating
these files).
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, usually "kinavg.h5") contains the following
dataset:
/avg_rates [state,state]
(Structured -- see below) State-to-state rates based on entire window of
iterations selected.
For trace mode, the following additional datasets are generated:
/avg_total_fluxes [state]
(Structured -- see below) Total fluxes into each state based on entire
window of iterations selected.
/avg_conditional_fluxes [state,state]
(Structured -- see below) State-to-state fluxes based on entire window of
iterations selected.
If --evolution-mode is specified, then the following additional dataset is
available:
/rate_evolution [window][state][state]
(Structured -- see below). State-to-state rates based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
If --evolution-mode is specified in trace mode, the following additional
datasets are available:
/target_flux_evolution [window,state]
(Structured -- see below). Total flux into a given macro state based on
windows of iterations of varying width, as in /rate_evolution.
/conditional_flux_evolution [window,state,state]
(Structured -- see below). State-to-state fluxes based on windows of
varying width, as in /rate_evolution.
The structure of these datasets is as follows:
iter_start
(Integer) Iteration at which the averaging window begins (inclusive).
iter_stop
(Integer) Iteration at which the averaging window ends (exclusive).
expected
(Floating-point) Expected (mean) value of the rate as evaluated within
this window, in units of inverse tau.
ci_lbound
(Floating-point) Lower bound of the confidence interval on the rate
within this window, in units of inverse tau.
ci_ubound
(Floating-point) Upper bound of the confidence interval on the rate
within this window, in units of inverse tau.
corr_len
(Integer) Correlation length of the rate within this window, in units
of tau.
Each of these datasets is also stamped with a number of attributes:
mcbs_alpha
(Floating-point) Alpha value of confidence intervals. (For example,
*alpha=0.05* corresponds to a 95% confidence interval.)
mcbs_nsets
(Integer) Number of bootstrap data sets used in generating confidence
intervals.
mcbs_acalpha
(Floating-point) Alpha value for determining correlation lengths.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def entry_point():
warn('{} is being deprecated. Please use w_direct instead.'.format(WDirect.prog))
import sys
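# Force the 'trace' subcommand: w_kinavg only wraps the path-tracing analysis,
# so insert it whenever the user did not supply it explicitly.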
try:
if sys.argv[1] != 'trace':
sys.argv.insert(1, 'trace')
except Exception:
sys.argv.insert(1, 'trace')
WDirect().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_kinetics.rst | Manual | w_kinetics command | MIT License | westpa__westpa/doc/documentation/cli/deprecated/w_kinetics.rst | [
"westpa__westpa/src/westpa/cli/tools/w_kinetics.py"
] | w_kinetics
WARNING: w_kinetics is being deprecated. Please use w_direct instead.
usage:
w_kinetics trace [-h] [-W WEST_H5FILE] [--first-iter N_ITER] [--last-iter N_ITER]
[--step-iter STEP] [-a ASSIGNMENTS] [-o OUTPUT]
Calculate state-to-state rates and transition event durations by tracing
trajectories.
A bin assignment file (usually "assign.h5") including trajectory
labeling is required (see "w_assign --help" for information on
generating this file).
The output of this subcommand is used as input for all other w_direct
subcommands, which will convert the flux data in the output file into
average rates/fluxes/populations with confidence intervals.
Output format
The output file (-o/--output, by default "kintrace.h5") contains the
following datasets:
``/conditional_fluxes`` [iteration][state][state]
*(Floating-point)* Macrostate-to-macrostate fluxes. These are **not**
normalized by the population of the initial macrostate.
``/conditional_arrivals`` [iteration][stateA][stateB]
*(Integer)* Number of trajectories arriving at state *stateB* in a given
iteration, given that they departed from *stateA*.
``/total_fluxes`` [iteration][state]
*(Floating-point)* Total flux into a given macrostate.
``/arrivals`` [iteration][state]
*(Integer)* Number of trajectories arriving at a given state in a given
iteration, regardless of where they originated.
``/duration_count`` [iteration]
*(Integer)* The number of event durations recorded in each iteration.
``/durations`` [iteration][event duration]
*(Structured -- see below)* Event durations for transition events ending
during a given iteration. These are stored as follows:
istate
*(Integer)* Initial state of transition event.
fstate
*(Integer)* Final state of transition event.
duration
*(Floating-point)* Duration of transition, in units of tau.
weight
*(Floating-point)* Weight of trajectory at end of transition, **not**
normalized by initial state population.
Because state-to-state fluxes stored in this file are not normalized by
initial macrostate population, they cannot be used as rates without
further processing. The w_direct kinetics command is used to perform
this normalization while taking statistical fluctuation and correlation
into account. See w_direct kinetics --help for more information. Target
fluxes (total flux into a given state) require no such normalization.
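As an illustration (a hedged sketch, not part of w_kinetics), the
event-duration records described above can be post-processed with h5py. The
example assumes the default output name kintrace.h5, that only the first
duration_count[n] entries of each /durations row are meaningful, and uses
states 0 and 1 purely as placeholders:

import h5py

with h5py.File('kintrace.h5', 'r') as f:
    counts = f['duration_count'][...]
    accum = total_w = 0.0
    for iiter, n_events in enumerate(counts):
        if n_events == 0:
            continue
        events = f['durations'][iiter, :n_events]
        sel = (events['istate'] == 0) & (events['fstate'] == 1)   # 0 -> 1 transitions
        accum += (events['duration'][sel] * events['weight'][sel]).sum()
        total_w += events['weight'][sel].sum()
    if total_w > 0:
        print('weight-averaged 0->1 event duration (tau):', accum / total_w)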
Command-line options
optional arguments:
-h, --help show this help message and exit
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
iteration range:
--first-iter N_ITER Begin analysis at iteration N_ITER (default: 1).
--last-iter N_ITER Conclude analysis with N_ITER, inclusive (default: last completed iteration).
--step-iter STEP Analyze/report in blocks of STEP iterations.
input/output options:
-a ASSIGNMENTS, --assignments ASSIGNMENTS
Bin assignments and macrostate definitions are in ASSIGNMENTS (default:
assign.h5).
-o OUTPUT, --output OUTPUT
Store results in OUTPUT (default: kintrace.h5).
| from westpa.tools import WESTMasterCommand, WESTParallelTool
from warnings import warn
from westpa.cli.tools.w_direct import DKinetics
# Just a shim to make sure everything works and is backwards compatible.
class WKinetics(DKinetics):
subcommand = 'trace'
help_text = 'averages and CIs for path-tracing kinetics analysis'
default_output_file = 'kintrace.h5'
class WDirect(WESTMasterCommand, WESTParallelTool):
prog = 'w_kinetics'
subcommands = [WKinetics]
subparsers_title = 'calculate state-to-state kinetics by tracing trajectories'
description = '''\
Calculate state-to-state rates and transition event durations by tracing
trajectories.
A bin assignment file (usually "assign.h5") including trajectory labeling
is required (see "w_assign --help" for information on generating this file).
The output generated by this program is used as input for the ``w_kinavg``
tool, which converts the flux data in the output file into average rates
with confidence intervals. See ``w_kinavg trace --help`` for more
information.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "kintrace.h5") contains the
following datasets:
``/conditional_fluxes`` [iteration][state][state]
*(Floating-point)* Macrostate-to-macrostate fluxes. These are **not**
normalized by the population of the initial macrostate.
``/conditional_arrivals`` [iteration][stateA][stateB]
*(Integer)* Number of trajectories arriving at state *stateB* in a given
iteration, given that they departed from *stateA*.
``/total_fluxes`` [iteration][state]
*(Floating-point)* Total flux into a given macrostate.
``/arrivals`` [iteration][state]
*(Integer)* Number of trajectories arriving at a given state in a given
iteration, regardless of where they originated.
``/duration_count`` [iteration]
*(Integer)* The number of event durations recorded in each iteration.
``/durations`` [iteration][event duration]
*(Structured -- see below)* Event durations for transition events ending
during a given iteration. These are stored as follows:
istate
*(Integer)* Initial state of transition event.
fstate
*(Integer)* Final state of transition event.
duration
*(Floating-point)* Duration of transition, in units of tau.
weight
*(Floating-point)* Weight of trajectory at end of transition, **not**
normalized by initial state population.
Because state-to-state fluxes stored in this file are not normalized by
initial macrostate population, they cannot be used as rates without further
processing. The ``w_kinavg`` command is used to perform this normalization
while taking statistical fluctuation and correlation into account. See
``w_kinavg trace --help`` for more information. Target fluxes (total flux
into a given state) require no such normalization.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def entry_point():
warn('{} is being deprecated. Please use w_direct instead.'.format(WDirect.prog))
import sys
try:
if sys.argv[1] != 'trace':
sys.argv.insert(1, 'trace')
except Exception:
sys.argv.insert(1, 'trace')
WDirect().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_multi_west.rst | Manual | w_multi_west command | MIT License | westpa__westpa/doc/documentation/cli/w_multi_west.rst | [
"westpa__westpa/src/westpa/cli/tools/w_multi_west.py"
] | w_multi_west
The w_multi_west tool combines multiple WESTPA simulations into a single
aggregate simulation to facilitate the analysis of the set of
simulations. In particular, the tool creates a single west.h5 file that
contains all of the data from the west.h5 files of the individual
simulations. Each iteration x in the new file contains all of the
segments from iteration x from each of the simulations, all
normalized to the total weight.
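For intuition, the weight handling can be sketched in a few lines of numpy
with hypothetical weights; this mirrors the per-iteration renormalization step
performed by the tool (skipped when the -nr/--no-reweight option described
below is given):

import numpy as np

run_a = np.array([0.5, 0.3, 0.2])          # weights of iteration x in run 01 (hypothetical)
run_b = np.array([0.6, 0.4])               # weights of iteration x in run 02 (hypothetical)
combined = np.concatenate([run_a, run_b])  # all segments of iteration x in the merged file
combined /= combined.sum()                 # aggregate iteration x again sums to 1
print(combined, combined.sum())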
Overview
usage:
w_multi_west [-h] [-m master] [-n sims] [--quiet | --verbose | --debug] [--version]
[-W WEST_H5FILE] [-a aux] [--auxall] [--ibstates]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
optional arguments:
-h, --help show this help message and exit
General options::
-m, --master directory
Master path of simulations where all the smaller simulations are
stored (default: Current Directory)
-n, --sims n
Number of simulation directories. Assumes leading zeros. (default: 0)
--quiet emit only essential information
--verbose emit extra information
--version show program's version number and exit
Command-Line Options
See the general command-line tool reference for more information on the
general options.
Input/output options
These arguments allow the user to specify where to read input simulation
result data and where to output calculated progress coordinate
probability distribution data.
Both input and output files are hdf5 format:
-W, --west, --WEST_H5FILE file
The name of the main .h5 file inside each simulation directory. (Default: west.h5)
-o, --output file
Store this tool's output in file. (Default: multi.h5)
-a, --aux auxdata
Name of additional auxiliary dataset to be combined. Can be called multiple times.
(Default: None)
-aa, --auxall
Combine all auxiliary datasets as labeled in ``west.h5`` in folder 01. (Default: False)
-nr, --no-reweight
Do not perform reweighting. (Default: False)
-ib, --ibstates
Attempt to combine ``ibstates`` dataset if the basis states are identical across
all simulations. Needed when tracing with ``westpa.analysis``. (Default: False)
Examples
If you have five simulations, set up your directory such that you have
five directories named numerically with leading zeroes, and each
directory contains a west.h5 file. For this example, each west.h5 also
contains an auxiliary dataset called RMSD. If you run ls, you will see
the following output:
01 02 03 04 05
To run the w_multi_west tool, do the following:
w_multi_west.py -m . -n 5 --aux=RMSD
If you used any custom WESTSystem, include that in the directory where
you run the code.
To proceed in analyzing the aggregated simulation data as a single
simulation, rename the output file multi.h5 to west.h5.
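Before renaming, the combined file can be sanity-checked with a short h5py
snippet; this is a sketch that assumes the default output name multi.h5 and
the attributes and datasets the tool writes:

import h5py

with h5py.File('multi.h5', 'r') as f:
    last_iter = int(f.attrs['west_current_iteration'])
    for n_iter in range(1, last_iter + 1):
        seg_index = f['iterations/iter_{0:08d}/seg_index'.format(n_iter)]
        weights = seg_index['weight']
        # each iteration's weights should sum to ~1 unless -nr/--no-reweight was used
        print(n_iter, len(weights), weights.sum())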
| import logging
import numpy as np
import pickle
log = logging.getLogger(__name__)
from westpa.tools.core import WESTTool
from westpa.core.data_manager import n_iter_dtype, istate_dtype
from westpa.tools.progress import ProgressIndicatorComponent
from westpa.core import h5io
from westpa.tools.core import WESTMultiTool
# from westtools.dtypes import iter_block_ci_dtype as ci_dtype
import gc
# from pympler.tracker import SummaryTracker
ci_dtype = np.dtype(
[
('iter_start', n_iter_dtype),
('iter_stop', n_iter_dtype),
('expected', np.float64),
('ci_lbound', np.float64),
('ci_ubound', np.float64),
('corr_len', n_iter_dtype),
('variance', np.float64),
('stderrormean', np.float64),
]
)
# directory locations are stored in a .yaml file with this format:
# ---
# PATHS: ['/path/to/simulation/1','/path/to/simulation/2',...,
# '/path/to/simulation/n']
# Straight up stolen from the data manager. In the future, maybe I can just sort it by subbing in the appropriate values.
def get_bin_mapper(we_h5file, hashval):
'''Look up the given hash value in the binning table, unpickling and returning the corresponding
bin mapper if available, or raising KeyError if not.'''
# Convert to a hex digest if we need to
try:
hashval = hashval.hexdigest()
except AttributeError:
pass
while True:
# these will raise KeyError if the group doesn't exist, which also means
# that bin data is not available, so no special treatment here
try:
binning_group = we_h5file['/bin_topologies']
index = binning_group['index']
pkl = binning_group['pickles']
except KeyError:
raise KeyError('hash {} not found. Could not retrieve binning group'.format(hashval))
n_entries = len(index)
if n_entries == 0:
raise KeyError('hash {} not found. No entries in index'.format(hashval))
chunksize = 1024
for istart in range(0, n_entries, chunksize):
chunk = index[istart : min(istart + chunksize, n_entries)]
for i in range(len(chunk)):
if chunk[i]['hash'] == hashval:
pkldat = bytes(pkl[istart + i, 0 : chunk[i]['pickle_len']].data)
mapper = pickle.loads(pkldat)
log.debug('loaded {!r} from {!r}'.format(mapper, binning_group))
log.debug('hash value {!r}'.format(hashval))
return mapper
raise KeyError('hash {} not found'.format(hashval))
def create_idtype_array(input_array):
'''Return a new array with the new istate_dtype while preserving old data.'''
new_array = np.zeros(input_array.shape, dtype=istate_dtype)
for j in input_array.dtype.names:
new_array[j] = input_array[j].copy()
# Need to turn 'basis_auxref' to empty bytestrings...
new_array['basis_auxref'] = b''
return new_array
class WMultiWest(WESTMultiTool):
prog = 'w_multi_west'
description = '''\
Tool designed to combine multiple WESTPA simulations while accounting for
reweighting.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super(WESTTool, self).__init__()
self.progress = ProgressIndicatorComponent()
# We no longer care about a lot of this.
self.ntrials = 0
self.nstates = 0
self.kin_trial = {}
self.west = {}
self.niters = 0
def add_args(self, parser):
self.progress.add_args(parser)
iogroup = parser.add_argument_group('input/output options')
iogroup.add_argument('-o', '--output-file', default='multi.h5', help='''The name of the output file to store results in.''')
iogroup.add_argument(
'-W',
'--west',
'--WEST_H5FILE',
default='west.h5',
help='''The name of the main .h5 file inside each simulation
directory''',
)
iogroup.add_argument('-a', '--aux', action='append', help='''Names of additional auxiliary datasets to be combined''')
iogroup.add_argument('-aa', '--auxall', action='store_true', help='''Combine all auxiliary datasets. Default: False''')
iogroup.add_argument('-nr', '--no-reweight', action='store_true', help='''Do not reweight. Default: False''')
iogroup.add_argument(
'-ib', '--ibstates', action='store_true', help='''Attempt to combine ibstates dataset. Default: False'''
)
def open_files(self):
self.output_file = h5io.WESTPAH5File(self.output_file, 'w', creating_program=True)
h5io.stamp_creator_data(self.output_file)
opened_files = self.generate_file_list([self.west])
self.westH5 = opened_files[self.west]
# Just some temp things while I clean everything up...
# west_files = self.westH5
# Determine max iteration...
# We can't really use the old method anymore, as we need to calculate rates in the bootstrap.
# Ergo, we're going to load things like w_kinavg, but that's all.
# We'll just load them up and store them internally, for the moment.
def process_args(self, args):
self.progress.process_args(args)
self.output_file = args.output_file
self.output_file_name = args.output_file
self.west = args.west
self.sims = args.sims
self.aux = args.aux
self.auxall = args.auxall
self.reweight = args.no_reweight
self.ibstates = args.ibstates
def total_number_of_walkers(self):
self.total_walkers = [0] * self.niters
for key, west in self.westH5.items():
# Sometimes, we're smaller or larger by one. Hm.
try:
self.total_walkers[:] += west['summary'][:-1]['n_particles']
except ValueError:
self.total_walkers[:] += west['summary'][:-1]['n_particles'][: len(self.total_walkers)]
class Segment:
def __init__(self, weight=0, iteration=0, simid=0, recycled_in=0):
self.weight = weight
self.iteration = iteration
self.simid = simid
self.recycled_in = recycled_in
def go(self):
pi = self.progress.indicator
self.istates = True # Assume serendipitously istates is same between runs...
with pi:
pi.new_operation('Initializing')
self.open_files()
self.total_number_of_walkers()
if self.auxall is True:
self.aux = list(self.westH5[1]['iterations/iter_00000001/auxdata'].keys())
# Create a giant WEST.h5 file, separating the individual walkers, and renormalizing the weights.
# It should then be compatible with existing toolsets.
# Isn't really going to start with auxdata, but we'll add it in.
# self.niters = 500
# Initialize data manager...
# Just bullshit for the current system.
# self.niters = self.westH5[1].attrs['west_current_iteration'] - 1
# print(self.niters, len(self.westH5))
# self.data_manager = data_manager.WESTDataManager()
westh5 = []
self.source_sinks = []
self.n_sims = {}
istate_addition = [0]
for ifile, (key, west) in enumerate(self.westH5.items()):
d = {'west': west, 'wm': None, 'rt': None, 'remove_next_cycle': [], 'seg_index': None}
# We're getting the bin mapper, then setting the recycling target...
binhash = west['iterations/iter_{0:08d}'.format(2)].attrs['binhash']
bin_mapper = get_bin_mapper(west, bytes(binhash, 'utf-8'))
try:
d['rt'] = bin_mapper.assign(west['tstates']['0']['pcoord'][...])[0]
self.source_sinks.append(bin_mapper.assign(west['tstates']['0']['pcoord'][...])[0])
except KeyError:
d['rt'] = None
self.source_sinks.append(None)
pass
# We're going to make a set of source and sink states that we can iterate through, eventually.
# Keep a count of how many simulations for this particular recycling target we have...
try:
self.n_sims[d['rt']] += 1
except KeyError:
self.n_sims[d['rt']] = 1
westh5.append(d)
if ifile == 0:
self.niters = west.attrs['west_current_iteration'] - 1
else:
self.niters = min(west.attrs['west_current_iteration'] - 1, self.niters)
istate_addition.append(istate_addition[-1] + len(west['ibstates/0/istate_index']))
# Check to see if all the bstates are identical
if self.ibstates:
check = [False, False]  # Assuming they're false, so not accidentally outputting anything that errors out.
try:
check[0] = np.array_equal(bstate_index, west['ibstates/0/bstate_index'][:])
check[1] = np.array_equal(bstate_pcoord, west['ibstates/0/bstate_pcoord'][:])
if not np.all(check):
print(f'File {ifile} used different bstates than the first file. Will skip exporting ibstates dataset.')
self.ibstates = False
except NameError:
bstate_index = west['ibstates/0/bstate_index'][:] # noqa: F841
bstate_pcoord = west['ibstates/0/bstate_pcoord'][:] # noqa: F841
start_point = []
self.source_sinks = list(set(self.source_sinks))
# We'll need a global list of walkers to add to and take care of during the next round of simulations, as well as the current one.
# We'll organize it by source and sink states.
self.past_iter = {}
self.futr_iter = {}
self.past_rm = {}
self.futr_rm = {}
for i in self.source_sinks:
self.past_iter[i] = []
self.futr_iter[i] = []
self.past_rm[i] = []
self.futr_rm[i] = []
print(pi.new_operation('Recreating...', self.niters))
# tracker = SummaryTracker()
# self.output_file.close()
if self.ibstates:
# Copying the ibstates group from the first file as base
self.output_file.copy(self.westH5[1]['ibstates'], self.output_file)
del self.output_file['ibstates/0/istate_pcoord']
del self.output_file['ibstates/0/istate_index']
# Combining the rest of the istate datasets
for ifile, (key, west) in enumerate(self.westH5.items()):
if ifile == 0:
final_istate_index = west['ibstates/0/istate_index']
final_istate_pcoord = west['ibstates/0/istate_pcoord']
if final_istate_index.dtype != istate_dtype:
final_istate_index = create_idtype_array(final_istate_index)
else:
addition = west['ibstates/0/istate_index'][:]
if addition.dtype != istate_dtype:
addition = create_idtype_array(addition)
final_istate_index = np.append(final_istate_index, addition)
final_istate_pcoord = np.append(final_istate_pcoord, west['ibstates/0/istate_pcoord'][:])
# Saving them into self.output_file
self.output_file['ibstates/0'].create_dataset('istate_index', data=final_istate_index, dtype=istate_dtype)
self.output_file['ibstates/0'].create_dataset('istate_pcoord', data=final_istate_pcoord)
for iter in range(self.niters):
# We have the following datasets in each iteration:
# ibstates, which can now be combined with --ibstates
# pcoord
# seg_index
# wtgraph
# wtgraph is going to be a little more complex to handle, but not too bad.
# aux data specified
iter += 1
ifile = 0
# self.output_file = h5io.WESTPAH5File(self.output_file_name, 'w', creating_program=True)
# Determine how many simulations to append or remove per west file.
# self.segments = {}
# for key,value in self.n_sims.items():
# self.segments[key] = int(np.floor(len(self.past_iter[key]) / value))
# run_once = 0
# total_current_sims = 0
# for i in self.source_sinks:
# total_current_sims += len(self.past_iter[i])
# total_current_sims += len(self.past_rm[i])
for ifile, west in enumerate(westh5):
westdict = west['west']
seg_index = westdict['iterations/iter_{0:08d}'.format(iter)]['seg_index'][...]
pcoord = westdict['iterations/iter_{0:08d}'.format(iter)]['pcoord'][...]
wtgraph = westdict['iterations/iter_{0:08d}'.format(iter)]['wtgraph'][...]
# new_weight = westdict['iterations/iter_{0:08d}'.format(iter)]['new_weight'][...]
if self.aux:
auxdata = {}
for i in self.aux:
auxdata[str(i)] = westdict['iterations/iter_{0:08d}'.format(iter)]['auxdata'][str(i)][...]
if iter == 1 and ifile == 0:
new_dtype = np.dtype(seg_index.dtype.descr + [('group', '<i8')])
new_seg_index = np.zeros(seg_index.shape, dtype=new_dtype)
for dt, val in seg_index.dtype.fields.items():
new_seg_index[dt] = seg_index[dt]
new_seg_index['group'] = ifile
del seg_index
seg_index = new_seg_index[...]
del new_seg_index
if ifile == 0:
mseg = seg_index
mpco = pcoord
mwtg = wtgraph
if self.aux:
maux = {}
for i in self.aux:
maux[str(i)] = auxdata[str(i)]
if iter == 1:
summary = westdict['summary'][...]
start_point.append(0)
if ifile != 0:
# print(mseg.shape, seg_index.shape, ifile)
# print(mpco.shape, pcoord.shape, ifile)
# print(mwtg.shape, wtgraph.shape, ifile)
if iter != 1:
addition = prev_start_point[ifile] # noqa: F821
else:
addition = mseg.shape[0]
seg_index['parent_id'][np.where(seg_index['parent_id'] >= 0)] += addition
seg_index['parent_id'][np.where(seg_index['parent_id'] < 0)] -= istate_addition[ifile]
seg_index['wtg_offset'] += mwtg.shape[0]
start_point.append(mseg.shape[0])
wtgraph += mwtg.shape[0]
mseg = np.concatenate((mseg, seg_index))
mpco = np.concatenate((mpco, pcoord))
mwtg = np.concatenate((mwtg, wtgraph))
if self.aux:
for i in self.aux:
maux[str(i)] = np.concatenate((maux[str(i)], auxdata[str(i)]))
ifile += 1
del seg_index, pcoord, wtgraph, westdict
if self.aux:
del auxdata
gc.collect()
# Make a real copy to use in the next iteration.
# self.past_iter = self.futr_iter.copy()
# self.past_rm[i] = self.futr_rm.copy()
prev_start_point = start_point # noqa: F841
start_point = []
# This is... maybe wrong, actually? Or at least, it's not ALL that is required for normalizing things.
# We need to weight everything by 1/N, then just normalize if that normalization was wrong. Keep the relative weights sane.
#... or actually, no, that's fine, nevermind, what's wrong with me? But we'll leave it in for now.
# Normalize weight of each iteration, done unless specified not to.
if not self.reweight:
mseg['weight'] /= mseg['weight'].sum()
summary['n_particles'][iter - 1] = mseg.shape[0]
summary['norm'][iter - 1] = mseg['weight'].sum()
summary['min_seg_prob'][iter - 1] = min(mseg['weight'])
summary['max_seg_prob'][iter - 1] = max(mseg['weight'])
curr_iter = self.output_file.create_group('iterations/iter_{0:08d}'.format(iter))
curr_iter.attrs['n_iter'] = iter
# Hard-link ibstates dataset to the main one
if self.ibstates:
curr_iter['ibstates'] = self.output_file['ibstates/0']
ds_rate_evol = curr_iter.create_dataset('wtgraph', data=mwtg, shuffle=True, compression=9)
ds_rate_evol = curr_iter.create_dataset('seg_index', data=mseg, shuffle=True, compression=9)
ds_rate_evol = curr_iter.create_dataset('pcoord', data=mpco, shuffle=True, compression=9)
if self.aux:
aux_iter = self.output_file.create_group('iterations/iter_{0:08d}/auxdata'.format(iter))
for i in self.aux:
ds_rate_evol = aux_iter.create_dataset(str(i), data=maux[str(i)], shuffle=True, compression=9)
# We need to be careful about memory, here. We are blowing uppppp.
# We're STILL blowing up. Criiiiiipes.
# self.segments = {}
del mseg, mpco, mwtg, ds_rate_evol, curr_iter #, self.segments
if self.aux:
del maux, aux_iter
gc.collect()
self.output_file.flush()
# self.output_file.close()
# print("How big is our summary?")
# print(sys.getsizeof(summary))
# objgraph.show_most_common_types(limit=50)
# objgraph.show_growth(limit=10)
# objgraph.show_most_common_types(objects=objgraph.get_leaking_objects())
pi.progress += 1
pi.new_operation('Writing to file...')
ds_rate_evol = self.output_file.create_dataset('summary', data=summary, shuffle=True, compression=9) # noqa: F841
self.output_file.attrs['west_current_iteration'] = self.niters
self.output_file.attrs['west_file_format_version'] = 7
self.output_file.attrs['west_iter_prec'] = 8
self.output_file.attrs['westpa_fileformat_version'] = 7
self.output_file.attrs['westpa_iter_prec'] = 8
def entry_point():
WMultiWest().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_ntop.rst | Manual | w_ntop command | MIT License | westpa__westpa/doc/documentation/cli/w_ntop.rst | [
"westpa__westpa/src/westpa/cli/tools/w_ntop.py"
] | w_ntop
usage:
w_ntop [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version] [-W WEST_H5FILE]
[--first-iter N_ITER] [--last-iter N_ITER] [-a ASSIGNMENTS] [-n COUNT] [-t TIMEPOINT]
[--highweight | --lowweight | --random] [-o OUTPUT]
Select walkers from bins. An assignment file mapping walkers to bins at
each timepoint is required (see w_assign --help for further information
on generating this file). By default, high-weight walkers are selected
(hence the name w_ntop: select the N top-weighted walkers from each
bin); however, minimum weight walkers and randomly-selected walkers may
be selected instead.
Output format
The output file (-o/--output, by default "ntop.h5") contains the
following datasets:
``/n_iter`` [iteration]
*(Integer)* Iteration numbers for each entry in other datasets.
``/n_segs`` [iteration][bin]
*(Integer)* Number of segments in each bin/state in the given iteration.
This will generally be the same as the number requested with
``--n/--count`` but may be smaller if the requested number of walkers
does not exist.
``/seg_ids`` [iteration][bin][segment]
*(Integer)* Matching segments in each iteration for each bin.
For a given iteration and bin, only the first ``n_segs`` entries are
valid. For example, the full list of matching seg_ids in bin 0 in the
first stored iteration is ``seg_ids[0][0][:n_segs[0]]``.
``/weights`` [iteration][bin][segment]
*(Floating-point)* Weights for each matching segment in ``/seg_ids``.
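For example (a minimal sketch, not part of w_ntop), the selections can be read
back with h5py. Dataset names follow the description above; note that the
implementation included below actually writes the per-bin counts as ``nsegs``,
so the snippet falls back to that name:

import h5py

with h5py.File('ntop.h5', 'r') as f:
    n_iter = f['n_iter'][...]
    n_segs = f['n_segs'][...] if 'n_segs' in f else f['nsegs'][...]
    seg_ids = f['seg_ids'][...]
    weights = f['weights'][...]
    iiter = 0                                    # first stored iteration
    for ibin in range(n_segs.shape[1]):
        nvalid = n_segs[iiter, ibin]
        if nvalid:
            print('iter', n_iter[iiter], 'bin', ibin,
                  seg_ids[iiter, ibin, :nvalid], weights[iiter, ibin, :nvalid])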
Command-line arguments
optional arguments:
-h, --help show this help message and exit
--highweight Select COUNT highest-weight walkers from each bin.
--lowweight Select COUNT lowest-weight walkers from each bin.
--random Select COUNT walkers randomly from each bin.
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
iteration range:
--first-iter N_ITER Begin analysis at iteration N_ITER (default: 1).
--last-iter N_ITER Conclude analysis with N_ITER, inclusive (default: last completed iteration).
input options:
-a ASSIGNMENTS, --assignments ASSIGNMENTS
Use assignments from the given ASSIGNMENTS file (default: assign.h5).
selection options:
-n COUNT, --count COUNT
Select COUNT walkers from each iteration for each bin (default: 1).
-t TIMEPOINT, --timepoint TIMEPOINT
Base selection on the given TIMEPOINT within each iteration. Default (-1)
corresponds to the last timepoint.
output options:
-o OUTPUT, --output OUTPUT
Write output to OUTPUT (default: ntop.h5).
| import h5py
import numpy as np
from westpa.tools import WESTTool, WESTDataReader, IterRangeSelection, ProgressIndicatorComponent
import westpa
from westpa.core import h5io
from westpa.core.data_manager import seg_id_dtype, n_iter_dtype, weight_dtype
from westpa.core.binning import assignments_list_to_table
class WNTopTool(WESTTool):
prog = 'w_ntop'
description = '''\
Select walkers from bins. An assignment file mapping walkers to
bins at each timepoint is required (see``w_assign --help`` for further
information on generating this file). By default, high-weight walkers are
selected (hence the name ``w_ntop``: select the N top-weighted walkers from
each bin); however, minimum weight walkers and randomly-selected walkers
may be selected instead.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "ntop.h5") contains the following
datasets:
``/n_iter`` [iteration]
*(Integer)* Iteration numbers for each entry in other datasets.
``/n_segs`` [iteration][bin]
*(Integer)* Number of segments in each bin/state in the given iteration.
This will generally be the same as the number requested with
``--n/--count`` but may be smaller if the requested number of walkers
does not exist.
``/seg_ids`` [iteration][bin][segment]
*(Integer)* Matching segments in each iteration for each bin.
For a given iteration and bin, only the first ``n_segs`` entries are
valid. For example, the full list of matching seg_ids in bin 0 in the
first stored iteration is ``seg_ids[0][0][:n_segs[0]]``.
``/weights`` [iteration][bin][segment]
*(Floating-point)* Weights for each matching segment in ``/seg_ids``.
-----------------------------------------------------------------------------
Command-line arguments
-----------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
self.data_reader = WESTDataReader()
self.iter_range = IterRangeSelection()
self.progress = ProgressIndicatorComponent()
self.output_file = None
self.assignments_filename = None
self.output_filename = None
self.what = None
self.timepoint = None
self.count = None
def add_args(self, parser):
self.data_reader.add_args(parser)
self.iter_range.add_args(parser)
igroup = parser.add_argument_group('input options')
igroup.add_argument(
'-a',
'--assignments',
default='assign.h5',
help='''Use assignments from the given ASSIGNMENTS file (default: %(default)s).''',
)
sgroup = parser.add_argument_group('selection options')
sgroup.add_argument(
'-n',
'--count',
type=int,
default=1,
help='''Select COUNT walkers from each iteration for each bin (default: %(default)s).''',
)
sgroup.add_argument(
'-t',
'--timepoint',
type=int,
default=-1,
help='''Base selection on the given TIMEPOINT within each iteration. Default (-1)
corresponds to the last timepoint.''',
)
cgroup = parser.add_mutually_exclusive_group()
cgroup.add_argument(
'--highweight',
dest='select_what',
action='store_const',
const='highweight',
help='''Select COUNT highest-weight walkers from each bin.''',
)
cgroup.add_argument(
'--lowweight',
dest='select_what',
action='store_const',
const='lowweight',
help='''Select COUNT lowest-weight walkers from each bin.''',
)
cgroup.add_argument(
'--random',
dest='select_what',
action='store_const',
const='random',
help='''Select COUNT walkers randomly from each bin.''',
)
parser.set_defaults(select_what='highweight')
ogroup = parser.add_argument_group('output options')
ogroup.add_argument('-o', '--output', default='ntop.h5', help='''Write output to OUTPUT (default: %(default)s).''')
self.progress.add_args(parser)
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
with self.data_reader:
self.iter_range.process_args(args)
self.what = args.select_what
self.output_filename = args.output
self.assignments_filename = args.assignments
self.count = args.count
self.timepoint = args.timepoint
def go(self):
self.data_reader.open('r')
assignments_file = h5py.File(self.assignments_filename, mode='r')
output_file = h5io.WESTPAH5File(self.output_filename, mode='w')
pi = self.progress.indicator
count = self.count
timepoint = self.timepoint
nbins = assignments_file.attrs['nbins'] + 1
assignments_ds = assignments_file['assignments']
iter_start, iter_stop = self.iter_range.iter_start, self.iter_range.iter_stop
iter_count = iter_stop - iter_start
h5io.check_iter_range_least(assignments_ds, iter_start, iter_stop)
nsegs = assignments_file['nsegs'][h5io.get_iteration_slice(assignments_file['nsegs'], iter_start, iter_stop)]
output_file.create_dataset('n_iter', dtype=n_iter_dtype, data=list(range(iter_start, iter_stop)))
seg_count_ds = output_file.create_dataset('nsegs', dtype=np.uint, shape=(iter_count, nbins))
matching_segs_ds = output_file.create_dataset(
'seg_ids',
shape=(iter_count, nbins, count),
dtype=seg_id_dtype,
chunks=h5io.calc_chunksize((iter_count, nbins, count), seg_id_dtype),
shuffle=True,
compression=9,
)
weights_ds = output_file.create_dataset(
'weights',
shape=(iter_count, nbins, count),
dtype=weight_dtype,
chunks=h5io.calc_chunksize((iter_count, nbins, count), weight_dtype),
shuffle=True,
compression=9,
)
what = self.what
with pi:
pi.new_operation('Finding matching segments', extent=iter_count)
for iiter, n_iter in enumerate(range(iter_start, iter_stop)):
assignments = np.require(
assignments_ds[h5io.get_iteration_entry(assignments_ds, n_iter) + np.index_exp[:, timepoint]],
dtype=westpa.core.binning.index_dtype,
)
all_weights = self.data_reader.get_iter_group(n_iter)['seg_index']['weight']
# the following Cython function just executes this loop:
# for iseg in xrange(nsegs[iiter]):
# segs_by_bin[iseg,assignments[iseg]] = True
segs_by_bin = assignments_list_to_table(nsegs[iiter], nbins, assignments)
for ibin in range(nbins):
segs = np.nonzero(segs_by_bin[:, ibin])[0]
seg_count_ds[iiter, ibin] = min(len(segs), count)
if len(segs):
weights = all_weights.take(segs)
if what == 'lowweight':
indices = np.argsort(weights)[:count]
elif what == 'highweight':
indices = np.argsort(weights)[::-1][:count]
else:
assert what == 'random'
indices = np.random.permutation(len(weights))
matching_segs_ds[iiter, ibin, : len(segs)] = segs.take(indices)
weights_ds[iiter, ibin, : len(segs)] = weights.take(indices)
del segs, weights
del assignments, segs_by_bin, all_weights
pi.progress += 1
def entry_point():
WNTopTool().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_pdist.rst | Manual | w_pdist command | MIT License | westpa__westpa/doc/documentation/cli/w_pdist.rst | [
"westpa__westpa/src/westpa/cli/tools/w_pdist.py"
] | w_pdist
w_pdist constructs and calculates the progress coordinate probability
distribution's evolution over a user-specified number of simulation
iterations. w_pdist supports progress coordinates with dimensionality ≥
1.
The resulting distribution can be viewed with the plothist tool.
Overview
Usage:
w_pdist [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[-W WEST_H5FILE] [--first-iter N_ITER] [--last-iter N_ITER]
[-b BINEXPR] [-o OUTPUT]
[--construct-dataset CONSTRUCT_DATASET | --dsspecs DSSPEC [DSSPEC ...]]
[--serial | --parallel | --work-manager WORK_MANAGER]
[--n-workers N_WORKERS] [--zmq-mode MODE]
[--zmq-info INFO_FILE] [--zmq-task-endpoint TASK_ENDPOINT]
[--zmq-result-endpoint RESULT_ENDPOINT]
[--zmq-announce-endpoint ANNOUNCE_ENDPOINT]
[--zmq-listen-endpoint ANNOUNCE_ENDPOINT]
[--zmq-heartbeat-interval INTERVAL]
[--zmq-task-timeout TIMEOUT] [--zmq-client-comm-mode MODE]
Note: This tool supports parallelization, which may be more efficient
for especially large datasets.
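The usage line above also lists a --construct-dataset option. As a minimal
sketch (the module name my_dsets.py is hypothetical), such a function is
called once per iteration as function(n_iter, iter_group) and must return
data indexable as [segment][timepoint][dimension]:

def first_pcoord_dim(n_iter, iter_group):
    """Histogram only the first progress-coordinate dimension."""
    pcoord = iter_group['pcoord'][...]   # [segment][timepoint][dimension]
    return pcoord[:, :, 0:1]             # keep a trailing dimension axis

It could then be passed (hypothetically) as
w_pdist --construct-dataset my_dsets.first_pcoord_dim.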
Command-Line Options
See the general command-line tool reference for more information on the
general options.
Input/output options
These arguments allow the user to specify where to read input simulation
result data and where to output calculated progress coordinate
probability distribution data.
Both input and output files are hdf5 format:
-W, --WEST_H5FILE file
Read simulation result data from file *file*. (**Default:** The
*hdf5* file specified in the configuration file (default config file
is *west.cfg*))
-o, --output file
Store this tool's output in *file*. (**Default:** The *hdf5* file
**pdist.h5**)
Iteration range options
Specify the range of iterations over which to construct the progress
coordinate probability distribution.:
--first-iter n_iter
Construct probability distribution starting with iteration *n_iter*
(**Default:** 1)
--last-iter n_iter
Construct probability distribution's time evolution up to (and
including) iteration *n_iter* (**Default:** Last completed
iteration)
Probability distribution binning options
Specify the number of bins to use when constructing the progress
coordinate probability distribution. If using a multidimensional
progress coordinate, different binning schemes can be used for the
probability distribution for each progress coordinate.:
-b binexpr
*binexpr* specifies the number and formatting of the bins. Its
format can be as follows:
1. an integer, in which case all distributions have that many
equal sized bins
2. a python-style list of integers, of length corresponding to
the number of dimensions of the progress coordinate, in which
case each progress coordinate's probability distribution has the
corresponding number of bins
3. a python-style list of lists of scalars, where the list at
each index corresponds to each dimension of the progress
coordinate and specifies specific bin boundaries for that
progress coordinate's probability distribution.
(**Default:** 100 bins for all progress coordinates)
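For concreteness, a few example *binexpr* values written as Python literals
(per the tool's help text these expressions are evaluated with Python's eval
and ``np`` is available); on the command line they would typically be quoted,
e.g. w_pdist -b "[100, 50]":

import numpy as np

binexpr_uniform = 100                                    # 100 bins in every dimension
binexpr_per_dim = [100, 50]                              # 100 bins for dim 0, 50 for dim 1
binexpr_explicit = [[0.0, 1.0, 2.0, 5.0], [0, 10, 20]]   # explicit, possibly non-uniform boundaries
binexpr_numpy = [list(np.arange(0.0, 10.1, 0.1))]        # boundaries generated with np.arange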
Examples
Assuming simulation results are stored in west.h5 (which is specified in
the configuration file named west.cfg), for a simulation with a
1-dimensional progress coordinate:
Calculate a probability distribution histogram using all default options
(output file: pdist.h5; histogram binning: 100 equal sized bins;
probability distribution over the lowest reached progress coordinate to
the largest; work is parallelized over all available local cores using
the 'processes' work manager):
w_pdist
Same as above, except using the serial work manager (which may be more
efficient for smaller datasets):
w_pdist --serial | import logging
import h5py
import numpy as np
from westpa.tools import (
WESTParallelTool,
WESTDataReader,
WESTDSSynthesizer,
WESTWDSSynthesizer,
IterRangeSelection,
ProgressIndicatorComponent,
)
from westpa.fasthist import histnd, normhistnd
from westpa.core import h5io
log = logging.getLogger('w_pdist')
def isiterable(x):
try:
iter(x)
except TypeError:
return False
else:
return True
def _remote_min_max(ndim, dset_dtype, n_iter, dsspec):
try:
minval = np.finfo(dset_dtype).min
maxval = np.finfo(dset_dtype).max
except ValueError:
minval = np.iinfo(dset_dtype).min
maxval = np.iinfo(dset_dtype).max
data_range = [(maxval, minval) for _i in range(ndim)]
dset = dsspec.get_iter_data(n_iter)
for idim in range(ndim):
dimdata = dset[:, :, idim]
current_min, current_max = data_range[idim]
current_min = min(current_min, dimdata.min())
current_max = max(current_max, dimdata.max())
data_range[idim] = (current_min, current_max)
del dimdata
del dset
return data_range
def _remote_bin_iter(iiter, n_iter, dsspec, wt_dsspec, initpoint, binbounds, ignore_out_of_range):
iter_hist_shape = tuple(len(bounds) - 1 for bounds in binbounds)
iter_hist = np.zeros(iter_hist_shape, dtype=np.float64)
dset = dsspec.get_iter_data(n_iter)
npts = dset.shape[1]
weights = wt_dsspec.get_iter_data(n_iter)
dset = dset[:, initpoint:, :]
for ipt in range(npts - initpoint):
histnd(dset[:, ipt, :], binbounds, weights, out=iter_hist, binbound_check=False, ignore_out_of_range=ignore_out_of_range)
del weights, dset
# normalize histogram
normhistnd(iter_hist, binbounds)
return iiter, n_iter, iter_hist
class WPDist(WESTParallelTool):
prog = 'w_pdist'
description = '''\
Calculate time-resolved, multi-dimensional probability distributions of WE
datasets.
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is provided either by a user-specified function
(--construct-dataset) or a list of "data set specifications" (--dsspecs).
If neither is provided, the progress coordinate dataset ''pcoord'' is used.
To use a custom function to extract or calculate data whose probability
distribution will be calculated, specify the function in standard Python
MODULE.FUNCTION syntax as the argument to --construct-dataset. This function
will be called as function(n_iter,iter_group), where n_iter is the iteration
whose data are being considered and iter_group is the corresponding group
in the main WEST HDF5 file (west.h5). The function must return data which can
be indexed as [segment][timepoint][dimension].
To use a list of data set specifications, specify --dsspecs and then list the
desired datasets one-by-one (space-separated in most shells). These data set
specifications are formatted as NAME[,file=FILENAME,slice=SLICE], which will
use the dataset called NAME in the HDF5 file FILENAME (defaulting to the main
WEST HDF5 file west.h5), and slice it with the Python slice expression SLICE
(as in [0:2] to select the first two elements of the first axis of the
dataset). The ``slice`` option is most useful for selecting one column (or
more) from a multi-column dataset, such as arises when using a progress
coordinate of multiple dimensions.
-----------------------------------------------------------------------------
Histogram binning
-----------------------------------------------------------------------------
By default, histograms are constructed with 100 bins in each dimension. This
can be overridden by specifying -b/--bins, which accepts a number of different
kinds of arguments:
a single integer N
N uniformly spaced bins will be used in each dimension.
a sequence of integers N1,N2,... (comma-separated)
N1 uniformly spaced bins will be used for the first dimension, N2 for the
second, and so on.
a list of lists [[B11, B12, B13,...], [B21, B22, B23,...],...]
The bin boundaries B11, B12, B13,... will be used for the first dimension,
B21, B22, B23,... for the second dimension, and so on. These bin
boundaries need not be uniformly spaced. These expressions will be
evaluated with Python's ``eval`` construct, with ``np`` available for
use [e.g. to specify bins using np.arange()].
The first two forms (integer, list of integers) will trigger a scan of all
data in each dimension in order to determine the minimum and maximum values,
which may be very expensive for large datasets. This can be avoided by
explicitly providing bin boundaries using the list-of-lists form.
Note that these bins are *NOT* at all related to the bins used to drive WE
sampling.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file produced (specified by -o/--output, defaulting to "pdist.h5")
may be fed to plothist to generate plots (or appropriately processed text or
HDF5 files) from this data. In short, the following datasets are created:
``histograms``
Normalized histograms. The first axis corresponds to iteration, and
remaining axes correspond to dimensions of the input dataset.
``/binbounds_0``
Vector of bin boundaries for the first (index 0) dimension. Additional
datasets similarly named (/binbounds_1, /binbounds_2,...) are created
for additional dimensions.
``/midpoints_0``
Vector of bin midpoints for the first (index 0) dimension. Additional
datasets similarly named are created for additional dimensions.
``n_iter``
Vector of iteration numbers corresponding to the stored histograms (i.e.
the first axis of the ``histograms`` dataset).
-----------------------------------------------------------------------------
Subsequent processing
-----------------------------------------------------------------------------
The output generated by this program (-o/--output, default "pdist.h5") may be
plotted by the ``plothist`` program. See ``plothist --help`` for more
information.
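For instance, a command along the lines of

    plothist evolution pdist.h5

produces a time-evolution plot of the stored histograms (consult
``plothist --help`` for the exact modes and options available).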
-----------------------------------------------------------------------------
Parallelization
-----------------------------------------------------------------------------
This tool supports parallelized binning, including reading of input data.
Parallel processing is the default. For simple cases (reading pre-computed
input data, modest numbers of segments), serial processing (--serial) may be
more efficient.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
# Parallel processing by default (this is not actually necessary, but it is
# informative!)
self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager
# These are used throughout
self.progress = ProgressIndicatorComponent()
self.data_reader = WESTDataReader()
self.input_dssynth = WESTDSSynthesizer(default_dsname='pcoord')
self.input_wdssynth = WESTWDSSynthesizer(default_dsname='seg_index')
self.iter_range = IterRangeSelection(self.data_reader)
self.iter_range.include_args['iter_step'] = False
self.binspec = None
self.output_filename = None
self.output_file = None
self.dsspec = None
self.wt_dsspec = None # dsspec for weights
# These are used during histogram generation only
self.iter_start = None
self.iter_stop = None
self.ndim = None
self.ntimepoints = None
self.dset_dtype = None
self.binbounds = None # bin boundaries for each dimension
self.midpoints = None # bin midpoints for each dimension
self.data_range = None # data range for each dimension, as the pairs (min,max)
self.ignore_out_of_range = False
self.compress_output = False
def add_args(self, parser):
self.data_reader.add_args(parser)
self.iter_range.add_args(parser)
parser.add_argument(
'-b',
'--bins',
dest='bins',
metavar='BINEXPR',
default='100',
help='''Use BINEXPR for bins. This may be an integer, which will be used for each
dimension of the progress coordinate; a list of integers (formatted as [n1,n2,...])
which will use n1 bins for the first dimension, n2 for the second dimension, and so on;
or a list of lists of boundaries (formatted as [[a1, a2,...], [b1, b2,...],... ]), which
will use [a1, a2,...] as bin boundaries for the first dimension, [b1, b2,...] as bin boundaries
for the second dimension, and so on. (Default: 100 bins in each dimension.)''',
)
parser.add_argument(
'-o', '--output', dest='output', default='pdist.h5', help='''Store results in OUTPUT (default: %(default)s).'''
)
parser.add_argument(
'-C',
'--compress',
action='store_true',
help='''Compress histograms. May make storage of higher-dimensional histograms
more tractable, at the (possible extreme) expense of increased analysis time.
(Default: no compression.)''',
)
parser.add_argument(
'--loose',
dest='ignore_out_of_range',
action='store_true',
help='''Ignore values that do not fall within bins. (Risky, as this can make buggy bin
boundaries appear as reasonable data. Only use if you are
sure of your bin boundary specification.)''',
)
igroup = parser.add_argument_group('input dataset options').add_mutually_exclusive_group(required=False)
igroup.add_argument(
'--construct-dataset',
help='''Use the given function (as in module.function) to extract source data.
This function will be called once per iteration as function(n_iter, iter_group)
to construct data for one iteration. Data returned must be indexable as
[seg_id][timepoint][dimension]''',
)
igroup.add_argument(
'--dsspecs', nargs='+', metavar='DSSPEC', help='''Construct probability distribution from one or more DSSPECs.'''
)
wgroup = parser.add_argument_group('input weight dataset options').add_mutually_exclusive_group(required=False)
wgroup.add_argument(
'--construct-wdataset',
help='''Use the given function (as in module.function) to extract weight data.
This function will be called once per iteration as function(n_iter, iter_group)
to construct data for one iteration. Data returned must be indexable as
[seg_id]''',
)
self.progress.add_args(parser)
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
self.input_dssynth.h5filename = self.data_reader.we_h5filename
self.input_dssynth.process_args(args)
self.dsspec = self.input_dssynth.dsspec
# Carrying an open HDF5 file across a fork() seems to corrupt the entire HDF5 library
# Open the WEST HDF5 file just long enough to process our iteration range, then close
# and reopen in go() [which executes after the fork]
with self.data_reader:
self.iter_range.process_args(args)
# Reading potential custom weights
self.input_wdssynth.h5filename = self.data_reader.we_h5filename
self.input_wdssynth.process_args(args)
self.wt_dsspec = self.input_wdssynth.dsspec
self.binspec = args.bins
self.output_filename = args.output
self.ignore_out_of_range = bool(args.ignore_out_of_range)
self.compress_output = args.compress or False
def go(self):
self.data_reader.open('r')
pi = self.progress.indicator
pi.operation = 'Initializing'
with pi:
self.output_file = h5py.File(self.output_filename, 'w')
h5io.stamp_creator_data(self.output_file)
self.iter_start = self.iter_range.iter_start
self.iter_stop = self.iter_range.iter_stop
# Construct bin boundaries
self.construct_bins(self.parse_binspec(self.binspec))
for idim, (binbounds, midpoints) in enumerate(zip(self.binbounds, self.midpoints)):
self.output_file['binbounds_{}'.format(idim)] = binbounds
self.output_file['midpoints_{}'.format(idim)] = midpoints
# construct histogram
self.construct_histogram()
# Record iteration range
iter_range = self.iter_range.iter_range()
self.output_file['n_iter'] = iter_range
self.iter_range.record_data_iter_range(self.output_file['histograms'])
self.output_file.close()
@staticmethod
def parse_binspec(binspec):
namespace = {'numpy': np, 'np': np, 'inf': float('inf')}
try:
binspec_compiled = eval(binspec, namespace)
except Exception as e:
raise ValueError('invalid bin specification: {!r}'.format(e))
else:
if log.isEnabledFor(logging.DEBUG):
log.debug('bin specs: {!r}'.format(binspec_compiled))
return binspec_compiled
def construct_bins(self, bins):
'''
Construct bins according to ``bins``, which may be:
1) A scalar integer (for that number of bins in each dimension)
2) A sequence of integers (specifying number of bins for each dimension)
3) A sequence of sequences of bin boundaries (specifying boundaries for each dimension)
Sets ``self.binbounds`` to a list of arrays of bin boundaries appropriate for passing to
fasthist.histnd, along with ``self.midpoints`` to the midpoints of the bins.
'''
if not isiterable(bins):
self._construct_bins_from_scalar(bins)
elif not isiterable(bins[0]):
self._construct_bins_from_int_seq(bins)
else:
self._construct_bins_from_bound_seqs(bins)
if log.isEnabledFor(logging.DEBUG):
log.debug('binbounds: {!r}'.format(self.binbounds))
def scan_data_shape(self):
if self.ndim is None:
dset = self.dsspec.get_iter_data(self.iter_start)
self.ntimepoints = dset.shape[1]
self.ndim = dset.shape[2]
self.dset_dtype = dset.dtype
def scan_data_range(self):
'''Scan input data for range in each dimension. The number of dimensions is determined
from the shape of the progress coordinate as of self.iter_start.'''
self.progress.indicator.new_operation('Scanning for data range', self.iter_stop - self.iter_start)
self.scan_data_shape()
dset_dtype = self.dset_dtype
ndim = self.ndim
dsspec = self.dsspec
try:
minval = np.finfo(dset_dtype).min
maxval = np.finfo(dset_dtype).max
except ValueError:
minval = np.iinfo(dset_dtype).min
maxval = np.iinfo(dset_dtype).max
data_range = self.data_range = [(maxval, minval) for _i in range(self.ndim)]
# futures = []
# for n_iter in xrange(self.iter_start, self.iter_stop):
# _remote_min_max(ndim, dset_dtype, n_iter, dsspec)
# futures.append(self.work_manager.submit(_remote_min_max, args=(ndim, dset_dtype, n_iter, dsspec)))
# for future in self.work_manager.as_completed(futures):
for future in self.work_manager.submit_as_completed(
((_remote_min_max, (ndim, dset_dtype, n_iter, dsspec), {}) for n_iter in range(self.iter_start, self.iter_stop)),
self.max_queue_len,
):
bounds = future.get_result(discard=True)
for idim in range(ndim):
current_min, current_max = data_range[idim]
current_min = min(current_min, bounds[idim][0])
current_max = max(current_max, bounds[idim][1])
data_range[idim] = (current_min, current_max)
self.progress.indicator.progress += 1
def _construct_bins_from_scalar(self, bins):
if self.data_range is None:
self.scan_data_range()
self.binbounds = []
self.midpoints = []
for idim in range(self.ndim):
lb, ub = self.data_range[idim]
# Advance just beyond the upper bound of the range, so that we catch
# the maximum in the histogram
ub *= 1.01
boundset = np.linspace(lb, ub, bins + 1)
midpoints = (boundset[:-1] + boundset[1:]) / 2.0
self.binbounds.append(boundset)
self.midpoints.append(midpoints)
def _construct_bins_from_int_seq(self, bins):
if self.data_range is None:
self.scan_data_range()
self.binbounds = []
self.midpoints = []
for idim in range(self.ndim):
lb, ub = self.data_range[idim]
# Advance just beyond the upper bound of the range, so that we catch
# the maximum in the histogram
ub *= 1.01
boundset = np.linspace(lb, ub, bins[idim] + 1)
midpoints = (boundset[:-1] + boundset[1:]) / 2.0
self.binbounds.append(boundset)
self.midpoints.append(midpoints)
def _construct_bins_from_bound_seqs(self, bins):
self.binbounds = []
self.midpoints = []
for boundset in bins:
boundset = np.asarray(boundset)
if (np.diff(boundset) <= 0).any():
raise ValueError('boundary set {!r} is not strictly monotonically increasing'.format(boundset))
self.binbounds.append(boundset)
self.midpoints.append((boundset[:-1] + boundset[1:]) / 2.0)
def construct_histogram(self):
'''Construct a histogram using bins previously constructed with ``construct_bins()``.
The time series of histogram values is stored in ``histograms``.
Each histogram in the time series is normalized.'''
self.scan_data_shape()
iter_count = self.iter_stop - self.iter_start
histograms_ds = self.output_file.create_dataset(
'histograms',
dtype=np.float64,
shape=((iter_count,) + tuple(len(bounds) - 1 for bounds in self.binbounds)),
compression=9 if self.compress_output else None,
)
binbounds = [np.require(boundset, self.dset_dtype, 'C') for boundset in self.binbounds]
self.progress.indicator.new_operation('Constructing histograms', self.iter_stop - self.iter_start)
task_gen = (
(
_remote_bin_iter,
(iiter, n_iter, self.dsspec, self.wt_dsspec, 1 if iiter > 0 else 0, binbounds, self.ignore_out_of_range),
{},
)
for (iiter, n_iter) in enumerate(range(self.iter_start, self.iter_stop))
)
# futures = set()
# for iiter, n_iter in enumerate(xrange(self.iter_start, self.iter_stop)):
# initpoint = 1 if iiter > 0 else 0
# futures.add(self.work_manager.submit(_remote_bin_iter,
# args=(iiter, n_iter, self.dsspec, self.wt_dsspec, initpoint, binbounds)))
# for future in self.work_manager.as_completed(futures):
# future = self.work_manager.wait_any(futures)
# for future in self.work_manager.submit_as_completed(task_gen, self.queue_size):
log.debug('max queue length: {!r}'.format(self.max_queue_len))
for future in self.work_manager.submit_as_completed(task_gen, self.max_queue_len):
iiter, n_iter, iter_hist = future.get_result(discard=True)
self.progress.indicator.progress += 1
# store histogram
histograms_ds[iiter] = iter_hist
del iter_hist, future
def entry_point():
WPDist().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_red.rst | Manual | w_red command | MIT License | westpa__westpa/doc/documentation/cli/w_red.rst | [
"westpa__westpa/src/westpa/cli/tools/w_red.py"
] | w_red
usage:
w_red [-h] [-r RCFILE] [--quiet] [--verbose] [--version] [--max-queue-length MAX_QUEUE_LENGTH]
[--debug] [--terminal]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
optional arguments:
-h, --help show this help message and exit
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default:
west.cfg)
--quiet              emit only essential information
--verbose            emit extra information
--version            show program's version number and exit
parallelization options:
--max-queue-length MAX_QUEUE_LENGTH
Maximum number of tasks that can be queued. Useful to limit RAM use for tasks that
have very large requests/responses. Default: no limit.
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
| from h5py import File as H5File
import numpy as np
from westpa import rc
from westpa.tools import WESTParallelTool
class DurationCorrector(object):
@staticmethod
def from_kinetics_file(directh5, istate, fstate, dtau, n_iters=None):
iter_slice = slice(n_iters)
if isinstance(directh5, H5File):
dataset = directh5['durations'][iter_slice]
else:
with H5File(directh5, 'r') as directh5:
dataset = directh5['durations'][iter_slice]
torf = np.logical_and(dataset['istate'] == istate, dataset['fstate'] == fstate)
torf = np.logical_and(torf, dataset['weight'] > 0)
durations = dataset['duration']
weights = dataset['weight']
weights[~torf] = 0.0 # mask off irrelevant flux
return DurationCorrector(durations, weights, dtau)
def __init__(self, durations, weights, dtau, maxduration=None):
self.weights = np.array(weights)
self.durations = np.array(durations)
self.dtau = dtau
self._f_tilde = None
self._f_int1 = None
if maxduration is None:
self.maxduration = self.durations.shape[0]
else:
self.maxduration = maxduration
if dtau is None:
all_durations = []
all_durations.extend(durations)
all_durations.extend(np.arange(maxduration))
uniq_durations = np.unique(all_durations) # unique sorts automatically
self.dtau = np.min(np.diff(uniq_durations))
self._build_map()
@property
def event_duration_histogram(self):
return self._f_tilde
@property
def cumulative_event_duration_histogram(self):
return self._f_int1
def _build_map(self):
weights = self.weights
durations = self.durations
maxduration = self.maxduration
dtau = self.dtau
taugrid = np.arange(0, maxduration, dtau, dtype=float)
f_map = np.zeros(weights.shape, dtype=int) - 1
for i, tau in enumerate(taugrid):
matches = np.logical_and(durations >= tau, durations < tau + dtau)
f_map[matches] = i
self.taugrid = taugrid
self.f_map = f_map
def correction(self, iters, freqs=None):
r"""
Return the correction factor

    maxduration / ( integral_{t=0}^{theta} [ integral_{tau=0}^{t} f~(tau) dtau ] dt )

where f~(tau) is proportional to f^(tau) / (theta - tau) and is normalized to
integrate to 1, and f^(tau) is the sum of the weights of walkers with
duration time tau. Here theta (= maxduration) is the maximum duration time
that could have been observed, taken as max(iters) + 1, in units of tau.
---------
Arguments
---------
iters: indices of the iterations to include in the correction, in units of tau.
freqs: optional per-iteration weighting factors (defaults to 1 for every
iteration in ``iters``).
"""
if iters is None:
iters = np.arange(len(self.weights))
if freqs is None:
freqs = np.ones(len(iters), dtype=float)
maxduration = np.max(iters) + 1
f_map = self.f_map[iters]
weights = self.weights[iters]
taugrid = self.taugrid # [self.taugrid < maxduration]
weights *= freqs[:, None]
dtau = self.dtau
f_tilde = np.zeros(len(taugrid), dtype=float)
for i, tau in enumerate(taugrid):
if tau < maxduration:
f_tilde[i] = weights[f_map == i].sum() / (maxduration - tau + 1)
if f_tilde.sum() != 0:
f_tilde /= f_tilde.sum() * dtau
self._f_tilde = f_tilde
# now integrate f_tilde twice
# integral1[t/dtau] gives the integral of f_tilde(tau) dtau from 0 to t
self._f_int1 = integral1 = np.zeros(f_tilde.shape)
for i, tau in enumerate(taugrid):
if i > 0 and tau < maxduration:
integral1[i] = np.trapz(f_tilde[: i + 1], taugrid[: i + 1])
integral2 = np.trapz(integral1, taugrid)
if integral2 == 0:
return 0.0
return maxduration / integral2
def get_raw_rates(directh5, istate, fstate, n_iters=None):
rate_evol = directh5['rate_evolution'][slice(n_iters), istate, fstate]
avg = rate_evol['expected']
ci_lb = rate_evol['ci_lbound']
ci_ub = rate_evol['ci_ubound']
return avg, ci_lb, ci_ub
def calc_avg_rate(directh5_path, istate, fstate, **kwargs):
"""
Return the raw or RED-corrected rate constant for the final stored iteration.
---------
Arguments
---------
n_iters: number of iterations to include (default: all available)
n_steps_iter: duration of each iteration (number of steps)
report_interval: report interval (number of steps)
red: if True, apply the RED event-duration correction
callback: optional callable invoked as callback(correction=<DurationCorrector>)
"""
n_iters = kwargs.pop("n_iters", None)
ntpr = kwargs.pop("report_interval", 20)
nstiter = kwargs.pop("n_steps_iter", 1000)
callback = kwargs.pop("callback", None)
red = kwargs.pop("red", False)
if len(kwargs) > 0:
raise ValueError("unparsed kwargs")
dtau = float(ntpr) / nstiter
dc = None
with H5File(directh5_path, 'r') as directh5:
if n_iters is None:
n_iters = directh5['rate_evolution'].shape[0]
rate_evol = directh5['rate_evolution'][n_iters - 1, istate, fstate]
rate = rate_evol['expected']
if red:
dc = DurationCorrector.from_kinetics_file(directh5, istate, fstate, dtau, n_iters)
if callback is not None:
kw = {"correction": dc}
callback(**kw)
iters = np.arange(n_iters)
correction = dc.correction(iters) if dc else 1.0
rate *= correction
return rate
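# Example usage (a sketch, not part of the original module; the file path, state
# indices, and step counts below are hypothetical and must match your own
# direct.h5 and west.cfg settings):
#
#     rate = calc_avg_rate('ANALYSIS/DEFAULT/direct.h5', 0, 1,
#                          red=True, n_steps_iter=1000, report_interval=20)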
def calc_rates(directh5_path, istate, fstate, **kwargs):
"""
Return the rate constant vs. iteration (RED-corrected if ``red=True``).
This is faster than calling calc_avg_rate() iteratively.
---------
Arguments
---------
n_iters: number of iterations to include (default: all available)
n_steps_iter: duration of each iteration (number of steps)
report_interval: report interval (number of steps)
red: if True, apply the RED event-duration correction
callback: optional callable invoked as callback(correction=<DurationCorrector>)
"""
n_iters = kwargs.pop("n_iters", None)
ntpr = kwargs.pop("report_interval", 20)
nstiter = kwargs.pop("n_steps_iter", 1000)
callback = kwargs.pop("callback", None)
red = kwargs.pop("red", False)
if len(kwargs) > 0:
raise ValueError("unparsed kwargs")
dtau = float(ntpr) / nstiter
dc = None
with H5File(directh5_path, 'r') as directh5:
rate_evol, cilb, ciub = get_raw_rates(directh5, istate, fstate, n_iters)
if n_iters is None:
n_iters = len(rate_evol)
if red:
dc = DurationCorrector.from_kinetics_file(directh5, istate, fstate, dtau, n_iters)
if callback is not None:
kw = {"correction": dc}
callback(**kw)
raw_rates = np.zeros(n_iters)
rates = np.zeros(n_iters)
for i in range(n_iters):
i_iter = i + 1
print("\riter %d/%d (%3.0f%%)" % (i_iter, n_iters, i_iter * 100.0 / n_iters), end="")
r = rate_evol[i]
iters = np.arange(i_iter)
correction = dc.correction(iters) if dc else 1.0
raw_rates[i] = r
rates[i] = raw_rates[i] * correction
print("\n")
return rates
class RateCalculator:
def __init__(self, directh5, istate, fstate, assignh5=None, **kwargs):
n_iters = kwargs.pop("n_iters", None)
ntpr = kwargs.pop("report_interval", 20)
nstiter = kwargs.pop("n_steps_iter", 1000)
if len(kwargs) > 0:
for k in kwargs:
print(k)
raise ValueError("unparsed kwargs")
dtau = float(ntpr) / nstiter
with H5File(directh5, 'r') as f:
state_labels = {}
for i, raw_label in enumerate(f['state_labels']):
label = raw_label.decode() if isinstance(raw_label, bytes) else raw_label
state_labels[label] = i
if istate not in state_labels:
raise ValueError(f"istate not found: {istate}, available options are {list(state_labels.keys())}")
if fstate not in state_labels:
raise ValueError(f"istate not found: {fstate}, available options are {list(state_labels.keys())}")
istate = state_labels[istate]
fstate = state_labels[fstate]
cond_fluxes = f['conditional_fluxes'][slice(n_iters), istate, fstate]
if assignh5 is not None:
with H5File(assignh5, 'r') as f:
pops = f['labeled_populations'][slice(n_iters)]
pops = pops.sum(axis=2)
else:
pops = None
self._dc = None
self._pops = pops
self._cond_fluxes = cond_fluxes
self._dtau = dtau
self._directh5 = directh5
self._assignh5 = assignh5
self._istate = istate
self._fstate = fstate
@property
def conditional_fluxes(self):
return self._cond_fluxes
@property
def populations(self):
return self._pops
@property
def tau(self):
return self._tau
@property
def dtau(self):
return self._dtau
@property
def istate(self):
return self._istate
@property
def fstate(self):
return self._fstate
@property
def n_iters(self):
return len(self.conditional_fluxes)
def _get_corrector(self):
if self._dc is None:
with H5File(self._directh5, 'r') as f:
self._dc = DurationCorrector.from_kinetics_file(f, self.istate, self.fstate, self.dtau, self.n_iters)
return self._dc
def calc_rate(self, i_iter=None, red=False, **kwargs):
if i_iter is None:
i_iter = self.n_iters
dc = self._get_corrector() if red else None
found = False
with H5File(self._directh5, 'r') as f:
for i in range(f['rate_evolution'].shape[0]):
rate_evol = f['rate_evolution'][i, self.istate, self.fstate]
start = rate_evol['iter_start']
stop = rate_evol['iter_stop']
if i_iter >= start and i_iter < stop:
rate = rate_evol['expected']
found = True
break
if not found:
raise ValueError("Can't find rate evolution data for iteration %d!" % i_iter)
if dc:
iters = np.arange(i_iter)
correction = dc.correction(iters)
rate *= correction
return rate
def calc_rates(self, n_iters=None, **kwargs):
if n_iters is None:
n_iters = self.n_iters
rates = np.zeros(n_iters)
for i in range(n_iters):
i_iter = i + 1
print("\riter %d/%d (%3.0f%%)" % (i_iter, n_iters, i_iter * 100.0 / n_iters), end="")
r = self.calc_rate(i_iter, **kwargs)
rates[i] = r
print("\n")
return rates
class WRed(WESTParallelTool):
prog = 'w_red'
description = '''\
Apply the RED scheme to estimate steady-state WE fluxes from
shorter trajectories.
-----------------------------------------------------------------------------
Source data
-----------------------------------------------------------------------------
Source data is provided as a w_ipa "scheme" which is typically defined
in the west.cfg file. For instance, if a user wishes to estimate RED
fluxes for a scheme named "DEFAULT" that argument would be provided
to w_red and WRed would estimate RED fluxes based off of the data
contained in the assign.h5 and direct.h5 files in ANALYSIS/DEFAULT.
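A minimal ``analysis`` block of west.cfg for this tool might look like the
following sketch (all values are hypothetical; the keys shown are simply the
ones this tool reads):

    west:
      analysis:
        directory: ANALYSIS
        kinetics:
          evolution: cumulative
        red:
          scheme: DEFAULT
          istate_label: unbound
          fstate_label: bound
          nstiter: 1000
          nstrep: 20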
'''
def __init__(self):
super().__init__()
def go(self):
try:
rc.config['west']['analysis']['red']
except Exception:
raise ValueError('No RED parameters are specified in west.cfg.')
try:
rc.config['west']['analysis']['red']['scheme']
except Exception:
raise ValueError('No scheme specified for RED calculation in west.cfg.')
try:
rc.config['west']['analysis']['red']['istate_label']
except Exception:
raise ValueError('No initial state label specified for RED calculation in west.cfg.')
try:
rc.config['west']['analysis']['red']['fstate_label']
except Exception:
raise ValueError('No final state label specified for RED calculation in west.cfg.')
try:
rc.config['west']['analysis']['red']['nstiter']
except Exception:
raise ValueError('Number of steps per iteration (nstiter) not specified in west.cfg.')
try:
rc.config['west']['analysis']['red']['nstrep']
except Exception:
raise ValueError('Reporting interval (nstrep) not specified in west.cfg.')
if rc.config['west']['analysis']['kinetics']['evolution'] == "cumulative":
pass
else:
print("Only RED estimates with cumulative averaging are supported at this time.")
exit()
config = rc.config
adir = config.get(['west', 'analysis', 'directory'])
name = config.get(['west', 'analysis', 'red', 'scheme'])
istate = config.get(['west', 'analysis', 'red', 'istate_label'])
fstate = config.get(['west', 'analysis', 'red', 'fstate_label'])
n_steps_per_iter = config.get(['west', 'analysis', 'red', 'nstiter'])
n_steps_per_report = config.get(['west', 'analysis', 'red', 'nstrep'])
directh5path = '%s/%s/direct.h5' % (adir, name)
assignh5path = '%s/%s/assign.h5' % (adir, name)
print('\nConfig successfully read from west.cfg:')
print('--------------------------------------')
print('scheme: %s' % name)
print('states: %s -> %s' % (istate, fstate))
print('nstiter: %s' % n_steps_per_iter)
print('nstrep: %s' % n_steps_per_report)
print('--------------------------------------')
print('\nEstimating RED fluxes...')
rater = RateCalculator(
directh5path,
istate,
fstate,
n_steps_iter=n_steps_per_iter,
report_interval=n_steps_per_report,
assignh5=assignh5path,
)
rates = rater.calc_rates(red=True, callback=None)
with H5File(directh5path, "r+") as dest_file:
try:
dest_file.create_dataset('red_flux_evolution', data=rates)
print('saved RED fluxes to red_flux_evolution in ANALYSIS/%s/direct.h5' % name)
except Exception:
warning = input('Dataset already exists! Overwrite? (y/n)')
if warning == "y":
dest_file['red_flux_evolution'][...] = rates
print('saved RED fluxes to red_flux_evolution in ANALYSIS/%s/direct.h5' % name)
elif warning == "n":
np.save('ANALYSIS/%s/red.npy' % name, rates)
print('saved RED fluxes to red.npy in ANALYSIS/%s' % name)
else:
print('red rates not saved. Exiting...')
exit()
def entry_point():
WRed().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_run.rst | Manual | w_run command | MIT License | westpa__westpa/doc/documentation/cli/w_run.rst | [
"westpa__westpa/src/westpa/cli/core/w_run.py"
] | w_run
w_run starts or continues a weighted ensemble simulation.
Overview
Usage:
w_run [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--oneseg ] [--wm-work-manager WORK_MANAGER]
[--wm-n-workers N_WORKERS] [--wm-zmq-mode MODE]
[--wm-zmq-info INFO_FILE] [--wm-zmq-task-endpoint TASK_ENDPOINT]
[--wm-zmq-result-endpoint RESULT_ENDPOINT]
[--wm-zmq-announce-endpoint ANNOUNCE_ENDPOINT]
[--wm-zmq-heartbeat-interval INTERVAL]
[--wm-zmq-task-timeout TIMEOUT] [--wm-zmq-client-comm-mode MODE]
Command-Line Options
See the command-line tool index <command_line_tool_index> for more
information on the general options.
Segment Options
--oneseg
Only propagate one segment (useful for debugging propagators)
Example
A simple example for using w_run (mostly taken from odld example that is
available in the main WESTPA distribution):
w_run &> west.log
This command starts up a serial weighted ensemble run and pipes the
results into the west.log file. As a side note, the --debug option is very
useful for debugging the code if something goes wrong. | import argparse
import logging
import traceback
import westpa
import westpa.work_managers as work_managers
from westpa.work_managers import make_work_manager
log = logging.getLogger('w_run')
def entry_point():
parser = argparse.ArgumentParser('w_run', 'start/continue a WEST simulation')
westpa.rc.add_args(parser)
parser.add_argument(
'--oneseg',
dest='only_one_segment',
action='store_true',
help='only propagate one segment (useful for debugging propagators)',
)
work_managers.environment.add_wm_args(parser)
args = parser.parse_args()
westpa.rc.process_args(args)
work_managers.environment.process_wm_args(args)
run_simulation()
def run_simulation():
work_manager = westpa.rc.work_manager = make_work_manager()
# Load the sim manager and other drivers
sim_manager = westpa.rc.get_sim_manager()
system = westpa.rc.get_system_driver()
data_manager = westpa.rc.get_data_manager()
we_driver = westpa.rc.get_we_driver()
propagator = westpa.rc.get_propagator()
propagator.system = system
data_manager.system = system
we_driver.system = system
sim_manager.data_manager = data_manager
sim_manager.system = system
sim_manager.propagator = propagator
sim_manager.we_driver = we_driver
with work_manager:
if work_manager.is_master:
work_manager.install_sigint_handler()
sim_manager.load_plugins()
log.debug('preparing run')
sim_manager.prepare_run()
try:
log.debug('beginning run')
sim_manager.run()
log.debug('finalizing run')
sim_manager.finalize_run()
except KeyboardInterrupt:
westpa.rc.pstatus('interrupted; shutting down')
except Exception as e:
westpa.rc.pstatus('exception caught; shutting down')
if str(e) != '':
log.error(f'error message: {e}')
log.error(traceback.format_exc())
else:
work_manager.run()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_select.rst | Manual | w_select command | MIT License | westpa__westpa/doc/documentation/cli/w_select.rst | [
"westpa__westpa/src/westpa/cli/tools/w_select.py"
] | w_select
usage:
w_select [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--max-queue-length MAX_QUEUE_LENGTH] [-W WEST_H5FILE] [--first-iter N_ITER]
[--last-iter N_ITER] [-p MODULE.FUNCTION] [-v] [-a] [-o OUTPUT]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
Select dynamics segments matching various criteria. This requires a
user-provided predicate function. By default, only matching segments are
stored. If the -a/--include-ancestors option is given, then matching
segments and their ancestors will be stored.
Predicate function
Segments are selected based on a predicate function, which must be
callable as predicate(n_iter, iter_group) and return a collection of
segment IDs matching the predicate in that iteration.
The predicate may be inverted by specifying the -v/--invert command-line
argument.
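A minimal predicate module might look like the following sketch (the module
name, the threshold, and the use of the pcoord dataset are illustrative
assumptions; it would be passed as -p my_preds.past_threshold):

    # my_preds.py (hypothetical module)
    def past_threshold(n_iter, iter_group):
        # select segments whose final progress-coordinate value exceeds 5.0
        pcoord = iter_group['pcoord'][...]
        return [seg_id for seg_id in range(pcoord.shape[0])
                if pcoord[seg_id, -1, 0] > 5.0]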
Output format
The output file (-o/--output, by default "select.h5") contains the
following datasets:
``/n_iter`` [iteration]
*(Integer)* Iteration numbers for each entry in other datasets.
``/n_segs`` [iteration]
*(Integer)* Number of segment IDs matching the predicate (or inverted
predicate, if -v/--invert is specified) in the given iteration.
``/seg_ids`` [iteration][segment]
*(Integer)* Matching segments in each iteration. For an iteration
``n_iter``, only the first ``n_segs`` entries for that iteration are valid. For example,
the full list of matching seg_ids in the first stored iteration is
``seg_ids[0][:n_segs[0]]``.
``/weights`` [iteration][segment]
*(Floating-point)* Weights for each matching segment in ``/seg_ids``.
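As a sketch of how these datasets can be read back (assuming h5py and the
default output name):

    import h5py

    with h5py.File('select.h5', 'r') as f:
        n = f['n_segs'][0]              # number of matches in the first stored iteration
        seg_ids = f['seg_ids'][0, :n]
        weights = f['weights'][0, :n]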
Command-line arguments
optional arguments:
-h, --help show this help message and exit
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
parallelization options:
--max-queue-length MAX_QUEUE_LENGTH
Maximum number of tasks that can be queued. Useful to limit RAM use for tasks that
have very large requests/responses. Default: no limit.
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
iteration range:
--first-iter N_ITER Begin analysis at iteration N_ITER (default: 1).
--last-iter N_ITER Conclude analysis with N_ITER, inclusive (default: last completed iteration).
selection options:
-p MODULE.FUNCTION, --predicate-function MODULE.FUNCTION
Use the given predicate function to match segments. This function should take an
iteration number and the HDF5 group corresponding to that iteration and return a
sequence of seg_ids matching the predicate, as in ``match_predicate(n_iter,
iter_group)``.
-v, --invert Invert the match predicate.
-a, --include-ancestors
Include ancestors of matched segments in output.
output options:
-o OUTPUT, --output OUTPUT
Write output to OUTPUT (default: select.h5).
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
use the given work manager for parallel task distribution. Available work managers
are ('serial', 'threads', 'processes', 'zmq'); default is 'serial'
--n-workers N_WORKERS
Use up to N_WORKERS on this host, for work managers which support this option. Use
0 for a dedicated server. (Ignored by work managers which do not support this
option.)
options for ZeroMQ ("zmq") work manager (master or node):
--zmq-mode MODE Operate as a master (server) or a node (workers/client). "server" is a deprecated
synonym for "master" and "client" is a deprecated synonym for "node".
--zmq-comm-mode COMM_MODE
Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets for
communication within a node. IPC (the default) may be more efficient but is not
available on (exceptionally rare) systems without node-local storage (e.g. /tmp);
on such systems, TCP may be used instead.
--zmq-write-host-info INFO_FILE
Store hostname and port information needed to connect to this instance in
INFO_FILE. This allows the master and nodes assisting in coordinating the
communication of other nodes to choose ports randomly. Downstream nodes read this
file with --zmq-read-host-info and know where and how to connect.
--zmq-read-host-info INFO_FILE
Read hostname and port information needed to connect to the master (or other
coordinating node) from INFO_FILE. This allows the master and nodes assisting in
coordinating the communication of other nodes to choose ports randomly, writing
that information with --zmq-write-host-info for this instance to read.
--zmq-upstream-rr-endpoint ENDPOINT
ZeroMQ endpoint to which to send request/response (task and result) traffic toward
the master.
--zmq-upstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to receive announcement (heartbeat and shutdown
notification) traffic from the master.
--zmq-downstream-rr-endpoint ENDPOINT
ZeroMQ endpoint on which to listen for request/response (task and result) traffic
from subsidiary workers.
--zmq-downstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to send announcement (heartbeat and shutdown
notification) traffic toward workers.
--zmq-master-heartbeat MASTER_HEARTBEAT
Every MASTER_HEARTBEAT seconds, the master announces its presence to workers.
--zmq-worker-heartbeat WORKER_HEARTBEAT
Every WORKER_HEARTBEAT seconds, workers announce their presence to the master.
--zmq-timeout-factor FACTOR
Scaling factor for heartbeat timeouts. If the master doesn't hear from a worker in
WORKER_HEARTBEAT*FACTOR, the worker is assumed to have crashed. If a worker
doesn't hear from the master in MASTER_HEARTBEAT*FACTOR seconds, the master is
assumed to have crashed. Both cases result in shutdown.
--zmq-startup-timeout STARTUP_TIMEOUT
Amount of time (in seconds) to wait for communication between the master and at
least one worker. This may need to be changed on very large, heavily-loaded
computer systems that start all processes simultaneously.
--zmq-shutdown-timeout SHUTDOWN_TIMEOUT
Amount of time (in seconds) to wait for workers to shut down
| from westpa.tools import WESTParallelTool, WESTDataReader, IterRangeSelection, ProgressIndicatorComponent
import numpy as np
from westpa.core import h5io
from westpa.core.data_manager import seg_id_dtype, n_iter_dtype, weight_dtype
from westpa.core.extloader import get_object
def _find_matching_segments(west_datafile_name, n_iter, predicate, invert=False):
'''Find all segments in iteration ``n_iter`` that match (or do not match, if
``invert`` is true) the given ``predicate``. Returns a sequence of matching
seg_ids.'''
with h5io.WESTPAH5File(west_datafile_name, 'r') as west_datafile:
iter_group = west_datafile.get_iter_group(n_iter)
nsegs = iter_group['seg_index'].shape[0]
matching_ids = set(map(int, predicate(n_iter, iter_group)))
if invert:
matching_ids = set(range(nsegs)) - matching_ids
matchvec = sorted(np.fromiter(matching_ids, dtype=seg_id_dtype, count=len(matching_ids)))
return n_iter, matchvec
class WSelectTool(WESTParallelTool):
prog = 'w_select'
description = '''\
Select dynamics segments matching various criteria. This requires a
user-provided predicate function. By default, only matching segments are
stored. If the -a/--include-ancestors option is given, then matching segments
and their ancestors will be stored.
-----------------------------------------------------------------------------
Predicate function
-----------------------------------------------------------------------------
Segments are selected based on a predicate function, which must be callable
as ``predicate(n_iter, iter_group)`` and return a collection of segment IDs
matching the predicate in that iteration.
The predicate may be inverted by specifying the -v/--invert command-line
argument.
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, by default "select.h5") contains the following
datasets:
``/n_iter`` [iteration]
*(Integer)* Iteration numbers for each entry in other datasets.
``/n_segs`` [iteration]
*(Integer)* Number of segment IDs matching the predicate (or inverted
predicate, if -v/--invert is specified) in the given iteration.
``/seg_ids`` [iteration][segment]
*(Integer)* Matching segments in each iteration. For an iteration
``n_iter``, only the first ``n_segs`` entries for that iteration are valid. For example,
the full list of matching seg_ids in the first stored iteration is
``seg_ids[0][:n_segs[0]]``.
``/weights`` [iteration][segment]
*(Floating-point)* Weights for each matching segment in ``/seg_ids``.
-----------------------------------------------------------------------------
Command-line arguments
-----------------------------------------------------------------------------
'''
def __init__(self):
super().__init__()
self.data_reader = WESTDataReader()
self.iter_range = IterRangeSelection()
self.progress = ProgressIndicatorComponent()
self.output_file = None
self.output_filename = None
self.predicate = None
self.invert = False
self.include_ancestors = False
def add_args(self, parser):
self.data_reader.add_args(parser)
self.iter_range.add_args(parser)
sgroup = parser.add_argument_group('selection options')
sgroup.add_argument(
'-p',
'--predicate-function',
metavar='MODULE.FUNCTION',
help='''Use the given predicate function to match segments. This function
should take an iteration number and the HDF5 group corresponding to that
iteration and return a sequence of seg_ids matching the predicate, as in
``match_predicate(n_iter, iter_group)``.''',
)
sgroup.add_argument('-v', '--invert', dest='invert', action='store_true', help='''Invert the match predicate.''')
sgroup.add_argument(
'-a', '--include-ancestors', action='store_true', help='''Include ancestors of matched segments in output.'''
)
ogroup = parser.add_argument_group('output options')
ogroup.add_argument('-o', '--output', default='select.h5', help='''Write output to OUTPUT (default: %(default)s).''')
self.progress.add_args(parser)
def process_args(self, args):
self.progress.process_args(args)
self.data_reader.process_args(args)
with self.data_reader:
self.iter_range.process_args(args)
predicate = get_object(args.predicate_function, path=['.'])
if not callable(predicate):
raise TypeError('predicate object {!r} is not callable'.format(predicate))
self.predicate = predicate
self.invert = bool(args.invert)
self.include_ancestors = bool(args.include_ancestors)
self.output_filename = args.output
def go(self):
self.data_reader.open('r')
output_file = h5io.WESTPAH5File(self.output_filename, mode='w')
pi = self.progress.indicator
iter_start, iter_stop = self.iter_range.iter_start, self.iter_range.iter_stop
iter_count = iter_stop - iter_start
output_file.create_dataset('n_iter', dtype=n_iter_dtype, data=list(range(iter_start, iter_stop)))
current_seg_count = 0
seg_count_ds = output_file.create_dataset('n_segs', dtype=np.uint, shape=(iter_count,))
matching_segs_ds = output_file.create_dataset(
'seg_ids',
shape=(iter_count, 0),
maxshape=(iter_count, None),
dtype=seg_id_dtype,
chunks=h5io.calc_chunksize((iter_count, 1000000), seg_id_dtype),
shuffle=True,
compression=9,
)
weights_ds = output_file.create_dataset(
'weights',
shape=(iter_count, 0),
maxshape=(iter_count, None),
dtype=weight_dtype,
chunks=h5io.calc_chunksize((iter_count, 1000000), weight_dtype),
shuffle=True,
compression=9,
)
with pi:
pi.new_operation('Finding matching segments', extent=iter_count)
# futures = set()
# for n_iter in xrange(iter_start,iter_stop):
# futures.add(self.work_manager.submit(_find_matching_segments,
# args=(self.data_reader.we_h5filename,n_iter,self.predicate,self.invert)))
# for future in self.work_manager.as_completed(futures):
for future in self.work_manager.submit_as_completed(
(
(_find_matching_segments, (self.data_reader.we_h5filename, n_iter, self.predicate, self.invert), {})
for n_iter in range(iter_start, iter_stop)
),
self.max_queue_len,
):
n_iter, matching_ids = future.get_result()
n_matches = len(matching_ids)
if n_matches:
if n_matches > current_seg_count:
matching_segs_ds.resize((iter_count, n_matches))
weights_ds.resize((iter_count, n_matches))
current_seg_count = n_matches
seg_count_ds[n_iter - iter_start] = n_matches
matching_segs_ds[n_iter - iter_start, :n_matches] = matching_ids
weights_ds[n_iter - iter_start, :n_matches] = self.data_reader.get_iter_group(n_iter)['seg_index']['weight'][
sorted(matching_ids)
]
del matching_ids
pi.progress += 1
if self.include_ancestors:
pi.new_operation('Tracing ancestors of matching segments', extent=iter_count)
from_previous = set()
current_seg_count = matching_segs_ds.shape[1]
for n_iter in range(iter_stop - 1, iter_start - 1, -1):
iiter = n_iter - iter_start
n_matches = seg_count_ds[iiter]
matching_ids = set(from_previous)
if n_matches:
matching_ids.update(matching_segs_ds[iiter, : seg_count_ds[iiter]])
from_previous.clear()
n_matches = len(matching_ids)
if n_matches > current_seg_count:
matching_segs_ds.resize((iter_count, n_matches))
weights_ds.resize((iter_count, n_matches))
current_seg_count = n_matches
if n_matches > 0:
seg_count_ds[iiter] = n_matches
matching_ids = sorted(matching_ids)
matching_segs_ds[iiter, :n_matches] = matching_ids
weights_ds[iiter, :n_matches] = self.data_reader.get_iter_group(n_iter)['seg_index']['weight'][
sorted(matching_ids)
]
parent_ids = self.data_reader.get_iter_group(n_iter)['seg_index']['parent_id'][sorted(matching_ids)]
from_previous.update(parent_id for parent_id in parent_ids if parent_id >= 0) # filter initial states
del parent_ids
del matching_ids
pi.progress += 1
def entry_point():
WSelectTool().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_stateprobs.rst | Manual | w_stateprobs command | MIT License | westpa__westpa/doc/documentation/cli/deprecated/w_stateprobs.rst | [
"westpa__westpa/src/westpa/cli/tools/w_stateprobs.py"
] | w_stateprobs
WARNING: w_stateprobs is being deprecated. Please use w_direct instead.
usage:
w_stateprobs trace [-h] [-W WEST_H5FILE] [--first-iter N_ITER] [--last-iter N_ITER]
[--step-iter STEP] [-a ASSIGNMENTS] [-o OUTPUT] [-k KINETICS]
[--disable-bootstrap] [--disable-correl] [--alpha ALPHA]
[--autocorrel-alpha ACALPHA] [--nsets NSETS] [-e {cumulative,blocked,none}]
[--window-frac WINDOW_FRAC] [--disable-averages]
Calculate average populations and associated errors in state populations
from weighted ensemble data. Bin assignments, including macrostate
definitions, are required. (See "w_assign --help" for more information).
Output format
The output file (-o/--output, usually "direct.h5") contains the
following dataset:
/avg_state_probs [state]
(Structured -- see below) Population of each state across entire
range specified.
/avg_color_probs [state]
(Structured -- see below) Population of each ensemble across entire
range specified.
If --evolution-mode is specified, then the following additional datasets
are available:
/state_pop_evolution [window][state]
(Structured -- see below). State populations based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
/color_prob_evolution [window][state]
(Structured -- see below). Ensemble populations based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
The structure of these datasets is as follows:
iter_start
(Integer) Iteration at which the averaging window begins (inclusive).
iter_stop
(Integer) Iteration at which the averaging window ends (exclusive).
expected
(Floating-point) Expected (mean) value of the observable as evaluated within
this window, in units of inverse tau.
ci_lbound
(Floating-point) Lower bound of the confidence interval of the observable
within this window, in units of inverse tau.
ci_ubound
(Floating-point) Upper bound of the confidence interval of the observable
within this window, in units of inverse tau.
stderr
(Floating-point) The standard error of the mean of the observable
within this window, in units of inverse tau.
corr_len
(Integer) Correlation length of the observable within this window, in units
of tau.
Each of these datasets is also stamped with a number of attributes:
mcbs_alpha
(Floating-point) Alpha value of confidence intervals. (For example,
*alpha=0.05* corresponds to a 95% confidence interval.)
mcbs_nsets
(Integer) Number of bootstrap data sets used in generating confidence
intervals.
mcbs_acalpha
(Floating-point) Alpha value for determining correlation lengths.
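As a sketch of how the evolution data and its attributes can be inspected
(assuming h5py, that --evolution-mode was used, and the default output name):

    import h5py

    with h5py.File('stateprobs.h5', 'r') as f:
        ds = f['state_pop_evolution']
        print(ds.attrs['mcbs_alpha'])
        last = ds[-1]                   # last window, all states
        print(last['expected'], last['ci_lbound'], last['ci_ubound'])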
Command-line options
optional arguments:
-h, --help show this help message and exit
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
iteration range:
--first-iter N_ITER Begin analysis at iteration N_ITER (default: 1).
--last-iter N_ITER Conclude analysis with N_ITER, inclusive (default: last completed iteration).
--step-iter STEP Analyze/report in blocks of STEP iterations.
input/output options:
-a ASSIGNMENTS, --assignments ASSIGNMENTS
Bin assignments and macrostate definitions are in ASSIGNMENTS (default:
assign.h5).
-o OUTPUT, --output OUTPUT
Store results in OUTPUT (default: stateprobs.h5).
input/output options:
-k KINETICS, --kinetics KINETICS
Populations and transition rates are stored in KINETICS (default: assign.h5).
confidence interval calculation options:
--disable-bootstrap, -db
Disable the use of Monte Carlo Block Bootstrapping.
--disable-correl, -dc
Disable the correlation analysis.
--alpha ALPHA Calculate a (1-ALPHA) confidence interval' (default: 0.05)
--autocorrel-alpha ACALPHA
Evaluate autocorrelation to (1-ACALPHA) significance. Note that too small an
ACALPHA will result in failure to detect autocorrelation in a noisy flux signal.
(Default: same as ALPHA.)
--nsets NSETS Use NSETS samples for bootstrapping (default: chosen based on ALPHA)
calculation options:
-e {cumulative,blocked,none}, --evolution-mode {cumulative,blocked,none}
How to calculate time evolution of rate estimates. ``cumulative`` evaluates rates
over windows starting with --start-iter and getting progressively wider to --stop-
iter by steps of --step-iter. ``blocked`` evaluates rates over windows of width
--step-iter, the first of which begins at --start-iter. ``none`` (the default)
disables calculation of the time evolution of rate estimates.
--window-frac WINDOW_FRAC
Fraction of iterations to use in each window when running in ``cumulative`` mode.
The (1 - frac) fraction of iterations will be discarded from the start of each
window.
misc options:
--disable-averages, -da
Whether or not the averages should be printed to the console (set to FALSE if flag
is used).
| from westpa.tools import WESTMasterCommand, WESTParallelTool
from warnings import warn
from westpa.cli.tools.w_direct import DStateProbs
# Just a shim to make sure everything works and is backwards compatible.
# We're making sure it has the appropriate functions so that it can be called
# as a regular tool, and not a subcommand.
class WStateProbs(DStateProbs):
subcommand = 'trace'
help_text = 'averages and CIs for path-tracing kinetics analysis'
default_output_file = 'stateprobs.h5'
# This isn't strictly necessary, but for the moment, here it is.
# We really need to modify the underlying class so that we don't pull this sort of stuff if it isn't necessary.
# That'll take some case handling, which is fine.
default_kinetics_file = 'assign.h5'
class WDirect(WESTMasterCommand, WESTParallelTool):
prog = 'w_stateprobs'
subcommands = [WStateProbs]
subparsers_title = 'calculate state-to-state kinetics by tracing trajectories'
description = '''\
Calculate average populations and associated errors in state populations from
weighted ensemble data. Bin assignments, including macrostate definitions,
are required. (See "w_assign --help" for more information).
-----------------------------------------------------------------------------
Output format
-----------------------------------------------------------------------------
The output file (-o/--output, usually "stateprobs.h5") contains the following
dataset:
/avg_state_pops [state]
(Structured -- see below) Population of each state across entire
range specified.
If --evolution-mode is specified, then the following additional dataset is
available:
/state_pop_evolution [window][state]
(Structured -- see below). State populations based on windows of
iterations of varying width. If --evolution-mode=cumulative, then
these windows all begin at the iteration specified with
--start-iter and grow in length by --step-iter for each successive
element. If --evolution-mode=blocked, then these windows are all of
width --step-iter (excluding the last, which may be shorter), the first
of which begins at iteration --start-iter.
The structure of these datasets is as follows:
iter_start
(Integer) Iteration at which the averaging window begins (inclusive).
iter_stop
(Integer) Iteration at which the averaging window ends (exclusive).
expected
(Floating-point) Expected (mean) value of the rate as evaluated within
this window, in units of inverse tau.
ci_lbound
(Floating-point) Lower bound of the confidence interval on the rate
within this window, in units of inverse tau.
ci_ubound
(Floating-point) Upper bound of the confidence interval on the rate
within this window, in units of inverse tau.
corr_len
(Integer) Correlation length of the rate within this window, in units
of tau.
Each of these datasets is also stamped with a number of attributes:
mcbs_alpha
(Floating-point) Alpha value of confidence intervals. (For example,
*alpha=0.05* corresponds to a 95% confidence interval.)
mcbs_nsets
(Integer) Number of bootstrap data sets used in generating confidence
intervals.
mcbs_acalpha
(Floating-point) Alpha value for determining correlation lengths.
-----------------------------------------------------------------------------
Command-line options
-----------------------------------------------------------------------------
'''
def entry_point():
warn('{} is being deprecated. Please use w_direct instead.'.format(WDirect.prog))
# If we're not really supporting subcommands...
import sys
try:
if sys.argv[1] != 'trace':
sys.argv.insert(1, 'trace')
except Exception:
sys.argv.insert(1, 'trace')
WDirect().main()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_states.rst | Manual | w_states command | MIT License | westpa__westpa/doc/documentation/cli/w_states.rst | [
"westpa__westpa/src/westpa/cli/core/w_states.py"
] | w_states
usage:
w_states [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version]
[--show | --append | --replace] [--bstate-file BSTATE_FILE] [--bstate BSTATES]
[--tstate-file TSTATE_FILE] [--tstate TSTATES]
[--serial | --parallel | --work-manager WORK_MANAGER] [--n-workers N_WORKERS]
[--zmq-mode MODE] [--zmq-comm-mode COMM_MODE] [--zmq-write-host-info INFO_FILE]
[--zmq-read-host-info INFO_FILE] [--zmq-upstream-rr-endpoint ENDPOINT]
[--zmq-upstream-ann-endpoint ENDPOINT] [--zmq-downstream-rr-endpoint ENDPOINT]
[--zmq-downstream-ann-endpoint ENDPOINT] [--zmq-master-heartbeat MASTER_HEARTBEAT]
[--zmq-worker-heartbeat WORKER_HEARTBEAT] [--zmq-timeout-factor FACTOR]
[--zmq-startup-timeout STARTUP_TIMEOUT] [--zmq-shutdown-timeout SHUTDOWN_TIMEOUT]
Display or manipulate basis (initial) or target (recycling) states for a
WEST simulation. By default, states are displayed (or dumped to files).
If --replace is specified, all basis/target states are replaced for the
next iteration. If --append is specified, the given target state(s) are
appended to the list for the next iteration. Appending basis states is
not permitted, as this would require renormalizing basis state
probabilities in ways that may be error-prone. Instead, use
w_states --show --bstate-file=bstates.txt and then edit the resulting
bstates.txt file to include the new desired basis states, then use
w_states --replace --bstate-file=bstates.txt to update the WEST HDF5
file appropriately.
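For example, the workflow described above amounts to:

    w_states --show --bstate-file=bstates.txt
    # edit bstates.txt to include the desired basis states
    w_states --replace --bstate-file=bstates.txt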
optional arguments:
-h, --help show this help message and exit
--bstate-file BSTATE_FILE
Read (--append/--replace) or write (--show) basis state names, probabilities, and
data references from/to BSTATE_FILE.
--bstate BSTATES Add the given basis state (specified as a string 'label,probability[,auxref]') to
the list of basis states (after those specified in --bstate-file, if any). This
argument may be specified more than once, in which case the given states are
appended in the order they are given on the command line.
--tstate-file TSTATE_FILE
Read (--append/--replace) or write (--show) target state names and representative
progress coordinates from/to TSTATE_FILE
--tstate TSTATES Add the given target state (specified as a string 'label,pcoord0[,pcoord1[,...]]')
to the list of target states (after those specified in the file given by
--tstates-from, if any). This argument may be specified more than once, in which
case the given states are appended in the order they appear on the command line.
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
modes of operation:
--show Display current basis/target states (or dump to files).
--append Append the given basis/target states to those currently in use.
--replace Replace current basis/target states with those specified.
parallelization options:
--serial run in serial mode
--parallel run in parallel mode (using processes)
--work-manager WORK_MANAGER
use the given work manager for parallel task distribution. Available work managers
are ('serial', 'threads', 'processes', 'zmq'); default is 'serial'
--n-workers N_WORKERS
Use up to N_WORKERS on this host, for work managers which support this option. Use
0 for a dedicated server. (Ignored by work managers which do not support this
option.)
options for ZeroMQ ("zmq") work manager (master or node):
--zmq-mode MODE Operate as a master (server) or a node (workers/client). "server" is a deprecated
synonym for "master" and "client" is a deprecated synonym for "node".
--zmq-comm-mode COMM_MODE
Use the given communication mode -- TCP or IPC (Unix-domain) -- sockets for
communication within a node. IPC (the default) may be more efficient but is not
available on (exceptionally rare) systems without node-local storage (e.g. /tmp);
on such systems, TCP may be used instead.
--zmq-write-host-info INFO_FILE
Store hostname and port information needed to connect to this instance in
INFO_FILE. This allows the master and nodes assisting in coordinating the
communication of other nodes to choose ports randomly. Downstream nodes read this
file with --zmq-read-host-info and know where and how to connect.
--zmq-read-host-info INFO_FILE
Read hostname and port information needed to connect to the master (or other
coordinating node) from INFO_FILE. This allows the master and nodes assisting in
coordinating the communication of other nodes to choose ports randomly, writing
that information with --zmq-write-host-info for this instance to read.
--zmq-upstream-rr-endpoint ENDPOINT
ZeroMQ endpoint to which to send request/response (task and result) traffic toward
the master.
--zmq-upstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to receive announcement (heartbeat and shutdown
notification) traffic from the master.
--zmq-downstream-rr-endpoint ENDPOINT
ZeroMQ endpoint on which to listen for request/response (task and result) traffic
from subsidiary workers.
--zmq-downstream-ann-endpoint ENDPOINT
ZeroMQ endpoint on which to send announcement (heartbeat and shutdown
notification) traffic toward workers.
--zmq-master-heartbeat MASTER_HEARTBEAT
Every MASTER_HEARTBEAT seconds, the master announces its presence to workers.
--zmq-worker-heartbeat WORKER_HEARTBEAT
Every WORKER_HEARTBEAT seconds, workers announce their presence to the master.
--zmq-timeout-factor FACTOR
Scaling factor for heartbeat timeouts. If the master doesn't hear from a worker in
WORKER_HEARTBEAT*FACTOR, the worker is assumed to have crashed. If a worker
doesn't hear from the master in MASTER_HEARTBEAT*FACTOR seconds, the master is
assumed to have crashed. Both cases result in shutdown.
--zmq-startup-timeout STARTUP_TIMEOUT
Amount of time (in seconds) to wait for communication between the master and at
least one worker. This may need to be changed on very large, heavily-loaded
computer systems that start all processes simultaneously.
--zmq-shutdown-timeout SHUTDOWN_TIMEOUT
Amount of time (in seconds) to wait for workers to shut down.
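As an illustration of the ``--bstate`` format described above, the following minimal Python sketch (not part of w_states; the helper name is hypothetical) parses 'label,probability[,auxref]' strings and renormalizes probabilities that do not sum to one, mirroring the initialize() logic in the accompanying source.
def parse_bstate_strings(bstate_strings):
    states = []
    for s in bstate_strings:
        fields = s.split(',')
        label = fields[0]
        probability = float(fields[1])
        auxref = fields[2] if len(fields) > 2 else None
        states.append((label, probability, auxref))
    # w_states warns and rescales so that the probabilities sum to one
    total = sum(p for _, p, _ in states)
    if states and abs(1.0 - total) > 1e-12:
        states = [(label, p / total, auxref) for label, p, auxref in states]
    return states

print(parse_bstate_strings(['bound,0.5', 'unbound,0.25,refA', 'other,0.25']))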
| import argparse
import io
import logging
import sys
import numpy as np
import westpa.work_managers as work_managers
from westpa.work_managers import make_work_manager
import westpa
from westpa.core.segment import Segment
from westpa.core.states import BasisState, TargetState
log = logging.getLogger('w_states')
EPS = np.finfo(np.float64).eps
def entry_point():
parser = argparse.ArgumentParser(
'w_states',
description='''\
Display or manipulate basis (initial) or target (recycling) states for a WEST simulation. By default, states are
displayed (or dumped to files). If ``--replace`` is specified, all basis/target states are replaced for the
next iteration. If ``--append`` is specified, the given target state(s) are appended to the list for the
next iteration.
Appending basis states is not permitted, as this would require renormalizing basis state
probabilities in ways that may be error-prone. Instead, use ``w_states --show --bstate-file=bstates.txt``
and then edit the resulting ``bstates.txt`` file to include the new desired basis states, then use
``w_states --replace --bstate-file=bstates.txt`` to update the WEST HDF5 file appropriately.
''',
)
westpa.rc.add_args(parser)
smgroup = parser.add_argument_group('modes of operation')
mode_group = smgroup.add_mutually_exclusive_group()
mode_group.add_argument(
'--show', dest='mode', action='store_const', const='show', help='Display current basis/target states (or dump to files).'
)
mode_group.add_argument(
'--append',
dest='mode',
action='store_const',
const='append',
help='Append the given basis/target states to those currently in use.',
)
mode_group.add_argument(
'--replace',
dest='mode',
action='store_const',
const='replace',
help='Replace current basis/target states with those specified.',
)
parser.add_argument(
'--bstate-file',
metavar='BSTATE_FILE',
help='''Read (--append/--replace) or write (--show) basis state names, probabilities,
and data references from/to BSTATE_FILE.''',
)
parser.add_argument(
'--bstate',
action='append',
dest='bstates',
help='''Add the given basis state (specified as a string 'label,probability[,auxref]')
to the list of basis states (after those specified in --bstate-file, if any). This argument
may be specified more than once, in which case the given states are appended in the order
they are given on the command line.''',
)
parser.add_argument(
'--tstate-file',
metavar='TSTATE_FILE',
help='''Read (--append/--replace) or write (--show) target state names
and representative progress coordinates from/to TSTATE_FILE''',
)
parser.add_argument(
'--tstate',
action='append',
dest='tstates',
help='''Add the given target state (specified as a string 'label,pcoord0[,pcoord1[,...]]') to the
list of target states (after those specified in the file given by --tstate-file, if any).
This argument may be specified more than once, in which case the given states are appended
in the order they appear on the command line.''',
)
parser.set_defaults(mode='show')
work_managers.environment.add_wm_args(parser)
args = parser.parse_args()
westpa.rc.process_args(args)
work_managers.environment.process_wm_args(args)
# Need to have something to pass to initialize
if not hasattr(args, 'bstates'):
args.bstates = None
if not hasattr(args, 'tstates'):
args.tstates = None
if not hasattr(args, 'tstate_file'):
args.tstate_file = None
initialize(args.mode, args.bstates, args.bstate_file, args.tstates, args.tstate_file)
# TODO: This would benefit from a refactor to set default args to None, and replace some of those "if <argument>" clauses
def initialize(mode, bstates, _bstate_file, tstates, _tstate_file):
work_manager = make_work_manager()
system = westpa.rc.get_system_driver()
with work_manager:
if work_manager.is_master:
data_manager = westpa.rc.get_data_manager()
data_manager.open_backing(mode='a')
sim_manager = westpa.rc.get_sim_manager()
n_iter = data_manager.current_iteration
assert mode in ('show', 'replace', 'append')
if mode == 'show':
basis_states = data_manager.get_basis_states(n_iter)
if basis_states:
bstate_file = sys.stdout if not _bstate_file else open(_bstate_file, 'wt')
bstate_file.write('# Basis states for iteration {:d}\n'.format(n_iter))
BasisState.states_to_file(basis_states, bstate_file)
target_states = data_manager.get_target_states(n_iter)
if target_states:
tstate_file = sys.stdout if not _tstate_file else open(_tstate_file, 'wt')
tstate_file.write('# Target states for iteration {:d}\n'.format(n_iter))
TargetState.states_to_file(target_states, tstate_file)
elif mode == 'replace':
seg_index = data_manager.get_seg_index(n_iter)
if (seg_index['status'] == Segment.SEG_STATUS_COMPLETE).any():
print('Iteration {:d} has completed segments; applying new states to iteration {:d}'.format(n_iter, n_iter + 1))
n_iter += 1
basis_states = []
if _bstate_file:
basis_states.extend(BasisState.states_from_file(_bstate_file))
if bstates:
for bstate_str in bstates:
fields = bstate_str.split(',')
label = fields[0]
probability = float(fields[1])
try:
auxref = fields[2]
except IndexError:
auxref = None
basis_states.append(BasisState(label=label, probability=probability, auxref=auxref))
if basis_states:
# Check that the total probability of basis states adds to one
tprob = sum(bstate.probability for bstate in basis_states)
if abs(1.0 - tprob) > len(basis_states) * EPS:
pscale = 1 / tprob
log.warning('Basis state probabilities do not add to unity; rescaling by {:g}'.format(pscale))
for bstate in basis_states:
bstate.probability *= pscale
# Assign progress coordinates to basis states
sim_manager.get_bstate_pcoords(basis_states, n_iter)
data_manager.create_ibstate_group(basis_states, n_iter)
sim_manager.report_basis_states(basis_states)
# Now handle target states
target_states = []
if _tstate_file:
target_states.extend(TargetState.states_from_file(_tstate_file, system.pcoord_dtype))
if tstates:
tstates_strio = io.StringIO('\n'.join(tstates).replace(',', ' '))
target_states.extend(TargetState.states_from_file(tstates_strio, system.pcoord_dtype))
del tstates_strio
if not target_states:
westpa.rc.pstatus('No target states specified.')
else:
data_manager.save_target_states(target_states, n_iter)
sim_manager.report_target_states(target_states)
data_manager.update_iter_group_links(n_iter)
else: # args.mode == 'append'
if _bstate_file or bstates:
sys.stderr.write('refusing to append basis states; use --show followed by --replace instead\n')
sys.exit(2)
target_states = data_manager.get_target_states(n_iter)
seg_index = data_manager.get_seg_index(n_iter)
if (seg_index['status'] == Segment.SEG_STATUS_COMPLETE).any():
print('Iteration {:d} has completed segments; applying new states to iteration {:d}'.format(n_iter, n_iter + 1))
n_iter += 1
if _tstate_file:
target_states.extend(TargetState.states_from_file(_tstate_file, system.pcoord_dtype))
if tstates:
tstates_strio = io.StringIO('\n'.join(tstates).replace(',', ' '))
target_states.extend(TargetState.states_from_file(tstates_strio, system.pcoord_dtype))
del tstates_strio
if not target_states:
westpa.rc.pstatus('No target states specified.')
else:
data_manager.save_target_states(target_states, n_iter)
sim_manager.report_target_states(target_states)
data_manager.update_iter_group_links(n_iter)
else:
work_manager.run()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_succ.rst | Manual | w_succ command | MIT License | westpa__westpa/doc/documentation/cli/w_succ.rst | [
"westpa__westpa/src/westpa/cli/core/w_succ.py"
] | w_succ
usage:
w_succ [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version] [-A H5FILE] [-W WEST_H5FILE]
[-o OUTPUT_FILE]
List segments which successfully reach a target state.
optional arguments:
-h, --help show this help message and exit
-o OUTPUT_FILE, --output OUTPUT_FILE
Store output in OUTPUT_FILE (default: write to standard output).
general options:
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
general analysis options:
-A H5FILE, --analysis-file H5FILE
Store intermediate and final results in H5FILE (default: analysis.h5).
WEST input data options:
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
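For convenience, here is a minimal sketch (not part of w_succ; the file name is hypothetical) of loading the tool's whitespace-delimited output with NumPy, using the column layout documented in the header that find_successful_trajs() writes: iteration, seg_id, weight, then the final progress coordinate value(s).
import numpy as np

# 'succ.txt' stands in for whatever was passed to -o/--output
data = np.loadtxt('succ.txt', comments='#', ndmin=2)
n_iters = data[:, 0].astype(int)
seg_ids = data[:, 1].astype(int)
weights = data[:, 2]
final_pcoords = data[:, 3:]
print(len(seg_ids), 'recycled segments, total weight', weights.sum())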
| import argparse
import sys
import numpy as np
import westpa
from westpa.core.segment import Segment
from westpa.oldtools.aframe import WESTAnalysisTool, WESTDataReaderMixin, CommonOutputMixin
import logging
log = logging.getLogger('w_succ')
class WSucc(CommonOutputMixin, WESTDataReaderMixin, WESTAnalysisTool):
def __init__(self):
super().__init__()
self.include_args['CommonOutputMixin']['print_bin_labels'] = False
self.output_file = sys.stdout
def find_successful_trajs(self):
pcoord_formats = {
'u8': '%20d',
'i8': '%20d',
'u4': '%10d',
'i4': '%11d',
'u2': '%5d',
'i2': '%6d',
'f4': '%14.7g',
'f8': '%23.15g',
}
if not self.output_suppress_headers:
self.output_file.write(
'''\
# successful (recycled) segments
# column 0: iteration
# column 1: seg_id
# column 2: weight
# column>2: final progress coordinate value
'''
)
for n_iter in range(1, self.data_manager.current_iteration):
seg_index = self.get_seg_index(n_iter)
all_seg_ids = np.arange(len(seg_index), dtype=np.int_)
recycled_seg_ids = all_seg_ids[seg_index[:]['endpoint_type'] == Segment.SEG_ENDPOINT_RECYCLED]
if len(recycled_seg_ids) == 0:
# Attempting to retrieve a 0-length selection from HDF5 (the pcoords below) fails
continue
pcoord_ds = self.get_pcoord_dataset(n_iter)
pcoord_len = pcoord_ds.shape[1]
pcoord_ndim = pcoord_ds.shape[2]
final_pcoords = self.get_pcoord_dataset(n_iter)[recycled_seg_ids, pcoord_len - 1, :]
# The above HDF5 selection always returns a vector; we want a 2-d array
final_pcoords.shape = (len(recycled_seg_ids), pcoord_ndim)
for ipc, seg_id in enumerate(recycled_seg_ids):
self.output_file.write('%8d %8d %20.14g' % (n_iter, seg_id, seg_index[seg_id]['weight']))
fields = ['']
for field in final_pcoords[ipc]:
fields.append(pcoord_formats.get(field.dtype.str[1:], '%s') % field)
self.output_file.write(' '.join(fields))
self.output_file.write('\n')
def entry_point():
wsucc = WSucc()
parser = argparse.ArgumentParser(
'w_succ',
description='''\
List segments which successfully reach a target state''',
)
westpa.rc.add_args(parser)
wsucc.add_args(parser)
parser.add_argument(
'-o',
'--output',
dest='output_file',
help='Store output in OUTPUT_FILE (default: write to standard output).',
type=argparse.FileType('wt'),
default=sys.stdout,
)
args = parser.parse_args()
westpa.rc.process_args(args, config_required=False)
wsucc.process_args(args)
wsucc.output_file = args.output_file
wsucc.find_successful_trajs()
if __name__ == '__main__':
entry_point() |
|
westpa__westpa | w_trace.rst | Manual | w_trace command | MIT License | westpa__westpa/doc/documentation/cli/w_trace.rst | [
"westpa__westpa/src/westpa/cli/tools/w_trace.py"
] | w_trace
usage:
w_trace [-h] [-r RCFILE] [--quiet | --verbose | --debug] [--version] [-W WEST_H5FILE]
[-d DSNAME] [--output-pattern OUTPUT_PATTERN] [-o OUTPUT]
N_ITER:SEG_ID [N_ITER:SEG_ID ...]
Trace individual WEST trajectories and emit (or calculate) quantities
along the trajectory.
Trajectories are specified as N_ITER:SEG_ID pairs. Each segment is
traced back to its initial point, and then various quantities (notably
n_iter and seg_id) are printed in order from initial point up until the
given segment in the given iteration.
Output is stored in several files, all named according to the pattern
given by the -o/--output-pattern parameter. The default output pattern
is "traj%d_%d", where the printf-style format codes are replaced by the
iteration number and segment ID of the terminal segment of the
trajectory being traced.
Individual datasets can be selected for writing using the -d/--dataset
option (which may be specified more than once). The simplest form is
-d dsname, which causes data from dataset dsname along the trace to be
stored to HDF5. The dataset is assumed to be stored on a per-iteration
basis, with the first dimension corresponding to seg_id and the second
dimension corresponding to time within the segment. Further options are
specified as comma-separated key=value pairs after the data set name, as
in:
-d dsname,alias=newname,index=idsname,file=otherfile.h5,slice=[100,...]
The following options for datasets are supported:
alias=newname
When writing this data to HDF5 or text files, use ``newname``
instead of ``dsname`` to identify the dataset. This is mostly of
use in conjunction with the ``slice`` option in order, e.g., to
retrieve two different slices of a dataset and store them with
different names for future use.
index=idsname
The dataset is not stored on a per-iteration basis for all
segments, but instead is stored as a single dataset whose
first dimension indexes n_iter/seg_id pairs. The index to
these n_iter/seg_id pairs is ``idsname``.
file=otherfile.h5
Instead of reading data from the main WEST HDF5 file (usually
``west.h5``), read data from ``otherfile.h5``.
slice=[100,...]
Retrieve only the given slice from the dataset. This can be
used to pick a subset of interest to minimize I/O.
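The following minimal sketch (the helper name is hypothetical) shows how such a combined -d specification can be split into the dataset name and its key=value options; the tool itself does this in WTraceTool.parse_dataset_string (shown in the source later in this entry), using a regex that ignores commas inside the brackets of a slice= option.
import re

def split_dataset_spec(spec):
    # split on commas that are not inside the [...] of a slice= option
    fields = re.split(r',(?=[^\]]*(?:\[|$))', spec)
    name, options = fields[0], {}
    for field in fields[1:]:
        key, value = field.split('=', 1)
        options[key.lower()] = value
    return name, options

print(split_dataset_spec('pcoord,alias=progress,slice=[100,...]'))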
positional arguments
N_ITER:SEG_ID Trace trajectory ending (or at least alive at) N_ITER:SEG_ID.
optional arguments
-h, --help show this help message and exit
-d DSNAME, --dataset DSNAME
Include the dataset named DSNAME in trace output. An extended form like
DSNAME[,alias=ALIAS][,index=INDEX][,file=FILE][,slice=SLICE] will obtain the
dataset from the given FILE instead of the main WEST HDF5 file, slice it by
SLICE, call it ALIAS in output, and/or access per-segment data by a
n_iter,seg_id INDEX instead of a seg_id indexed dataset in the group for
n_iter.
general options
-r RCFILE, --rcfile RCFILE
use RCFILE as the WEST run-time configuration file (default: west.cfg)
--quiet emit only essential information
--verbose emit extra information
--debug enable extra checks and emit copious information
--version show program's version number and exit
WEST input data options
-W WEST_H5FILE, --west-data WEST_H5FILE
Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in
west.cfg).
output options
--output-pattern OUTPUT_PATTERN
Write per-trajectory data to output files/HDF5 groups whose names begin with
OUTPUT_PATTERN, which must contain two printf-style format flags which will be
replaced with the iteration number and segment ID of the terminal segment of
the trajectory being traced. (Default: traj_%d_%d.)
-o OUTPUT, --output OUTPUT
Store intermediate data and analysis results to OUTPUT (default: trajs.h5).
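Beyond the command line, the Trace class defined in the accompanying module can be used directly. The sketch below assumes a configured WESTPA runtime and an existing west.h5; the 10:3 endpoint is purely illustrative, and mode='r' for open_backing is assumed to give read-only access.
import westpa
from westpa.cli.tools.w_trace import Trace

# Assumes westpa.rc has already been configured so get_data_manager() works,
# exactly as in the accompanying source.
data_manager = westpa.rc.get_data_manager()
data_manager.open_backing(mode='r')
trace = Trace.from_data_manager(n_iter=10, seg_id=3, data_manager=data_manager)
for row in trace.summary:
    print(int(row['n_iter']), int(row['seg_id']), float(row['weight']))
pcoords, weights = trace.trace_timepoint_dataset('pcoord')
print(pcoords.shape, weights.shape)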
| import re
import h5py
import numpy as np
from westpa.tools import WESTTool, WESTDataReader
import westpa
from westpa.core import h5io
from westpa.core.segment import Segment
from westpa.core.states import InitialState
from westpa.core.data_manager import weight_dtype, n_iter_dtype, seg_id_dtype, utime_dtype
class Trace:
'''A class representing a trace of a certain trajectory segment back to its origin.'''
def __init__(self, summary, endpoint_type, basis_state, initial_state, data_manager=None):
self.summary = summary
self.endpoint_type = endpoint_type
self.basis_state = basis_state
self.initial_state = initial_state
self.data_manager = data_manager or westpa.rc.get_data_manager()
# A mapping from aux file names to open h5py.File objects, to minimize time
self._auxfiles = {}
def __len__(self):
try:
return len(self.summary)
except TypeError:
return 0
def __getitem__(self, sl):
return self.summary[sl]
def __iter__(self):
return iter(self.summary)
@classmethod
def from_data_manager(cls, n_iter, seg_id, data_manager=None):
'''Construct and return a trajectory trace whose last segment is identified
by ``seg_id`` in the iteration number ``n_iter``.'''
data_manager = data_manager or westpa.rc.get_data_manager()
# These values are used later on
endpoint_type = None
pcoord_dtype = None
pcoord_pt_shape = None
seginfo = []
parent_id = seg_id
while n_iter > 0 and parent_id >= 0:
seg_id = parent_id
iter_group = data_manager.get_iter_group(n_iter)
pcoord_ds = iter_group['pcoord']
seg_index = iter_group['seg_index']
n_segs = pcoord_ds.shape[0]
pcoord_len = pcoord_ds.shape[1]
assert seg_id < n_segs
indexrow = seg_index[seg_id]
final_pcoord = pcoord_ds[seg_id, pcoord_len - 1]
weight = indexrow['weight']
cputime = indexrow['cputime']
walltime = indexrow['walltime']
try:
parent_id = int(indexrow['parent_id'])
except IndexError:
# old HDF5 version
parent_id = int(iter_group['parents'][indexrow['parents_offset']])
if endpoint_type is None:
endpoint_type = indexrow['endpoint_type']
pcoord_pt_shape = pcoord_ds.shape[2:]
pcoord_dtype = pcoord_ds.dtype
seginfo.append((n_iter, seg_id, weight, walltime, cputime, final_pcoord))
del iter_group, pcoord_ds, seg_index
n_iter -= 1
# loop terminates with parent_id set to the identifier of the initial state,
# seg_id set to the identifier of the first segment in the trajectory, and
# n_iter set to one less than the iteration of the first segment
first_iter = n_iter + 1
first_seg_id = seg_id
first_parent_id = parent_id
# Initial segment (for fetching initial state)
first_segment = Segment(n_iter=first_iter, seg_id=first_seg_id, parent_id=first_parent_id)
seginfo.reverse()
summary_dtype = np.dtype(
[
('n_iter', n_iter_dtype),
('seg_id', seg_id_dtype),
('weight', weight_dtype),
('walltime', utime_dtype),
('cputime', utime_dtype),
('final_pcoord', pcoord_dtype, pcoord_pt_shape),
]
)
summary = np.array(seginfo, dtype=summary_dtype)
try:
initial_state = data_manager.get_segment_initial_states([first_segment], first_iter)[0]
except KeyError:
# old HDF5 version
assert parent_id < 0
istate_pcoord = data_manager.get_iter_group(first_iter)['pcoord'][first_seg_id, 0]
istate_id = -(first_parent_id + 1)
basis_state = None
initial_state = InitialState(istate_id, None, iter_created=0, pcoord=istate_pcoord)
else:
basis_state = data_manager.get_basis_states(first_iter)[initial_state.basis_state_id]
return cls(summary, endpoint_type, basis_state, initial_state, data_manager)
def get_segment_data_slice(self, datafile, dsname, n_iter, seg_id, slice_=None, index_data=None, iter_prec=None):
'''Return the data from the dataset named ``dsname`` within the given ``datafile`` (an open
h5py.File object) for the given iteration and segment. By default, it is assumed that the
dataset is stored in the iteration group for iteration ``n_iter``, but if ``index_data``
is provided, it must be an iterable (preferably a simple array) of (n_iter,seg_id) pairs,
and the index in the ``index_data`` iterable of the matching n_iter/seg_id pair is used as
the index of the data to retrieve.
If an optional ``slice_`` is provided, then the given slicing tuple is appended to that
used to retrieve the segment-specific data (i.e. it can be used to pluck a subset of the
data that would otherwise be returned).
'''
if slice_ is None:
slice_ = np.s_[...]
if index_data is not None:
dataset = datafile[dsname]
for i, (i_n_iter, i_seg_id) in enumerate(index_data):
if (i_n_iter, i_seg_id) == (n_iter, seg_id):
break
else:
raise KeyError((n_iter, seg_id))
itpl = (i,) + slice_
return dataset[itpl]
else:
if not iter_prec:
iter_prec = datafile.attrs.get('west_iter_prec', self.data_manager.default_iter_prec)
igname_tail = 'iter_{:0{iter_prec:d}d}'.format(int(n_iter), iter_prec=int(iter_prec))
try:
iter_group = datafile['/iterations/' + igname_tail]
except KeyError:
iter_group = datafile[igname_tail]
dataset = iter_group[dsname]
itpl = (seg_id,) + slice_
return dataset[itpl]
def trace_timepoint_dataset(self, dsname, slice_=None, auxfile=None, index_ds=None):
'''Return a trace along this trajectory over a dataset which is laid out as [seg_id][timepoint][...].
Overlapping values at segment boundaries are accounted for. Returns (data_trace, weight), where
data_trace is a time series of the dataset along this trajectory, and weight is the corresponding
trajectory weight at each time point.
If ``auxfile`` is given, then load the dataset from the given HDF5 file, which must be
layed out the same way as the main HDF5 file (e.g. iterations arranged as
iterations/iter_*).
If index_ds is given, instead of reading data per-iteration from iter_* groups, then the
given index_ds is used as an index of n_iter,seg_id pairs into ``dsname``. In this case,
the target data set need not exist on a per-iteration basis inside iter_* groups.
If ``slice_`` is given, then *further* slice the data returned from the HDF5 dataset. This can
minimize I/O if it is known (and specified) that only a subset of the data along the
trajectory is needed.
'''
# Figure out where to look for the dataset
if isinstance(auxfile, str):
datafile = h5py.File(auxfile, 'r')
close_datafile = True
elif auxfile is not None:
datafile = auxfile
close_datafile = False
else:
datafile = self.data_manager.we_h5file
close_datafile = False
iter_prec = self.data_manager.iter_prec
get_data_slice = self.get_segment_data_slice
# Load the index if we use it
if index_ds is not None:
if isinstance(index_ds, str):
index_ds = datafile[index_ds]
index_data = index_ds[...]
else:
index_data = None
# Be sure to retrieve the time series
if not slice_:
first_sl = np.index_exp[:,...]
other_sl = np.index_exp[1:,...]
else:
first_sl = np.index_exp[:] + slice_
other_sl = np.index_exp[1:] + slice_
# Retrieve the first segment's data
first_n_iter, first_seg_id = self.summary[0]['n_iter'], self.summary[0]['seg_id']
first_iter_data = get_data_slice(datafile, dsname, first_n_iter, first_seg_id, first_sl, index_data, iter_prec)
n_segs = len(self)
n_points_per_seg = len(first_iter_data)
length = n_points_per_seg + (n_segs - 1) * (n_points_per_seg - 1)
tracedata = np.empty((length,) + first_iter_data.shape[1:], dtype=first_iter_data.dtype)
traceweight = np.empty((length,), weight_dtype)
# Store first segment data
tracedata[0:n_points_per_seg] = first_iter_data
traceweight[0:n_points_per_seg] = self.summary[0]['weight']
del first_iter_data
# Store remainder of data
for iseg, summary_item in enumerate(self.summary[1:]):
n_iter = summary_item['n_iter']
seg_id = summary_item['seg_id']
weight = summary_item['weight']
offset = n_points_per_seg + iseg * (n_points_per_seg - 1)
length = n_points_per_seg - 1
seg_data = get_data_slice(datafile, dsname, n_iter, seg_id, other_sl, index_data, iter_prec)
tracedata[offset : offset + length] = seg_data
traceweight[offset : offset + length] = weight
del seg_data
if close_datafile:
datafile.close()
return tracedata, traceweight
"""
# This is disabled until there is a real use for it; the following code is
# outdated
def trace_perseg_dataset(self, dsname):
'''Return a trace along this trajectory over a dataset which is laid out as [seg_id][...].
Returns (data_trace, weight), where data_trace is a time series of the dataset along this
trajectory, and weight is the corresponding trajectory weight at each time point.'''
first_n_iter, first_seg_id = self.summary[0]['n_iter'], self.summary[0]['seg_id']
first_iter_group = self.data_manager.get_iter_group(first_n_iter)
first_iter_ds = first_iter_group[dsname]
n_segs = len(self)
tracedata = np.empty((n_segs,) + first_iter_ds.shape[1:], dtype=first_iter_ds.dtype)
traceweight = np.empty((n_segs,), weight_dtype)
tracedata[0] = first_iter_ds[first_seg_id]
traceweight[0] = self.summary[0]['weight']
for isegm1, summary_item in enumerate(self.summary[1:]):
iseg = isegm1 + 1
n_iter = summary_item['n_iter']
seg_id = summary_item['seg_id']
iter_group = self.data_manager.get_iter_group(n_iter)
seg_data = iter_group[dsname][seg_id]
tracedata[iseg] = seg_data
traceweight[iseg] = summary_item['weight']
del seg_data
return tracedata, traceweight
"""
class WTraceTool(WESTTool):
prog = 'w_trace'
description = '''\
Trace individual WEST trajectories and emit (or calculate) quantities along the
trajectory.
Trajectories are specified as N_ITER:SEG_ID pairs. Each segment is traced back
to its initial point, and then various quantities (notably n_iter and seg_id)
are printed in order from initial point up until the given segment in the given
iteration.
Output is stored in several files, all named according to the pattern given by
the -o/--output-pattern parameter. The default output pattern is "traj_%d_%d",
where the printf-style format codes are replaced by the iteration number and
segment ID of the terminal segment of the trajectory being traced.
Individual datasets can be selected for writing using the -d/--dataset option
(which may be specified more than once). The simplest form is ``-d dsname``,
which causes data from dataset ``dsname`` along the trace to be stored to
HDF5. The dataset is assumed to be stored on a per-iteration basis, with
the first dimension corresponding to seg_id and the second dimension
corresponding to time within the segment. Further options are specified
as comma-separated key=value pairs after the data set name, as in
-d dsname,alias=newname,index=idsname,file=otherfile.h5,slice=[100,...]
The following options for datasets are supported:
alias=newname
When writing this data to HDF5 or text files, use ``newname``
instead of ``dsname`` to identify the dataset. This is mostly of
use in conjunction with the ``slice`` option in order, e.g., to
retrieve two different slices of a dataset and store them with
different names for future use.
index=idsname
The dataset is not stored on a per-iteration basis for all
segments, but instead is stored as a single dataset whose
first dimension indexes n_iter/seg_id pairs. The index to
these n_iter/seg_id pairs is ``idsname``.
file=otherfile.h5
Instead of reading data from the main WEST HDF5 file (usually
``west.h5``), read data from ``otherfile.h5``.
slice=[100,...]
Retrieve only the given slice from the dataset. This can be
used to pick a subset of interest to minimize I/O.
-------------------------------------------------------------------------------
'''
pcoord_formats = {
'u8': '%20d',
'i8': '%20d',
'u4': '%10d',
'i4': '%11d',
'u2': '%5d',
'i2': '%6d',
'f4': '%14.7g',
'f8': '%023.15g',
}
def __init__(self):
super().__init__()
self.data_reader = WESTDataReader()
# self.h5storage = HDF5Storage()
self.output_file = None
self.output_pattern = None
self.endpoints = None
self.datasets = []
# Interface for command-line tools
def add_args(self, parser):
self.data_reader.add_args(parser)
# self.h5storage.add_args(parser)
parser.add_argument(
'-d',
'--dataset',
dest='datasets',
# this breaks argparse (see http://bugs.python.org/issue11874)
# metavar='DSNAME[,alias=ALIAS][,index=INDEX][,file=FILE][,slice=SLICE]',
metavar='DSNAME',
action='append',
help='''Include the dataset named DSNAME in trace output. An extended form like
DSNAME[,alias=ALIAS][,index=INDEX][,file=FILE][,slice=SLICE] will
obtain the dataset from the given FILE instead of the main WEST HDF5 file,
slice it by SLICE, call it ALIAS in output, and/or access per-segment data by a n_iter,seg_id
INDEX instead of a seg_id indexed dataset in the group for n_iter.''',
)
parser.add_argument(
'endpoints',
metavar='N_ITER:SEG_ID',
nargs='+',
help='''Trace trajectory ending (or at least alive at) N_ITER:SEG_ID.''',
)
# tgroup = parser.add_argument_group('trace options')
ogroup = parser.add_argument_group('output options')
ogroup.add_argument(
'--output-pattern',
default='traj_%d_%d',
help='''Write per-trajectory data to output files/HDF5 groups whose names begin with OUTPUT_PATTERN,
which must contain two printf-style format flags which will be replaced with the iteration number
and segment ID of the terminal segment of the trajectory being traced.
(Default: %(default)s.)''',
)
ogroup.add_argument(
'-o',
'--output',
default='trajs.h5',
help='Store intermediate data and analysis results to OUTPUT (default: %(default)s).',
)
def process_args(self, args):
self.data_reader.process_args(args)
# self.h5storage.process_args(args)
self.endpoints = [list(map(int, endpoint.split(':'))) for endpoint in args.endpoints]
self.output_pattern = args.output_pattern
for dsstr in args.datasets or []:
self.datasets.append(self.parse_dataset_string(dsstr))
# self.h5storage.open_analysis_h5file()
self.output_file = h5py.File(args.output, 'a')
def parse_dataset_string(self, dsstr):
dsinfo = {}
r = re.compile(r',(?=[^\]]*(?:\[|$))')
fields = r.split(dsstr)
dsinfo['dsname'] = fields[0]
for field in (field.strip() for field in fields[1:]):
k, v = field.split('=')
k = k.lower()
if k in ('alias', 'file', 'index'):
dsinfo[k] = v
elif k == 'slice':
try:
dsinfo['slice'] = eval('np.index_exp' + v)
except SyntaxError:
raise SyntaxError('invalid index expression {!r}'.format(v))
else:
raise ValueError('invalid dataset option {!r}'.format(k))
return dsinfo
def go(self):
self.data_reader.open('r')
# Create a new 'trajectories' group if this is the first trace
try:
trajs_group = h5io.create_hdf5_group(self.output_file, 'trajectories', replace=False, creating_program=self.prog)
except ValueError:
trajs_group = self.output_file['trajectories']
for n_iter, seg_id in self.endpoints:
trajname = self.output_pattern % (n_iter, seg_id)
trajgroup = trajs_group.create_group(trajname)
trace = Trace.from_data_manager(n_iter, seg_id, self.data_reader.data_manager)
with open(trajname + '_trace.txt', 'wt') as trace_output:
self.emit_trace_text(trace, trace_output)
self.emit_trace_h5(trace, trajgroup)
aux_h5files = {}
for dsinfo in self.datasets:
dsname = dsinfo['dsname']
filename = dsinfo.get('file')
if filename:
try:
aux_h5file = aux_h5files[filename]
except KeyError:
aux_h5file = aux_h5files[filename] = h5py.File(filename, 'r')
else:
aux_h5file = None
slice_ = dsinfo.get('slice')
alias = dsinfo.get('alias', dsname)
index = dsinfo.get('index')
data, weights = trace.trace_timepoint_dataset(dsname, auxfile=aux_h5file, slice_=slice_, index_ds=index)
# Save data to HDF5
try:
del trajgroup[alias]
except KeyError:
pass
trajgroup[alias] = data
# All weight vectors will be the same length, so only store in HDF5 once
if not ('weights' in trajgroup and trajgroup['weights'].shape == weights.shape):
try:
del trajgroup['weights']
except KeyError:
pass
trajgroup['weights'] = weights
def emit_trace_h5(self, trace, output_group):
for dsname in ('basis_state', 'initial_state','segments'):
try:
del output_group[dsname]
except KeyError:
pass
if trace.basis_state:
output_group['basis_state'] = trace.basis_state.as_numpy_record()
output_group['initial_state'] = trace.initial_state.as_numpy_record()
output_group['segments'] = trace.summary
def emit_trace_text(self, trace, output_file):
'''Dump summary information about each segment in the given trace to the given output_file,
which must be opened for writing in text mode. Output columns are separated by at least
one space.'''
if not trace:
return
pcoord_ndim = trace[0]['final_pcoord'].shape[0]
lastseg = trace[-1]
len_n_iter = max(6, len(str(lastseg['n_iter'])))
len_seg_id = max(6, max(len(str(seg_id)) for seg_id in trace['seg_id']))
seg_pattern = (
' '.join(
[
'{n_iter:{len_n_iter}d}',
'{seg_id:{len_seg_id}d}',
'{weight:22.17e}',
'{walltime:10.6g}',
'{cputime:10.6g}',
'{pcoord_str:s}',
]
)
+ '\n'
)
output_file.write(
'''\
# Trace of trajectory ending in n_iter:seg_id {n_iter:d}:{seg_id:d} (endpoint type {endpoint_type_text:s})
# column 0: iteration (0 => initial state)
# column 1: seg_id (or initial state ID)
# column 2: weight
# column 3: wallclock time (s)
# column 4: CPU time (s)
'''.format(
n_iter=int(lastseg['n_iter']),
seg_id=int(lastseg['seg_id']),
endpoint_type_text=Segment.endpoint_type_names[trace.endpoint_type],
)
)
if pcoord_ndim == 1:
output_file.write(
'''\
# column 5: final progress coordinate value
'''
)
else:
fpcbegin = 5
fpcend = fpcbegin + pcoord_ndim - 1
output_file.write(
'''\
# columns {fpcbegin:d} -- {fpcend:d}: final progress coordinate value
'''.format(
fpcbegin=fpcbegin, fpcend=fpcend
)
)
pcoord_formats = self.pcoord_formats
# Output row for initial state
initial_state = trace.initial_state
pcoord_str = ' '.join(pcoord_formats.get(pcfield.dtype.str[1:], '%s') % pcfield for pcfield in initial_state.pcoord)
output_file.write(
seg_pattern.format(
n_iter=0,
seg_id=initial_state.state_id,
weight=0.0,
walltime=0,
cputime=0,
pcoord_str=pcoord_str,
len_n_iter=len_n_iter,
len_seg_id=len_seg_id,
)
)
# Output rows for segments
for segment in trace:
pcoord_str = ' '.join(
pcoord_formats.get(pcfield.dtype.str[1:], '%s') % pcfield for pcfield in segment['final_pcoord']
)
output_file.write(
seg_pattern.format(
n_iter=int(segment['n_iter']),
seg_id=int(segment['seg_id']),
weight=float(segment['weight']),
walltime=float(segment['walltime']),
cputime=float(segment['cputime']),
pcoord_str=pcoord_str,
len_n_iter=len_n_iter,
len_seg_id=len_seg_id,
)
)
def entry_point():
WTraceTool().main()
if __name__ == '__main__':
entry_point() |
|
tortoise__tortoise-orm | fields.rst | Module doc / Tutorial | Examples and usage | Apache License 2.0 | tortoise__tortoise-orm/docs/fields.rst | [
"tortoise__tortoise-orm/tortoise/fields/base.py",
"tortoise__tortoise-orm/tortoise/fields/data.py",
"tortoise__tortoise-orm/tortoise/fields/relational.py"
] | Fields
Usage
Fields are defined as properties of a Model class object:
from tortoise.models import Model
from tortoise import fields
class Tournament(Model):
id = fields.IntField(pk=True)
name = fields.CharField(max_length=255)
Reference
Here is the list of available fields, along with their custom options:
Base Field
tortoise.fields.base
Data Fields
tortoise.fields.data
Relational Fields
tortoise.fields.relational
Extending A Field
It is possible to subclass fields, allowing use of arbitrary types as
long as they can be represented in a database-compatible format. An
example of this would be a simple wrapper around
``tortoise.fields.CharField`` to store and query Enum types.
from enum import Enum
from typing import Type
from tortoise import ConfigurationError
from tortoise.fields import CharField
class EnumField(CharField):
"""
An example extension to CharField that serializes Enums
to and from a str representation in the DB.
"""
def __init__(self, enum_type: Type[Enum], **kwargs):
super().__init__(128, **kwargs)
if not issubclass(enum_type, Enum):
raise ConfigurationError("{} is not a subclass of Enum!".format(enum_type))
self._enum_type = enum_type
def to_db_value(self, value: Enum, instance) -> str:
return value.value
def to_python_value(self, value: str) -> Enum:
try:
return self._enum_type(value)
except Exception:
raise ValueError(
"Database value {} does not exist on Enum {}.".format(value, self._enum_type)
)
When subclassing, make sure that the to_db_value returns the same type
as the superclass (in the case of CharField, that is a str) and that,
naturally, to_python_value accepts the same type in the value parameter
(also str).
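A minimal usage sketch for the EnumField defined above (the model and enum below are illustrative, not part of Tortoise ORM; it assumes the EnumField class from the example is in scope):
from enum import Enum

from tortoise import fields
from tortoise.models import Model

class Status(Enum):
    DRAFT = "draft"
    PUBLISHED = "published"

class Article(Model):
    id = fields.IntField(pk=True)
    title = fields.CharField(max_length=255)
    status = EnumField(Status, default=Status.DRAFT)

# Values round-trip through to_db_value/to_python_value, e.g.:
#     await Article.create(title="Hello", status=Status.PUBLISHED)
#     published = await Article.filter(status=Status.PUBLISHED)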
| from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type, Union
from pypika.terms import Term
from tortoise.exceptions import ConfigurationError
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
# TODO: Replace this with an enum
CASCADE = "CASCADE"
RESTRICT = "RESTRICT"
SET_NULL = "SET NULL"
SET_DEFAULT = "SET DEFAULT"
class _FieldMeta(type):
# TODO: Require functions to return field instances instead of this hack
def __new__(mcs, name: str, bases: Tuple[Type,...], attrs: dict):
if len(bases) > 1 and bases[0] is Field:
# Instantiate class with only the 1st base class (should be Field)
cls = type.__new__(mcs, name, (bases[0],), attrs) # type: Type[Field]
# All other base classes are our meta types, we store them in class attributes
cls.field_type = bases[1] if len(bases) == 2 else Union[bases[1:]]
return cls
return type.__new__(mcs, name, bases, attrs)
class Field(metaclass=_FieldMeta):
"""
Base Field type.
:param source_field: Provide a source_field name if the DB column name needs to be
something specific instead of generated off the field name.
:param generated: Is this field DB-generated?
:param pk: Is this field a Primary Key? Can only have a single such field on the Model,
and if none is specified it will autogenerate a default primary key called ``id``.
:param null: Is this field nullable?
:param default: A default value for the field if not specified on Model creation.
This can also be a callable for dynamic defaults in which case we will call it.
The default value will not be part of the schema.
:param unique: Is this field unique?
:param index: Should this field be indexed by itself?
:param description: Field description. Will also appear in ``Tortoise.describe_model()``
and as DB comments in the generated DDL.
**Class Attributes:**
These attributes need to be defined when defining an actual field type.
.. attribute:: field_type
:annotation: Type[Any]
The Python type the field is.
If adding a type as a mixin, _FieldMeta will automatically set this to that.
.. attribute:: indexable
:annotation: bool = True
Is the field indexable? Set to False if this field can't be indexed reliably.
.. attribute:: has_db_field
:annotation: bool = True
Does this field have a direct corresponding DB column? Or is the field virtualized?
.. attribute:: skip_to_python_if_native
:annotation: bool = False
If the DB driver natively supports this Python type, should we skip it?
This is for optimization purposes only, where we don't need to force type conversion
to and fro between Python and the DB.
.. attribute:: allows_generated
:annotation: bool = False
Is this field able to be DB-generated?
.. attribute:: function_cast
:annotation: Optional[pypika.Term] = None
A casting term that we need to apply in case the DB needs emulation help.
.. attribute:: SQL_TYPE
:annotation: str
The SQL type as a string that the DB will use.
.. attribute:: GENERATED_SQL
:annotation: str
The SQL that instructs the DB to auto-generate this field.
Required if ``allows_generated`` is ``True``.
**Per-DB overrides:**
One can specify per-DB overrides of any of the class attributes,
or the ``to_db_value`` or ``to_python_value`` methods.
To do so, specify an inner class in the form of :samp:`class _db__{SQL_DIALECT}:` like so:
.. code-block:: py3
class _db_sqlite:
SQL_TYPE = "VARCHAR(40)"
skip_to_python_if_native = False
def function_cast(self, term: Term) -> Term:
return functions.Cast(term, SqlTypes.NUMERIC)
Tortoise will then use the overridden attributes/functions for that dialect.
If you need a dynamic attribute, you can use a property.
"""
# Field_type is a readonly property for the instance, it is set by _FieldMeta
field_type: Type[Any] = None # type: ignore
indexable: bool = True
has_db_field: bool = True
skip_to_python_if_native: bool = False
allows_generated: bool = False
function_cast: Optional[Callable[[Term], Term]] = None
SQL_TYPE: str = None # type: ignore
GENERATED_SQL: str = None # type: ignore
# This method is just to make IDE/Linters happy
def __new__(cls, *args: Any, **kwargs: Any) -> "Field":
return super().__new__(cls)
def __init__(
self,
source_field: Optional[str] = None,
generated: bool = False,
pk: bool = False,
null: bool = False,
default: Any = None,
unique: bool = False,
index: bool = False,
description: Optional[str] = None,
model: "Optional[Model]" = None,
**kwargs: Any,
) -> None:
# TODO: Rename pk to primary_key, alias pk, deprecate
# TODO: Rename index to db_index, alias index, deprecate
if not self.indexable and (unique or index):
raise ConfigurationError(f"{self.__class__.__name__} can't be indexed")
if pk and null:
raise ConfigurationError(
f"{self.__class__.__name__} can't be both null=True and pk=True"
)
if pk:
index = True
unique = True
self.source_field = source_field
self.generated = generated
self.pk = pk
self.default = default
self.null = null
self.unique = unique
self.index = index
self.model_field_name = ""
self.description = description
self.docstring: Optional[str] = None
# TODO: consider making this not be set from constructor
self.model: Type["Model"] = model # type: ignore
self.reference: "Optional[Field]" = None
def to_db_value(self, value: Any, instance: "Union[Type[Model], Model]") -> Any:
"""
Converts from the Python type to the DB type.
:param value: Current python value in model.
:param instance: Model class or Model instance provided to look up.
Due to metacoding, to determine if this is an instance reliably, please do a:
.. code-block:: py3
if hasattr(instance, "_saved_in_db"):
"""
if value is None or isinstance(value, self.field_type):
return value
return self.field_type(value) # pylint: disable=E1102
def to_python_value(self, value: Any) -> Any:
"""
Converts from the DB type to the Python type.
:param value: Value from DB
"""
if value is None or isinstance(value, self.field_type):
return value
return self.field_type(value) # pylint: disable=E1102
@property
def required(self) -> bool:
"""
Returns ``True`` if the field is required to be provided.
It needs to be non-nullable and not have a default or be DB-generated to be required.
"""
return self.default is None and not self.null and not self.generated
@property
def constraints(self) -> dict:
"""
Returns a dict with constraints defined in the Pydantic/JSONSchema format.
"""
return {}
def _get_dialects(self) -> Dict[str, dict]:
return {
dialect[4:]: {
key: val
for key, val in getattr(self, dialect).__dict__.items()
if not key.startswith("_")
}
for dialect in [key for key in dir(self) if key.startswith("_db_")]
}
def get_db_field_types(self) -> Optional[Dict[str, str]]:
"""
Returns the DB types for this field.
:return: A dictionary that is keyed by dialect.
A blank dialect `""` means it is the default DB field type.
"""
if not self.has_db_field: # pragma: nocoverage
return None
return {
"": getattr(self, "SQL_TYPE"),
**{
dialect: _db["SQL_TYPE"]
for dialect, _db in self._get_dialects().items()
if "SQL_TYPE" in _db
},
}
def get_for_dialect(self, dialect: str, key: str) -> Any:
"""
Returns a field by dialect override.
:param dialect: The requested SQL Dialect.
:param key: The attribute/method name.
"""
dialect_data = self._get_dialects().get(dialect, {})
return dialect_data.get(key, getattr(self, key, None))
def describe(self, serializable: bool) -> dict:
"""
Describes the field.
:param serializable:
``False`` if you want raw python objects,
``True`` for JSON-serialisable data. (Defaults to ``True``)
:return:
A dictionary containing the field description.
(This assumes ``serializable=True``, which is the default):
.. code-block:: python3
{
"name": str # Field name
"field_type": str # Field type
"db_column": str # Name of DB column
# Optional: Only for pk/data fields
"raw_field": str # Name of raw field of the Foreign Key
# Optional: Only for Foreign Keys
"db_field_types": dict # DB Field types for default and DB overrides
"python_type": str # Python type
"generated": bool # Is the field generated by the DB?
"nullable": bool # Is the column nullable?
"unique": bool # Is the field unique?
"indexed": bool # Is the field indexed?
"default": ... # The default value (coerced to int/float/str/bool/null)
"description": str # Description of the field (nullable)
"docstring": str # Field docstring (nullable)
}
When ``serializable=False`` is specified some fields are not coerced to valid
JSON types. The changes are:
.. code-block:: python3
{
"field_type": Field # The Field class used
"python_type": Type # The actual Python type
"default": ... # The default value as native type OR a callable
}
"""
def _type_name(typ: Type) -> str:
if typ.__module__ == "builtins":
return typ.__name__
if typ.__module__ == "typing":
return str(typ).replace("typing.", "")
return f"{typ.__module__}.{typ.__name__}"
def type_name(typ: Any) -> Union[str, List[str]]:
try:
return typ._meta.full_name
except (AttributeError, TypeError):
pass
try:
return _type_name(typ)
except AttributeError:
try:
return [_type_name(_typ) for _typ in typ] # pragma: nobranch
except TypeError:
return str(typ)
def default_name(default: Any) -> Optional[Union[int, float, str, bool]]:
if isinstance(default, (int, float, str, bool, type(None))):
return default
if callable(default):
return f"<function {default.__module__}.{default.__name__}>"
return str(default)
field_type = getattr(self, "related_model", self.field_type)
desc = {
"name": self.model_field_name,
"field_type": self.__class__.__name__ if serializable else self.__class__,
"db_column": self.source_field or self.model_field_name,
"python_type": type_name(field_type) if serializable else field_type,
"generated": self.generated,
"nullable": self.null,
"unique": self.unique,
"indexed": self.index or self.unique,
"default": default_name(self.default) if serializable else self.default,
"description": self.description,
"docstring": self.docstring,
"constraints": self.constraints,
}
if self.has_db_field:
desc["db_field_types"] = self.get_db_field_types()
return desc
import datetime
import functools
import json
import warnings
from decimal import Decimal
from enum import Enum, IntEnum
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union
from uuid import UUID, uuid4
from pypika import functions
from pypika.enums import SqlTypes
from pypika.terms import Term
from tortoise.exceptions import ConfigurationError
from tortoise.fields.base import Field
try:
from ciso8601 import parse_datetime
except ImportError: # pragma: nocoverage
from iso8601 import parse_date
parse_datetime = functools.partial(parse_date, default_timezone=None)
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
__all__ = (
"BigIntField",
"BinaryField",
"BooleanField",
"CharEnumField",
"CharField",
"DateField",
"DatetimeField",
"DecimalField",
"FloatField",
"IntEnumField",
"IntField",
"JSONField",
"SmallIntField",
"TextField",
"TimeDeltaField",
"UUIDField",
)
# Doing this we can replace json dumps/loads with different implementations
JsonDumpsFunc = Callable[[Any], str]
JsonLoadsFunc = Callable[[str], Any]
JSON_DUMPS: JsonDumpsFunc = functools.partial(json.dumps, separators=(",", ":"))
JSON_LOADS: JsonLoadsFunc = json.loads
try:
# Use python-rapidjson as an optional accelerator
import rapidjson
JSON_DUMPS = rapidjson.dumps
JSON_LOADS = rapidjson.loads
except ImportError: # pragma: nocoverage
pass
class IntField(Field, int):
"""
Integer field. (32-bit signed)
``pk`` (bool):
True if field is Primary Key.
"""
SQL_TYPE = "INT"
allows_generated = True
def __init__(self, pk: bool = False, **kwargs: Any) -> None:
if pk:
kwargs["generated"] = bool(kwargs.get("generated", True))
super().__init__(pk=pk, **kwargs)
@property
def constraints(self) -> dict:
return {
"ge": 1 if self.generated or self.reference else -2147483648,
"le": 2147483647,
}
class _db_postgres:
GENERATED_SQL = "SERIAL NOT NULL PRIMARY KEY"
class _db_sqlite:
GENERATED_SQL = "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
class _db_mysql:
GENERATED_SQL = "INT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class BigIntField(Field, int):
"""
Big integer field. (64-bit signed)
``pk`` (bool):
True if field is Primary Key.
"""
SQL_TYPE = "BIGINT"
allows_generated = True
def __init__(self, pk: bool = False, **kwargs: Any) -> None:
if pk:
kwargs["generated"] = bool(kwargs.get("generated", True))
super().__init__(pk=pk, **kwargs)
@property
def constraints(self) -> dict:
return {
"ge": 1 if self.generated or self.reference else -9223372036854775808,
"le": 9223372036854775807,
}
class _db_postgres:
GENERATED_SQL = "BIGSERIAL NOT NULL PRIMARY KEY"
class _db_sqlite:
GENERATED_SQL = "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
class _db_mysql:
GENERATED_SQL = "BIGINT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class SmallIntField(Field, int):
"""
Small integer field. (16-bit signed)
``pk`` (bool):
True if field is Primary Key.
"""
SQL_TYPE = "SMALLINT"
allows_generated = True
def __init__(self, pk: bool = False, **kwargs: Any) -> None:
if pk:
kwargs["generated"] = bool(kwargs.get("generated", True))
super().__init__(pk=pk, **kwargs)
@property
def constraints(self) -> dict:
return {
"ge": 1 if self.generated or self.reference else -32768,
"le": 32767,
}
class _db_postgres:
GENERATED_SQL = "SMALLSERIAL NOT NULL PRIMARY KEY"
class _db_sqlite:
GENERATED_SQL = "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
class _db_mysql:
GENERATED_SQL = "SMALLINT NOT NULL PRIMARY KEY AUTO_INCREMENT"
class CharField(Field, str): # type: ignore
"""
Character field.
You must provide the following:
``max_length`` (int):
Maximum length of the field in characters.
"""
def __init__(self, max_length: int, **kwargs: Any) -> None:
if int(max_length) < 1:
raise ConfigurationError("'max_length' must be >= 1")
self.max_length = int(max_length)
super().__init__(**kwargs)
@property
def constraints(self) -> dict:
return {
"max_length": self.max_length,
}
@property
def SQL_TYPE(self) -> str: # type: ignore
return f"VARCHAR({self.max_length})"
class TextField(Field, str): # type: ignore
"""
Large Text field.
"""
indexable = False
SQL_TYPE = "TEXT"
def __init__(
self, pk: bool = False, unique: bool = False, index: bool = False, **kwargs: Any
) -> None:
if pk:
warnings.warn(
"TextField as a PrimaryKey is Deprecated, use CharField instead",
DeprecationWarning,
stacklevel=2,
)
if unique:
raise ConfigurationError(
f"TextField doesn't support unique indexes, consider CharField or another strategy"
)
if index:
raise ConfigurationError(f"TextField can't be indexed, consider CharField")
super().__init__(pk=pk, **kwargs)
class _db_mysql:
SQL_TYPE = "LONGTEXT"
class BooleanField(Field):
"""
Boolean field.
"""
# Bool is not subclassable, so we specify type here
field_type = bool
SQL_TYPE = "BOOL"
class _db_sqlite:
SQL_TYPE = "INT"
class DecimalField(Field, Decimal):
"""
Accurate decimal field.
You must provide the following:
``max_digits`` (int):
Max digits of significance of the decimal field.
``decimal_places`` (int):
How many of those significant digits are after the decimal point.
"""
skip_to_python_if_native = True
def __init__(self, max_digits: int, decimal_places: int, **kwargs: Any) -> None:
if int(max_digits) < 1:
raise ConfigurationError("'max_digits' must be >= 1")
if int(decimal_places) < 0:
raise ConfigurationError("'decimal_places' must be >= 0")
super().__init__(**kwargs)
self.max_digits = max_digits
self.decimal_places = decimal_places
self.quant = Decimal("1" if decimal_places == 0 else f"1.{('0' * decimal_places)}")
def to_python_value(self, value: Any) -> Optional[Decimal]:
if value is None:
return None
return Decimal(value).quantize(self.quant).normalize()
@property
def SQL_TYPE(self) -> str: # type: ignore
return f"DECIMAL({self.max_digits},{self.decimal_places})"
class _db_sqlite:
SQL_TYPE = "VARCHAR(40)"
def function_cast(self, term: Term) -> Term:
return functions.Cast(term, SqlTypes.NUMERIC)
class DatetimeField(Field, datetime.datetime):
"""
Datetime field.
``auto_now`` and ``auto_now_add`` are mutually exclusive.
You can opt to set neither or only ONE of them.
``auto_now`` (bool):
Always set to ``datetime.utcnow()`` on save.
``auto_now_add`` (bool):
Set to ``datetime.utcnow()`` on first save only.
"""
skip_to_python_if_native = True
SQL_TYPE = "TIMESTAMP"
class _db_mysql:
SQL_TYPE = "DATETIME(6)"
def __init__(self, auto_now: bool = False, auto_now_add: bool = False, **kwargs: Any) -> None:
if auto_now_add and auto_now:
raise ConfigurationError("You can choose only 'auto_now' or 'auto_now_add'")
super().__init__(**kwargs)
self.auto_now = auto_now
self.auto_now_add = auto_now | auto_now_add
def to_python_value(self, value: Any) -> Optional[datetime.datetime]:
if value is None or isinstance(value, datetime.datetime):
return value
return parse_datetime(value)
def to_db_value(
self, value: Optional[datetime.datetime], instance: "Union[Type[Model], Model]"
) -> Optional[datetime.datetime]:
# Only do this if it is a Model instance, not class. Test for guaranteed instance var
if hasattr(instance, "_saved_in_db") and (
self.auto_now
or (self.auto_now_add and getattr(instance, self.model_field_name) is None)
):
value = datetime.datetime.utcnow()
setattr(instance, self.model_field_name, value)
return value
return value
@property
def constraints(self) -> dict:
data = {}
if self.auto_now_add:
data["readOnly"] = True
return data
class DateField(Field, datetime.date):
"""
Date field.
"""
skip_to_python_if_native = True
SQL_TYPE = "DATE"
def to_python_value(self, value: Any) -> Optional[datetime.date]:
if value is None or isinstance(value, datetime.date):
return value
return parse_datetime(value).date()
class TimeDeltaField(Field, datetime.timedelta):
"""
A field for storing time differences.
"""
SQL_TYPE = "BIGINT"
def to_python_value(self, value: Any) -> Optional[datetime.timedelta]:
if value is None or isinstance(value, datetime.timedelta):
return value
return datetime.timedelta(microseconds=value)
def to_db_value(
self, value: Optional[datetime.timedelta], instance: "Union[Type[Model], Model]"
) -> Optional[int]:
if value is None:
return None
return (value.days * 86400000000) + (value.seconds * 1000000) + value.microseconds
class FloatField(Field, float):
"""
Float (double) field.
"""
SQL_TYPE = "DOUBLE PRECISION"
class _db_sqlite:
SQL_TYPE = "REAL"
class _db_mysql:
SQL_TYPE = "DOUBLE"
class JSONField(Field, dict, list): # type: ignore
"""
JSON field.
This field can store dictionaries or lists of any JSON-compliant structure.
You can specify your own custom JSON encoder/decoder; leaving it at the default should work well.
If you have ``python-rapidjson`` installed, we default to using that,
else the default ``json`` module will be used.
``encoder``:
The custom JSON encoder.
``decoder``:
The custom JSON decoder.
"""
SQL_TYPE = "TEXT"
indexable = False
class _db_postgres:
SQL_TYPE = "JSONB"
def __init__(
self,
encoder: JsonDumpsFunc = JSON_DUMPS,
decoder: JsonLoadsFunc = JSON_LOADS,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
def to_db_value(
self, value: Optional[Union[dict, list]], instance: "Union[Type[Model], Model]"
) -> Optional[str]:
return None if value is None else self.encoder(value)
def to_python_value(
self, value: Optional[Union[str, dict, list]]
) -> Optional[Union[dict, list]]:
return self.decoder(value) if isinstance(value, str) else value
class UUIDField(Field, UUID):
"""
UUID Field
This field can store uuid value.
If used as a primary key, it will auto-generate a UUID4 by default.
"""
SQL_TYPE = "CHAR(36)"
class _db_postgres:
SQL_TYPE = "UUID"
def __init__(self, **kwargs: Any) -> None:
if kwargs.get("pk", False) and "default" not in kwargs:
kwargs["default"] = uuid4
super().__init__(**kwargs)
def to_db_value(self, value: Any, instance: "Union[Type[Model], Model]") -> Optional[str]:
return value and str(value)
def to_python_value(self, value: Any) -> Optional[UUID]:
if value is None or isinstance(value, UUID):
return value
return UUID(value)
class BinaryField(Field, bytes): # type: ignore
"""
Binary field.
This is for storing ``bytes`` objects.
Note that filter or queryset-update operations are not supported.
"""
indexable = False
SQL_TYPE = "BLOB"
class _db_postgres:
SQL_TYPE = "BYTEA"
class _db_mysql:
SQL_TYPE = "LONGBLOB"
class IntEnumFieldInstance(SmallIntField):
def __init__(
self, enum_type: Type[IntEnum], description: Optional[str] = None, **kwargs: Any
) -> None:
# Validate values
for item in enum_type:
try:
value = int(item.value)
except ValueError:
raise ConfigurationError("IntEnumField only supports integer enums!")
if not 0 <= value < 32768:
raise ConfigurationError("The valid range of IntEnumField's values is 0..32767!")
# Automatic description for the field if not specified by the user
if description is None:
description = "\n".join([f"{e.name}: {int(e.value)}" for e in enum_type])[:2048]
super().__init__(description=description, **kwargs)
self.enum_type = enum_type
def to_python_value(self, value: Union[int, None]) -> Union[IntEnum, None]:
return self.enum_type(value) if value is not None else None
def to_db_value(
self, value: Union[IntEnum, None, int], instance: "Union[Type[Model], Model]"
) -> Union[int, None]:
if isinstance(value, IntEnum):
return int(value.value)
if isinstance(value, int):
return int(self.enum_type(value))
return value
IntEnumType = TypeVar("IntEnumType", bound=IntEnum)
def IntEnumField(
enum_type: Type[IntEnumType], description: Optional[str] = None, **kwargs: Any,
) -> IntEnumType:
"""
Enum Field
A field representing an integer enumeration.
The description of the field is set automatically if not specified to a multiline list of
"name: value" pairs.
**Note**: A valid int value of ``enum_type`` is also acceptable.
``enum_type``:
The enum class
``description``:
The description of the field. It is set automatically if not specified to a multiline list
of "name: value" pairs.
"""
return IntEnumFieldInstance(enum_type, description, **kwargs) # type: ignore
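# A minimal usage sketch (the "Service" enum and "Worker" model are hypothetical);
# values must fall inside the 0..32767 range enforced above:
#
#     class Service(IntEnum):
#         python_programming = 1
#         database_design = 2
#
#     class Worker(Model):
#         service = IntEnumField(Service, default=Service.python_programming)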
class CharEnumFieldInstance(CharField):
def __init__(
self,
enum_type: Type[Enum],
description: Optional[str] = None,
max_length: int = 0,
**kwargs: Any,
) -> None:
# Automatic description for the field if not specified by the user
if description is None:
description = "\n".join([f"{e.name}: {str(e.value)}" for e in enum_type])[:2048]
# Automatic CharField max_length
if max_length == 0:
for item in enum_type:
item_len = len(str(item.value))
if item_len > max_length:
max_length = item_len
super().__init__(description=description, max_length=max_length, **kwargs)
self.enum_type = enum_type
def to_python_value(self, value: Union[str, None]) -> Union[Enum, None]:
return self.enum_type(value) if value is not None else None
def to_db_value(
self, value: Union[Enum, None, str], instance: "Union[Type[Model], Model]"
) -> Union[str, None]:
if isinstance(value, Enum):
return str(value.value)
if isinstance(value, str):
return str(self.enum_type(value).value)
return value
CharEnumType = TypeVar("CharEnumType", bound=Enum)
def CharEnumField(
enum_type: Type[CharEnumType],
description: Optional[str] = None,
max_length: int = 0,
**kwargs: Any,
) -> CharEnumType:
"""
Char Enum Field
A field representing a character enumeration.
**Warning**: If ``max_length`` is not specified or equals zero, the size of the represented
char fields is automatically detected. So if you later update the enum, you need to update your
table schema as well.
**Note**: A valid str value of ``enum_type`` is also acceptable.
``enum_type``:
The enum class
``description``:
The description of the field. It is set automatically if not specified to a multiline list
of "name: value" pairs.
``max_length``:
The length of the created CharField. If it is zero it is automatically detected from
enum_type.
"""
return CharEnumFieldInstance(enum_type, description, max_length, **kwargs) # type: ignore
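# A minimal usage sketch (the "Currency" enum and "Order" model are hypothetical);
# with max_length left at 0 the column width is derived from the longest enum value:
#
#     class Currency(str, Enum):
#         HUF = "HUF"
#         EUR = "EUR"
#
#     class Order(Model):
#         currency = CharEnumField(Currency, default=Currency.HUF)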
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Awaitable,
Generator,
Generic,
Iterator,
List,
Optional,
Type,
TypeVar,
Union,
)
from pypika import Table
from typing_extensions import Literal
from tortoise.exceptions import ConfigurationError, NoValuesFetched, OperationalError
from tortoise.fields.base import CASCADE, RESTRICT, SET_NULL, Field
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
from tortoise.queryset import QuerySet, Q
from tortoise.backends.base.client import BaseDBAsyncClient
MODEL = TypeVar("MODEL", bound="Model")
OneToOneNullableRelation = Union[Awaitable[Optional[MODEL]], Optional[MODEL]]
"""
Type hint for the result of accessing the :func:`.OneToOneField` field in the model
when obtained model can be nullable.
"""
OneToOneRelation = Union[Awaitable[MODEL], MODEL]
"""
Type hint for the result of accessing the :func:`.OneToOneField` field in the model.
"""
ForeignKeyNullableRelation = Union[Awaitable[Optional[MODEL]], Optional[MODEL]]
"""
Type hint for the result of accessing the :func:`.ForeignKeyField` field in the model
when obtained model can be nullable.
"""
ForeignKeyRelation = Union[Awaitable[MODEL], MODEL]
"""
Type hint for the result of accessing the :func:`.ForeignKeyField` field in the model.
"""
class _NoneAwaitable:
__slots__ = ()
def __await__(self) -> Generator[None, None, None]:
yield None
def __bool__(self) -> bool:
return False
NoneAwaitable = _NoneAwaitable()
class ReverseRelation(Generic[MODEL]):
"""
Relation container for :func:`.ForeignKeyField`.
"""
def __init__(
self, remote_model: Type[MODEL], relation_field: str, instance: "Model", from_field: str,
) -> None:
self.remote_model = remote_model
self.relation_field = relation_field
self.instance = instance
self.from_field = from_field
self._fetched = False
self._custom_query = False
self.related_objects: List[MODEL] = []
@property
def _query(self) -> "QuerySet[MODEL]":
if not self.instance._saved_in_db:
raise OperationalError(
"This objects hasn't been instanced, call.save() before calling related queries"
)
return self.remote_model.filter(
**{self.relation_field: getattr(self.instance, self.from_field)}
)
def __contains__(self, item: Any) -> bool:
self._raise_if_not_fetched()
return item in self.related_objects
def __iter__(self) -> "Iterator[MODEL]":
self._raise_if_not_fetched()
return self.related_objects.__iter__()
def __len__(self) -> int:
self._raise_if_not_fetched()
return len(self.related_objects)
def __bool__(self) -> bool:
self._raise_if_not_fetched()
return bool(self.related_objects)
def __getitem__(self, item: int) -> MODEL:
self._raise_if_not_fetched()
return self.related_objects[item]
def __await__(self) -> Generator[Any, None, List[MODEL]]:
return self._query.__await__()
async def __aiter__(self) -> AsyncGenerator[Any, MODEL]:
if not self._fetched:
self._set_result_for_query(await self)
for val in self.related_objects:
yield val
def filter(self, *args: "Q", **kwargs: Any) -> "QuerySet[MODEL]":
"""
Returns a QuerySet with related elements filtered by args/kwargs.
"""
return self._query.filter(*args, **kwargs)
def all(self) -> "QuerySet[MODEL]":
"""
Returns a QuerySet with all related elements.
"""
return self._query
def order_by(self, *orderings: str) -> "QuerySet[MODEL]":
"""
Returns a QuerySet of related elements in order.
"""
return self._query.order_by(*orderings)
def limit(self, limit: int) -> "QuerySet[MODEL]":
"""
Returns a QuerySet with at most «limit» related elements.
"""
return self._query.limit(limit)
def offset(self, offset: int) -> "QuerySet[MODEL]":
"""
Returns a QuerySet with all related elements offset by «offset».
"""
return self._query.offset(offset)
def _set_result_for_query(self, sequence: List[MODEL]) -> None:
self._fetched = True
self.related_objects = sequence
def _raise_if_not_fetched(self) -> None:
if not self._fetched:
raise NoValuesFetched(
"No values were fetched for this relation, first use.fetch_related()"
)
class ManyToManyRelation(ReverseRelation[MODEL]):
"""
Many to many relation container for :func:`.ManyToManyField`.
"""
def __init__(self, instance: "Model", m2m_field: "ManyToManyFieldInstance") -> None:
super().__init__(m2m_field.related_model, m2m_field.related_name, instance, "pk") # type: ignore
self.field = m2m_field
self.instance = instance
async def add(self, *instances: MODEL, using_db: "Optional[BaseDBAsyncClient]" = None) -> None:
"""
Adds one or more of ``instances`` to the relation.
If it is already added, it will be silently ignored.
:raises OperationalError: If Object to add is not saved.
"""
if not instances:
return
if not self.instance._saved_in_db:
raise OperationalError(f"You should first call.save() on {self.instance}")
db = using_db if using_db else self.remote_model._meta.db
pk_formatting_func = type(self.instance)._meta.pk.to_db_value
related_pk_formatting_func = type(instances[0])._meta.pk.to_db_value
through_table = Table(self.field.through)
select_query = (
db.query_class.from_(through_table)
.where(
through_table[self.field.backward_key]
== pk_formatting_func(self.instance.pk, self.instance)
)
.select(self.field.backward_key, self.field.forward_key)
)
query = db.query_class.into(through_table).columns(
through_table[self.field.forward_key], through_table[self.field.backward_key],
)
if len(instances) == 1:
criterion = through_table[self.field.forward_key] == related_pk_formatting_func(
instances[0].pk, instances[0]
)
else:
criterion = through_table[self.field.forward_key].isin(
[related_pk_formatting_func(i.pk, i) for i in instances]
)
select_query = select_query.where(criterion)
# TODO: This is highly inefficient. Should use UNIQUE index by default.
# And optionally allow duplicates.
_, already_existing_relations_raw = await db.execute_query(str(select_query))
already_existing_relations = {
(
pk_formatting_func(r[self.field.backward_key], self.instance),
related_pk_formatting_func(r[self.field.forward_key], self.instance),
)
for r in already_existing_relations_raw
}
insert_is_required = False
for instance_to_add in instances:
if not instance_to_add._saved_in_db:
raise OperationalError(f"You should first call.save() on {instance_to_add}")
pk_f = related_pk_formatting_func(instance_to_add.pk, instance_to_add)
pk_b = pk_formatting_func(self.instance.pk, self.instance)
if (pk_b, pk_f) in already_existing_relations:
continue
query = query.insert(pk_f, pk_b)
insert_is_required = True
if insert_is_required:
await db.execute_query(str(query))
async def clear(self, using_db: "Optional[BaseDBAsyncClient]" = None) -> None:
"""
Clears ALL relations.
"""
db = using_db if using_db else self.remote_model._meta.db
through_table = Table(self.field.through)
pk_formatting_func = type(self.instance)._meta.pk.to_db_value
query = (
db.query_class.from_(through_table)
.where(
through_table[self.field.backward_key]
== pk_formatting_func(self.instance.pk, self.instance)
)
.delete()
)
await db.execute_query(str(query))
async def remove(
self, *instances: MODEL, using_db: "Optional[BaseDBAsyncClient]" = None
) -> None:
"""
Removes one or more of ``instances`` from the relation.
:raises OperationalError: remove() was called with no instances.
"""
db = using_db if using_db else self.remote_model._meta.db
if not instances:
raise OperationalError("remove() called on no instances")
through_table = Table(self.field.through)
pk_formatting_func = type(self.instance)._meta.pk.to_db_value
related_pk_formatting_func = type(instances[0])._meta.pk.to_db_value
if len(instances) == 1:
condition = (
through_table[self.field.forward_key]
== related_pk_formatting_func(instances[0].pk, instances[0])
) & (
through_table[self.field.backward_key]
== pk_formatting_func(self.instance.pk, self.instance)
)
else:
condition = (
through_table[self.field.backward_key]
== pk_formatting_func(self.instance.pk, self.instance)
) & (
through_table[self.field.forward_key].isin(
[related_pk_formatting_func(i.pk, i) for i in instances]
)
)
query = db.query_class.from_(through_table).where(condition).delete()
await db.execute_query(str(query))
class RelationalField(Field):
has_db_field = False
def __init__(
self, related_model: "Type[Model]", to_field: Optional[str] = None, **kwargs: Any
) -> None:
super().__init__(**kwargs)
self.related_model: "Type[Model]" = related_model
self.to_field: str = to_field # type: ignore
self.to_field_instance: Field = None # type: ignore
def describe(self, serializable: bool) -> dict:
desc = super().describe(serializable)
del desc["db_column"]
return desc
class ForeignKeyFieldInstance(RelationalField):
def __init__(
self,
model_name: str,
related_name: Union[Optional[str], Literal[False]] = None,
on_delete: str = CASCADE,
**kwargs: Any,
) -> None:
super().__init__(None, **kwargs) # type: ignore
if len(model_name.split("."))!= 2:
raise ConfigurationError('Foreign key accepts model name in format "app.Model"')
self.model_name = model_name
self.related_name = related_name
if on_delete not in {CASCADE, RESTRICT, SET_NULL}:
raise ConfigurationError("on_delete can only be CASCADE, RESTRICT or SET_NULL")
if on_delete == SET_NULL and not bool(kwargs.get("null")):
raise ConfigurationError("If on_delete is SET_NULL, then field must have null=True set")
self.on_delete = on_delete
def describe(self, serializable: bool) -> dict:
desc = super().describe(serializable)
desc["raw_field"] = self.source_field
return desc
class BackwardFKRelation(RelationalField):
def __init__(
self,
field_type: "Type[Model]",
relation_field: str,
relation_source_field: str,
null: bool,
description: Optional[str],
**kwargs: Any,
) -> None:
super().__init__(field_type, null=null, **kwargs)
self.relation_field: str = relation_field
self.relation_source_field: str = relation_source_field
self.description: Optional[str] = description
class OneToOneFieldInstance(ForeignKeyFieldInstance):
def __init__(
self,
model_name: str,
related_name: Union[Optional[str], Literal[False]] = None,
on_delete: str = CASCADE,
**kwargs: Any,
) -> None:
if len(model_name.split("."))!= 2:
raise ConfigurationError('OneToOneField accepts model name in format "app.Model"')
super().__init__(model_name, related_name, on_delete, unique=True, **kwargs)
class BackwardOneToOneRelation(BackwardFKRelation):
pass
class ManyToManyFieldInstance(RelationalField):
field_type = ManyToManyRelation
def __init__(
self,
model_name: str,
through: Optional[str] = None,
forward_key: Optional[str] = None,
backward_key: str = "",
related_name: str = "",
field_type: "Type[Model]" = None, # type: ignore
**kwargs: Any,
) -> None:
# TODO: rename through to through_table
# TODO: add through to use a Model
super().__init__(field_type, **kwargs)
if len(model_name.split("."))!= 2:
raise ConfigurationError('Foreign key accepts model name in format "app.Model"')
self.model_name: str = model_name
self.related_name: str = related_name
self.forward_key: str = forward_key or f"{model_name.split('.')[1].lower()}_id"
self.backward_key: str = backward_key
self.through: str = through # type: ignore
self._generated: bool = False
def OneToOneField(
model_name: str,
related_name: Union[Optional[str], Literal[False]] = None,
on_delete: str = CASCADE,
**kwargs: Any,
) -> OneToOneRelation:
"""
OneToOne relation field.
This field represents a foreign key relation to another model.
See :ref:`one_to_one` for usage information.
You must provide the following:
``model_name``:
The name of the related model in a :samp:`'{app}.{model}'` format.
The following is optional:
``related_name``:
The attribute name on the related model to reverse resolve the foreign key.
``on_delete``:
One of:
``field.CASCADE``:
Indicate that the model should be cascade deleted if related model gets deleted.
``field.RESTRICT``:
Indicate that the related model delete will be restricted as long as a
foreign key points to it.
``field.SET_NULL``:
Resets the field to NULL in case the related model gets deleted.
Can only be set if field has ``null=True`` set.
``field.SET_DEFAULT``:
Resets the field to ``default`` value in case the related model gets deleted.
Can only be set if the field has a ``default`` set.
``to_field``:
The attribute name on the related model to establish foreign key relationship.
If not set, pk is used
"""
return OneToOneFieldInstance(model_name, related_name, on_delete, **kwargs)
def ForeignKeyField(
model_name: str,
related_name: Union[Optional[str], Literal[False]] = None,
on_delete: str = CASCADE,
**kwargs: Any,
) -> ForeignKeyRelation:
"""
ForeignKey relation field.
This field represents a foreign key relation to another model.
See :ref:`foreign_key` for usage information.
You must provide the following:
``model_name``:
The name of the related model in a :samp:`'{app}.{model}'` format.
The following is optional:
``related_name``:
The attribute name on the related model to reverse resolve the foreign key.
``on_delete``:
One of:
``field.CASCADE``:
Indicate that the model should be cascade deleted if related model gets deleted.
``field.RESTRICT``:
Indicate that the related model delete will be restricted as long as a
foreign key points to it.
``field.SET_NULL``:
Resets the field to NULL in case the related model gets deleted.
Can only be set if field has ``null=True`` set.
``field.SET_DEFAULT``:
Resets the field to ``default`` value in case the related model gets deleted.
Can only be set if the field has a ``default`` set.
``to_field``:
The attribute name on the related model to establish foreign key relationship.
If not set, pk is used
"""
return ForeignKeyFieldInstance(model_name, related_name, on_delete, **kwargs)
def ManyToManyField(
model_name: str,
through: Optional[str] = None,
forward_key: Optional[str] = None,
backward_key: str = "",
related_name: str = "",
**kwargs: Any,
) -> "ManyToManyRelation":
"""
ManyToMany relation field.
This field represents a many-to-many between this model and another model.
See :ref:`many_to_many` for usage information.
You must provide the following:
``model_name``:
The name of the related model in a :samp:`'{app}.{model}'` format.
The following is optional:
``through``:
The DB table that represents the through table.
The default is normally safe.
``forward_key``:
The forward lookup key on the through table.
The default is normally safe.
``backward_key``:
The backward lookup key on the through table.
The default is normally safe.
``related_name``:
The attribute name on the related model to reverse resolve the many to many.
"""
return ManyToManyFieldInstance( # type: ignore
model_name, through, forward_key, backward_key, related_name, **kwargs
) |
|
tortoise__tortoise-orm | models.rst | Module doc / Tutorial | Model usage | Apache License 2.0 | tortoise__tortoise-orm/docs/models.rst | [
"tortoise__tortoise-orm/tortoise/models.py"
] | Models
Usage
To get working with models, first you should import them
from tortoise.models import Model
With that you can start describing your own models like that
class Tournament(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
created = fields.DatetimeField(auto_now_add=True)
def __str__(self):
return self.name
class Event(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
tournament = fields.ForeignKeyField('models.Tournament', related_name='events')
participants = fields.ManyToManyField('models.Team', related_name='events', through='event_team')
modified = fields.DatetimeField(auto_now=True)
prize = fields.DecimalField(max_digits=10, decimal_places=2, null=True)
def __str__(self):
return self.name
class Team(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
def __str__(self):
return self.name
Let's see in detail what we accomplished here:
class Tournament(Model):
Every model should be derived from the base Model. You can also derive from
your own model subclasses, and you can make abstract models like this:
class AbstractTournament(Model):
id = fields.IntField(pk=True)
name = fields.TextField()
created = fields.DatetimeField(auto_now_add=True)
class Meta:
abstract = True
def __str__(self):
return self.name
These models won't be created during schema generation and won't create
relations to other models.
Further we have the field fields.DatetimeField(auto_now=True). The options
auto_now and auto_now_add work like Django's options.
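For example (a minimal sketch, assuming an initialised Tortoise connection and the models above), the created timestamp is filled in automatically on insert:
tournament = await Tournament.create(name='Demo')
print(tournament.created)  # set automatically (auto_now_add)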
Use of __models__
If you define the variable __models__ in the module from which you load your
models, generate_schema will use that list rather than automatically
discovering models for you.
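A minimal sketch (placed in the module that defines the models above):
__models__ = [Tournament, Event, Team]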
Primary Keys
In Tortoise ORM we require that a model has a primary key.
That primary key will be accessible through a reserved field pk which
will be an alias of whichever field has been nominated as a primary key.
That alias field can be used as a field name when doing filtering e.g.
.filter(pk=...) etc…
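For example (a minimal sketch using the models above, where id is the primary key):
event = await Event.filter(pk=1).first()  # equivalent to .filter(id=1)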
Note
We currently support single (non-composite) primary keys of any
indexable field type, but only these field types are recommended:
IntField
BigIntField
CharField
UUIDField
You define a primary key by setting the pk parameter to True. If you
don't define a primary key, we will create a primary key of type
IntField with the name id for you.
Note
If this is used on an Integer Field, generated will be set to True
unless you explicitly pass generated=False as well.
Any of these are valid primary key definitions in a Model:
id = fields.IntField(pk=True)
checksum = fields.CharField(max_length=64, pk=True)
guid = fields.UUIDField(pk=True)
Inheritance
When defining models in Tortoise ORM, you can save a lot of repetitive
work by leveraging inheritance.
You can define fields in more generic classes and they are automatically
available in derived classes. Base classes are not limited to Model
classes. Any class will work. This way you are able to define your
models in a natural and easy to maintain way.
Let's have a look at some examples.
from tortoise import fields
from tortoise.models import Model
class TimestampMixin():
created_at = fields.DatetimeField(null=True, auto_now_add=True)
modified_at = fields.DatetimeField(null=True, auto_now=True)
class NameMixin():
name = fields.CharField(40, unique=True)
class MyAbstractBaseModel(Model):
id = fields.IntField(pk=True)
class Meta:
abstract = True
class UserModel(TimestampMixin, MyAbstractBaseModel):
# Overriding the id definition
# from MyAbstractBaseModel
id = fields.UUIDField(pk=True)
# Adding additional fields
first_name = fields.CharField(20, null=True)
class Meta:
table = "user"
class RoleModel(TimestampMixin, NameMixin, MyAbstractBaseModel):
class Meta:
table = "role"
Using the Meta class is not necessary, but it is a good habit to give
your table an explicit name. This way you can change the model name
without breaking the schema. So the following definition is also valid:
class RoleModel(TimestampMixin, NameMixin, MyAbstractBaseModel):
pass
ForeignKeyField
tournament = fields.ForeignKeyField('models.Tournament', related_name='events')
participants = fields.ManyToManyField('models.Team', related_name='events')
modified = fields.DatetimeField(auto_now=True)
prize = fields.DecimalField(max_digits=10, decimal_places=2, null=True)
In the Event model we have some more fields that could be interesting for
us.
fields.ForeignKeyField('models.Tournament', related_name='events')
Here we create a foreign key reference to Tournament. We create it by
referring to the model by its literal, consisting of the app name and model
name. models is the default app name, but you can change it in
class Meta with app = 'other'.
related_name
is a keyword argument that defines the field used for related queries on
the referenced model, so with that you could fetch all of a tournament's
events.
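For example (the same call appears again under Fetching the foreign object below):
events = await tournament.events.all()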
The DB-backing field
Note
A ForeignKeyField is a virtual field, meaning it has no direct DB
backing. Instead it has a field (by default called {FKNAME}_id, that is,
just an _id is appended) that is the actual DB-backing field.
It will just contain the key value of the related table.
This is an important detail, as it allows one to assign/read the
actual value directly, which can be considered an optimization if the
entire foreign object isn't needed.
Specifying an FK can be done via either passing the object:
await SomeModel.create(tournament=the_tournament)
# or
somemodel.tournament=the_tournament
or by directly accessing the DB-backing field:
await SomeModel.create(tournament_id=the_tournament.pk)
# or
somemodel.tournament_id=the_tournament.pk
Querying a relationship is typically done by appending a double
underscore and then the foreign object's field. Then a normal query
attribute can be appended. This can be chained if the next key is also a
foreign object:
{FKNAME}__{FOREIGNFIELD}__gt=3
or
{FKNAME}__{FOREIGNFK}__{VERYFOREIGNFIELD}__gt=3
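For example (a minimal sketch using the models above; the filter values are made up):
events = await Event.filter(tournament__name='Demo')
events = await Event.filter(tournament__id__gt=3)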
There is however one major limitation. We don't want to restrict
foreign column names, or have ambiguity (e.g. a foreign object may have
a field called isnull).
Then this would be entirely ambiguous:
{FKNAME}__isnull
To prevent that we require that direct filters be applied to the
DB-backing field of the foreign key:
{FKNAME}_id__isnull
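For example (a minimal sketch using the models above):
events = await Event.filter(tournament_id__isnull=False)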
Fetching the foreign object
Fetching foreign keys can be done with both async and sync interfaces.
Async fetch:
events = await tournament.events.all()
You can async iterate over it like this:
async for event in tournament.events:
...
Sync usage requires that you call fetch_related beforehand, and
then you can use common functions such as:
await tournament.fetch_related('events')
events = list(tournament.events)
eventlen = len(tournament.events)
if SomeEvent in tournament.events:
...
if tournament.events:
...
firstevent = tournament.events[0]
To get the reverse FK, e.g. event.tournament, we currently only
support the sync interface.
await event.fetch_related('tournament')
tournament = event.tournament
ManyToManyField
Next field is
fields.ManyToManyField('models.Team', related_name='events'). It
describes a many-to-many relation to the model Team.
To add to a ManyToManyField, both models need to be saved, otherwise
an OperationalError will be raised.
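For example (a minimal sketch, assuming event is a saved Event instance from earlier):
team = await Team.create(name='Crew')
await event.participants.add(team)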
Resolving many to many fields can be done with both async and sync
interfaces.
Async fetch:
participants = await tournament.participants.all()
You can async iterate over it like this:
async for participant in tournament.participants:
...
Sync usage requires that you call fetch_related beforehand, and
then you can use common functions such as:
await tournament.fetch_related('participants')
participants = list(tournament.participants)
participantlen = len(tournament.participants)
if SomeParticipant in tournament.participants:
...
if tournament.participants:
...
firstparticipant = tournament.participants[0]
The reverse lookup of team.events works exactly the same way.
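For example (a minimal sketch, assuming team is a saved Team instance):
await team.fetch_related('events')
events = list(team.events)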
Improving relational type hinting
Since Tortoise ORM is still a young project, it does not yet enjoy
widespread editor support for autocompleting models and the different
relations between them.
However, you can get such autocompletion by doing a little work yourself.
All you need to do is add a few annotations to your models for the fields
that are responsible for the relations.
Here is an updated example from getting_started that will add
autocompletion for all models, including the fields for the relations
between models.
from tortoise.models import Model
from tortoise import fields
class Tournament(Model):
id = fields.IntField(pk=True)
name = fields.CharField(max_length=255)
events: fields.ReverseRelation["Event"]
def __str__(self):
return self.name
class Event(Model):
id = fields.IntField(pk=True)
name = fields.CharField(max_length=255)
tournament: fields.ForeignKeyRelation[Tournament] = fields.ForeignKeyField(
"models.Tournament", related_name="events"
)
participants: fields.ManyToManyRelation["Team"] = fields.ManyToManyField(
"models.Team", related_name="events", through="event_team"
)
def __str__(self):
return self.name
class Team(Model):
id = fields.IntField(pk=True)
name = fields.CharField(max_length=255)
events: fields.ManyToManyRelation[Event]
def __str__(self):
return self.name
| import asyncio
import inspect
import re
from copy import copy, deepcopy
from functools import partial
from typing import (
Any,
Awaitable,
Callable,
Dict,
Generator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
)
from pypika import Order, Query, Table
from tortoise.backends.base.client import BaseDBAsyncClient
from tortoise.exceptions import (
ConfigurationError,
IncompleteInstanceError,
IntegrityError,
OperationalError,
TransactionManagementError,
)
from tortoise.fields.base import Field
from tortoise.fields.data import IntField
from tortoise.fields.relational import (
BackwardFKRelation,
BackwardOneToOneRelation,
ForeignKeyFieldInstance,
ManyToManyFieldInstance,
ManyToManyRelation,
NoneAwaitable,
OneToOneFieldInstance,
ReverseRelation,
)
from tortoise.filters import get_filters_for_field
from tortoise.functions import Function
from tortoise.queryset import Q, QuerySet, QuerySetSingle
from tortoise.signals import Signals
from tortoise.transactions import current_transaction_map, in_transaction
MODEL = TypeVar("MODEL", bound="Model")
# TODO: Define Filter type object. Possibly tuple?
def get_together(meta: "Model.Meta", together: str) -> Tuple[Tuple[str,...],...]:
_together = getattr(meta, together, ())
if _together and isinstance(_together, (list, tuple)) and isinstance(_together[0], str):
_together = (_together,)
# return without validation, validation will be done further in the code
return _together
def prepare_default_ordering(meta: "Model.Meta") -> Tuple[Tuple[str, Order],...]:
ordering_list = getattr(meta, "ordering", ())
parsed_ordering = tuple(
QuerySet._resolve_ordering_string(ordering) for ordering in ordering_list
)
return parsed_ordering
def _fk_setter(
self: "Model", value: "Optional[Model]", _key: str, relation_field: str, to_field: str
) -> None:
setattr(self, relation_field, getattr(value, to_field) if value else None)
setattr(self, _key, value)
def _fk_getter(
self: "Model", _key: str, ftype: "Type[Model]", relation_field: str, to_field: str
) -> Awaitable:
try:
return getattr(self, _key)
except AttributeError:
value = getattr(self, relation_field)
if value:
return ftype.filter(**{to_field: value}).first()
return NoneAwaitable
def _rfk_getter(
self: "Model", _key: str, ftype: "Type[Model]", frelfield: str, from_field: str
) -> ReverseRelation:
val = getattr(self, _key, None)
if val is None:
val = ReverseRelation(ftype, frelfield, self, from_field)
setattr(self, _key, val)
return val
def _ro2o_getter(
self: "Model", _key: str, ftype: "Type[Model]", frelfield: str, from_field: str
) -> "QuerySetSingle[Optional[Model]]":
if hasattr(self, _key):
return getattr(self, _key)
val = ftype.filter(**{frelfield: getattr(self, from_field)}).first()
setattr(self, _key, val)
return val
def _m2m_getter(
self: "Model", _key: str, field_object: ManyToManyFieldInstance
) -> ManyToManyRelation:
val = getattr(self, _key, None)
if val is None:
val = ManyToManyRelation(self, field_object)
setattr(self, _key, val)
return val
def _get_comments(cls: "Type[Model]") -> Dict[str, str]:
"""
Get comments exactly before attributes
It can be a multiline comment. The placeholder "{model}" will be replaced with the name of the
model class. We require that the comments are in #: (with a colon) format, so you can
differentiate between private and public comments.
:param cls: The class we need to extract comments from its source.
:return: The dictionary of comments by field name
"""
try:
source = inspect.getsource(cls)
except TypeError: # pragma: nocoverage
return {}
comments = {}
for cls_ in reversed(cls.__mro__):
if cls_ is object:
continue
matches = re.findall(rf"((?:(?!\n|^)[^\w\n]*#:.*?\n)+?)[^\w\n]*(\w+)\s*[:=]", source)
for match in matches:
field_name = match[1]
# Extract text
comment = re.sub(r"(^\s*#:\s*|\s*$)", "", match[0], flags=re.MULTILINE)
# Class name template
comments[field_name] = comment.replace("{model}", cls_.__name__)
return comments
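# Illustration of the "#:" comment format consumed by _get_comments (hypothetical model):
#
#     class Tournament(Model):
#         #: The tournament name shown to users; picked up as the field's docstring.
#         name = fields.TextField()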
class MetaInfo:
__slots__ = (
"abstract",
"db_table",
"app",
"fields",
"db_fields",
"m2m_fields",
"o2o_fields",
"backward_o2o_fields",
"fk_fields",
"backward_fk_fields",
"fetch_fields",
"fields_db_projection",
"_inited",
"fields_db_projection_reverse",
"filters",
"fields_map",
"default_connection",
"basequery",
"basequery_all_fields",
"basetable",
"_filters",
"unique_together",
"indexes",
"pk_attr",
"generated_db_fields",
"_model",
"table_description",
"pk",
"db_pk_column",
"db_native_fields",
"db_default_fields",
"db_complex_fields",
"_default_ordering",
"_ordering_validated",
)
def __init__(self, meta: "Model.Meta") -> None:
self.abstract: bool = getattr(meta, "abstract", False)
self.db_table: str = getattr(meta, "table", "")
self.app: Optional[str] = getattr(meta, "app", None)
self.unique_together: Tuple[Tuple[str,...],...] = get_together(meta, "unique_together")
self.indexes: Tuple[Tuple[str,...],...] = get_together(meta, "indexes")
self._default_ordering: Tuple[Tuple[str, Order],...] = prepare_default_ordering(meta)
self._ordering_validated: bool = False
self.fields: Set[str] = set()
self.db_fields: Set[str] = set()
self.m2m_fields: Set[str] = set()
self.fk_fields: Set[str] = set()
self.o2o_fields: Set[str] = set()
self.backward_fk_fields: Set[str] = set()
self.backward_o2o_fields: Set[str] = set()
self.fetch_fields: Set[str] = set()
self.fields_db_projection: Dict[str, str] = {}
self.fields_db_projection_reverse: Dict[str, str] = {}
self._filters: Dict[str, Dict[str, dict]] = {}
self.filters: Dict[str, dict] = {}
self.fields_map: Dict[str, Field] = {}
self._inited: bool = False
self.default_connection: Optional[str] = None
self.basequery: Query = Query()
self.basequery_all_fields: Query = Query()
self.basetable: Table = Table("")
self.pk_attr: str = getattr(meta, "pk_attr", "")
self.generated_db_fields: Tuple[str] = None # type: ignore
self._model: Type["Model"] = None # type: ignore
self.table_description: str = getattr(meta, "table_description", "")
self.pk: Field = None # type: ignore
self.db_pk_column: str = ""
self.db_native_fields: List[Tuple[str, str, Field]] = []
self.db_default_fields: List[Tuple[str, str, Field]] = []
self.db_complex_fields: List[Tuple[str, str, Field]] = []
@property
def full_name(self) -> str:
return f"{self.app}.{self._model.__name__}"
def add_field(self, name: str, value: Field) -> None:
if name in self.fields_map:
raise ConfigurationError(f"Field {name} already present in meta")
value.model = self._model
self.fields_map[name] = value
value.model_field_name = name
if value.has_db_field:
self.fields_db_projection[name] = value.source_field or name
if isinstance(value, ManyToManyFieldInstance):
self.m2m_fields.add(name)
elif isinstance(value, BackwardOneToOneRelation):
self.backward_o2o_fields.add(name)
elif isinstance(value, BackwardFKRelation):
self.backward_fk_fields.add(name)
field_filters = get_filters_for_field(
field_name=name, field=value, source_field=value.source_field or name
)
self._filters.update(field_filters)
self.finalise_fields()
@property
def db(self) -> BaseDBAsyncClient:
try:
return current_transaction_map[self.default_connection].get()
except KeyError:
raise ConfigurationError("No DB associated to model")
@property
def ordering(self) -> Tuple[Tuple[str, Order],...]:
if not self._ordering_validated:
unknown_fields = {f for f, _ in self._default_ordering} - self.fields
raise ConfigurationError(
f"Unknown fields {','.join(unknown_fields)} in "
f"default ordering for model {self._model.__name__}"
)
return self._default_ordering
def get_filter(self, key: str) -> dict:
return self.filters[key]
def finalise_model(self) -> None:
"""
Finalise the model after it had been fully loaded.
"""
self.finalise_fields()
self._generate_filters()
self._generate_lazy_fk_m2m_fields()
self._generate_db_fields()
def finalise_fields(self) -> None:
self.db_fields = set(self.fields_db_projection.values())
self.fields = set(self.fields_map.keys())
self.fields_db_projection_reverse = {
value: key for key, value in self.fields_db_projection.items()
}
self.fetch_fields = (
self.m2m_fields
| self.backward_fk_fields
| self.fk_fields
| self.backward_o2o_fields
| self.o2o_fields
)
generated_fields = []
for field in self.fields_map.values():
if not field.generated:
continue
generated_fields.append(field.source_field or field.model_field_name)
self.generated_db_fields = tuple(generated_fields) # type: ignore
self._ordering_validated = True
for field_name, _ in self._default_ordering:
if field_name.split("__")[0] not in self.fields:
self._ordering_validated = False
break
def _generate_lazy_fk_m2m_fields(self) -> None:
# Create lazy FK fields on model.
for key in self.fk_fields:
_key = f"_{key}"
fk_field_object: ForeignKeyFieldInstance = self.fields_map[key] # type: ignore
relation_field = fk_field_object.source_field
to_field = fk_field_object.to_field_instance.model_field_name
setattr(
self._model,
key,
property(
partial(
_fk_getter,
_key=_key,
ftype=fk_field_object.related_model,
relation_field=relation_field,
to_field=to_field,
),
partial(
_fk_setter, _key=_key, relation_field=relation_field, to_field=to_field,
),
partial(
_fk_setter,
value=None,
_key=_key,
relation_field=relation_field,
to_field=to_field,
),
),
)
# Create lazy reverse FK fields on model.
for key in self.backward_fk_fields:
_key = f"_{key}"
backward_fk_field_object: BackwardFKRelation = self.fields_map[key] # type: ignore
setattr(
self._model,
key,
property(
partial(
_rfk_getter,
_key=_key,
ftype=backward_fk_field_object.related_model,
frelfield=backward_fk_field_object.relation_field,
from_field=backward_fk_field_object.to_field_instance.model_field_name,
)
),
)
# Create lazy one to one fields on model.
for key in self.o2o_fields:
_key = f"_{key}"
o2o_field_object: OneToOneFieldInstance = self.fields_map[key] # type: ignore
relation_field = o2o_field_object.source_field
to_field = o2o_field_object.to_field_instance.model_field_name
setattr(
self._model,
key,
property(
partial(
_fk_getter,
_key=_key,
ftype=o2o_field_object.related_model,
relation_field=relation_field,
to_field=to_field,
),
partial(
_fk_setter, _key=_key, relation_field=relation_field, to_field=to_field,
),
partial(
_fk_setter,
value=None,
_key=_key,
relation_field=relation_field,
to_field=to_field,
),
),
)
# Create lazy reverse one to one fields on model.
for key in self.backward_o2o_fields:
_key = f"_{key}"
backward_o2o_field_object: BackwardOneToOneRelation = self.fields_map[ # type: ignore
key
]
setattr(
self._model,
key,
property(
partial(
_ro2o_getter,
_key=_key,
ftype=backward_o2o_field_object.related_model,
frelfield=backward_o2o_field_object.relation_field,
from_field=backward_o2o_field_object.to_field_instance.model_field_name,
),
),
)
# Create lazy M2M fields on model.
for key in self.m2m_fields:
_key = f"_{key}"
setattr(
self._model,
key,
property(partial(_m2m_getter, _key=_key, field_object=self.fields_map[key])),
)
def _generate_db_fields(self) -> None:
self.db_default_fields.clear()
self.db_complex_fields.clear()
self.db_native_fields.clear()
for key in self.db_fields:
model_field = self.fields_db_projection_reverse[key]
field = self.fields_map[model_field]
default_converter = field.__class__.to_python_value is Field.to_python_value
if (
field.skip_to_python_if_native
and field.field_type in self.db.executor_class.DB_NATIVE
):
self.db_native_fields.append((key, model_field, field))
elif not default_converter:
self.db_complex_fields.append((key, model_field, field))
elif field.field_type in self.db.executor_class.DB_NATIVE:
self.db_native_fields.append((key, model_field, field))
else:
self.db_default_fields.append((key, model_field, field))
def _generate_filters(self) -> None:
get_overridden_filter_func = self.db.executor_class.get_overridden_filter_func
for key, filter_info in self._filters.items():
overridden_operator = get_overridden_filter_func(
filter_func=filter_info["operator"] # type: ignore
)
if overridden_operator:
filter_info = copy(filter_info)
filter_info["operator"] = overridden_operator # type: ignore
self.filters[key] = filter_info
class ModelMeta(type):
__slots__ = ()
def __new__(mcs, name: str, bases: Tuple[Type,...], attrs: dict):
fields_db_projection: Dict[str, str] = {}
fields_map: Dict[str, Field] = {}
filters: Dict[str, Dict[str, dict]] = {}
fk_fields: Set[str] = set()
m2m_fields: Set[str] = set()
o2o_fields: Set[str] = set()
meta_class: "Model.Meta" = attrs.get("Meta", type("Meta", (), {}))
pk_attr: str = "id"
# Searching for Field attributes in the class hierarchy
def __search_for_field_attributes(base: Type, attrs: dict) -> None:
"""
Searching for class attributes of type fields.Field
in the given class.
If an attribute of the class is an instance of fields.Field,
then it will be added to the fields dict. But only, if the
key is not already in the dict. So derived classes have a higher
precedence. Multiple Inheritance is supported from left to right.
After checking the given class, the function will look into
the classes according to the MRO (method resolution order).
The MRO is 'natural' order, in which python traverses methods and
fields. For more information on the magic behind check out:
`The Python 2.3 Method Resolution Order
<https://www.python.org/download/releases/2.3/mro/>`_.
"""
for parent in base.__mro__[1:]:
__search_for_field_attributes(parent, attrs)
meta = getattr(base, "_meta", None)
if meta:
# For abstract classes
for key, value in meta.fields_map.items():
attrs[key] = value
else:
# For mixin classes
for key, value in base.__dict__.items():
if isinstance(value, Field) and key not in attrs:
attrs[key] = value
# Start searching for fields in the base classes.
inherited_attrs: dict = {}
for base in bases:
__search_for_field_attributes(base, inherited_attrs)
if inherited_attrs:
# Ensure that the inherited fields are before the defined ones.
attrs = {**inherited_attrs, **attrs}
if name!= "Model":
custom_pk_present = False
for key, value in attrs.items():
if isinstance(value, Field):
if value.pk:
if custom_pk_present:
raise ConfigurationError(
f"Can't create model {name} with two primary keys,"
" only single primary key is supported"
)
if value.generated and not value.allows_generated:
raise ConfigurationError(
f"Field '{key}' ({value.__class__.__name__}) can't be DB-generated"
)
custom_pk_present = True
pk_attr = key
if not custom_pk_present and not getattr(meta_class, "abstract", None):
if "id" not in attrs:
attrs = {"id": IntField(pk=True), **attrs}
if not isinstance(attrs["id"], Field) or not attrs["id"].pk:
raise ConfigurationError(
f"Can't create model {name} without explicit primary key if field 'id'"
" already present"
)
for key, value in attrs.items():
if isinstance(value, Field):
if getattr(meta_class, "abstract", None):
value = deepcopy(value)
fields_map[key] = value
value.model_field_name = key
if isinstance(value, OneToOneFieldInstance):
o2o_fields.add(key)
elif isinstance(value, ForeignKeyFieldInstance):
fk_fields.add(key)
elif isinstance(value, ManyToManyFieldInstance):
m2m_fields.add(key)
else:
fields_db_projection[key] = value.source_field or key
filters.update(
get_filters_for_field(
field_name=key,
field=fields_map[key],
source_field=fields_db_projection[key],
)
)
if value.pk:
filters.update(
get_filters_for_field(
field_name="pk",
field=fields_map[key],
source_field=fields_db_projection[key],
)
)
# Clean the class attributes
for slot in fields_map:
attrs.pop(slot, None)
attrs["_meta"] = meta = MetaInfo(meta_class)
meta.fields_map = fields_map
meta.fields_db_projection = fields_db_projection
meta._filters = filters
meta.fk_fields = fk_fields
meta.backward_fk_fields = set()
meta.o2o_fields = o2o_fields
meta.backward_o2o_fields = set()
meta.m2m_fields = m2m_fields
meta.default_connection = None
meta.pk_attr = pk_attr
meta.pk = fields_map.get(pk_attr) # type: ignore
if meta.pk:
meta.db_pk_column = meta.pk.source_field or meta.pk_attr
meta._inited = False
if not fields_map:
meta.abstract = True
new_class: Type["Model"] = super().__new__(mcs, name, bases, attrs)
for field in meta.fields_map.values():
field.model = new_class
for fname, comment in _get_comments(new_class).items():
if fname in fields_map:
fields_map[fname].docstring = comment
if fields_map[fname].description is None:
fields_map[fname].description = comment.split("\n")[0]
if new_class.__doc__ and not meta.table_description:
meta.table_description = inspect.cleandoc(new_class.__doc__).split("\n")[0]
meta._model = new_class
meta.finalise_fields()
return new_class
class Model(metaclass=ModelMeta):
"""
Base class for all Tortoise ORM Models.
"""
# I don't like this here, but it makes auto-completion and static analysis much happier
_meta = MetaInfo(None) # type: ignore
_listeners: Dict[Signals, Dict[Type[MODEL], List[Callable]]] = { # type: ignore
Signals.pre_save: {},
Signals.post_save: {},
Signals.pre_delete: {},
Signals.post_delete: {},
}
def __init__(self, **kwargs: Any) -> None:
# self._meta is a very common attribute lookup, lets cache it.
meta = self._meta
self._partial = False
self._saved_in_db = False
self._custom_generated_pk = False
# Assign defaults for missing fields
for key in meta.fields.difference(self._set_kwargs(kwargs)):
field_object = meta.fields_map[key]
if callable(field_object.default):
setattr(self, key, field_object.default())
else:
setattr(self, key, field_object.default)
def _set_kwargs(self, kwargs: dict) -> Set[str]:
meta = self._meta
# Assign values and do type conversions
passed_fields = {*kwargs.keys()} | meta.fetch_fields
for key, value in kwargs.items():
if key in meta.fk_fields or key in meta.o2o_fields:
if value and not value._saved_in_db:
raise OperationalError(
f"You should first call.save() on {value} before referring to it"
)
setattr(self, key, value)
passed_fields.add(meta.fields_map[key].source_field)
elif key in meta.fields_db_projection:
field_object = meta.fields_map[key]
if field_object.generated:
self._custom_generated_pk = True
if value is None and not field_object.null:
raise ValueError(f"{key} is non nullable field, but null was passed")
setattr(self, key, field_object.to_python_value(value))
elif key in meta.backward_fk_fields:
raise ConfigurationError(
"You can't set backward relations through init, change related model instead"
)
elif key in meta.backward_o2o_fields:
raise ConfigurationError(
"You can't set backward one to one relations through init,"
" change related model instead"
)
elif key in meta.m2m_fields:
raise ConfigurationError(
"You can't set m2m relations through init, use m2m_manager instead"
)
return passed_fields
@classmethod
def _init_from_db(cls: Type[MODEL], **kwargs: Any) -> MODEL:
self = cls.__new__(cls)
self._partial = False
self._saved_in_db = True
meta = self._meta
try:
# This is like so for performance reasons.
# We want to avoid conditionals and calling .to_python_value()
# Native fields are fields that are already converted to/from python to DB type
# by the DB driver
for key, model_field, field in meta.db_native_fields:
setattr(self, model_field, kwargs[key])
# Fields that don't override .to_python_value() are converted without a call
# as we already know what we will be doing.
for key, model_field, field in meta.db_default_fields:
value = kwargs[key]
setattr(self, model_field, None if value is None else field.field_type(value))
# These fields need manual .to_python_value()
for key, model_field, field in meta.db_complex_fields:
setattr(self, model_field, field.to_python_value(kwargs[key]))
except KeyError:
self._partial = True
# TODO: Apply similar perf optimisation as above for partial
for key, value in kwargs.items():
setattr(self, key, meta.fields_map[key].to_python_value(value))
return self
def __str__(self) -> str:
return f"<{self.__class__.__name__}>"
def __repr__(self) -> str:
if self.pk:
return f"<{self.__class__.__name__}: {self.pk}>"
return f"<{self.__class__.__name__}>"
def __hash__(self) -> int:
if not self.pk:
raise TypeError("Model instances without id are unhashable")
return hash(self.pk)
def __eq__(self, other: object) -> bool:
return type(other) is type(self) and self.pk == other.pk # type: ignore
def _get_pk_val(self) -> Any:
return getattr(self, self._meta.pk_attr)
def _set_pk_val(self, value: Any) -> None:
setattr(self, self._meta.pk_attr, value)
pk = property(_get_pk_val, _set_pk_val)
"""
Alias to the models Primary Key.
Can be used as a field name when doing filtering e.g. ``.filter(pk=...)`` etc...
"""
def update_from_dict(self, data: dict) -> MODEL:
"""
Updates the current model with the provided dict.
This can allow mass-updating a model from a dict, also ensuring that datatype conversions happen.
This will ignore any extra fields, and NOT update the model with them,
but will raise errors on bad types or updating Many-instance relations.
:param data: The parameters you want to update in a dict format
:return: The current model instance
:raises ConfigurationError: When attempting to update a remote instance
(e.g. a reverse ForeignKey or ManyToMany relation)
:raises ValueError: When a passed parameter is not type compatible
"""
self._set_kwargs(data)
return self # type: ignore
@classmethod
def register_listener(cls, signal: Signals, listener: Callable):
"""
Register listener to current model class for special Signal.
:param signal: one of tortoise.signals.Signal
:param listener: callable listener
:raises ConfigurationError: When listener is not callable
"""
if not callable(listener):
raise ConfigurationError("Signal listener must be callable!")
cls_listeners = cls._listeners.get(signal).setdefault(cls, []) # type:ignore
if listener not in cls_listeners:
cls_listeners.append(listener)
async def _pre_delete(self, using_db: Optional[BaseDBAsyncClient] = None,) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.pre_delete, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(listener(self.__class__, self, using_db,))
await asyncio.gather(*listeners)
async def _post_delete(self, using_db: Optional[BaseDBAsyncClient] = None,) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.post_delete, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(listener(self.__class__, self, using_db,))
await asyncio.gather(*listeners)
async def _pre_save(
self,
using_db: Optional[BaseDBAsyncClient] = None,
update_fields: Optional[List[str]] = None,
) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.pre_save, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(listener(self.__class__, self, using_db, update_fields))
await asyncio.gather(*listeners)
async def _post_save(
self,
using_db: Optional[BaseDBAsyncClient] = None,
created: bool = False,
update_fields: Optional[List[str]] = None,
) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.post_save, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(listener(self.__class__, self, created, using_db, update_fields))
await asyncio.gather(*listeners)
async def save(
self,
using_db: Optional[BaseDBAsyncClient] = None,
update_fields: Optional[List[str]] = None,
) -> None:
"""
Creates/Updates the current model object.
:param update_fields: If provided, it should be a tuple/list of fields by name.
This is the subset of fields that should be updated.
If the object needs to be created ``update_fields`` will be ignored.
:param using_db: Specific DB connection to use instead of default bound
:raises IncompleteInstanceError: If the model is partial and the fields are not available for persistence.
"""
db = using_db or self._meta.db
executor = db.executor_class(model=self.__class__, db=db)
if self._partial:
if update_fields:
for field in update_fields:
if not hasattr(self, self._meta.pk_attr):
raise IncompleteInstanceError(
f"{self.__class__.__name__} is a partial model without primary key fetchd. Partial update not available"
)
if not hasattr(self, field):
raise IncompleteInstanceError(
f"{self.__class__.__name__} is a partial model, field '{field}' is not available"
)
else:
raise IncompleteInstanceError(
f"{self.__class__.__name__} is a partial model, can only be saved with the relevant update_field provided"
)
await self._pre_save(using_db, update_fields)
if self._saved_in_db:
await executor.execute_update(self, update_fields)
created = False
else:
await executor.execute_insert(self)
created = True
self._saved_in_db = True
await self._post_save(using_db, created, update_fields)
async def delete(self, using_db: Optional[BaseDBAsyncClient] = None) -> None:
"""
Deletes the current model object.
:param using_db: Specific DB connection to use instead of default bound
:raises OperationalError: If object has never been persisted.
"""
db = using_db or self._meta.db
if not self._saved_in_db:
raise OperationalError("Can't delete unpersisted record")
await self._pre_delete(using_db)
await db.executor_class(model=self.__class__, db=db).execute_delete(self)
await self._post_delete(using_db)
async def fetch_related(self, *args: Any, using_db: Optional[BaseDBAsyncClient] = None) -> None:
"""
Fetch related fields.
.. code-block:: python3
User.fetch_related("emails", "manager")
:param args: The related fields that should be fetched.
:param using_db: Specific DB connection to use instead of default bound
"""
db = using_db or self._meta.db
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
@classmethod
async def get_or_create(
cls: Type[MODEL],
defaults: Optional[dict] = None,
using_db: Optional[BaseDBAsyncClient] = None,
**kwargs: Any,
) -> Tuple[MODEL, bool]:
"""
Fetches the object if exists (filtering on the provided parameters),
else creates an instance with any unspecified parameters as default values.
:param defaults: Default values to be added to a created instance if it can't be fetched.
:param using_db: Specific DB connection to use instead of default bound
:param kwargs: Query parameters.
"""
if not defaults:
defaults = {}
db = using_db if using_db else cls._meta.db
async with in_transaction(connection_name=db.connection_name):
instance = await cls.filter(**kwargs).first()
if instance:
return instance, False
try:
return await cls.create(**defaults, **kwargs), True
except (IntegrityError, TransactionManagementError):
# Let transaction close
pass
# Try after transaction in case transaction error
return await cls.get(**kwargs), False
@classmethod
async def create(cls: Type[MODEL], **kwargs: Any) -> MODEL:
"""
Create a record in the DB and return the object.
.. code-block:: python3
user = await User.create(name="...", email="...")
Equivalent to:
.. code-block:: python3
user = User(name="...", email="...")
await user.save()
:param kwargs: Model parameters.
"""
instance = cls(**kwargs)
instance._saved_in_db = False
db = kwargs.get("using_db") or cls._meta.db
await instance.save(using_db=db)
return instance
@classmethod
async def bulk_create(
cls: Type[MODEL], objects: List[MODEL], using_db: Optional[BaseDBAsyncClient] = None,
) -> None:
"""
Bulk insert operation:
.. note::
The bulk insert operation will do the minimum to ensure that the object
created in the DB has all the defaults and generated fields set,
but may be an incomplete reference in Python.
e.g. ``IntField`` primary keys will not be populated.
This is recommended only for throw-away inserts where you want to ensure optimal
insert performance.
.. code-block:: python3
User.bulk_create([
User(name="...", email="..."),
User(name="...", email="...")
])
:param objects: List of objects to bulk create
:param using_db: Specific DB connection to use instead of default bound
"""
db = using_db or cls._meta.db
await db.executor_class(model=cls, db=db).execute_bulk_insert(objects) # type: ignore
@classmethod
def first(cls: Type[MODEL]) -> QuerySetSingle[Optional[MODEL]]:
"""
Generates a QuerySet that returns the first record.
"""
return QuerySet(cls).first()
@classmethod
def filter(cls: Type[MODEL], *args: Q, **kwargs: Any) -> QuerySet[MODEL]:
"""
Generates a QuerySet with the filter applied.
:param args: Q functions containing constraints. Will be AND'ed.
:param kwargs: Simple filter constraints.
"""
return QuerySet(cls).filter(*args, **kwargs)
@classmethod
def exclude(cls: Type[MODEL], *args: Q, **kwargs: Any) -> QuerySet[MODEL]:
"""
Generates a QuerySet with the exclude applied.
:param args: Q functions containing constraints. Will be AND'ed.
:param kwargs: Simple filter constraints.
"""
return QuerySet(cls).exclude(*args, **kwargs)
@classmethod
def annotate(cls: Type[MODEL], **kwargs: Function) -> QuerySet[MODEL]:
"""
Annotates the result set with extra Functions/Aggregations.
:param kwargs: Parameter name and the Function/Aggregation to annotate with.
"""
return QuerySet(cls).annotate(**kwargs)
@classmethod
def all(cls: Type[MODEL]) -> QuerySet[MODEL]:
"""
Returns the complete QuerySet.
"""
return QuerySet(cls)
@classmethod
def get(cls: Type[MODEL], *args: Q, **kwargs: Any) -> QuerySetSingle[MODEL]:
"""
Fetches a single record for a Model type using the provided filter parameters.
.. code-block:: python3
user = await User.get(username="foo")
:param args: Q functions containing constraints. Will be AND'ed.
:param kwargs: Simple filter constraints.
:raises MultipleObjectsReturned: If provided search returned more than one object.
:raises DoesNotExist: If object can not be found.
"""
return QuerySet(cls).get(*args, **kwargs)
@classmethod
def get_or_none(cls: Type[MODEL], *args: Q, **kwargs: Any) -> QuerySetSingle[Optional[MODEL]]:
"""
Fetches a single record for a Model type using the provided filter parameters or None.
.. code-block:: python3
user = await User.get(username="foo")
:param args: Q functions containing constraints. Will be AND'ed.
:param kwargs: Simple filter constraints.
"""
return QuerySet(cls).get_or_none(*args, **kwargs)
@classmethod
async def fetch_for_list(
cls, instance_list: "List[Model]", *args: Any, using_db: Optional[BaseDBAsyncClient] = None,
) -> None:
"""
Fetches related models for provided list of Model objects.
:param instance_list: List of Model objects to fetch relations for.
:param args: Relation names to fetch.
:param using_db: DO NOT USE
"""
db = using_db or cls._meta.db
await db.executor_class(model=cls, db=db).fetch_for_list(instance_list, *args)
@classmethod
def check(cls) -> None:
"""
Calls various checks to validate the model.
:raises ConfigurationError: If the model has not been configured correctly.
"""
cls._check_together("unique_together")
cls._check_together("indexes")
@classmethod
def _check_together(cls, together: str) -> None:
"""
Check the value of the "unique_together" or "indexes" option.
:raises ConfigurationError: If the model has not been configured correctly.
"""
_together = getattr(cls._meta, together)
if not isinstance(_together, (tuple, list)):
raise ConfigurationError(f"'{cls.__name__}.{together}' must be a list or tuple.")
if any(not isinstance(unique_fields, (tuple, list)) for unique_fields in _together):
raise ConfigurationError(
f"All '{cls.__name__}.{together}' elements must be lists or tuples."
)
for fields_tuple in _together:
for field_name in fields_tuple:
field = cls._meta.fields_map.get(field_name)
if not field:
raise ConfigurationError(
f"'{cls.__name__}.{together}' has no '{field_name}' field."
)
if isinstance(field, ManyToManyFieldInstance):
raise ConfigurationError(
f"'{cls.__name__}.{together}' '{field_name}' field refers"
" to ManyToMany field."
)
@classmethod
def describe(cls, serializable: bool = True) -> dict:
"""
Describes the given list of models or ALL registered models.
:param serializable:
``False`` if you want raw python objects,
``True`` for JSON-serialisable data. (Defaults to ``True``)
:return:
A dictionary containing the model description.
The base dict has a fixed set of keys that reference a list of fields
(or a single field in the case of the primary key):
.. code-block:: python3
{
"name": str # Qualified model name
"app": str # 'App' namespace
"table": str # DB table name
"abstract": bool # Is the model Abstract?
"description": str # Description of table (nullable)
"docstring": str # Model docstring (nullable)
"unique_together": [...] # List of List containing field names that
# are unique together
"pk_field": {...} # Primary key field
"data_fields": [...] # Data fields
"fk_fields": [...] # Foreign Key fields FROM this model
"backward_fk_fields": [...] # Foreign Key fields TO this model
"o2o_fields": [...] # OneToOne fields FROM this model
"backward_o2o_fields": [...] # OneToOne fields TO this model
"m2m_fields": [...] # Many-to-Many fields
}
Each field is specified as defined in :meth:`tortoise.fields.base.Field.describe`
"""
return {
"name": cls._meta.full_name,
"app": cls._meta.app,
"table": cls._meta.db_table,
"abstract": cls._meta.abstract,
"description": cls._meta.table_description or None,
"docstring": inspect.cleandoc(cls.__doc__ or "") or None,
"unique_together": cls._meta.unique_together or [],
"pk_field": cls._meta.fields_map[cls._meta.pk_attr].describe(serializable),
"data_fields": [
field.describe(serializable)
for name, field in cls._meta.fields_map.items()
if name != cls._meta.pk_attr and name in (cls._meta.fields - cls._meta.fetch_fields)
],
"fk_fields": [
field.describe(serializable)
for name, field in cls._meta.fields_map.items()
if name in cls._meta.fk_fields
],
"backward_fk_fields": [
field.describe(serializable)
for name, field in cls._meta.fields_map.items()
if name in cls._meta.backward_fk_fields
],
"o2o_fields": [
field.describe(serializable)
for name, field in cls._meta.fields_map.items()
if name in cls._meta.o2o_fields
],
"backward_o2o_fields": [
field.describe(serializable)
for name, field in cls._meta.fields_map.items()
if name in cls._meta.backward_o2o_fields
],
"m2m_fields": [
field.describe(serializable)
for name, field in cls._meta.fields_map.items()
if name in cls._meta.m2m_fields
],
}
def __await__(self: MODEL) -> Generator[Any, None, MODEL]:
async def _self() -> MODEL:
return self
return _self().__await__()
class Meta:
"""
The ``Meta`` class is used to configure metadata for the Model.
Usage:
.. code-block:: python3
class Foo(Model):
...
class Meta:
table="custom_table"
unique_together=(("field_a", "field_b"), )
""" |
|
tortoise__tortoise-orm | pydantic.rst | Tutorial | How to generate Pydantic Models
from Tortoise Models | Apache License 2.0 | tortoise__tortoise-orm/docs/contrib/pydantic.rst | [
"tortoise__tortoise-orm/tortoise/contrib/pydantic/creator.py",
"tortoise__tortoise-orm/tortoise/contrib/pydantic/base.py"
] | Pydantic serialisation
Tortoise ORM has a Pydantic plugin that will generate Pydantic Models
from Tortoise Models, and then provides helper functions to serialise
that model and its related objects.
We currently only support generating Pydantic objects for serialisation;
deserialisation is not supported at this stage.
Tutorial
1: Basic usage
Here we introduce:
- Creating a Pydantic model from a Tortoise model
- Docstrings & doc-comments are used
- Evaluating the generated schema
- Simple serialisation with both .dict() and .json()
Let's start with a basic Tortoise Model:
from tortoise import fields
from tortoise.models import Model
class Tournament(Model):
"""
This references a Tournament
"""
id = fields.IntField(pk=True)
name = fields.CharField(max_length=100)
#: The date-time the Tournament record was created at
created_at = fields.DatetimeField(auto_now_add=True)
To create a Pydantic model from that one would call:
tortoise.contrib.pydantic.creator.pydantic_model_creator
from tortoise.contrib.pydantic import pydantic_model_creator
Tournament_Pydantic = pydantic_model_creator(Tournament)
We now have a Pydantic Model that can be used for representing the schema
and for serialisation.
The JSON-Schema of Tournament_Pydantic is now:
>>> print(Tournament_Pydantic.schema())
{
'title': 'Tournament',
'description': 'This references a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'description': 'The date-time the Tournament record was created at',
'type': 'string',
'format': 'date-time'
}
}
}
Note how the class docstring and the #: doc-comment are included as
descriptions in the Schema.
Serialising an object is as simple as this (in an async context):
tournament = await Tournament.create(name="New Tournament")
tourpy = await Tournament_Pydantic.from_tortoise_orm(tournament)
And one could get the contents by using regular Pydantic-object methods,
such as .dict() or .json()
>>> print(tourpy.dict())
{
'id': 1,
'name': 'New Tournament',
'created_at': datetime.datetime(2020, 3, 1, 20, 28, 9, 346808)
}
>>> print(tourpy.json())
{
"id": 1,
"name": "New Tournament",
"created_at": "2020-03-01T20:28:09.346808"
}
2: Querysets & Lists
Here we introduce:
- Creating a list-model to serialise a queryset
- Default sorting is honoured
from tortoise import fields
from tortoise.models import Model
class Tournament(Model):
"""
This references a Tournament
"""
id = fields.IntField(pk=True)
name = fields.CharField(max_length=100)
#: The date-time the Tournament record was created at
created_at = fields.DatetimeField(auto_now_add=True)
class Meta:
# Define the default ordering
# the pydantic serialiser will use this to order the results
ordering = ["name"]
To create a Pydantic list-model from that one would call:
tortoise.contrib.pydantic.creator.pydantic_queryset_creator
from tortoise.contrib.pydantic import pydantic_queryset_creator
Tournament_Pydantic_List = pydantic_queryset_creator(Tournament)
We now have a Pydantic Model that can be used for representing the schema
and for serialisation.
The JSON-Schema of Tournament_Pydantic_List is now:
>>> print(Tournament_Pydantic_List.schema())
{
'title': 'Tournaments',
'description': 'This references a Tournament',
'type': 'array',
'items': {
'$ref': '#/definitions/Tournament'
},
'definitions': {
'Tournament': {
'title': 'Tournament',
'description': 'This references a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'description': 'The date-time the Tournament record was created at',
'type': 'string',
'format': 'date-time'
}
}
}
}
}
Note that the Tournament is now not the root. A simple list is.
Serialising the queryset is as simple as this (in an async context):
# Create objects
await Tournament.create(name="New Tournament")
await Tournament.create(name="Another")
await Tournament.create(name="Last Tournament")
tourpy = await Tournament_Pydantic_List.from_queryset(Tournament.all())
And one could get the contents by using regular Pydantic-object methods,
such as .dict() or .json()
>>> print(tourpy.dict())
{
'__root__': [
{
'id': 2,
'name': 'Another',
'created_at': datetime.datetime(2020, 3, 2, 6, 53, 39, 776504)
},
{
'id': 3,
'name': 'Last Tournament',
'created_at': datetime.datetime(2020, 3, 2, 6, 53, 39, 776848)
},
{
'id': 1,
'name': 'New Tournament',
'created_at': datetime.datetime(2020, 3, 2, 6, 53, 39, 776211)
}
]
}
>>> print(tourpy.json())
[
{
"id": 2,
"name": "Another",
"created_at": "2020-03-02T06:53:39.776504"
},
{
"id": 3,
"name": "Last Tournament",
"created_at": "2020-03-02T06:53:39.776848"
},
{
"id": 1,
"name": "New Tournament",
"created_at": "2020-03-02T06:53:39.776211"
}
]
Note how .dict() has a __root__ element containing the list, but .json()
has the list as the root. Also note how the results are sorted
alphabetically by name.
3: Relations & Early-init
Here we introduce:
- Relationships
- Early model init
Note
The part of this tutorial about early-init is only required if you need
to generate the pydantic models before you have initialised Tortoise
ORM.
Look at example_pydantic_basic (in the run function) to see a case where the
*_creator is only called after Tortoise ORM has been initialised properly; in
that case an early init is not needed.
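If you are able to initialise Tortoise ORM first, a minimal sketch of that flow could look like this (in an async context; the sqlite URL and module path here are illustrative assumptions, not part of this tutorial's example):
from tortoise import Tortoise
await Tortoise.init(db_url="sqlite://:memory:", modules={"models": ["__main__"]})
# Relations are resolved during init, so no early init is needed here
Tournament_Pydantic = pydantic_model_creator(Tournament)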
Source to example: example_pydantic_tut3
We define our models with a relationship:
from tortoise import fields
from tortoise.models import Model
class Tournament(Model):
"""
This references a Tournament
"""
id = fields.IntField(pk=True)
name = fields.CharField(max_length=100)
#: The date-time the Tournament record was created at
created_at = fields.DatetimeField(auto_now_add=True)
class Event(Model):
"""
This references an Event in a Tournament
"""
id = fields.IntField(pk=True)
name = fields.CharField(max_length=100)
created_at = fields.DatetimeField(auto_now_add=True)
tournament = fields.ForeignKeyField(
"models.Tournament", related_name="events", description="The Tournement this happens in"
)
Next we create our Pydantic Model using pydantic_model_creator:
from tortoise.contrib.pydantic import pydantic_model_creator
Tournament_Pydantic = pydantic_model_creator(Tournament)
The JSON-Schema of Tournament_Pydantic is now:
>>> print(Tournament_Pydantic.schema())
{
'title': 'Tournament',
'description': 'This references a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'description': 'The date-time the Tournament record was created at',
'type': 'string',
'format': 'date-time'
}
}
}
Oh no! Where is the relation?
Because the models have not been fully initialised, the creator doesn't know about the
relations at this stage.
We need to initialise our model relationships early using
tortoise.Tortoise.init_models
from tortoise import Tortoise
Tortoise.init_models(["__main__"], "models")
# Now lets try again
Tournament_Pydantic = pydantic_model_creator(Tournament)
The JSON-Schema of Tournament_Pydantic is now:
>>> print(Tournament_Pydantic.schema())
{
'title': 'Tournament',
'description': 'This references a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'description': 'The date-time the Tournament record was created at',
'type': 'string',
'format': 'date-time'
},
'events': {
'title': 'Events',
'description': 'The Tournement this happens in',
'type': 'array',
'items': {
'$ref': '#/definitions/Event'
}
}
},
'definitions': {
'Event': {
'title': 'Event',
'description': 'This references an Event in a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'type': 'string',
'format': 'date-time'
}
}
}
}
}
Aha! That's much better.
Note we can also create a model for Event the same way, and it should
just work:
Event_Pydantic = pydantic_model_creator(Event)
>>> print(Event_Pydantic.schema())
{
'title': 'Event',
'description': 'This references an Event in a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'type': 'string',
'format': 'date-time'
},
'tournament': {
'title': 'Tournament',
'description': 'The Tournement this happens in',
'allOf': [
{
'$ref': '#/definitions/Tournament'
}
]
}
},
'definitions': {
'Tournament': {
'title': 'Tournament',
'description': 'This references a Tournament',
'type': 'object',
'properties': {
'id': {
'title': 'Id',
'type': 'integer'
},
'name': {
'title': 'Name',
'type': 'string'
},
'created_at': {
'title': 'Created At',
'description': 'The date-time the Tournament record was created at',
'type': 'string',
'format': 'date-time'
}
}
}
}
}
And that also has the relation defined!
Note how both schemas don't follow relations back. This is on by
default, and in a later tutorial we will show the options; a small sketch follows below.
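As a small taste of those options, pydantic_model_creator accepts an exclude parameter, so a sketch that leaves the events back-relation out of the Tournament schema could look like this (the variable and model names here are just for illustration):
Tournament_Pydantic_Flat = pydantic_model_creator(
    Tournament, name="TournamentFlat", exclude=("events",)
)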
Let's create and serialise the objects and see what they look like (in an
async context):
# Create objects
tournament = await Tournament.create(name="New Tournament")
event = await Event.create(name="The Event", tournament=tournament)
# Serialise Tournament
tourpy = await Tournament_Pydantic.from_tortoise_orm(tournament)
>>> print(tourpy.json())
{
"id": 1,
"name": "New Tournament",
"created_at": "2020-03-02T07:23:27.731656",
"events": [
{
"id": 1,
"name": "The Event",
"created_at": "2020-03-02T07:23:27.732492"
}
]
}
And serialising the event (in an async context):
eventpy = await Event_Pydantic.from_tortoise_orm(event)
>>> print(eventpy.json())
{
"id": 1,
"name": "The Event",
"created_at": "2020-03-02T07:23:27.732492",
"tournament": {
"id": 1,
"name": "New Tournament",
"created_at": "2020-03-02T07:23:27.731656"
}
}
4: PydanticMeta & Callables
Here we introduce:
- Configuring model creator via PydanticMeta class.
- Using callable functions to annotate extra data.
Source to example: example_pydantic_tut4
Let's add some methods that calculate data, and tell the creators to use
them:
class Tournament(Model):
"""
This references a Tournament
"""
id = fields.IntField(pk=True)
name = fields.CharField(max_length=100)
created_at = fields.DatetimeField(auto_now_add=True)
# It is useful to define the reverse relations manually so that type checking
# and auto completion work
events: fields.ReverseRelation["Event"]
def name_length(self) -> int:
"""
Computed length of name
"""
return len(self.name)
def events_num(self) -> int:
"""
Computed team size
"""
try:
return len(self.events)
except NoValuesFetched:
return -1
class PydanticMeta:
# Let's exclude the created timestamp
exclude = ("created_at",)
# Let's include two callables as computed columns
computed = ("name_length", "events_num")
class Event(Model):
"""
This references an Event in a Tournament
"""
id = fields.IntField(pk=True)
name = fields.CharField(max_length=100)
created_at = fields.DatetimeField(auto_now_add=True)
tournament = fields.ForeignKeyField(
"models.Tournament", related_name="events", description="The Tournement this happens in"
)
class Meta:
ordering = ["name"]
class PydanticMeta:
exclude = ("created_at",)
There is much to unpack here.
Firstly, we defined a PydanticMeta block, which contains the configuration
options for the pydantic model creator. See
tortoise.contrib.pydantic.creator.PydanticMeta for the available
options.
Secondly, we excluded created_at in both models, as we decided it
provided no benefit.
Thirdly, we added two callables: name_length and events_num. We want
these as part of the result set. Note that callables/computed fields
require manual specification of return type, as without this we can't
determine the record type which is needed to create a valid Pydantic
schema. This is not needed for standard Tortoise ORM fields, as the
fields already define a valid type.
Note that the Pydantic serializer can't call async methods, but since
the tortoise helpers pre-fetch relational data, it is available before
serialization. So we don't need to await the relation. We should however
protect against the case where no prefetching was done, hence catching
and handling the tortoise.exceptions.NoValuesFetched exception.
Next we create our Pydantic Model using pydantic_model_creator:
from tortoise import Tortoise
Tortoise.init_models(["__main__"], "models")
Tournament_Pydantic = pydantic_model_creator(Tournament)
The JSON-Schema of Tournament_Pydantic is now:
{
"title": "Tournament",
"description": "This references a Tournament",
"type": "object",
"properties": {
"id": {
"title": "Id",
"type": "integer"
},
"name": {
"title": "Name",
"type": "string"
},
"events": {
"title": "Events",
"description": "The Tournement this happens in",
"type": "array",
"items": {
"$ref": "#/definitions/Event"
}
},
"name_length": {
"title": "Name Length",
"description": "Computes length of name",
"type": "integer"
},
"events_num": {
"title": "Events Num",
"description": "Computes team size.",
"type": "integer"
}
},
"definitions": {
"Event": {
"title": "Event",
"description": "This references an Event in a Tournament",
"type": "object",
"properties": {
"id": {
"title": "Id",
"type": "integer"
},
"name": {
"title": "Name",
"type": "string"
}
}
}
}
}
Note that created_at is removed, and name_length & events_num are added.
Let's create and serialise the objects and see what they look like (in an
async context):
# Create objects
tournament = await Tournament.create(name="New Tournament")
await Event.create(name="Event 1", tournament=tournament)
await Event.create(name="Event 2", tournament=tournament)
# Serialise Tournament
tourpy = await Tournament_Pydantic.from_tortoise_orm(tournament)
>>> print(tourpy.json())
{
"id": 1,
"name": "New Tournament",
"events": [
{
"id": 1,
"name": "Event 1"
},
{
"id": 2,
"name": "Event 2"
}
],
"name_length": 14,
"events_num": 2
}
| import inspect
from base64 import b32encode
from hashlib import sha3_224
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast
import pydantic
from tortoise import fields
from tortoise.contrib.pydantic.base import PydanticListModel, PydanticModel
from tortoise.contrib.pydantic.utils import get_annotations
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
_MODEL_INDEX: Dict[str, Type[PydanticModel]] = {}
class PydanticMeta:
"""
The ``PydanticMeta`` class is used to configure metadata for generating the pydantic Model.
Usage:
.. code-block:: python3
class Foo(Model):
...
class PydanticMeta:
exclude = ("foo", "baa")
computed = ("count_peanuts", )
"""
#: If not empty, only fields this property contains will be in the pydantic model
include: Tuple[str,...] = ()
#: Fields listed in this property will be excluded from pydantic model
exclude: Tuple[str,...] = ()
#: Computed fields can be listed here to use in pydantic model
computed: Tuple[str,...] = ()
#: Use backward relations without annotations - not recommended, it can be huge data
#: without control
backward_relations: bool = True
#: Maximum recursion level allowed
max_recursion: int = 3
#: Allow cycles in recursion - This can result in HUGE data - Be careful!
#: Please use this with ``exclude``/``include`` and sane ``max_recursion``
allow_cycles: bool = False
#: If we should exclude raw fields (the ones have _id suffixes) of relations
exclude_raw_fields: bool = True
#: Sort fields alphabetically.
#: If not set (or ``False``) then leave fields in declaration order
sort_alphabetically: bool = False
def _br_it(val: str) -> str:
return val.replace("\n", "<br/>").strip()
def _cleandoc(obj: Any) -> str:
return _br_it(inspect.cleandoc(obj.__doc__ or ""))
def _pydantic_recursion_protector(
cls: "Type[Model]",
*,
stack: tuple,
exclude: Tuple[str,...] = (),
include: Tuple[str,...] = (),
computed: Tuple[str,...] = (),
name=None,
allow_cycles: bool = False,
sort_alphabetically: Optional[bool] = None,
) -> Optional[Type[PydanticModel]]:
"""
It is an inner function to protect pydantic model creator against cyclic recursion
"""
if not allow_cycles and cls in (c[0] for c in stack[:-1]):
return None
caller_fname = stack[0][1]
prop_path = [caller_fname] # It stores the fields in the hierarchy
level = 1
for _, parent_fname, parent_max_recursion in stack[1:]:
# Check recursion level
prop_path.insert(0, parent_fname)
if level >= parent_max_recursion:
# This is too verbose, Do we even need a way of reporting truncated models?
# tortoise.logger.warning(
# "Recursion level %i has reached for model %s",
# level,
# parent_cls.__qualname__ + "." + ".".join(prop_path),
# )
return None
level += 1
return pydantic_model_creator(
cls,
exclude=exclude,
include=include,
computed=computed,
name=name,
_stack=stack,
allow_cycles=allow_cycles,
sort_alphabetically=sort_alphabetically,
)
def pydantic_model_creator(
cls: "Type[Model]",
*,
name=None,
exclude: Tuple[str,...] = (),
include: Tuple[str,...] = (),
computed: Tuple[str,...] = (),
allow_cycles: Optional[bool] = None,
sort_alphabetically: Optional[bool] = None,
_stack: tuple = (),
exclude_readonly: bool = False,
) -> Type[PydanticModel]:
"""
Function to build `Pydantic Model <https://pydantic-docs.helpmanual.io/usage/models/>`__ off Tortoise Model.
:param cls: The Tortoise Model
:param name: Specify a custom name explicitly, instead of a generated name.
:param exclude: Extra fields to exclude from the provided model.
:param include: Extra fields to include from the provided model.
:param computed: Extra computed fields to include from the provided model.
:param allow_cycles: Do we allow any cycles in the generated model?
This is only useful for recursive/self-referential models.
A value of ``False`` (the default) will prevent any and all backtracking.
:param sort_alphabetically: Sort the parameters alphabetically instead of Field-definition order.
The default order would be:
* Field definition order +
* order of reverse relations (as discovered) +
* order of computed functions (as provided).
:param exclude_readonly: Build a subset model that excludes any readonly fields
"""
# Fully qualified class name
fqname = cls.__module__ + "." + cls.__qualname__
postfix = ""
def get_name() -> str:
# If arguments are specified (different from the defaults), we append a hash to the
# class name, to make it unique
# We don't check by stack, as cycles get explicitly renamed.
# When called later, include is explicitly set, so fence passes.
nonlocal postfix
is_default = (
exclude == ()
and include == ()
and computed == ()
and sort_alphabetically is None
and allow_cycles is None
)
hashval = (
f"{fqname};{exclude};{include};{computed};{_stack}:{sort_alphabetically}:{allow_cycles}"
)
postfix = (
"." + b32encode(sha3_224(hashval.encode("utf-8")).digest()).decode("utf-8").lower()[:6]
if not is_default
else ""
)
return fqname + postfix
# We need separate model class for different exclude, include and computed parameters
_name = name or get_name()
has_submodel = False
# Get settings and defaults
meta = getattr(cls, "PydanticMeta", PydanticMeta)
default_include: Tuple[str,...] = tuple(getattr(meta, "include", PydanticMeta.include))
default_exclude: Tuple[str,...] = tuple(getattr(meta, "exclude", PydanticMeta.exclude))
default_computed: Tuple[str,...] = tuple(getattr(meta, "computed", PydanticMeta.computed))
max_recursion: int = int(getattr(meta, "max_recursion", PydanticMeta.max_recursion))
exclude_raw_fields: bool = bool(
getattr(meta, "exclude_raw_fields", PydanticMeta.exclude_raw_fields)
)
_sort_fields: bool = bool(
getattr(meta, "sort_alphabetically", PydanticMeta.sort_alphabetically)
) if sort_alphabetically is None else sort_alphabetically
_allow_cycles: bool = bool(
getattr(meta, "allow_cycles", PydanticMeta.allow_cycles)
if allow_cycles is None
else allow_cycles
)
# Update parameters with defaults
include = tuple(include) + default_include
exclude = tuple(exclude) + default_exclude
computed = tuple(computed) + default_computed
# Get all annotations
annotations = get_annotations(cls)
# Properties and their annotations` store
pconfig: Type[pydantic.main.BaseConfig] = type(
"Config",
(PydanticModel.Config,),
{"title": name or cls.__name__, "extra": pydantic.main.Extra.forbid, "fields": {}},
)
pannotations: Dict[str, Optional[Type]] = {}
properties: Dict[str, Any] = {"__annotations__": pannotations, "Config": pconfig}
# Get model description
model_description = cls.describe(serializable=False)
# Field map we use
field_map: Dict[str, dict] = {}
pk_raw_field: str = ""
def field_map_update(keys: tuple, is_relation=True) -> None:
nonlocal pk_raw_field
for key in keys:
fds = model_description[key]
if isinstance(fds, dict):
fds = [fds]
for fd in fds:
n = fd["name"]
if key == "pk_field":
pk_raw_field = n
# Include or exclude field
if (include and n not in include) or n in exclude:
continue
# Remove raw fields
raw_field = fd.get("raw_field", None)
if raw_field is not None and exclude_raw_fields and raw_field != pk_raw_field:
del field_map[raw_field]
field_map[n] = fd
# Update field definitions from description
if not exclude_readonly:
field_map_update(("pk_field",), is_relation=False)
field_map_update(("data_fields",), is_relation=False)
if not exclude_readonly:
field_map_update(
("fk_fields", "o2o_fields", "m2m_fields", "backward_fk_fields", "backward_o2o_fields")
)
# Add possible computed fields
field_map.update(
{
k: {"field_type": callable, "function": getattr(cls, k), "description": None}
for k in computed
}
)
# Sort field map (Python 3.7+ has guaranteed ordered dictionary keys)
if _sort_fields:
# Sort Alphabetically
field_map = {k: field_map[k] for k in sorted(field_map)}
else:
# Sort to definition order
field_map = {
k: field_map[k] for k in tuple(cls._meta.fields_map.keys()) + computed if k in field_map
}
# Process fields
for fname, fdesc in field_map.items():
comment = ""
fconfig: Dict[str, Any] = {}
field_type = fdesc["field_type"]
def get_submodel(_model: "Type[Model]") -> Optional[Type[PydanticModel]]:
""" Get Pydantic model for the submodel """
nonlocal exclude, _name, has_submodel
if _model:
new_stack = _stack + ((cls, fname, max_recursion),)
# Get pydantic schema for the submodel
prefix_len = len(fname) + 1
pmodel = _pydantic_recursion_protector(
_model,
exclude=tuple(
str(v[prefix_len:]) for v in exclude if v.startswith(fname + ".")
),
include=tuple(
str(v[prefix_len:]) for v in include if v.startswith(fname + ".")
),
computed=tuple(
str(v[prefix_len:]) for v in computed if v.startswith(fname + ".")
),
stack=new_stack,
allow_cycles=_allow_cycles,
sort_alphabetically=sort_alphabetically,
)
else:
pmodel = None
# If the result is None it has been excluded and we need to exclude the field
if pmodel is None:
exclude += (fname,)
else:
has_submodel = True
# We need to rename if there are duplicate instances of this model
if cls in (c[0] for c in _stack):
_name = name or get_name()
return pmodel
# Foreign keys and OneToOne fields are embedded schemas
if (
field_type is fields.relational.ForeignKeyFieldInstance
or field_type is fields.relational.OneToOneFieldInstance
or field_type is fields.relational.BackwardOneToOneRelation
):
model = get_submodel(fdesc["python_type"])
if model:
if fdesc.get("nullable"):
fconfig["nullable"] = True
if fdesc.get("nullable") or fdesc.get("default"):
model = Optional[model]
pannotations[fname] = model
# Backward FK and ManyToMany fields are list of embedded schemas
elif (
field_type is fields.relational.BackwardFKRelation
or field_type is fields.relational.ManyToManyFieldInstance
):
model = get_submodel(fdesc["python_type"])
if model:
pannotations[fname] = List[model] # type: ignore
# Computed fields as methods
elif field_type is callable:
func = fdesc["function"]
annotation = get_annotations(cls, func).get("return", None)
comment = _cleandoc(func)
if annotation is not None:
pannotations[fname] = annotation
# Any other tortoise fields
else:
annotation = annotations.get(fname, None)
fconfig.update(fdesc["constraints"])
ptype = fdesc["python_type"]
if fdesc.get("nullable"):
fconfig["nullable"] = True
if fdesc.get("nullable") or fdesc.get("default"):
ptype = Optional[ptype]
if not (exclude_readonly and fdesc["constraints"].get("readOnly") is True):
pannotations[fname] = annotation or ptype
# Create a schema for the field
if fname in pannotations:
# Use comment if we have and enabled or use the field description if specified
description = comment or _br_it(fdesc.get("docstring") or fdesc["description"] or "")
fconfig["description"] = description
fconfig["title"] = fname.replace("_", " ").title()
pconfig.fields[fname] = fconfig
# Here we ensure that the name is unique, but complete objects are still labeled verbatim
if not has_submodel and exclude:
_name = name or f"{fqname}.leaf"
elif has_submodel:
_name = name or get_name()
# Here we de-dup to ensure that a uniquely named object is a unique object
# This fixes some Pydantic constraints.
if _name in _MODEL_INDEX:
return _MODEL_INDEX[_name]
# Creating Pydantic class for the properties generated before
model = cast(Type[PydanticModel], type(_name, (PydanticModel,), properties))
# Copy the Model docstring over
model.__doc__ = _cleandoc(cls)
# Store the base class
setattr(model.__config__, "orig_model", cls)
# Store model reference so we can de-dup it later on if needed.
_MODEL_INDEX[_name] = model
return model
def pydantic_queryset_creator(
cls: "Type[Model]",
*,
name=None,
exclude: Tuple[str,...] = (),
include: Tuple[str,...] = (),
computed: Tuple[str,...] = (),
allow_cycles: Optional[bool] = None,
sort_alphabetically: Optional[bool] = None,
) -> Type[PydanticListModel]:
"""
Function to build a `Pydantic Model <https://pydantic-docs.helpmanual.io/usage/models/>`__ list off Tortoise Model.
:param cls: The Tortoise Model to put in a list.
:param name: Specify a custom name explicitly, instead of a generated name.
The list generated name is currently naive and merely adds a "s" to the end
of the singular name.
:param exclude: Extra fields to exclude from the provided model.
:param include: Extra fields to include from the provided model.
:param computed: Extra computed fields to include from the provided model.
:param allow_cycles: Do we allow any cycles in the generated model?
This is only useful for recursive/self-referential models.
A value of ``False`` (the default) will prevent any and all backtracking.
:param sort_alphabetically: Sort the parameters alphabetically instead of Field-definition order.
The default order would be:
* Field definition order +
* order of reverse relations (as discovered) +
* order of computed functions (as provided).
"""
submodel = pydantic_model_creator(
cls,
exclude=exclude,
include=include,
computed=computed,
allow_cycles=allow_cycles,
sort_alphabetically=sort_alphabetically,
)
lname = name or f"{submodel.__name__}_list"
properties = {"__annotations__": {"__root__": List[submodel]}} # type: ignore
# Creating Pydantic class for the properties generated before
model = cast(Type[PydanticListModel], type(lname, (PydanticListModel,), properties))
# Copy the Model docstring over
model.__doc__ = _cleandoc(cls)
# The title of the model to hide the hash postfix
setattr(model.__config__, "title", name or f"{getattr(submodel.__config__,'title')}_list")
# Store the base class & submodel
setattr(model.__config__, "submodel", submodel)
return model
from typing import TYPE_CHECKING, List, Type, Union
import pydantic
from pydantic import BaseModel # pylint: disable=E0611
from tortoise import fields
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
from tortoise.queryset import QuerySet, QuerySetSingle
def _get_fetch_fields(
pydantic_class: "Type[PydanticModel]", model_class: "Type[Model]"
) -> List[str]:
"""
Recursively collect fields needed to fetch
:param pydantic_class: The pydantic model class
:param model_class: The tortoise model class
:return: The list of fields to be fetched
"""
fetch_fields = []
for field_name, field_type in pydantic_class.__annotations__.items():
origin = getattr(field_type, "__origin__", None)
if origin in (list, List, Union):
field_type = field_type.__args__[0]
# noinspection PyProtectedMember
if field_name in model_class._meta.fetch_fields and issubclass(field_type, PydanticModel):
subclass_fetch_fields = _get_fetch_fields(
field_type, getattr(field_type.__config__, "orig_model")
)
if subclass_fetch_fields:
fetch_fields.extend([field_name + "__" + f for f in subclass_fetch_fields])
else:
fetch_fields.append(field_name)
return fetch_fields
class PydanticModel(BaseModel):
"""
Pydantic BaseModel for Tortoise objects.
This provides an extra method above the usual Pydantic
`model properties <https://pydantic-docs.helpmanual.io/usage/models/#model-properties>`__
"""
class Config:
orm_mode = True # It should be in ORM mode to convert tortoise data to pydantic
# noinspection PyMethodParameters
@pydantic.validator("*", pre=True, each_item=False) # It is a classmethod!
def _tortoise_convert(cls, value): # pylint: disable=E0213
# Computed fields
if callable(value):
return value()
# Convert ManyToManyRelation to list
if isinstance(value, (fields.ManyToManyRelation, fields.ReverseRelation)):
return list(value)
return value
@classmethod
async def from_tortoise_orm(cls, obj: "Model") -> "PydanticModel":
"""
Returns a serializable pydantic model instance built from the provided model instance.
.. note::
This will prefetch all the relations automatically. It is probably what you want.
If you don't want this, or require a ``sync`` method, look to using ``.from_orm()``.
In that case you'd have to manage prefetching yourself,
or exclude relational fields from being part of the model using
:class:`tortoise.contrib.pydantic.creator.PydanticMeta`, or you would be
getting ``OperationalError`` exceptions.
This is due to how the ``asyncio`` framework forces I/O to happen in explicit ``await``
statements. Hence we can only do lazy-fetching during an awaited method.
:param obj: The Model instance you want serialized.
"""
# Get fields needed to fetch
fetch_fields = _get_fetch_fields(cls, getattr(cls.__config__, "orig_model"))
# Fetch fields
await obj.fetch_related(*fetch_fields)
# Convert to pydantic object
values = super().from_orm(obj)
return values
@classmethod
async def from_queryset_single(cls, queryset: "QuerySetSingle") -> "PydanticModel":
"""
Returns a serializable pydantic model instance for a single model
from the provided queryset.
This will prefetch all the relations automatically.
:param queryset: a queryset on the model this PydanticModel is based on.
"""
fetch_fields = _get_fetch_fields(cls, getattr(cls.__config__, "orig_model"))
return cls.from_orm(await queryset.prefetch_related(*fetch_fields))
@classmethod
async def from_queryset(cls, queryset: "QuerySet") -> "List[PydanticModel]":
"""
Returns a serializable pydantic model instance that contains a list of models,
from the provided queryset.
This will prefetch all the relations automatically.
:param queryset: a queryset on the model this PydanticModel is based on.
"""
fetch_fields = _get_fetch_fields(cls, getattr(cls.__config__, "orig_model"))
return [cls.from_orm(e) for e in await queryset.prefetch_related(*fetch_fields)]
class PydanticListModel(BaseModel):
"""
Pydantic BaseModel for List of Tortoise Models
This provides an extra method above the usual Pydantic
`model properties <https://pydantic-docs.helpmanual.io/usage/models/#model-properties>`__
"""
@classmethod
async def from_queryset(cls, queryset: "QuerySet") -> "PydanticListModel":
"""
Returns a serializable pydantic model instance that contains a list of models,
from the provided queryset.
This will prefetch all the relations automatically.
:param queryset: a queryset on the model this PydanticListModel is based on.
"""
submodel = getattr(cls.__config__, "submodel")
fetch_fields = _get_fetch_fields(submodel, getattr(submodel.__config__, "orig_model"))
values = cls(
__root__=[submodel.from_orm(e) for e in await queryset.prefetch_related(*fetch_fields)]
)
return values |
|
tortoise__tortoise-orm | query.rst | Tutorial | How to use QuerySet to build your queries | Apache License 2.0 | tortoise__tortoise-orm/docs/query.rst | [
"tortoise__tortoise-orm/tortoise/queryset.py",
"tortoise__tortoise-orm/tortoise/query_utils.py"
] | Query API
This document describes how to use QuerySet to build your queries.
Be sure to check the examples for a better understanding.
You start your query from your model class:
Event.filter(id=1)
There are several methods on the model itself to start a query:
- filter(*args, **kwargs) - create QuerySet with given filters
- exclude(*args, **kwargs) - create QuerySet with given excluding
filters
- all() - create QuerySet without filters
- first() - create QuerySet limited to one object, returning an
instance instead of a list
- annotate() - create QuerySet with given annotation
These methods return a QuerySet object that allows further filtering and
some more complex operations.
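For illustration, a minimal sketch chaining a couple of these starters (models from getting_started; the filter values are illustrative):
first_event = await Event.all().first()
fifa_events = await Event.filter(name__startswith='FIFA').exclude(id=3)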
The model class also has these methods to create objects:
- create(**kwargs) - creates an object with the given kwargs
- get_or_create(defaults, **kwargs) - gets an object for the given kwargs; if
not found, creates it with additional kwargs from the defaults dict
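For example (a minimal sketch; the names and values used here are illustrative):
tournament = await Tournament.create(name='Test Cup')
event, created = await Event.get_or_create(
    name='Qualifier', defaults={'tournament': tournament}
)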
An instance of the model also has these methods:
- save() - updates the instance, or inserts it if it was never saved before
- delete() - deletes the instance from the db
- fetch_related(*args) - fetches objects related to the instance. It can
fetch FK relations, Backward-FK relations and M2M relations. It can also
fetch a variable depth of related objects like this:
await team.fetch_related('events__tournament') - this will fetch all
events for the team, and for each of these events their tournament will
be prefetched too. After fetching, the objects should be available
normally like this: team.events[0].tournament.name
Another approach to working with related objects on an instance is to query
them explicitly with async for:
async for team in event.participants:
print(team.name)
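Since the relation supports async iteration, an async comprehension works as well (a sketch):
participant_names = [team.name async for team in event.participants]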
You can also filter related objects like this:
await team.events.filter(name='First')
which will return a QuerySet object with a predefined filter
QuerySet
A QuerySet can be constructed, filtered and passed around without
actually hitting the database. Only when you await the QuerySet will it
generate the query and run it against the database.
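For example (a sketch):
queryset = Event.filter(tournament__name='World Cup')  # no database access yet
queryset = queryset.order_by('name').limit(10)         # still no database access
events = await queryset                                # the query is built and executed here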
Here are some common usage scenarios with QuerySet (we are using models
defined in getting_started):
Regular select into model instances:
await Event.filter(name__startswith='FIFA')
This query will get you all events with a name starting with FIFA, where
name is a field defined on the model, and startswith is a filter modifier. Note
that the field name and the modifier are separated by a double underscore. You can
read more on filter modifiers in the Filtering section of this document.
It's also possible to filter your queries with .exclude():
await Team.exclude(name__icontains='junior')
As a more interesting case, when you are working with related data, you
can also build your query around related entities:
# getting all events whose tournament name is "World Cup"
await Event.filter(tournament__name='World Cup')
# Gets all teams participating in events with ids 1, 2, 3
await Team.filter(events__id__in=[1,2,3])
# Gets all tournaments where teams with "junior" in their name are participating
await Tournament.filter(event__participants__name__icontains='junior').distinct()
Usually you not only want to filter by related data, but also fetch that
related data as well. You can do this using .prefetch_related():
# This will fetch events, and for each event the ``.tournament`` field will be populated with
# the corresponding ``Tournament`` instance
await Event.all().prefetch_related('tournament')
# This will fetch tournaments with their events and teams for each event
tournament_list = await Tournament.all().prefetch_related('events__participants')
# Fetched results for m2m and backward fk relations are stored in a list-like container
for tournament in tournament_list:
print([e.name for e in tournament.events])
The general rule for how prefetch_related() works is that each level of
depth of related models produces one additional query, so
.prefetch_related('events__participants') will produce two additional
queries to fetch your data.
Sometimes, when performance is crucial, you don't want to make
additional queries like this. In such cases you can use values()
or values_list() to produce a more efficient query
# This will return a list of dicts with the keys 'id', 'name', 'tournament_name', and
# 'tournament_name' will be populated with the name of the related tournament.
# And it will be done in one query
events = await Event.filter(id__in=[1,2,3]).values('id', 'name', tournament_name='tournament__name')
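Similarly, values_list() returns tuples instead of dicts, and with flat=True and a single field you get a plain list of values (a sketch):
event_names = await Event.filter(id__in=[1, 2, 3]).values_list('name', flat=True)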
QuerySet also supports aggregation and database functions through
.annotate() method
from tortoise.functions import Count, Trim, Lower, Upper, Coalesce
# This query will fetch all tournaments with 10 or more events, and will
# populate the field `.events_count` on instances with the corresponding value
await Tournament.annotate(events_count=Count('events')).filter(events_count__gte=10)
await Tournament.annotate(clean_name=Trim('name')).filter(clean_name='tournament')
await Tournament.annotate(name_upper=Upper('name')).filter(name_upper='TOURNAMENT')
await Tournament.annotate(name_lower=Lower('name')).filter(name_lower='tournament')
await Tournament.annotate(desc_clean=Coalesce('desc', '')).filter(desc_clean='')
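Annotations can also be used for ordering; for example (a sketch):
await Tournament.annotate(events_count=Count('events')).order_by('-events_count')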
Q objects
Sometimes you need to do more complicated queries than the simple AND
<model>.filter() provides. Luckily we have Q objects to spice things up
and help you find what you need. These Q objects can then be used as
arguments to <model>.filter() instead.
Q objects are extremely versatile, some example use cases:
- creating an OR filter
- nested filters
- inverted filters
- combining any of the above to simply write complicated
multilayer filters
Q objects can take any (special) kwargs for filtering that
<model>.filter() accepts, see those docs for a full list of filter
options in that regard.
They can also be combined by using bitwise operators (| is OR and & is
AND for those unfamiliar with bitwise operators)
For example, to find the events named Event 1 or Event 2:
found_events = await Event.filter(
Q(name='Event 1') | Q(name='Event 2')
)
Q objects can be nested as well; the above, for example, is equivalent to:
found_events = await Event.filter(
Q(Q(name='Event 1'), Q(name='Event 2'), join_type="OR")
)
If the join type is omitted, it defaults to AND.
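The operators can also be mixed; for example, a sketch that requires a name prefix AND one of two ids:
found_events = await Event.filter(
    Q(name__startswith='Event') & (Q(id=1) | Q(id=2))
)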
Note
Q objects without filter arguments are considered NOP and will be
ignored for the final query (regardless of whether they are used as an AND or OR
param)
Also, Q objects can be negated to generate a NOT clause (~ operator) in
your query
not_third_events = await Event.filter(~Q(name='3'))
tortoise.query_utils
Filtering
When using the .filter() method you can append a number of modifiers to field
names to specify the desired operation; a few more sketches follow after the list below
teams = await Team.filter(name__icontains='CON')
- not - negation of an exact match
- in - checks if the value of the field is in the passed list
- not_in - checks if the value of the field is not in the passed list
- gte - greater than or equal to the passed value
- gt - greater than the passed value
- lte - less than or equal to the passed value
- lt - less than the passed value
- range - between the two given values
- isnull - field is null
- not_isnull - field is not null
- contains - field contains the specified substring
- icontains - case insensitive contains
- startswith - if field starts with the value
- istartswith - case insensitive startswith
- endswith - if field ends with the value
- iendswith - case insensitive endswith
- iequals - case insensitive equals
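A few more sketches using these modifiers (models from getting_started; desc is the field already used in the Coalesce example above, and the values are illustrative):
await Event.filter(id__in=[1, 2, 3])
await Event.filter(id__range=[1, 10])
await Tournament.filter(desc__isnull=True)
await Team.filter(name__istartswith='team')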
Complex prefetch
Sometimes it is required to fetch only certain related records. You can
achieve this with the Prefetch object:
tournament_with_filtered = await Tournament.all().prefetch_related(
Prefetch('events', queryset=Event.filter(name='First'))
).first()
F expression
An F() object represents the value of a model field. It makes it
possible to refer to model field values and perform database operations
using them without actually having to pull them out of the database into
Python memory.
For example, to use F to update a user's balance atomically:
from tortoise.expressions import F
await User.filter(id=1).update(balance = F('balance') - 10)
await User.filter(id=1).update(balance = F('balance') + F('award'), award = 0)
# or use .save()
user = await User.get(id=1)
user.balance = F('balance') - 10
await user.save(update_fields=['balance'])
| import types
from copy import copy
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Dict,
Generator,
Generic,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from pypika import JoinType, Order, Table
from pypika.functions import Count
from pypika.queries import QueryBuilder
from pypika.terms import Term
from typing_extensions import Protocol
from tortoise.backends.base.client import BaseDBAsyncClient, Capabilities
from tortoise.exceptions import (
DoesNotExist,
FieldError,
IntegrityError,
MultipleObjectsReturned,
ParamsError,
)
from tortoise.expressions import F
from tortoise.fields.relational import (
ForeignKeyFieldInstance,
OneToOneFieldInstance,
RelationalField,
)
from tortoise.functions import Function
from tortoise.query_utils import Prefetch, Q, QueryModifier, _get_joins_for_related_field
# Empty placeholder - Should never be edited.
QUERY: QueryBuilder = QueryBuilder()
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
MODEL = TypeVar("MODEL", bound="Model")
T_co = TypeVar("T_co", covariant=True)
class QuerySetSingle(Protocol[T_co]):
"""
Awaiting on this will resolve a single instance of the Model object, and not a sequence.
"""
# pylint: disable=W0104
def __await__(self) -> Generator[Any, None, T_co]:
... # pragma: nocoverage
def prefetch_related(self, *args: Union[str, Prefetch]) -> "QuerySetSingle[MODEL]":
... # pragma: nocoverage
def annotate(self, **kwargs: Function) -> "QuerySetSingle[MODEL]":
... # pragma: nocoverage
def only(self, *fields_for_select: str) -> "QuerySetSingle[MODEL]":
... # pragma: nocoverage
def values_list(self, *fields_: str, flat: bool = False) -> "ValuesListQuery":
... # pragma: nocoverage
def values(self, *args: str, **kwargs: str) -> "ValuesQuery":
... # pragma: nocoverage
class AwaitableQuery(Generic[MODEL]):
__slots__ = ("_joined_tables", "query", "model", "_db", "capabilities", "_annotations")
def __init__(self, model: Type[MODEL]) -> None:
self._joined_tables: List[Table] = []
self.model: "Type[Model]" = model
self.query: QueryBuilder = QUERY
self._db: BaseDBAsyncClient = None # type: ignore
self.capabilities: Capabilities = model._meta.db.capabilities
self._annotations: Dict[str, Function] = {}
def resolve_filters(
self,
model: "Type[Model]",
q_objects: List[Q],
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
) -> None:
"""
Builds the common filters for a QuerySet.
:param model: The Model this queryset is based on.
:param q_objects: The Q expressions to apply.
:param annotations: Extra annotations to add.
:param custom_filters: Pre-resolved filters to be passed though.
"""
has_aggregate = self._resolve_annotate()
modifier = QueryModifier()
for node in q_objects:
modifier &= node.resolve(model, annotations, custom_filters, model._meta.basetable)
where_criterion, joins, having_criterion = modifier.get_query_modifiers()
for join in joins:
if join[0] not in self._joined_tables:
self.query = self.query.join(join[0], how=JoinType.left_outer).on(join[1])
self._joined_tables.append(join[0])
self.query._wheres = where_criterion
self.query._havings = having_criterion
if has_aggregate and (self._joined_tables or having_criterion or self.query._orderbys):
self.query = self.query.groupby(
self.model._meta.basetable[self.model._meta.db_pk_column]
)
def _join_table_by_field(
self, table: Table, related_field_name: str, related_field: RelationalField
) -> Table:
joins = _get_joins_for_related_field(table, related_field, related_field_name)
for join in joins:
if join[0] not in self._joined_tables:
self.query = self.query.join(join[0], how=JoinType.left_outer).on(join[1])
self._joined_tables.append(join[0])
return joins[-1][0]
@staticmethod
def _resolve_ordering_string(ordering: str) -> Tuple[str, Order]:
order_type = Order.asc
if ordering[0] == "-":
field_name = ordering[1:]
order_type = Order.desc
else:
field_name = ordering
return field_name, order_type
def resolve_ordering(
self,
model: "Type[Model]",
table: Table,
orderings: Iterable[Tuple[str, str]],
annotations: Dict[str, Any],
) -> None:
"""
Applies standard ordering to QuerySet.
:param model: The Model this queryset is based on.
:param table: ``pypika.Table`` to keep track of the virtual SQL table
(to allow self referential joins)
:param orderings: What columns/order to order by
:param annotations: Annotations that may be ordered on
:raises FieldError: If a field provided does not exist in model.
"""
# Do not apply default ordering for annotated queries to not mess them up
if not orderings and self.model._meta.ordering and not annotations:
orderings = self.model._meta.ordering
for ordering in orderings:
field_name = ordering[0]
if field_name in model._meta.fetch_fields:
raise FieldError(
"Filtering by relation is not possible. Filter by nested field of related model"
)
if field_name.split("__")[0] in model._meta.fetch_fields:
related_field_name = field_name.split("__")[0]
related_field = cast(RelationalField, model._meta.fields_map[related_field_name])
related_table = self._join_table_by_field(table, related_field_name, related_field)
self.resolve_ordering(
related_field.related_model,
related_table,
[("__".join(field_name.split("__")[1:]), ordering[1])],
{},
)
elif field_name in annotations:
annotation = annotations[field_name]
annotation_info = annotation.resolve(self.model, table)
self.query = self.query.orderby(annotation_info["field"], order=ordering[1])
else:
field_object = model._meta.fields_map.get(field_name)
if not field_object:
raise FieldError(f"Unknown field {field_name} for model {model.__name__}")
field_name = field_object.source_field or field_name
field = table[field_name]
func = field_object.get_for_dialect(
model._meta.db.capabilities.dialect, "function_cast"
)
if func:
field = func(field_object, field)
self.query = self.query.orderby(field, order=ordering[1])
def _resolve_annotate(self) -> bool:
if not self._annotations:
return False
table = self.model._meta.basetable
annotation_info = {}
for key, annotation in self._annotations.items():
annotation_info[key] = annotation.resolve(self.model, table)
for key, info in annotation_info.items():
for join in info["joins"]:
self._join_table_by_field(*join)
self.query._select_other(info["field"].as_(key))
return any(info["field"].is_aggregate for info in annotation_info.values())
def _make_query(self) -> None:
raise NotImplementedError() # pragma: nocoverage
async def _execute(self) -> Any:
raise NotImplementedError() # pragma: nocoverage
class QuerySet(AwaitableQuery[MODEL]):
__slots__ = (
"fields",
"_prefetch_map",
"_prefetch_queries",
"_single",
"_raise_does_not_exist",
"_db",
"_limit",
"_offset",
"_fields_for_select",
"_filter_kwargs",
"_orderings",
"_q_objects",
"_distinct",
"_having",
"_custom_filters",
"_group_bys",
)
def __init__(self, model: Type[MODEL]) -> None:
super().__init__(model)
self.fields: Set[str] = model._meta.db_fields
self._prefetch_map: Dict[str, Set[Union[str, Prefetch]]] = {}
self._prefetch_queries: Dict[str, QuerySet] = {}
self._single: bool = False
self._raise_does_not_exist: bool = False
self._limit: Optional[int] = None
self._offset: Optional[int] = None
self._filter_kwargs: Dict[str, Any] = {}
self._orderings: List[Tuple[str, Any]] = []
self._q_objects: List[Q] = []
self._distinct: bool = False
self._having: Dict[str, Any] = {}
self._custom_filters: Dict[str, dict] = {}
self._fields_for_select: Tuple[str,...] = ()
self._group_bys: Tuple[str,...] = ()
def _clone(self) -> "QuerySet[MODEL]":
queryset = QuerySet.__new__(QuerySet)
queryset.fields = self.fields
queryset.model = self.model
queryset.query = self.query
queryset.capabilities = self.capabilities
queryset._prefetch_map = copy(self._prefetch_map)
queryset._prefetch_queries = copy(self._prefetch_queries)
queryset._single = self._single
queryset._raise_does_not_exist = self._raise_does_not_exist
queryset._db = self._db
queryset._limit = self._limit
queryset._offset = self._offset
queryset._fields_for_select = self._fields_for_select
queryset._filter_kwargs = copy(self._filter_kwargs)
queryset._orderings = copy(self._orderings)
queryset._joined_tables = copy(self._joined_tables)
queryset._q_objects = copy(self._q_objects)
queryset._distinct = self._distinct
queryset._annotations = copy(self._annotations)
queryset._having = copy(self._having)
queryset._custom_filters = copy(self._custom_filters)
queryset._group_bys = copy(self._group_bys)
return queryset
def _filter_or_exclude(self, *args: Q, negate: bool, **kwargs: Any) -> "QuerySet[MODEL]":
queryset = self._clone()
for arg in args:
if not isinstance(arg, Q):
raise TypeError("expected Q objects as args")
if negate:
queryset._q_objects.append(~arg)
else:
queryset._q_objects.append(arg)
for key, value in kwargs.items():
if negate:
queryset._q_objects.append(~Q(**{key: value}))
else:
queryset._q_objects.append(Q(**{key: value}))
return queryset
def filter(self, *args: Q, **kwargs: Any) -> "QuerySet[MODEL]":
"""
Filters QuerySet by given kwargs. You can filter by related objects like this:
.. code-block:: python3
Team.filter(events__tournament__name='Test')
You can also pass Q objects to filters as args.
"""
return self._filter_or_exclude(negate=False, *args, **kwargs)
def exclude(self, *args: Q, **kwargs: Any) -> "QuerySet[MODEL]":
"""
Same as .filter(), but negates all provided args with NOT
"""
return self._filter_or_exclude(negate=True, *args, **kwargs)
def order_by(self, *orderings: str) -> "QuerySet[MODEL]":
"""
Accept args to order by in a format like this:
.. code-block:: python3
.order_by('name', '-tournament__name')
Supports ordering by related models too.
:raises FieldError: If unknown field has been provided.
"""
queryset = self._clone()
new_ordering = []
for ordering in orderings:
field_name, order_type = self._resolve_ordering_string(ordering)
if not (
field_name.split("__")[0] in self.model._meta.fields
or field_name in self._annotations
):
raise FieldError(f"Unknown field {field_name} for model {self.model.__name__}")
new_ordering.append((field_name, order_type))
queryset._orderings = new_ordering
return queryset
def limit(self, limit: int) -> "QuerySet[MODEL]":
"""
Limits QuerySet to given length.
:raises ParamsError: Limit should be non-negative number.
"""
if limit < 0:
raise ParamsError("Limit should be non-negative number")
queryset = self._clone()
queryset._limit = limit
return queryset
def offset(self, offset: int) -> "QuerySet[MODEL]":
"""
Query offset for QuerySet.
:raises ParamsError: Offset should be non-negative number.
"""
if offset < 0:
raise ParamsError("Offset should be non-negative number")
queryset = self._clone()
queryset._offset = offset
if self.capabilities.requires_limit and queryset._limit is None:
queryset._limit = 1000000
return queryset
def distinct(self) -> "QuerySet[MODEL]":
"""
Make QuerySet distinct.
Only makes sense in combination with a ``.values()`` or ``.values_list()`` as it
precedes all the fetched fields with a distinct.
"""
queryset = self._clone()
queryset._distinct = True
return queryset
def annotate(self, **kwargs: Function) -> "QuerySet[MODEL]":
"""
Annotate result with aggregation or function result.
:raises TypeError: Value of kwarg is expected to be a ``Function`` instance.
"""
queryset = self._clone()
for key, annotation in kwargs.items():
if not isinstance(annotation, Function):
raise TypeError("value is expected to be Function instance")
queryset._annotations[key] = annotation
from tortoise.models import get_filters_for_field
queryset._custom_filters.update(get_filters_for_field(key, None, key))
return queryset
def group_by(self, *fields: str) -> "QuerySet[MODEL]":
"""
Make QuerySet returns list of dict or tuple with group by.
Must be called before .values() or .values_list()
"""
queryset = self._clone()
queryset._group_bys = fields
return queryset
def values_list(self, *fields_: str, flat: bool = False) -> "ValuesListQuery":
"""
Make QuerySet returns list of tuples for given args instead of objects.
If ``flat=True`` and only one arg is passed, a flat list is returned.
If no arguments are passed it will default to a tuple containing all fields
in order of declaration.
"""
return ValuesListQuery(
db=self._db,
model=self.model,
q_objects=self._q_objects,
flat=flat,
fields_for_select_list=fields_ # type: ignore
or [
field
for field in self.model._meta.fields_map.keys()
if field in self.model._meta.db_fields
]
+ list(self._annotations.keys()),
distinct=self._distinct,
limit=self._limit,
offset=self._offset,
orderings=self._orderings,
annotations=self._annotations,
custom_filters=self._custom_filters,
group_bys=self._group_bys,
)
def values(self, *args: str, **kwargs: str) -> "ValuesQuery":
"""
Make QuerySet return dicts instead of objects.
Can pass names of fields to fetch, or as a ``field_name='name_in_dict'`` kwarg.
If no arguments are passed it will default to a dict containing all fields.
:raises FieldError: If duplicate key has been provided.
"""
if args or kwargs:
fields_for_select: Dict[str, str] = {}
for field in args:
if field in fields_for_select:
raise FieldError(f"Duplicate key {field}")
fields_for_select[field] = field
for return_as, field in kwargs.items():
if return_as in fields_for_select:
raise FieldError(f"Duplicate key {return_as}")
fields_for_select[return_as] = field
else:
_fields = [
field
for field in self.model._meta.fields_map.keys()
if field in self.model._meta.db_fields
] + list(self._annotations.keys())
fields_for_select = {field: field for field in _fields}
return ValuesQuery(
db=self._db,
model=self.model,
q_objects=self._q_objects,
fields_for_select=fields_for_select,
distinct=self._distinct,
limit=self._limit,
offset=self._offset,
orderings=self._orderings,
annotations=self._annotations,
custom_filters=self._custom_filters,
group_bys=self._group_bys,
)
def delete(self) -> "DeleteQuery":
"""
Delete all objects in QuerySet.
"""
return DeleteQuery(
db=self._db,
model=self.model,
q_objects=self._q_objects,
annotations=self._annotations,
custom_filters=self._custom_filters,
)
def update(self, **kwargs: Any) -> "UpdateQuery":
"""
Update all objects in QuerySet with given kwargs.
.. admonition: Example:
.. code-block:: py3
await Employee.filter(occupation='developer').update(salary=5000)
Instead of returning a resultset, this updates the data in the DB itself.
"""
return UpdateQuery(
db=self._db,
model=self.model,
update_kwargs=kwargs,
q_objects=self._q_objects,
annotations=self._annotations,
custom_filters=self._custom_filters,
)
def count(self) -> "CountQuery":
"""
Return count of objects in queryset instead of objects.
"""
return CountQuery(
db=self._db,
model=self.model,
q_objects=self._q_objects,
annotations=self._annotations,
custom_filters=self._custom_filters,
limit=self._limit,
offset=self._offset,
)
def all(self) -> "QuerySet[MODEL]":
"""
Return the whole QuerySet.
Essentially a no-op except as the only operation.
"""
return self._clone()
def first(self) -> QuerySetSingle[Optional[MODEL]]:
"""
Limit queryset to one object and return one object instead of list.
"""
queryset = self._clone()
queryset._limit = 1
queryset._single = True
return queryset # type: ignore
def get(self, *args: Q, **kwargs: Any) -> QuerySetSingle[MODEL]:
"""
Fetch exactly one object matching the parameters.
"""
queryset = self.filter(*args, **kwargs)
queryset._limit = 2
queryset._single = True
queryset._raise_does_not_exist = True
return queryset # type: ignore
def get_or_none(self, *args: Q, **kwargs: Any) -> QuerySetSingle[Optional[MODEL]]:
"""
Fetch exactly one object matching the parameters, or return None if no object matches.
"""
queryset = self.filter(*args, **kwargs)
queryset._limit = 2
queryset._single = True
return queryset # type: ignore
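# Illustrative usage sketch: `await User.get_or_none(id=42)` returns the matching
# object, or None when nothing matches (unlike .get(), which raises DoesNotExist).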
def only(self, *fields_for_select: str) -> "QuerySet[MODEL]":
"""
Fetch ONLY the specified fields to create a partial model.
Persisting changes on the model is allowed only when:
* All the fields you want to update are specified in ``<model>.save(update_fields=[...])``
* You included the Model primary key in the ``.only(...)``
To protect against common mistakes we ensure that errors get raised:
* If you access a field that is not specified, you will get an ``AttributeError``.
* If you do a ``<model>.save()`` a ``IncompleteInstanceError`` will be raised as the model is, as requested, incomplete.
* If you do a ``<model>.save(update_fields=[...])`` and you didn't include the primary key in the ``.only(...)``,
then ``IncompleteInstanceError`` will be raised indicating that updates can't be done without the primary key being known.
* If you do a ``<model>.save(update_fields=[...])`` and one of the fields in ``update_fields`` was not in the ``.only(...)``,
then ``IncompleteInstanceError`` as that field is not available to be updated.
"""
queryset = self._clone()
queryset._fields_for_select = fields_for_select
return queryset
def prefetch_related(self, *args: Union[str, Prefetch]) -> "QuerySet[MODEL]":
"""
Like ``.fetch_related()`` on instance, but works on all objects in QuerySet.
:raises FieldError: If the field to prefetch on is not a relation, or not found.
"""
queryset = self._clone()
queryset._prefetch_map = {}
for relation in args:
if isinstance(relation, Prefetch):
relation.resolve_for_queryset(queryset)
continue
relation_split = relation.split("__")
first_level_field = relation_split[0]
if first_level_field not in self.model._meta.fetch_fields:
if first_level_field in self.model._meta.fields:
raise FieldError(
f"Field {first_level_field} on {self.model._meta.full_name} is not a relation"
)
raise FieldError(
f"Relation {first_level_field} for {self.model._meta.full_name} not found"
)
if first_level_field not in queryset._prefetch_map.keys():
queryset._prefetch_map[first_level_field] = set()
forwarded_prefetch = "__".join(relation_split[1:])
if forwarded_prefetch:
queryset._prefetch_map[first_level_field].add(forwarded_prefetch)
return queryset
async def explain(self) -> Any:
"""Fetch and return information about the query execution plan.
This is done by executing an ``EXPLAIN`` query whose exact prefix depends
on the database backend, as documented below.
- PostgreSQL: ``EXPLAIN (FORMAT JSON, VERBOSE)...``
- SQLite: ``EXPLAIN QUERY PLAN...``
- MySQL: ``EXPLAIN FORMAT=JSON...``
.. note::
This is only meant to be used in an interactive environment for debugging
and query optimization.
**The output format may (and will) vary greatly depending on the database backend.**
"""
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return await self._db.executor_class(model=self.model, db=self._db).execute_explain(
self.query
)
def using_db(self, _db: BaseDBAsyncClient) -> "QuerySet[MODEL]":
"""
Executes query in provided db client.
Useful for transactions workaround.
"""
queryset = self._clone()
queryset._db = _db
return queryset
def _make_query(self) -> None:
if self._fields_for_select:
table = self.model._meta.basetable
db_fields_for_select = [
table[self.model._meta.fields_db_projection[field]].as_(field)
for field in self._fields_for_select
]
self.query = copy(self.model._meta.basequery).select(*db_fields_for_select)
else:
self.query = copy(self.model._meta.basequery_all_fields)
self.resolve_ordering(
self.model, self.model._meta.basetable, self._orderings, self._annotations
)
self.resolve_filters(
model=self.model,
q_objects=self._q_objects,
annotations=self._annotations,
custom_filters=self._custom_filters,
)
if self._limit:
self.query._limit = self._limit
if self._offset:
self.query._offset = self._offset
if self._distinct:
self.query._distinct = True
def __await__(self) -> Generator[Any, None, List[MODEL]]:
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return self._execute().__await__()
async def __aiter__(self) -> AsyncIterator[MODEL]:
for val in await self:
yield val
async def _execute(self) -> List[MODEL]:
instance_list = await self._db.executor_class(
model=self.model,
db=self._db,
prefetch_map=self._prefetch_map,
prefetch_queries=self._prefetch_queries,
).execute_select(self.query, custom_fields=list(self._annotations.keys()))
if self._single:
if len(instance_list) == 1:
return instance_list[0]
if not instance_list:
if self._raise_does_not_exist:
raise DoesNotExist("Object does not exist")
return None # type: ignore
raise MultipleObjectsReturned("Multiple objects returned, expected exactly one")
return instance_list
class UpdateQuery(AwaitableQuery):
__slots__ = ("update_kwargs", "q_objects", "annotations", "custom_filters")
def __init__(
self,
model: Type[MODEL],
update_kwargs: Dict[str, Any],
db: BaseDBAsyncClient,
q_objects: List[Q],
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
) -> None:
super().__init__(model)
self.update_kwargs = update_kwargs
self.q_objects = q_objects
self.annotations = annotations
self.custom_filters = custom_filters
self._db = db
def _make_query(self) -> None:
table = self.model._meta.basetable
self.query = self._db.query_class.update(table)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
# Need to get executor to get correct column_map
executor = self._db.executor_class(model=self.model, db=self._db)
for key, value in self.update_kwargs.items():
field_object = self.model._meta.fields_map.get(key)
if not field_object:
raise FieldError(f"Unknown keyword argument {key} for model {self.model}")
if field_object.pk:
raise IntegrityError(f"Field {key} is PK and can not be updated")
if isinstance(field_object, (ForeignKeyFieldInstance, OneToOneFieldInstance)):
fk_field: str = field_object.source_field # type: ignore
db_field = self.model._meta.fields_map[fk_field].source_field
value = executor.column_map[fk_field](
getattr(value, field_object.to_field_instance.model_field_name), None
)
else:
try:
db_field = self.model._meta.fields_db_projection[key]
except KeyError:
raise FieldError(f"Field {key} is virtual and can not be updated")
if isinstance(value, Term):
value = F.resolver_arithmetic_expression(self.model, value)[0]
elif isinstance(value, Function):
value = value.resolve(self.model, table)["field"]
else:
value = executor.column_map[key](value, None)
self.query = self.query.set(db_field, value)
def __await__(self) -> Generator[Any, None, int]:
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return self._execute().__await__()
async def _execute(self) -> int:
return (await self._db.execute_query(str(self.query)))[0]
class DeleteQuery(AwaitableQuery):
__slots__ = ("q_objects", "annotations", "custom_filters")
def __init__(
self,
model: Type[MODEL],
db: BaseDBAsyncClient,
q_objects: List[Q],
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
) -> None:
super().__init__(model)
self.q_objects = q_objects
self.annotations = annotations
self.custom_filters = custom_filters
self._db = db
def _make_query(self) -> None:
self.query = copy(self.model._meta.basequery)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
self.query._delete_from = True
def __await__(self) -> Generator[Any, None, int]:
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return self._execute().__await__()
async def _execute(self) -> int:
return (await self._db.execute_query(str(self.query)))[0]
class CountQuery(AwaitableQuery):
__slots__ = ("q_objects", "annotations", "custom_filters", "limit", "offset")
def __init__(
self,
model: Type[MODEL],
db: BaseDBAsyncClient,
q_objects: List[Q],
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
limit: Optional[int],
offset: Optional[int],
) -> None:
super().__init__(model)
self.q_objects = q_objects
self.annotations = annotations
self.custom_filters = custom_filters
self.limit = limit
self.offset = offset or 0
self._db = db
def _make_query(self) -> None:
self.query = copy(self.model._meta.basequery)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
self.query._select_other(Count("*"))
def __await__(self) -> Generator[Any, None, int]:
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return self._execute().__await__()
async def _execute(self) -> int:
_, result = await self._db.execute_query(str(self.query))
count = list(dict(result[0]).values())[0] - self.offset
if self.limit and count > self.limit:
return self.limit
return count
class FieldSelectQuery(AwaitableQuery):
# pylint: disable=W0223
__slots__ = ("annotations",)
def __init__(self, model: Type[MODEL], annotations: Dict[str, Any]) -> None:
super().__init__(model)
self.annotations = annotations
def _join_table_with_forwarded_fields(
self, model: Type[MODEL], table: Table, field: str, forwarded_fields: str
) -> Tuple[Table, str]:
if field in model._meta.fields_db_projection and not forwarded_fields:
return table, model._meta.fields_db_projection[field]
if field in model._meta.fields_db_projection and forwarded_fields:
raise FieldError(f'Field "{field}" for model "{model.__name__}" is not relation')
if field in self.model._meta.fetch_fields and not forwarded_fields:
raise ValueError(
'Selecting relation "{}" is not possible, select concrete '
"field on related model".format(field)
)
field_object = cast(RelationalField, model._meta.fields_map.get(field))
if not field_object:
raise FieldError(f'Unknown field "{field}" for model "{model.__name__}"')
table = self._join_table_by_field(table, field, field_object)
forwarded_fields_split = forwarded_fields.split("__")
return self._join_table_with_forwarded_fields(
model=field_object.related_model,
table=table,
field=forwarded_fields_split[0],
forwarded_fields="__".join(forwarded_fields_split[1:]),
)
def add_field_to_select_query(self, field: str, return_as: str) -> None:
table = self.model._meta.basetable
if field in self.model._meta.fields_db_projection:
db_field = self.model._meta.fields_db_projection[field]
self.query._select_field(table[db_field].as_(return_as))
return
if field in self.model._meta.fetch_fields:
raise ValueError(
'Selecting relation "{}" is not possible, select '
"concrete field on related model".format(field)
)
if field in self.annotations:
self._annotations[return_as] = self.annotations[field]
return
field_split = field.split("__")
if field_split[0] in self.model._meta.fetch_fields:
related_table, related_db_field = self._join_table_with_forwarded_fields(
model=self.model,
table=table,
field=field_split[0],
forwarded_fields="__".join(field_split[1:]),
)
self.query._select_field(related_table[related_db_field].as_(return_as))
return
raise FieldError(f'Unknown field "{field}" for model "{self.model.__name__}"')
def resolve_to_python_value(self, model: Type[MODEL], field: str) -> Callable:
if field in model._meta.fetch_fields:
# return as is to get whole model objects
return lambda x: x
if field in (x[1] for x in model._meta.db_native_fields):
return lambda x: x
if field in self.annotations:
field_object = self.annotations[field].field_object
if field_object:
return field_object.to_python_value
return lambda x: x
if field in model._meta.fields_map:
return model._meta.fields_map[field].to_python_value
field_split = field.split("__")
if field_split[0] in model._meta.fetch_fields:
new_model = model._meta.fields_map[field_split[0]].related_model # type: ignore
return self.resolve_to_python_value(new_model, "__".join(field_split[1:]))
raise FieldError(f'Unknown field "{field}" for model "{model}"')
def _resolve_group_bys(self, *field_names: str):
group_bys = []
for field_name in field_names:
field_split = field_name.split("__")
related_table, related_db_field = self._join_table_with_forwarded_fields(
model=self.model,
table=self.model._meta.basetable,
field=field_split[0],
forwarded_fields="__".join(field_split[1:]) if len(field_split) > 1 else "",
)
field = related_table[related_db_field].as_(field_name)
group_bys.append(field)
return group_bys
class ValuesListQuery(FieldSelectQuery):
__slots__ = (
"flat",
"fields",
"limit",
"offset",
"distinct",
"orderings",
"annotations",
"custom_filters",
"q_objects",
"fields_for_select_list",
"group_bys",
)
def __init__(
self,
model: Type[MODEL],
db: BaseDBAsyncClient,
q_objects: List[Q],
fields_for_select_list: List[str],
limit: Optional[int],
offset: Optional[int],
distinct: bool,
orderings: List[Tuple[str, str]],
flat: bool,
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
group_bys: Tuple[str,...],
) -> None:
super().__init__(model, annotations)
if flat and (len(fields_for_select_list)!= 1):
raise TypeError("You can flat value_list only if contains one field")
fields_for_select = {str(i): field for i, field in enumerate(fields_for_select_list)}
self.fields = fields_for_select
self.limit = limit
self.offset = offset
self.distinct = distinct
self.orderings = orderings
self.custom_filters = custom_filters
self.q_objects = q_objects
self.fields_for_select_list = fields_for_select_list
self.flat = flat
self._db = db
self.group_bys = group_bys
def _make_query(self) -> None:
self.query = copy(self.model._meta.basequery)
for positional_number, field in self.fields.items():
self.add_field_to_select_query(field, positional_number)
self.resolve_ordering(
self.model, self.model._meta.basetable, self.orderings, self.annotations
)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
if self.limit:
self.query._limit = self.limit
if self.offset:
self.query._offset = self.offset
if self.distinct:
self.query._distinct = True
if self.group_bys:
self.query._groupbys = self._resolve_group_bys(*self.group_bys)
def __await__(self) -> Generator[Any, None, List[Any]]:
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return self._execute().__await__() # pylint: disable=E1101
async def __aiter__(self) -> AsyncIterator[Any]:
for val in await self:
yield val
async def _execute(self) -> List[Any]:
_, result = await self._db.execute_query(str(self.query))
columns = [
(key, self.resolve_to_python_value(self.model, name))
for key, name in sorted(self.fields.items())
]
if self.flat:
func = columns[0][1]
flatmap = lambda entry: func(entry["0"]) # noqa
return list(map(flatmap, result))
listmap = lambda entry: tuple(func(entry[column]) for column, func in columns) # noqa
return list(map(listmap, result))
class ValuesQuery(FieldSelectQuery):
__slots__ = (
"fields_for_select",
"limit",
"offset",
"distinct",
"orderings",
"annotations",
"custom_filters",
"q_objects",
"group_bys",
)
def __init__(
self,
model: Type[MODEL],
db: BaseDBAsyncClient,
q_objects: List[Q],
fields_for_select: Dict[str, str],
limit: Optional[int],
offset: Optional[int],
distinct: bool,
orderings: List[Tuple[str, str]],
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
group_bys: Tuple[str,...],
) -> None:
super().__init__(model, annotations)
self.fields_for_select = fields_for_select
self.limit = limit
self.offset = offset
self.distinct = distinct
self.orderings = orderings
self.custom_filters = custom_filters
self.q_objects = q_objects
self._db = db
self.group_bys = group_bys
def _make_query(self) -> None:
self.query = copy(self.model._meta.basequery)
for return_as, field in self.fields_for_select.items():
self.add_field_to_select_query(field, return_as)
self.resolve_ordering(
self.model, self.model._meta.basetable, self.orderings, self.annotations
)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
if self.limit:
self.query._limit = self.limit
if self.offset:
self.query._offset = self.offset
if self.distinct:
self.query._distinct = True
if self.group_bys:
self.query._groupbys = self._resolve_group_bys(*self.group_bys)
def __await__(self) -> Generator[Any, None, List[dict]]:
if self._db is None:
self._db = self.model._meta.db # type: ignore
self._make_query()
return self._execute().__await__() # pylint: disable=E1101
async def __aiter__(self) -> AsyncIterator[dict]:
for val in await self:
yield val
async def _execute(self) -> List[dict]:
result = await self._db.execute_query_dict(str(self.query))
columns = [
val
for val in [
(alias, self.resolve_to_python_value(self.model, field_name))
for alias, field_name in self.fields_for_select.items()
]
if not isinstance(val[1], types.LambdaType)
]
if columns:
for row in result:
for col, func in columns:
row[col] = func(row[col])
return result
from copy import copy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast
from pypika import Table
from pypika.terms import Criterion
from tortoise.exceptions import FieldError, OperationalError
from tortoise.fields.relational import BackwardFKRelation, ManyToManyFieldInstance, RelationalField
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
from tortoise.queryset import QuerySet
def _process_filter_kwarg(
model: "Type[Model]", key: str, value: Any, table: Table
) -> Tuple[Criterion, Optional[Tuple[Table, Criterion]]]:
join = None
if value is None and f"{key}__isnull" in model._meta.filters:
param = model._meta.get_filter(f"{key}__isnull")
value = True
else:
param = model._meta.get_filter(key)
pk_db_field = model._meta.db_pk_column
if param.get("table"):
join = (
param["table"],
table[pk_db_field] == param["table"][param["backward_key"]],
)
if param.get("value_encoder"):
value = param["value_encoder"](value, model)
criterion = param["operator"](param["table"][param["field"]], value)
else:
field_object = model._meta.fields_map[param["field"]]
encoded_value = (
param["value_encoder"](value, model, field_object)
if param.get("value_encoder")
else model._meta.db.executor_class._field_to_db(field_object, value, model)
)
criterion = param["operator"](table[param["source_field"]], encoded_value)
return criterion, join
def _get_joins_for_related_field(
table: Table, related_field: RelationalField, related_field_name: str
) -> List[Tuple[Table, Criterion]]:
required_joins = []
related_table: Table = related_field.related_model._meta.basetable
if isinstance(related_field, ManyToManyFieldInstance):
through_table = Table(related_field.through)
required_joins.append(
(
through_table,
table[related_field.model._meta.db_pk_column]
== through_table[related_field.backward_key],
)
)
required_joins.append(
(
related_table,
through_table[related_field.forward_key]
== related_table[related_field.related_model._meta.db_pk_column],
)
)
elif isinstance(related_field, BackwardFKRelation):
to_field_source_field = (
related_field.to_field_instance.source_field
or related_field.to_field_instance.model_field_name
)
if table == related_table:
related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
required_joins.append(
(
related_table,
table[to_field_source_field] == related_table[related_field.relation_source_field],
)
)
else:
to_field_source_field = (
related_field.to_field_instance.source_field
or related_field.to_field_instance.model_field_name
)
from_field = related_field.model._meta.fields_map[related_field.source_field] # type: ignore
from_field_source_field = from_field.source_field or from_field.model_field_name
related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
required_joins.append(
(related_table, related_table[to_field_source_field] == table[from_field_source_field],)
)
return required_joins
class EmptyCriterion(Criterion): # type: ignore
def __or__(self, other: Criterion) -> Criterion:
return other
def __and__(self, other: Criterion) -> Criterion:
return other
def __bool__(self) -> bool:
return False
def _and(left: Criterion, right: Criterion) -> Criterion:
if left and not right:
return left
return left & right
def _or(left: Criterion, right: Criterion) -> Criterion:
if left and not right:
return left
return left | right
class QueryModifier:
"""
Internal structure used to generate SQL Queries.
"""
def __init__(
self,
where_criterion: Optional[Criterion] = None,
joins: Optional[List[Tuple[Table, Criterion]]] = None,
having_criterion: Optional[Criterion] = None,
) -> None:
self.where_criterion: Criterion = where_criterion or EmptyCriterion()
self.joins = joins if joins else []
self.having_criterion: Criterion = having_criterion or EmptyCriterion()
def __and__(self, other: "QueryModifier") -> "QueryModifier":
return QueryModifier(
where_criterion=_and(self.where_criterion, other.where_criterion),
joins=self.joins + other.joins,
having_criterion=_and(self.having_criterion, other.having_criterion),
)
def __or__(self, other: "QueryModifier") -> "QueryModifier":
if self.having_criterion or other.having_criterion:
# TODO: This could be optimized?
result_having_criterion = _or(
_and(self.where_criterion, self.having_criterion),
_and(other.where_criterion, other.having_criterion),
)
return QueryModifier(
joins=self.joins + other.joins, having_criterion=result_having_criterion
)
if self.where_criterion and other.where_criterion:
return QueryModifier(
where_criterion=self.where_criterion | other.where_criterion,
joins=self.joins + other.joins,
)
return QueryModifier(
where_criterion=self.where_criterion or other.where_criterion,
joins=self.joins + other.joins,
)
def __invert__(self) -> "QueryModifier":
if not self.where_criterion and not self.having_criterion:
return QueryModifier(joins=self.joins)
if self.having_criterion:
# TODO: This could be optimized?
return QueryModifier(
joins=self.joins,
having_criterion=_and(self.where_criterion, self.having_criterion).negate(),
)
return QueryModifier(where_criterion=self.where_criterion.negate(), joins=self.joins)
def get_query_modifiers(self) -> Tuple[Criterion, List[Tuple[Table, Criterion]], Criterion]:
"""
Returns a tuple of the query criterion.
"""
return self.where_criterion, self.joins, self.having_criterion
class Q:
"""
Q Expression container.
Q Expressions are a useful tool to compose a query from many small parts.
:param join_type: Is the join an AND or OR join type?
:param args: Inner ``Q`` expressions that you want to wrap.
:param kwargs: Filter statements that this Q object should encapsulate.
"""
__slots__ = (
"children",
"filters",
"join_type",
"_is_negated",
"_annotations",
"_custom_filters",
)
AND = "AND"
OR = "OR"
def __init__(self, *args: "Q", join_type: str = AND, **kwargs: Any) -> None:
if args and kwargs:
newarg = Q(join_type=join_type, **kwargs)
args = (newarg,) + args
kwargs = {}
if not all(isinstance(node, Q) for node in args):
raise OperationalError("All ordered arguments must be Q nodes")
#: Contains the sub-Q's that this Q is made up of
self.children: Tuple[Q,...] = args
#: Contains the filters applied to this Q
self.filters: Dict[str, Any] = kwargs
if join_type not in {self.AND, self.OR}:
raise OperationalError("join_type must be AND or OR")
#: Specifies if this Q does an AND or OR on its children
self.join_type = join_type
self._is_negated = False
self._annotations: Dict[str, Any] = {}
self._custom_filters: Dict[str, Dict[str, Any]] = {}
def __and__(self, other: "Q") -> "Q":
"""
Returns a binary AND of Q objects, use ``AND`` operator.
:raises OperationalError: AND operation requires a Q node
"""
if not isinstance(other, Q):
raise OperationalError("AND operation requires a Q node")
return Q(self, other, join_type=self.AND)
def __or__(self, other: "Q") -> "Q":
"""
Returns a binary OR of Q objects, use ``OR`` operator.
:raises OperationalError: OR operation requires a Q node
"""
if not isinstance(other, Q):
raise OperationalError("OR operation requires a Q node")
return Q(self, other, join_type=self.OR)
def __invert__(self) -> "Q":
"""
Returns a negated instance of the Q object, use ``~`` operator.
"""
q = Q(*self.children, join_type=self.join_type, **self.filters)
q.negate()
return q
def negate(self) -> None:
"""
Negates the current Q object (mutation).
"""
self._is_negated = not self._is_negated
def _resolve_nested_filter(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
related_field_name = key.split("__")[0]
related_field = cast(RelationalField, model._meta.fields_map[related_field_name])
required_joins = _get_joins_for_related_field(table, related_field, related_field_name)
modifier = Q(**{"__".join(key.split("__")[1:]): value}).resolve(
model=related_field.related_model,
annotations=self._annotations,
custom_filters=self._custom_filters,
table=required_joins[-1][0],
)
return QueryModifier(joins=required_joins) & modifier
def _resolve_custom_kwarg(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
having_info = self._custom_filters[key]
annotation = self._annotations[having_info["field"]]
annotation_info = annotation.resolve(model, table)
operator = having_info["operator"]
overridden_operator = model._meta.db.executor_class.get_overridden_filter_func(
filter_func=operator
)
if overridden_operator:
operator = overridden_operator
if annotation_info["field"].is_aggregate:
modifier = QueryModifier(having_criterion=operator(annotation_info["field"], value))
else:
modifier = QueryModifier(where_criterion=operator(annotation_info["field"], value))
return modifier
def _resolve_regular_kwarg(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
if key not in model._meta.filters and key.split("__")[0] in model._meta.fetch_fields:
modifier = self._resolve_nested_filter(model, key, value, table)
else:
criterion, join = _process_filter_kwarg(model, key, value, table)
joins = [join] if join else []
modifier = QueryModifier(where_criterion=criterion, joins=joins)
return modifier
def _get_actual_filter_params(
self, model: "Type[Model]", key: str, value: Table
) -> Tuple[str, Any]:
filter_key = key
if key in model._meta.fk_fields or key in model._meta.o2o_fields:
field_object = model._meta.fields_map[key]
if hasattr(value, "pk"):
filter_value = value.pk
else:
filter_value = value
filter_key = cast(str, field_object.source_field)
elif key in model._meta.m2m_fields:
if hasattr(value, "pk"):
filter_value = value.pk
else:
filter_value = value
elif (
key.split("__")[0] in model._meta.fetch_fields
or key in self._custom_filters
or key in model._meta.filters
):
filter_value = value
else:
allowed = sorted(
model._meta.fields | model._meta.fetch_fields | set(self._custom_filters)
)
raise FieldError(f"Unknown filter param '{key}'. Allowed base values are {allowed}")
return filter_key, filter_value
def _resolve_kwargs(self, model: "Type[Model]", table: Table) -> QueryModifier:
modifier = QueryModifier()
for raw_key, raw_value in self.filters.items():
key, value = self._get_actual_filter_params(model, raw_key, raw_value)
if key in self._custom_filters:
filter_modifier = self._resolve_custom_kwarg(model, key, value, table)
else:
filter_modifier = self._resolve_regular_kwarg(model, key, value, table)
if self.join_type == self.AND:
modifier &= filter_modifier
else:
modifier |= filter_modifier
if self._is_negated:
modifier = ~modifier
return modifier
def _resolve_children(self, model: "Type[Model]", table: Table) -> QueryModifier:
modifier = QueryModifier()
for node in self.children:
node_modifier = node.resolve(model, self._annotations, self._custom_filters, table)
if self.join_type == self.AND:
modifier &= node_modifier
else:
modifier |= node_modifier
if self._is_negated:
modifier = ~modifier
return modifier
def resolve(
self,
model: "Type[Model]",
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
table: Table,
) -> QueryModifier:
"""
Resolves the logical Q chain into the parts of a SQL statement.
:param model: The Model this Q Expression should be resolved on.
:param annotations: Extra annotations one wants to inject into the resultset.
:param custom_filters: Pre-resolved filters to be passed though.
:param table: ``pypika.Table`` to keep track of the virtual SQL table
(to allow self referential joins)
"""
self._annotations = annotations
self._custom_filters = custom_filters
if self.filters:
return self._resolve_kwargs(model, table)
return self._resolve_children(model, table)
class Prefetch:
"""
Prefetcher container. One would directly use this when wanting to attach a custom QuerySet
for specialised prefetching.
:param relation: Related field name.
:param queryset: Custom QuerySet to use for prefetching.
"""
__slots__ = ("relation", "queryset")
def __init__(self, relation: str, queryset: "QuerySet") -> None:
self.relation = relation
self.queryset = queryset
self.queryset.query = copy(self.queryset.model._meta.basequery)
def resolve_for_queryset(self, queryset: "QuerySet") -> None:
"""
Called internally to generate prefetching query.
:param queryset: Custom QuerySet to use for prefetching.
:raises OperationalError: If field does not exist in model.
"""
relation_split = self.relation.split("__")
first_level_field = relation_split[0]
if first_level_field not in queryset.model._meta.fetch_fields:
raise OperationalError(
f"relation {first_level_field} for {queryset.model._meta.db_table} not found"
)
forwarded_prefetch = "__".join(relation_split[1:])
if forwarded_prefetch:
if first_level_field not in queryset._prefetch_map.keys():
queryset._prefetch_map[first_level_field] = set()
queryset._prefetch_map[first_level_field].add(
Prefetch(forwarded_prefetch, self.queryset)
)
else:
queryset._prefetch_queries[first_level_field] = self.queryset |
|
tortoise__tortoise-orm | schema.rst | Tutorial | How to generate schema | Apache License 2.0 | tortoise__tortoise-orm/docs/schema.rst | [
"tortoise__tortoise-orm/tortoise/utils.py"
] | Schema Creation
Here we create a connection to an SQLite database client and then we discover
& initialize the models.
tortoise.Tortoise.generate_schemas generates the schema on an empty database. There is also the
default option when generating the schemas to set the safe parameter to
True, which will only create the tables if they don't already exist. | import logging
from typing import TYPE_CHECKING
logger = logging.getLogger("tortoise")
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.backends.base.client import BaseDBAsyncClient
def get_schema_sql(client: "BaseDBAsyncClient", safe: bool) -> str:
"""
Generates the SQL schema for the given client.
:param client: The DB client to generate Schema SQL for
:param safe: When set to true, creates the table only when it does not already exist.
"""
generator = client.schema_generator(client)
return generator.get_create_schema_sql(safe)
async def generate_schema_for_client(client: "BaseDBAsyncClient", safe: bool) -> None:
"""
Generates and applies the SQL schema directly to the given client.
:param client: The DB client to generate Schema SQL for
:param safe: When set to true, creates the table only when it does not already exist.
"""
generator = client.schema_generator(client)
schema = get_schema_sql(client, safe)
logger.debug("Creating schema: %s", schema)
if schema: # pragma: nobranch
await generator.generate_from_string(schema) |
|
teskalabs__asab | config.rst | Module doc / Tutorial | Config usage | BSD 3-Clause New or Revised License | teskalabs__asab/old_docs/asab/config.rst | [
"teskalabs__asab/asab/config.py"
] | teskalabs__asab/asab | Configuration
The configuration is provided by the Config object, which is a singleton. This
means that you can access Config from any place in your code, without the
need for explicit initialisation.
import asab
# Initialize application object and hence the configuration
app = asab.Application()
# Access configuration values anywhere
my_conf_value = asab.Config['section_name']['key1']
Based on ConfigParser
Config is inherited from the Python Standard Library
configparser.ConfigParser class, which implements a basic configuration
language with a structure similar to what's found in Microsoft
Windows INI files.
Example of the configuration file:
[bitbucket.org]
User = hg
[topsecret.server.com]
Port = 50022
ForwardX11 = no
And this is how you access configuration values:
>>> asab.Config['topsecret.server.com']['ForwardX11']
'no'
Multiline configuration entry
Multiline configuration entries are supported. An example:
[section]
key=
line1
line2
line3
another_key=foo
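For illustration, a sketch of reading the value back (assuming the section and key names above, and the getmultiline() helper of ASAB's ConfigParser):
>>> asab.Config["section"]["key"]
'\nline1\nline2\nline3'
>>> asab.Config.getmultiline("section", "key")
['line1', 'line2', 'line3']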
Automatic load of configuration
If a configuration file name is specified, the configuration is
automatically loaded from that configuration file during the initialisation
of the Application. The configuration file name can be specified by one of:
the -c command-line argument (1), the ASAB_CONFIG environment variable (2) or
the config [general] config_file default value (3).
./sample_app.py -c ./etc/sample.conf
Including other configuration files
You can specify one or more additional configuration files that are
loaded and merged into the main configuration file. This is done by the
[general] include configuration value. Multiple paths are separated by
os.pathsep (: on Unix). A path can be specified as a glob (e.g. using
the * and ? wildcard characters); it will be expanded by the glob module from
the Python Standard Library. Included configuration files may not exist;
this situation is silently ignored.
[general]
include=./etc/site.conf:./etc/site.d/*.conf
You can also use a multiline configuration entry:
[general]
include=
./etc/site.conf
./etc/site.d/*.conf
Configuration default values
This is how you can extend configuration default values:
asab.Config.add_defaults(
{
'section_name': {
'key1': 'value',
'key2': 'another value'
},
'other_section': {
'key3': 'value',
},
}
)
Only simple types (string, int and float) are allowed in the
configuration values. Don't use complex types such as lists,
dictionaries or objects because these are impossible to provide via
configuration files etc.
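For illustration (a sketch with made-up section and key names), keep the default values to plain scalars:
asab.Config.add_defaults(
    {
        'my_section': {
            'max_items': 10,       # int is fine
            'ratio': 0.5,          # float is fine
            'title': 'My App',     # string is fine
            # 'tags': ['a', 'b']   # complex types such as lists are not supported
        },
    }
)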
Environment variables in configuration
Environment variables found in values are automatically expanded.
[section_name]
persistent_dir=${HOME}/.myapp/
>>> asab.Config['section_name']['persistent_dir']
'/home/user/.myapp/'
There is a special environment variable ${THIS_DIR} that is expanded to
the directory that contains the current configuration file. It is useful in
complex configurations that utilize included configuration files etc.
[section_name]
my_file=${THIS_DIR}/my_file.txt
Another environment variable, ${HOSTNAME}, contains the application
hostname, to be used e.g. in a logging file path.
[section_name]
my_file=${THIS_DIR}/${HOSTNAME}/my_file.txt
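For illustration, a sketch of the expanded value (assuming the configuration file lives in /etc/myapp and the hostname is server01):
>>> asab.Config["section_name"]["my_file"]
'/etc/myapp/server01/my_file.txt'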
Passwords in configuration
The [passwords] section in the configuration serves to securely store
passwords, which are then not shown publicly in the output of the default API
config endpoint.
It is convenient for the user to store passwords in one place, so that
they are not repeated in many sections of the config file(s).
Usage is as follows:
[connection:KafkaConnection]
password=${passwords:kafka_password}
[passwords]
kafka_password=<MY_SECRET_PASSWORD>
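A minimal sketch of the effect (section and key names follow the example above): when the value is read, the reference is expanded transparently:
>>> asab.Config["connection:KafkaConnection"]["password"]
'<MY_SECRET_PASSWORD>'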
Obtaining seconds
Seconds can be obtained using the getseconds() method for values with
different time units specified in the configuration:
[sleep]
sleep_time=5.2s
another_sleep_time=10d
The available units are:
- y ... years
- M ... months
- w ... weeks
- d ... days
- h ... hours
- m ... minutes
- s ... seconds
- ms ... milliseconds
If no unit is specified, a float number of seconds is expected.
The seconds value can be obtained in the code in two
ways:
self.SleepTime = asab.Config["sleep"].getseconds("sleep_time")
self.AnotherSleepTime = asab.Config.getseconds("sleep", "another_sleep_time")
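With the [sleep] section above, both calls return plain floats (an illustrative sketch; 10 days equal 864000 seconds):
>>> asab.Config["sleep"].getseconds("sleep_time")
5.2
>>> asab.Config.getseconds("sleep", "another_sleep_time")
864000.0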
Obtaining URLs
A URL can be obtained using the geturl() method, which takes the URL from
the config and removes leading and trailing whitespace and trailing
slashes.
There is an optional parameter called scheme that can hold any URL
scheme, like http, https, mongodb etc. If it is set to None, scheme
validation is bypassed.
If the scheme parameter matches the scheme in the config, the URL is
returned. If it does not match, an error is raised.
There are two ways of obtaining the URL:
asab.Config["urls"].geturl("teskalabs", scheme="https")
asab.Config.geturl("urls", "github", scheme=None)
Example:
>>> asab.Config["urls"].geturl("teskalabs", scheme="https")
'https://www.teskalabs.com'
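Conversely, a sketch of the failure case (the github entry in the configuration below carries no scheme, so requiring https raises an error):
try:
    asab.Config["urls"].geturl("github", scheme="https")
except ValueError:
    pass  # "github.com" does not satisfy the required https scheme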
For reference this would be the configuration file:
[urls]
teskalabs=https://www.teskalabs.com/
github=github.com
| import os
import sys
import re
import glob
import logging
import inspect
import platform
import configparser
import urllib.parse
import collections.abc
import typing
from. import utils
L = logging.getLogger(__name__)
class ConfigParser(configparser.ConfigParser):
"""
ConfigParser enhanced with new features such as adding default configuration, URL validation, automatic reading from Zookeeper etc.
"""
_syslog_sockets = {
'Darwin': '/var/run/syslog'
}
_syslog_format = {
'Darwin':'m'
}
_default_values = {
'general': {
'config_file': os.environ.get('ASAB_CONFIG', ''),
'tick_period': 1, # In seconds
'var_dir': os.path.expanduser('~/.' + os.path.splitext(os.path.basename(sys.argv[0]))[0]),
'changelog': '',
'manifest': '',
# Daemonization
'pidfile': '!', # '!' has a special meaning => it transforms into platform specific location of pid file
'working_dir': '.',
'uid': '',
'gid': '',
},
"asab:metrics": {
"native_metrics": "true",
"web_requests_metrics": False, # False is a default, web_requests_metrics won't be generated.
"expiration": 60,
},
"asab:doc": {
"default_route_tag": "module_name"
},
"logging": {
'verbose': os.environ.get('ASAB_VERBOSE', False),
"app_name": os.path.basename(sys.argv[0]),
"sd_id": "sd", # Structured data id, see RFC5424
"level": "NOTICE",
"levels": "",
},
"logging:console": {
"format": "%(asctime)s %(levelname)s %(name)s %(struct_data)s%(message)s",
"datefmt": "%d-%b-%Y %H:%M:%S.%f",
},
"logging:syslog": {
"enabled": "false",
# TODO: "facility": 'local1',
"address": _syslog_sockets.get(platform.system(), "/dev/log"),
"format": _syslog_format.get(platform.system(), "3"),
},
"logging:file": {
"path": "",
"format": "%(asctime)s %(levelname)s %(name)s %(struct_data)s%(message)s",
"datefmt": "%d-%b-%Y %H:%M:%S.%f",
"backup_count": 3,
"backup_max_bytes": 0,
"rotate_every": "",
},
"library": {
"azure_cache": "false", # true or the actual path of where the cache should be located
},
# "passwords" section serves to securely store passwords
# in the configuration file; the passwords are not
# shown in the default API
#
# Usage in the configuration file:
#
# [connection:KafkaConnection]
# password=${passwords:kafka_password}
#
# [passwords]
# kafka_password=<MY_SECRET_PASSWORD>
"passwords": {
},
"housekeeping": {
"at": "03:00",
"limit": "05:00",
"run_at_startup": "no",
},
}
if 'ASAB_ZOOKEEPER_SERVERS' in os.environ:
# If `ASAB_ZOOKEEPER_SERVERS` are specified, use that as a default value
_default_values['zookeeper'] = {'servers': os.environ['ASAB_ZOOKEEPER_SERVERS']}
def add_defaults(self, dictionary: dict) -> None:
"""Add defaults to a current configuration.
Args:
dictionary: Arguments to be added to the default configuration.
"""
for section, keys in dictionary.items():
section = str(section)
if section not in self._sections:
try:
self.add_section(section)
except ValueError:
if self._strict:
raise
for key, value in keys.items():
key = self.optionxform(str(key))
if key in self._sections[section]:
# Value exists, no default needed
continue
if value is not None:
value = str(value)
if value is not None and "$" in value:
self.set(section, key, os.path.expandvars(value))
else:
self.set(section, key, value)
def _traverse_includes(self, includes: str, this_dir: str) -> None:
"""
Read included config files. Nested including is supported.
"""
if '\n' in includes:
sep = '\n'
else:
sep = " "
for include_glob in includes.split(sep):
include_glob = include_glob.strip()
if len(include_glob) == 0:
continue
if include_glob.startswith("zookeeper"):
self._include_from_zookeeper(include_glob)
include_glob = os.path.expandvars(include_glob.strip())
for include in glob.glob(include_glob):
include = os.path.abspath(include)
if include in self._included:
# Preventing infinite dependency looping
L.warn("Config file '{}' can be included only once.".format(include))
continue
self._included.add(include)
self.set('general', 'include', '')
self._load_dir_stack.append(os.path.dirname(include))
try:
self.read(include)
finally:
self._load_dir_stack.pop()
includes = self.get('general', 'include', fallback='')
self._traverse_includes(includes, os.path.dirname(include_glob))
def _load(self):
"""
This method should be called only once, any subsequent call will lead to undefined behaviour.
"""
self._load_dir_stack = []
self.config_contents_list = []
self.config_name_list = []
config_fname = ConfigParser._default_values['general']['config_file']
if config_fname!= '':
if not os.path.isfile(config_fname):
print("Config file '{}' not found".format(config_fname), file=sys.stderr)
sys.exit(1)
self._load_dir_stack.append(os.path.dirname(config_fname))
try:
self.read(config_fname)
finally:
self._load_dir_stack.pop()
self.add_defaults(ConfigParser._default_values)
includes = self.get('general', 'include', fallback='')
self._included = set()
self._traverse_includes(includes, this_dir=os.path.dirname(config_fname))
del self._load_dir_stack
def _include_from_zookeeper(self, zkurl):
"""
Load the configuration from a ZooKeeper server and append it to the `self.config_contents_list` attribute.
The method establishes a connection to the ZooKeeper server specified in the configuration file mentioned above.
It retrieves the configuration by accessing the path specified in the `general` section, using the key `includes`.
The server URL is provided as a list of server names: server1, server2, server3.
The path to the configuration file follows this format: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/asab/etc/zk-site.conf.'
The loaded configuration is then appended to the `self.config_contents_list` attribute, allowing further processing or usage.
This method supports loading configuration files in various formats, such as.json,.yaml, and.conf.
Example:
```ini
[asab:zookeeper]
url=server1 server2 server3
[general]
include=zookeeper://zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/asab/etc/zk-site.conf.
```
"""
# parse include value into hostname and path
url_pieces = urllib.parse.urlparse(zkurl)
url_path = url_pieces.path
url_netloc = url_pieces.netloc
if not url_netloc:
if "asab:zookeeper" in self:
# Backward compatibility
url_netloc = self["asab:zookeeper"]["servers"]
else:
url_netloc = self["zookeeper"]["servers"]
if url_path.startswith("./"):
if "asab:zookeeper" in self:
# Backward compatibility
url_path = self["asab:zookeeper"]["path"] + url_path[1:]
else:
url_path = self["zookeeper"]["path"] + url_path[1:]
head, tail = os.path.split(url_path)
self.config_name_list.append(tail)
try:
# Delayed import to minimize a hard dependency footprint
import kazoo.client
import json
import yaml
zk = kazoo.client.KazooClient(url_netloc)
zk.start()
data = zk.get(url_path)[0]
if url_path.endswith(".json"):
config = json.loads(data)
self.read_dict(config)
elif url_path.endswith(".yaml"):
config = yaml.safe_load(data)
self.read_dict(config)
elif url_path.endswith(".conf"):
config = data.decode("utf-8")
self.read_string(config)
else:
raise NotImplementedError("Unknown configuration format '{}'".format(url_path))
zk.stop()
zk.close()
# Include in the list of config file contents
self.config_contents_list.append(config)
except Exception as e:
L.error("Failed to obtain configuration from Zookeeper server(s): '{}'.".format(e))
sys.exit(1)
def get_config_contents_list(self):
return self.config_contents_list, self.config_name_list
def getseconds(self, section, option, *, raw=False, vars=None, fallback=None, **kwargs) -> float:
"""
Get time data from config and convert time string into seconds with `convert_to_seconds()` method.
The available units are:
- `y` - years
- `M` - months
- `w` - weeks
- `d` - days
- `h` - hours
- `m` - minutes
- `s` - seconds
- `ms` - milliseconds
Returns:
float: Time in seconds.
Examples:
```python
self.SleepTime = asab.Config["sleep"].getseconds("sleep_time")
self.AnotherSleepTime = asab.Config.getseconds("sleep", "another_sleep_time")
```
"""
if fallback is None:
fallback = configparser._UNSET
return self._get_conv(section, option, utils.convert_to_seconds, raw=raw, vars=vars, fallback=fallback, **kwargs)
def geturl(self, section, option, *, raw=False, vars=None, fallback=None, scheme=None, **kwargs):
"""
Get URL from config and remove all leading and trailing whitespaces and trailing slashes.
Args:
scheme (str | tuple): URL scheme(s) awaited. If `None`, scheme validation is bypassed.
Returns:
Validated URL.
Raises:
ValueError: Scheme requirements are not met if set.
Examples:
```ini
[urls]
teskalabs=https://www.teskalabs.com/
github=github.com
```
``` python
asab.Config["urls"].geturl("teskalabs", scheme="https")
asab.Config.geturl("urls", "github", scheme=None)
```
"""
return utils.validate_url(self.get(section, option, raw=raw, vars=vars, fallback=fallback), scheme)
def getmultiline(self, section, option, *, raw=False, vars=None, fallback=None, **kwargs) -> typing.List[str]:
"""
Get multiline data from config.
Examples:
```ini
[places]
visited:
Praha
Brno
Pardubice Plzeň
unvisited:
```
```python
>>> asab.Config.getmultiline("places", "visited")
["Praha", "Brno", "Pardubice", "Plzeň"]
>>> asab.Config.getmultiline("places", "unvisited")
[]
>>> asab.Config.getmultiline("places", "nonexisting", fallback=["Gottwaldov"])
["Gottwaldov"]
```
"""
values = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
if isinstance(values, str):
return [item.strip() for item in re.split(r"\s+", values) if len(item) > 0]
else:
# fallback can be anything
return values
class _Interpolation(configparser.ExtendedInterpolation):
"""Interpolation which expands environment variables in values."""
def before_read(self, parser, section, option, value):
# Expand environment variables
if '$' in value:
os.environ['THIS_DIR'] = os.path.abspath(parser._load_dir_stack[-1])
value = os.path.expandvars(value)
return super().before_read(parser, section, option, value)
Config = ConfigParser(interpolation=_Interpolation())
"""
Object for accessing the configuration of the ASAB application.
Examples:
```python
my_conf_value = asab.Config['section_name']['key']
```
"""
class Configurable(object):
"""
Custom object whose attributes can be loaded from the configuration.
Example:
```python
class ConfigurableObject(asab.Configurable):
ConfigDefaults = {
'foo': 'bar',
}
def __init__(self, config_section_name, config=None):
super().__init__(config_section_name=config_section_name, config=config)
config_foo = self.Config.get('foo')
```
"""
ConfigDefaults: dict = {}
def __init__(self, config_section_name: str, config: typing.Optional[dict] = None):
self.Config = ConfigurableDict()
for base_class in inspect.getmro(self.__class__):
if not hasattr(base_class, 'ConfigDefaults'):
continue
if len(base_class.ConfigDefaults) == 0:
continue
# Merge config defaults of each base class in the 'inheritance' way
for key, value in base_class.ConfigDefaults.items():
if value is None:
raise ValueError("None value not allowed in ConfigDefaults. Found in %s:%s " % (
config_section_name, key))
if key not in self.Config:
self.Config[key] = value
if Config.has_section(config_section_name):
for key, value in Config.items(config_section_name):
self.Config[key] = value
if config is not None:
self.Config.update(config)
# This is for backward compatibility
ConfigObject = Configurable
class ConfigurableDict(collections.abc.MutableMapping):
"""
A dictionary supplemented with custom methods for obtaining bools, seconds, urls etc.
"""
def __init__(self):
self._data = {}
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __delitem__(self, key):
del self._data[key]
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def getboolean(self, key) -> bool:
"""
Obtain the corresponding value of the key and convert it into bool.
"""
value = self._data[key]
return utils.string_to_boolean(value)
def getseconds(self, key) -> float:
"""
Obtain the corresponding value of the key and convert it into seconds via `convert_to_seconds()` method.
"""
value = self._data[key]
return utils.convert_to_seconds(value)
def getint(self, key) -> int:
"""
Obtain the corresponding value of the key and convert it into integer.
"""
value = self._data[key]
return int(value)
def getfloat(self, key) -> float:
"""
Obtain the corresponding value of the key and convert it into float.
"""
value = self._data[key]
return float(value)
def geturl(self, key, scheme):
"""
Obtain the corresponding value of the key and parse it via `validate_url()` method.
"""
value = self._data[key]
return utils.validate_url(value, scheme)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self._data) |
teskalabs__asab | library.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | teskalabs__asab/old_docs/asab/library.rst | [
"teskalabs__asab/asab/library/providers/azurestorage.py",
"teskalabs__asab/asab/library/providers/zookeeper.py",
"teskalabs__asab/asab/library/providers/filesystem.py",
"teskalabs__asab/asab/library/providers/git.py"
] | teskalabs__asab/asab/library | Library
The ASAB Library (asab.library) is a concept of shared data content
across microservices in the cluster. The asab.library provides a
read-only interface for listing and reading this content. The library
can also notify the ASAB microservice about changes, e.g. for automated
update/reload.
There is a companion microservice, asab-library, that can be used for
management and editing of the library content. The asab.library can,
however, operate without the asab-library microservice.
Library structure
The library content is organized in a simplified filesystem manner, with
directories and files.
Example of the library structure:
+ /folder1/
- /folder1/item1.yaml
- /folder1/item2.json
+ /folder2/
- /folder2/item3.yaml
+ /folder2/folder2.3/
- /folder2/folder2.3/item4.json
Library path rules
- Any path must start with /, including the root path (/).
- The folder path must end with /.
- The item path must end with an extension (e.g. .json).
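For illustration, using names from the structure example above:
/                       (the root folder)
/folder1/               (a folder path)
/folder1/item1.yaml     (an item path)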
Library service
LibraryService
Example of the use:
import asab
class MyApplication(asab.Application):
def __init__(self):
super().__init__()
# Initialize the library service
self.LibraryService = asab.library.LibraryService(self, "LibraryService")
self.PubSub.subscribe("Library.ready!", self.on_library_ready)
async def on_library_ready(self, event_name, library):
print("# Library\n")
for item in await self.LibraryService.list("", recursive=True):
print(" *", item)
if item.type == 'item':
itemio = await self.LibraryService.read(item.name)
if itemio is not None:
with itemio:
content = itemio.read()
print(" - content: {} bytes".format(len(content)))
else:
print(" - (DISABLED)")
Providers
The library can be configured to work with the following "backends" (aka
providers):
Git repository
Connection to git repositories requires the pygit2 library to be installed.
Example of configuration:
[library]
providers: git+https://github.com/john/awesome_project.git
Functionality
The git provider clones the repository into a temporary directory and
then uses the File System Provider to read the files from it. The
default path for the cloned repository is /tmp/asab.library.git/ and it
can be changed manually:
[library:git]
repodir=path/to/repository/cache
Deploy tokens in GitLab
GitLab uses deploy tokens to enable authentication of deployment tasks,
independent of a user account. A deploy token is an SSH key that grants
access to a single repository. The public part of the key is attached
directly to the repository instead of a personal account, and the
private part of the key remains on the server. It is the preferred
way over changing local SSH settings.
If you want to create a deploy token for your GitLab repository, follow
these steps from the manual:
1. Go to Settings > Repository > Deploy tokens section in your
repository. (Note that you have to possess "Maintainer" or "Owner"
role for the repository.)
2. Expand the "Deploy tokens" section. The list of current Active
Deploy Tokens will be displayed.
3. Complete the fields and scopes. We recommend specifying a custom
"username", as you will need it later for the URL in the configuration.
4. Record the deploy token's values before leaving or refreshing the
page! After that, you cannot access it again.
After the deploy token is created, use the repository URL in the
following format:
https://<username>:<deploy_token>@gitlab.example.com/john/awesome_project.git
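For example, such a URL can be used directly as a git provider (the username and token below are the placeholders from the format above):
[library]
providers: git+https://<username>:<deploy_token>@gitlab.example.com/john/awesome_project.git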
Layers
The library content can be organized into an unlimited number of layers.
Each layer is represented by a provider with a specific configuration.
Library configuration
Example:
[library]
providers:
provider+1://...
provider+2://...
provider+3://...
| import os
import io
import typing
import hashlib
import logging
import tempfile
import dataclasses
import urllib.parse
import xml.dom.minidom
import aiohttp
from...config import Config
from..item import LibraryItem
from.abc import LibraryProviderABC
#
L = logging.getLogger(__name__)
#
class AzureStorageLibraryProvider(LibraryProviderABC):
'''
AzureStorageLibraryProvider is a library provider that reads
from an Microsoft Azure Storage container.
Configure by:
azure+https://ACCOUNT-NAME.blob.core.windows.net/BLOB-CONTAINER
If Container Public Access Level is not set to "Public access",
then "Access Policy" must be created with "Read" and "List" permissions
and "Shared Access Signature" (SAS) query string must be added to a URL in a configuration:
azure+https://ACCOUNT-NAME.blob.core.windows.net/BLOB-CONTAINER?sv=2020-10-02&si=XXXX&sr=c&sig=XXXXXXXXXXXXXX
'''
def __init__(self, library, path, layer):
super().__init__(library, layer)
assert path[:6] == "azure+"
self.URL = urllib.parse.urlparse(path[6:])
self.Model = None # Will be set by `_load_model` method
self.Path = path
self.CacheDir = Config.get("library", "azure_cache")
if self.CacheDir == 'false':
self.CacheDir = None
elif self.CacheDir == 'true':
self.CacheDir = os.path.join(tempfile.gettempdir(), "asab.library.azure.{}".format(hashlib.sha256(path.encode('utf-8')).hexdigest()))
# Ensure that the cache directory exists
if self.CacheDir is not None:
try:
os.makedirs(self.CacheDir)
except FileExistsError:
pass # Cache directory already exists
self.App.TaskService.schedule(self._start())
async def _start(self):
await self._load_model()
if self.Model is not None:
await self._set_ready()
# TODO: Call this periodically
async def _load_model(self):
url = urllib.parse.urlunparse(urllib.parse.ParseResult(
scheme=self.URL.scheme,
netloc=self.URL.netloc,
path=self.URL.path,
params='',
query=self.URL.query + "&restype=container&comp=list",
fragment=''
))
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
content = await resp.text()
else:
err = await resp.text()
L.warning("Failed to list blobs from `{}`:\n{}".format(url, err))
return
model = AzureDirectory("/", sub=dict())
dom = xml.dom.minidom.parseString(content)
for blob in dom.getElementsByTagName("Blob"):
path = get_xml_text(blob.getElementsByTagName("Name"))
path = path.split('/')
curmodel = model
for i in range(len(path) - 1):
newmodel = curmodel.sub.get(path[i])
if newmodel is None:
curmodel.sub[path[i]] = newmodel = AzureDirectory(
name='/' + '/'.join(path[:i + 1]),
sub=dict()
)
curmodel = newmodel
curmodel.sub[path[-1]] = AzureItem(
name='/' + '/'.join(path)
)
self.Model = model
# TODO: If the cache is active, remove items from the cache that:
# 1) are not in the list
# 2) their etag differs
L.info("is connected.", struct_data={'path': self.Path})
async def list(self, path: str) -> list:
if self.Model is None:
L.warning("Azure Storage library provider is not ready. Cannot list {}".format(path))
raise RuntimeError("Not ready")
assert path[:1] == '/'
assert '//' not in path
assert len(path) == 1 or path[-1:]!= '/'
if path == '/':
pathparts = []
else:
pathparts = path.split("/")[1:]
curmodel = self.Model
for p in pathparts:
curmodel = curmodel.sub.get(p)
if curmodel is None:
raise KeyError("Not '{}' found".format(path))
if curmodel.type!= 'dir':
raise KeyError("Not '{}' found".format(path))
items = []
for i in curmodel.sub.values():
items.append(LibraryItem(
name=i.name,
type=i.type,
layer=self.Layer,
providers=[self],
))
return items
async def read(self, path: str) -> typing.IO:
assert path[:1] == '/'
assert '//' not in path
assert len(path) == 1 or path[-1:]!= '/'
headers = {}
pathhash = hashlib.sha256(path.encode('utf-8')).hexdigest()
cachefname = os.path.join(self.CacheDir, pathhash)
if self.CacheDir is not None:
try:
with open(cachefname + '.etag', "r") as etagf:
etag = etagf.read()
# We found a local cached file with the etag, we will use that in the request
# if the request returns "304 Not Modified" then we will ship the local version of the file
headers['If-None-Match'] = etag
except FileNotFoundError:
pass
url = urllib.parse.urlunparse(urllib.parse.ParseResult(
scheme=self.URL.scheme,
netloc=self.URL.netloc,
path=self.URL.path + path,
params='',
query=self.URL.query,
fragment=''
))
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as resp:
if resp.status == 200:
etag = resp.headers.get('ETag')
if self.CacheDir is not None and etag is not None:
output = open(cachefname, "w+b")
with open(cachefname + '.etag', "w") as etagf:
etagf.write(etag)
else:
# Store the response into the temporary file
#... that's to avoid storing the whole (and possibly large) file in the memory
output = tempfile.TemporaryFile()
async for chunk in resp.content.iter_chunked(16 * io.DEFAULT_BUFFER_SIZE):
output.write(chunk)
elif resp.status == 304 and self.CacheDir is not None: # 304 is Not Modified
# The file should be read from cache
output = open(cachefname, "r+b")
else:
L.warning("Failed to get blob:\n{}".format(await resp.text()), struct_data={'status': resp.status})
return None
# Rewind the file so the reader can start consuming from the beginning
output.seek(0)
return output
@dataclasses.dataclass
class AzureDirectory:
name: str
sub: dict
type: str = "dir"
@dataclasses.dataclass
class AzureItem:
name: str
type: str = "item"
def get_xml_text(nodelist):
rc = []
for node in nodelist:
for textnode in node.childNodes:
if textnode.nodeType == textnode.TEXT_NODE:
rc.append(textnode.data)
return ''.join(rc)
import io
import asyncio
import hashlib
import typing
import logging
import functools
import os.path
import urllib.parse
import kazoo.exceptions
from.abc import LibraryProviderABC
from..item import LibraryItem
from...zookeeper import ZooKeeperContainer
#
L = logging.getLogger(__name__)
#
class ZooKeeperLibraryProvider(LibraryProviderABC):
"""
Configuration variant:
1) ZooKeeper provider is fully configured from [zookeeper] section
.. code::
[zookeeper]
servers=zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
path=/library
[library]
providers:
zk://
2) ZooKeeper provider is configured by `servers` from [zookeeper] section and path from URL
Path will be `/library`.
.. code::
[zookeeper]
servers=zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
path=/else
[library]
providers:
zk:///library
2.1) ZooKeeper provider is configured by `servers` from [zookeeper] section and path from URL
Path will be `/`, this is a special case to 2)
.. code::
[zookeeper]
servers=zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
path=/else
[library]
providers:
zk:///
3) ZooKeeper provider is fully configured from URL
.. code::
[library]
providers:
zk://zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/library
4) ZooKeeper provider is configured by `servers` from [zookeeper] section and joined `path` from [zookeeper] and
path from URL
Path will be `/else/library`
.. code::
[zookeeper]
servers=zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
path=/else
[library]
providers:
zk://./library
If `path` from [zookeeper] section is missing, an application class name will be used
Ex. `/BSQueryApp/library`
"""
def __init__(self, library, path, layer):
super().__init__(library, layer)
url_pieces = urllib.parse.urlparse(path)
self.FullPath = url_pieces.scheme + '://'
self.BasePath = url_pieces.path.lstrip("/")
while self.BasePath.endswith("/"):
self.BasePath = self.BasePath[:-1]
self.BasePath = '/' + self.BasePath
if self.BasePath == '/':
self.BasePath = ''
if url_pieces.netloc in ["", "."]:
# if netloc is not provided `zk:///path`, then use `zookeeper` section from config
config_section_name = 'zookeeper'
z_url = None
else:
config_section_name = ''
z_url = path
# Initialize ZooKeeper client
zksvc = self.App.get_service("asab.ZooKeeperService")
self.ZookeeperContainer = ZooKeeperContainer(
zksvc,
config_section_name=config_section_name,
z_path=z_url
)
self.Zookeeper = self.ZookeeperContainer.ZooKeeper
if config_section_name == 'zookeeper':
self.FullPath += self.ZookeeperContainer.Config['servers']
else:
self.FullPath += url_pieces.netloc
# Handle `zk://` configuration
if z_url is None and url_pieces.netloc == "" and url_pieces.path == "" and self.ZookeeperContainer.Path!= '':
self.BasePath = '/' + self.ZookeeperContainer.Path
# Handle `zk://./path` configuration
if z_url is None and url_pieces.netloc == "." and self.ZookeeperContainer.Path!= '':
self.BasePath = '/' + self.ZookeeperContainer.Path + self.BasePath
self.FullPath += self.BasePath
self.VersionNodePath = self.build_path('/.version.yaml')
self.Version = None # Will be read when a library become ready
self.VersionWatch = None
self.App.PubSub.subscribe("ZooKeeperContainer.state/CONNECTED!", self._on_zk_connected)
self.App.PubSub.subscribe("ZooKeeperContainer.state/LOST!", self._on_zk_lost)
self.App.PubSub.subscribe("ZooKeeperContainer.state/SUSPENDED!", self._on_zk_lost)
self.App.PubSub.subscribe("Application.tick/60!", self._get_version_counter)
# This will check a library for changes in subscribed folders even without version counter change.
self.App.PubSub.subscribe("Application.tick/60!", self._on_library_changed)
self.Subscriptions = {}
async def finalize(self, app):
"""
The `finalize` function is called when the application is shutting down
"""
await self.Zookeeper._stop()
async def _on_zk_connected(self, event_name, zkcontainer):
"""
When the Zookeeper container is connected, set the self.Zookeeper property to the Zookeeper object.
"""
if zkcontainer!= self.ZookeeperContainer:
return
L.info("is connected.", struct_data={'path': self.FullPath})
def on_version_changed(version, event):
self.App.Loop.call_soon_threadsafe(self._check_version_counter, version)
def install_watcher():
return kazoo.recipe.watchers.DataWatch(self.Zookeeper.Client, self.VersionNodePath, on_version_changed)
self.VersionWatch = await self.Zookeeper.ProactorService.execute(install_watcher)
await self._set_ready()
async def _on_zk_lost(self, event_name, zkcontainer):
if zkcontainer!= self.ZookeeperContainer:
return
await self._set_ready(ready=False)
async def _get_version_counter(self, event_name=None):
if self.Zookeeper is None:
return
version = await self.Zookeeper.get_data(self.VersionNodePath)
self._check_version_counter(version)
def _check_version_counter(self, version):
# If version is `None` (i.e. `/.version.yaml` doesn't exist), then assume version 1
if version is not None:
try:
version = int(version)
except ValueError:
version = 1
else:
version = 1
if self.Version is None:
# Initial grab of the version
self.Version = version
return
if self.Version == version:
# The version has not changed
return
asyncio.create_task(self._on_library_changed())
async def read(self, path: str) -> typing.IO:
if self.Zookeeper is None:
L.warning("Zookeeper Client has not been established (yet). Cannot read {}".format(path))
raise RuntimeError("Zookeeper Client has not been established (yet). Not ready.")
node_path = self.build_path(path)
try:
node_data = await self.Zookeeper.get_data(node_path)
except kazoo.exceptions.ConnectionClosedError:
L.warning("Zookeeper library provider is not ready")
raise RuntimeError("Zookeeper library provider is not ready")
except kazoo.exceptions.NoNodeError:
return None
# Consider adding other exceptions from Kazoo to indicate common non-critical errors
if node_data is not None:
return io.BytesIO(initial_bytes=node_data)
else:
return None
async def list(self, path: str) -> list:
if self.Zookeeper is None:
L.warning("Zookeeper Client has not been established (yet). Cannot list {}".format(path))
raise RuntimeError("Zookeeper Client has not been established (yet). Not ready.")
node_path = self.build_path(path)
nodes = await self.Zookeeper.get_children(node_path)
if nodes is None:
raise KeyError("Not '{}' found".format(node_path))
items = []
for node in nodes:
# Remove any component that starts with '.'
startswithdot = functools.reduce(lambda x, y: x or y.startswith('.'), node.split(os.path.sep), False)
if startswithdot:
continue
if '.' in node: # We detect files in zookeeper by presence of the dot in the filename,
fname = path + node
ftype = "item"
else:
fname = path + node + '/'
ftype = "dir"
items.append(LibraryItem(
name=fname,
type=ftype,
layer=self.Layer,
providers=[self],
))
return items
def build_path(self, path):
"""
It takes a path in the library and transforms it into a path within Zookeeper.
It also does a series of sanity checks (asserts).
IMPORTANT: If you encounter asserting failure, don't remove assert.
It means that your code is incorrect.
"""
assert path[:1] == '/'
if path!= '/':
node_path = self.BasePath + path
else:
node_path = self.BasePath
# Zookeeper path should not have forward slash at the end of path
node_path = node_path.rstrip("/")
assert '//' not in node_path
assert node_path[0] == '/'
return node_path
async def subscribe(self, path):
path = self.BasePath + path
self.Subscriptions[path] = await self._get_directory_hash(path)
async def _get_directory_hash(self, path):
def recursive_traversal(path, digest):
if not self.Zookeeper.Client.exists(path):
return
children = self.Zookeeper.Client.get_children(path)
for child in children:
if path!= "/":
child_path = "{}/{}".format(path, child)
else:
child_path = "/{}".format(child)
zstat = self.Zookeeper.Client.exists(child_path)
digest.update("{}\n{}\n".format(child_path, zstat.version).encode('utf-8'))
recursive_traversal(child_path, digest)
digest = hashlib.sha1()
await self.Zookeeper.ProactorService.execute(recursive_traversal, path, digest)
return digest.digest()
async def _on_library_changed(self, event_name=None):
for path, digest in self.Subscriptions.items():
try:
newdigest = await self._get_directory_hash(path)
if newdigest!= digest:
self.Subscriptions[path] = newdigest
self.App.PubSub.publish("Library.change!", self, path)
except Exception as e:
L.error("Failed to process library change for path: '{}'. Reason: '{}'".format(path, e))
import io
import os
import os.path
import stat
import glob
import struct
import typing
import logging
from.abc import LibraryProviderABC
from..item import LibraryItem
from...timer import Timer
try:
from.filesystem_inotify import inotify_init, inotify_add_watch, IN_CREATE, IN_ISDIR, IN_ALL_EVENTS, EVENT_FMT, EVENT_SIZE, IN_MOVED_TO, IN_IGNORED
except OSError:
inotify_init = None
#
L = logging.getLogger(__name__)
#
class FileSystemLibraryProvider(LibraryProviderABC):
def __init__(self, library, path, layer, *, set_ready=True):
'''
`set_ready` can be used to disable/defer `self._set_ready` call.
'''
super().__init__(library, layer)
self.BasePath = os.path.abspath(path)
while self.BasePath.endswith("/"):
self.BasePath = self.BasePath[:-1]
L.info("is connected.", struct_data={'path': path})
# Filesystem is always ready (or you have a serious problem)
if set_ready:
self.App.TaskService.schedule(self._set_ready())
# Open inotify file descriptor
if inotify_init is not None:
init = inotify_init()
if init == -1:
L.warning("Subscribing to library changes in filesystem provider is not available. Inotify was not initialized.")
self.FD = None
else:
self.FD = init
self.App.Loop.add_reader(self.FD, self._on_inotify_read)
self.AggrTimer = Timer(self.App, self._on_aggr_timer)
else:
self.FD = None
self.AggrEvents = []
self.WDs = {}
async def read(self, path: str) -> typing.IO:
node_path = self.BasePath + path
# File path must start with '/'
assert node_path[:1] == '/', "File path must start with a forward slash (/). For example: /library/Templates/file.json"
# File path must end with the extension
assert len(os.path.splitext(node_path)[1]) > 0, "File path must end with an extension. For example: /library/Templates/item.json"
# File cannot contain '//'
assert '//' not in node_path
try:
return io.FileIO(node_path, 'rb')
except FileNotFoundError:
return None
except IsADirectoryError:
return None
async def list(self, path: str) -> list:
# This list method is completely synchronous, but it should look asynchronous to keep the list methods unified among providers.
return self._list(path)
def _list(self, path: str):
node_path = self.BasePath + path
# Directory path must start with '/'
assert node_path[:1] == '/', "Directory path must start with a forward slash (/). For example: /library/Templates/"
# Directory path must end with '/'
assert node_path[-1:] == '/', "Directory path must end with a forward slash (/). For example: /library/Templates/"
# Directory cannot contain '//'
assert '//' not in node_path
exists = os.access(node_path, os.R_OK) and os.path.isdir(node_path)
if not exists:
raise KeyError(" '{}' not found".format(path))
items = []
for fname in glob.iglob(os.path.join(node_path, "*")):
fstat = os.stat(fname)
assert fname.startswith(self.BasePath)
fname = fname[len(self.BasePath):]
if stat.S_ISREG(fstat.st_mode):
ftype = "item"
elif stat.S_ISDIR(fstat.st_mode):
ftype = "dir"
fname += '/'
else:
ftype = "?"
# Remove any component that starts with '.'
if any(x.startswith('.') for x in fname.split('/')):
continue
items.append(LibraryItem(
name=fname,
type=ftype,
layer=self.Layer,
providers=[self],
))
return items
def _on_inotify_read(self):
data = os.read(self.FD, 64 * 1024)
pos = 0
while pos < len(data):
wd, mask, cookie, namesize = struct.unpack_from(EVENT_FMT, data, pos)
pos += EVENT_SIZE + namesize
name = (data[pos - namesize: pos].split(b'\x00', 1)[0]).decode()
if mask & IN_ISDIR == IN_ISDIR and ((mask & IN_CREATE == IN_CREATE) or (mask & IN_MOVED_TO == IN_MOVED_TO)):
subscribed_path, child_path = self.WDs[wd]
self._subscribe_recursive(subscribed_path, "/".join([child_path, name]))
if mask & IN_IGNORED == IN_IGNORED:
# cleanup
del self.WDs[wd]
continue
self.AggrEvents.append((wd, mask, cookie, os.fsdecode(name)))
self.AggrTimer.restart(0.2)
async def _on_aggr_timer(self):
to_advertise = set()
for wd, mask, cookie, name in self.AggrEvents:
# When a watched directory is being removed, more than one inotify event is produced.
# When an IN_IGNORED event occurs, the respective wd is removed from self.WDs,
# but some other events (like IN_DELETE_SELF) get to this point without having their reference in self.WDs.
subscribed_path, _ = self.WDs.get(wd, (None, None))
to_advertise.add(subscribed_path)
self.AggrEvents.clear()
for path in to_advertise:
if path is None:
continue
self.App.PubSub.publish("Library.change!", self, path)
async def subscribe(self, path):
if not os.path.isdir(self.BasePath + path):
return
if self.FD is None:
L.warning("Cannot subscribe to changes in the filesystem layer of the library: '{}'".format(self.BasePath))
return
self._subscribe_recursive(path, path)
def _subscribe_recursive(self, subscribed_path, path_to_be_listed):
binary = (self.BasePath + path_to_be_listed).encode()
wd = inotify_add_watch(self.FD, binary, IN_ALL_EVENTS)
if wd == -1:
L.error("Error in inotify_add_watch")
return
self.WDs[wd] = (subscribed_path, path_to_be_listed)
try:
items = self._list(path_to_be_listed)
except KeyError:
# subscribing to non-existing directory is silent
return
for item in items:
if item.type == "dir":
self._subscribe_recursive(subscribed_path, item.name)
async def finalize(self, app):
if self.FD is not None:
self.App.Loop.remove_reader(self.FD)
os.close(self.FD)
import os
import tempfile
import logging
import hashlib
import re
from.filesystem import FileSystemLibraryProvider
from...config import Config
#
L = logging.getLogger(__name__)
#
try:
import pygit2
except ImportError:
L.critical("Please install pygit2 package to enable Git Library Provider. >>> pip install pygit2")
raise SystemExit("Application exiting....")
class GitLibraryProvider(FileSystemLibraryProvider):
"""
Read-only git provider to read from remote repository.
It clones a remote git repository to a temporary directory and then uses the
FileSystemLibraryProvider to read the files.
To read from local git repository, please use FileSystemProvider.
.. code::
[library]
providers=git+<URL or deploy token>#<branch name>
[library:git]
repodir=<optional location of the repository cache>
"""
def __init__(self, library, path, layer):
# format: 'git+http[s]://[<username>:<deploy token>@]<url>[#<branch>]'
pattern = re.compile(r"git\+(https?://)((.*):(.*)@)?([^#]*)(?:#(.*))?$")
path_split = pattern.findall(path)[0]
L.debug(path_split)
self.URLScheme, self.UserInfo, self.User, self.DeployToken, self.URLPath, self.Branch = path_split
self.URL = "".join([self.URLScheme, self.UserInfo, self.URLPath])
self.Branch = self.Branch if self.Branch!= '' else None
repodir = Config.get("library:git", "repodir", fallback=None)
if repodir is not None:
self.RepoPath = os.path.abspath(repodir)
else:
tempdir = tempfile.gettempdir()
self.RepoPath = os.path.join(
tempdir,
"asab.library.git",
hashlib.sha256(path.encode('utf-8')).hexdigest()
)
super().__init__(library, self.RepoPath, layer, set_ready=False)
self.GitRepository = None
from...proactor import Module
self.App.add_module(Module)
self.ProactorService = self.App.get_service("asab.ProactorService")
self.PullLock = False
self.SubscribedPaths = set()
self.App.TaskService.schedule(self.initialize_git_repository())
self.App.PubSub.subscribe("Application.tick/60!", self._periodic_pull)
async def _periodic_pull(self, event_name):
"""
Changes in the remote repository are pulled every minute. The `PullLock` flag ensures that a new "pull" can start only after the previous one has finished.
"""
if self.GitRepository is None:
return
if self.PullLock:
return
self.PullLock = True
try:
to_publish = await self.ProactorService.execute(self._do_pull)
# Once reset of the head is finished, PubSub message about the change in the subscribed directory gets published.
for path in to_publish:
self.App.PubSub.publish("Library.change!", self, path)
except pygit2.GitError:
L.warning("Periodic pull from the remote repository failed.")
finally:
self.PullLock = False
async def initialize_git_repository(self):
def init_task():
if pygit2.discover_repository(self.RepoPath) is None:
# For a new repository, clone the remote repository
os.makedirs(self.RepoPath, mode=0o700, exist_ok=True)
self.GitRepository = pygit2.clone_repository(
url=self.URL,
path=self.RepoPath,
checkout_branch=self.Branch
)
else:
# For existing repository, pull the latest changes
self.GitRepository = pygit2.Repository(self.RepoPath)
self._do_pull()
try:
await self.ProactorService.execute(init_task)
except KeyError as err:
pygit_message = str(err).replace('\"', '')
if pygit_message == "'refs/remotes/origin/{}'".format(self.Branch):
# branch does not exist
L.exception(
"Branch does not exist.",
struct_data={
"url": self.URLPath,
"branch": self.Branch
}
)
else:
L.exception("Error when initializing git repository: {}".format(pygit_message))
self.App.stop() # NOTE: raising Exception doesn't exit the app
except pygit2.GitError as err:
pygit_message = str(err).replace('\"', '')
if pygit_message == "unexpected http status code: 404":
# repository not found
L.exception(
"Git repository not found.",
struct_data={
"url": self.URLPath
}
)
elif pygit_message == "remote authentication required but no callback set":
# either repository not found or authentication failed
L.exception(
"Authentication failed when initializing git repository.\n"
"Check if the 'providers' option satisfies the format: 'git+<username>:<deploy token>@<URL>#<branch name>'",
struct_data={
"url": self.URLPath,
"username": self.User,
"deploy_token": self.DeployToken
}
)
elif 'cannot redirect from' in pygit_message:
# bad URL
L.exception(
"Git repository not found.",
struct_data={
"url": self.URLPath
}
)
elif 'Temporary failure in name resolution' in pygit_message:
# Internet connection or DNS name resolution failed
L.exception(
"Git repository not initialized: connection failed. Check your network connection.",
struct_data={
"url": self.URLPath
}
)
else:
L.exception("Git repository not initialized: {}".format(err))
self.App.stop()
except Exception as err:
L.exception(err)
assert self.GitRepository.remotes["origin"] is not None, "Git repository not initialized."
await self._set_ready()
def _do_fetch(self):
"""
It fetches the remote repository and returns the commit ID of the remote branch
:return: The commit id of the latest commit on the remote repository.
"""
if self.GitRepository is None:
return None
self.GitRepository.remotes["origin"].fetch()
if self.Branch is None:
reference = self.GitRepository.lookup_reference("refs/remotes/origin/HEAD")
else:
reference = self.GitRepository.lookup_reference("refs/remotes/origin/{}".format(self.Branch))
commit_id = reference.peel().id
return commit_id
def _do_pull(self):
new_commit_id = self._do_fetch()
if new_commit_id == self.GitRepository.head.target:
return []
# Before new head is set, check the diffs. If changes in subscribed directory occured, add path to "to_publish" list.
to_publish = []
for path in self.SubscribedPaths:
for i in self.GitRepository.diff(self.GitRepository.head.target, new_commit_id).deltas:
if ("/" + i.old_file.path).startswith(path):
to_publish.append(path)
# Reset HEAD
self.GitRepository.head.set_target(new_commit_id)
self.GitRepository.reset(new_commit_id, pygit2.GIT_RESET_HARD)
return to_publish
async def subscribe(self, path):
if not os.path.isdir(self.BasePath + path):
return
self.SubscribedPaths.add(path) |
teskalabs__asab | log.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | teskalabs__asab/old_docs/asab/log.rst | [
"teskalabs__asab/asab/log.py"
] | teskalabs__asab/asab | Logging
ASAB logging is built on top of a standard Python logging module. It
means that it logs to stderr when running on a console and ASAB also
provides file and syslog output (both RFC5424 and RFC3164) for
background mode of operations.
Log timestamps are captured with sub-second precision (depending on the
system capabilities) and displayed including microsecond part.
Recommended use
We recommend creating a logger L in every module to capture all
necessary logging output. Alternative logging strategies are also
supported.
import logging
L = logging.getLogger(__name__)
...
L.warning("Hello world!")
Example of the output to the console:
25-Mar-2018 23:33:58.044595 WARNING myapp.mymodule Hello world!
Logging Levels
ASAB uses Python logging levels with the addition of LOG_NOTICE level.
The LOG_NOTICE level is similar to the logging.INFO level, but it is visible
even in non-verbose mode.
L.log(asab.LOG_NOTICE, "This message will be visible regardless of the verbose configuration.")
------------------------------------------------------------
Level          Numeric value   Syslog Severity level
-------------  --------------  ----------------------------
CRITICAL       50              Critical / crit / 2
ERROR          40              Error / err / 3
WARNING        30              Warning / warning / 4
LOG_NOTICE     25              Notice / notice / 5
INFO           20              Informational / info / 6
DEBUG          10              Debug / debug / 7
NOTSET         0
------------------------------------------------------------
Example of a custom level configuration:
[logging]
levels=
myApp.module1 DEBUG
myApp.module2 WARNING
customLogger ERROR
The logger name and the corresponding logging level are separated by a
space; each logger is on a separate line.
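As a sketch, assuming the configuration above has been loaded by ASAB, loggers created with those names will honour the configured levels:
import logging

L1 = logging.getLogger("myApp.module1")  # configured to DEBUG above
L2 = logging.getLogger("myApp.module2")  # configured to WARNING above

L1.debug("Emitted, because myApp.module1 is set to DEBUG.")
L2.info("Suppressed, because myApp.module2 is set to WARNING.")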
Verbose mode
The command-line argument -v enables verbose logging. It means that log
entries with levels DEBUG and INFO will be visible. It also enables
asyncio debug logging.
The actual verbose mode is available as the asab.Config["logging"]["verbose"]
boolean option.
L.debug("This message will be visible only in verbose mode.")
Structured data
ASAB supports structured data added to a log entry. It follows
RFC 5424, section STRUCTURED-DATA. Structured data is a dictionary
that has to be serializable to JSON.
L.warning("Hello world!", struct_data={'key1':'value1', 'key2':2})
Example of the output to the console:
25-Mar-2018 23:33:58.044595 WARNING myapp.mymodule [sd key1="value1" key2="2"] Hello world!
Logging to file
The command-line argument -l enables logging to file. A non-empty
path option in the [logging:file] section of the configuration file
enables logging to file as well.
Example of the configuration file section:
[logging:file]
path=/var/log/asab.log
format="%%(asctime)s %%(levelname)s %%(name)s %%(struct_data)s%%(message)s",
datefmt="%%d-%%b-%%Y %%H:%%M:%%S.%%f"
backup_count=3
rotate_every=1d
When the deployment expects multiple instances of the same application to
log to the same location, it is recommended that the hostname variable
is used in the file path:
[logging:file]
path=/var/log/${HOSTNAME}/asab.log
In this way, the applications will log to separate log files in
different folders, which is the intended behavior, since race conditions
may occur when different application instances log into the same file.
Logging to console
ASAB will log to the console only if it detects that it runs in the
foreground on a terminal (using os.isatty) or if the environment
variable ASABFORCECONSOLE is set to 1. This is a useful setup for
e.g. PyCharm.
Log rotation
ASAB supports log rotation. The log rotation is triggered by the UNIX
signal SIGHUP, which can be used e.g. to integrate with the logrotate
utility. It is implemented using logging.handlers.RotatingFileHandler
from the Python standard library. Also, a time-based log rotation can be
configured using the rotate_every option.
backup_count specifies the number of old files to be kept prior to their
removal. The system will save old log files by appending the extensions
'.1', '.2' etc. to the filename.
rotate_every specifies the time interval of a log rotation. The default
value is an empty string, which means that the time-based log rotation is
disabled. The interval is specified by an integer value and a unit,
e.g. 1d (for 1 day) or 30M (for 30 minutes). Known units are H for hours,
M for minutes, d for days and s for seconds.
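As an illustrative sketch (not part of ASAB itself), another process may trigger the rotation by sending SIGHUP to the running application; the PID below is a placeholder:
import os
import signal

os.kill(12345, signal.SIGHUP)  # 12345 stands for the PID of the running ASAB application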
Logging to syslog
The command-line argument -s enables logging to syslog.
A configuration section [logging:syslog] can be used to specify details
about desired syslog logging.
Example of the configuration file section:
[logging:syslog]
enabled=true
format=5
address=tcp://syslog.server.lan:1554/
enabled is equivalent to the command-line switch -s and it enables the
syslog logging target.
format specifies which logging format will be used. Possible values are:
- 5 for (new) syslog format (RFC 5424 ) ,
- 3 for old BSD syslog format (RFC 3164 ), typically used by /dev/log
and
- m for Mac OSX syslog flavour that is based on BSD syslog format but
it is not fully compatible.
The default value is 3 on Linux and m on Mac OSX.
address specifies the location of the Syslog server. It could be a UNIX
path such as /dev/log or a URL. Possible URL values:
- tcp://syslog.server.lan:1554/ for Syslog over TCP
- udp://syslog.server.lan:1554/ for Syslog over UDP
- unix-connect:///path/to/syslog.socket for Syslog over UNIX socket
(stream)
- unix-sendto:///path/to/syslog.socket for Syslog over UNIX socket
(datagram), equivalent to /path/to/syslog.socket, used by
/dev/log.
The default value is /dev/log on Linux or /var/run/syslog on Mac OSX.
Logging of obsolete features
It proved to be essential to inform operators about features that are
going to be obsoleted. ASAB offers the unified "obsolete" logger. This
logger can indicate through logs that a particular feature is marked as
"obsolete". Such a log message can then be "grepped" from logs uniformly.
It is recommended to include an eol attribute in the struct_data of the log
with a YYYY-MM-DD date of the planned obsoletion of the feature.
Hint: We suggest automating the detection of obsolete warnings in logs
so that the operations are informed well ahead of the actual removal of
the feature. The string to seek in logs is " OBSOLETE ".
Example of the use:
asab.LogObsolete.warning("Use of the obsolete function", struct_data={'eol':'2022-31-12'})
Log example:
21-Jul-2022 14:32:40.983884 WARNING OBSOLETE [eol="2022-31-12"] Use of the obsolete function
| import asyncio
import datetime
import logging
import logging.handlers
import os
import pprint
import queue
import re
import socket
import sys
import time
import traceback
import urllib.parse
from.config import Config
from.timer import Timer
from.utils import running_in_container
LOG_NOTICE = 25
"""
Info log level that is visible in non-verbose mode. It should not be used for warnings and errors.
"""
logging.addLevelName(LOG_NOTICE, "NOTICE")
L = logging.getLogger(__name__)
_NAME_TO_LEVEL = {
"NOTSET": logging.NOTSET,
"NOT SET": logging.NOTSET,
"NOT_SET": logging.NOTSET,
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"NOTICE": LOG_NOTICE,
"LOG_NOTICE": LOG_NOTICE,
"LOG NOTICE": LOG_NOTICE,
"WARNING": logging.WARNING,
"WARN": logging.WARNING,
"ERROR": logging.ERROR,
"FATAL": logging.CRITICAL,
"CRITICAL": logging.CRITICAL,
}
class Logging(object):
def __init__(self, app):
self.RootLogger = logging.getLogger()
self.ConsoleHandler = None
self.FileHandler = None
self.SyslogHandler = None
if not self.RootLogger.hasHandlers():
# Add console logger if needed
if os.isatty(sys.stdout.fileno()) or os.environ.get('ASABFORCECONSOLE', '0')!= '0':
self._configure_console_logging()
# Initialize file handler
file_path = Config["logging:file"]["path"]
if len(file_path) > 0:
# Ensure file path
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
self.FileHandler = logging.handlers.RotatingFileHandler(
file_path,
backupCount=Config.getint("logging:file", "backup_count"),
maxBytes=Config.getint("logging:file", "backup_max_bytes"),
)
self.FileHandler.setLevel(logging.DEBUG)
self.FileHandler.setFormatter(StructuredDataFormatter(
fmt=Config["logging:file"]["format"],
datefmt=Config["logging:file"]["datefmt"],
sd_id=Config["logging"]["sd_id"],
))
self.RootLogger.addHandler(self.FileHandler)
rotate_every = Config.get("logging:file", "rotate_every")
if rotate_every!= '':
rotate_every = re.match(r"^([0-9]+)([dMHs])$", rotate_every)
if rotate_every is not None:
i, u = rotate_every.groups()
i = int(i)
if i <= 0:
self.RootLogger.error("Invalid 'rotate_every' configuration value.")
else:
if u == 'H':
i = i * 60 * 60
elif u == 'M':
i = i * 60
elif u == 'd':
i = i * 60 * 60 * 24
elif u =='s':
pass
# PubSub is not ready at this moment, we need to create timer in a future
async def schedule(app, interval):
self.LogRotatingTime = Timer(app, self._on_tick_rotate_check, autorestart=True)
self.LogRotatingTime.start(i)
asyncio.ensure_future(schedule(app, i))
else:
self.RootLogger.error("Invalid 'rotate_every' configuration value.")
# Initialize syslog
if Config["logging:syslog"].getboolean("enabled"):
address = Config["logging:syslog"]["address"]
if address[:1] == '/':
self.SyslogHandler = AsyncIOHandler(app.Loop, socket.AF_UNIX, socket.SOCK_DGRAM, address)
else:
url = urllib.parse.urlparse(address)
if url.scheme == 'tcp':
self.SyslogHandler = AsyncIOHandler(app.Loop, socket.AF_INET, socket.SOCK_STREAM, (
url.hostname if url.hostname is not None else 'localhost',
url.port if url.port is not None else logging.handlers.SYSLOG_UDP_PORT
))
elif url.scheme == 'udp':
self.SyslogHandler = AsyncIOHandler(app.Loop, socket.AF_INET, socket.SOCK_DGRAM, (
url.hostname if url.hostname is not None else 'localhost',
url.port if url.port is not None else logging.handlers.SYSLOG_UDP_PORT
))
elif url.scheme == 'unix-connect':
self.SyslogHandler = AsyncIOHandler(app.Loop, socket.AF_UNIX, socket.SOCK_STREAM, url.path)
elif url.scheme == 'unix-sendto':
self.SyslogHandler = AsyncIOHandler(app.Loop, socket.AF_UNIX, socket.SOCK_DGRAM, url.path)
else:
self.RootLogger.warning("Invalid logging:syslog address '{}'".format(address))
address = None
if self.SyslogHandler is not None:
self.SyslogHandler.setLevel(logging.DEBUG)
format = Config["logging:syslog"]["format"]
if format =='m':
self.SyslogHandler.setFormatter(MacOSXSyslogFormatter(sd_id=Config["logging"]["sd_id"]))
elif format == '5':
self.SyslogHandler.setFormatter(SyslogRFC5424Formatter(sd_id=Config["logging"]["sd_id"]))
elif format == '5micro':
self.SyslogHandler.setFormatter(SyslogRFC5424microFormatter(sd_id=Config["logging"]["sd_id"]))
else:
self.SyslogHandler.setFormatter(SyslogRFC3164Formatter(sd_id=Config["logging"]["sd_id"]))
self.RootLogger.addHandler(self.SyslogHandler)
# No logging is configured
if self.ConsoleHandler is None and self.FileHandler is None and self.SyslogHandler is None:
# Let's check if we run in Docker and if so, then log on stderr
if running_in_container():
self._configure_console_logging()
else:
self.RootLogger.warning("Logging seems to be already configured. Proceed with caution.")
if Config["logging"].getboolean("verbose"):
self.RootLogger.setLevel(logging.DEBUG)
else:
level_name = Config["logging"]["level"].upper()
try:
self.RootLogger.setLevel(_NAME_TO_LEVEL.get(level_name, level_name))
except ValueError:
L.error("Cannot detect logging level '{}'".format(level_name))
# Fine-grained log level configurations
levels = Config["logging"].get('levels')
for level_line in levels.split('\n'):
level_line = level_line.strip()
if len(level_line) == 0 or level_line.startswith('#') or level_line.startswith(';'):
# line starts with a comment
continue
try:
logger_name, level_name = level_line.split(' ', 1)
except ValueError:
L.error("Cannot read line '{}' in '[logging] levels' section, expected format: 'logger_name level_name'.".format(level_line))
continue
level = _NAME_TO_LEVEL.get(level_name.upper(), level_name.upper())
try:
logging.getLogger(logger_name).setLevel(level)
except ValueError:
L.error("Cannot detect logging level '{}' for {} logger".format(level_name, logger_name))
def rotate(self):
if self.FileHandler is not None:
self.RootLogger.log(LOG_NOTICE, "Rotating logs")
self.FileHandler.doRollover()
async def _on_tick_rotate_check(self):
if self.FileHandler is not None:
if self.FileHandler.stream.tell() > 1000:
self.rotate()
def _configure_console_logging(self):
self.ConsoleHandler = logging.StreamHandler(stream=sys.stderr)
# Disable colors when running in container
if running_in_container():
self.ConsoleHandler.setFormatter(StructuredDataFormatter(
fmt=Config["logging:console"]["format"],
datefmt=Config["logging:console"]["datefmt"],
sd_id=Config["logging"]["sd_id"],
use_color=False
))
else:
self.ConsoleHandler.setFormatter(StructuredDataFormatter(
fmt=Config["logging:console"]["format"],
datefmt=Config["logging:console"]["datefmt"],
sd_id=Config["logging"]["sd_id"],
use_color=True
))
self.ConsoleHandler.setLevel(logging.DEBUG)
self.RootLogger.addHandler(self.ConsoleHandler)
class _StructuredDataLogger(logging.Logger):
'''
This class extends a default python logger class, specifically by adding ``struct_data`` parameter to logging functions.
It means that you can use expressions such as ``logger.info("Hello world!", struct_data={'key':'value'})``.
'''
def _log(self, level, msg, args, exc_info=None, struct_data=None, extra=None, stack_info=False):
if struct_data is not None:
if extra is None:
extra = dict()
extra['_struct_data'] = struct_data
super()._log(level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info)
logging.setLoggerClass(_StructuredDataLogger)
class StructuredDataFormatter(logging.Formatter):
'''
The logging formatter that renders log messages that includes structured data.
'''
empty_sd = ""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def __init__(self, facility=16, fmt=None, datefmt=None, style='%', sd_id='sd', use_color: bool = False):
super().__init__(fmt, datefmt, style)
self.SD_id = sd_id
self.Facility = facility
self.UseColor = use_color
def format(self, record):
'''
Format the specified record as text.
'''
record.struct_data = self.render_struct_data(record.__dict__.get("_struct_data"))
# The Priority value is calculated by first multiplying the Facility number by 8 and then adding the numerical value of the Severity.
if record.levelno <= logging.DEBUG:
severity = 7 # Debug
color = self.BLUE
elif record.levelno <= logging.INFO:
severity = 6 # Informational
color = self.GREEN
elif record.levelno <= LOG_NOTICE:
severity = 5 # Notice
color = self.CYAN
elif record.levelno <= logging.WARNING:
severity = 4 # Warning
color = self.YELLOW
elif record.levelno <= logging.ERROR:
severity = 3 # Error
color = self.RED
elif record.levelno <= logging.CRITICAL:
severity = 2 # Critical
color = self.MAGENTA
else:
severity = 1 # Alert
color = self.WHITE
if self.UseColor:
levelname = record.levelname
levelname_color = _COLOR_SEQ % (30 + color) + levelname + _RESET_SEQ
record.levelname = levelname_color
record.priority = (self.Facility << 3) + severity
return super().format(record)
def formatTime(self, record, datefmt=None):
'''
Return the creation time of the specified LogRecord as formatted text.
'''
try:
ct = datetime.datetime.fromtimestamp(record.created)
if datefmt is not None:
s = ct.strftime(datefmt)
else:
t = ct.strftime("%Y-%m-%d %H:%M:%S")
s = "%s.%03d" % (t, record.msecs)
return s
except BaseException as e:
print("ERROR when logging: {}".format(e), file=sys.stderr)
return str(ct)
def render_struct_data(self, struct_data):
'''
Return the string with structured data.
'''
if struct_data is None:
return self.empty_sd
else:
return "[{sd_id} {sd_params}] ".format(
sd_id=self.SD_id,
sd_params=" ".join(['{}="{}"'.format(key, val) for key, val in struct_data.items()]))
def _loop_exception_handler(loop, context):
'''
This is a logging exception handler for asyncio.
Its purpose is to nicely log any unhandled exception that arises in asyncio tasks.
'''
exception = context.pop('exception', None)
message = context.pop('message', '')
if len(message) > 0:
message += '\n'
if len(context) > 0:
message += pprint.pformat(context)
if exception is not None:
ex_traceback = exception.__traceback__
tb_lines = [line.rstrip('\n') for line in traceback.format_exception(exception.__class__, exception, ex_traceback)]
message += '\n' + '\n'.join(tb_lines)
logging.getLogger().error(message)
class MacOSXSyslogFormatter(StructuredDataFormatter):
"""
It implements Syslog formatting for Mac OSX syslog (aka format ``m``).
"""
def __init__(self, fmt=None, datefmt=None, style='%', sd_id='sd'):
fmt = '<%(priority)s>%(asctime)s {app_name}[{proc_id}]: %(levelname)s %(name)s %(struct_data)s%(message)s\000'.format(
app_name=Config["logging"]["app_name"],
proc_id=os.getpid(),
)
# Initialize formatter
super().__init__(fmt=fmt, datefmt='%b %d %H:%M:%S', style=style, sd_id=sd_id)
class SyslogRFC3164Formatter(StructuredDataFormatter):
"""
Implementation of a legacy or BSD Syslog (RFC 3164) formatting (aka format ``3``).
"""
def __init__(self, fmt=None, datefmt=None, style='%', sd_id='sd'):
fmt = '<%(priority)s>%(asctime)s {hostname} {app_name}[{proc_id}]:%(levelname)s %(name)s %(struct_data)s%(message)s\000'.format(
app_name=Config["logging"]["app_name"],
hostname=socket.gethostname(),
proc_id=os.getpid(),
)
# Initialize formatter
super().__init__(fmt=fmt, datefmt='%b %d %H:%M:%S', style=style, sd_id=sd_id)
class SyslogRFC5424Formatter(StructuredDataFormatter):
"""
It implements the new syslog formatting (RFC 5424, aka format ``5``).
"""
empty_sd = " "
def __init__(self, fmt=None, datefmt=None, style='%', sd_id='sd'):
fmt = '<%(priority)s>1 %(asctime)s.%(msecs)dZ {hostname} {app_name} {proc_id} %(name)s [log l="%(levelname)s"]%(struct_data)s%(message)s'.format(
app_name=Config["logging"]["app_name"],
hostname=socket.gethostname(),
proc_id=os.getpid(),
)
# Initialize formatter
super().__init__(fmt=fmt, datefmt='%Y-%m-%dT%H:%M:%S', style=style, sd_id=sd_id)
# Convert time to GMT
self.converter = time.gmtime
class SyslogRFC5424microFormatter(StructuredDataFormatter):
"""
It implements the RFC 5424 syslog formatting with microsecond timestamp precision (aka format ``5micro``).
"""
empty_sd = "-"
def __init__(self, fmt=None, datefmt=None, style='%', sd_id='sd'):
fmt = '<%(priority)s>1 %(asctime)sZ {hostname} {app_name} {proc_id} %(name)s [log l="%(levelname)s"]%(struct_data)s%(message)s'.format(
app_name=Config["logging"]["app_name"],
hostname=socket.gethostname(),
proc_id=os.getpid(),
)
super().__init__(fmt=fmt, datefmt='%Y-%m-%dT%H:%M:%S.%f', style=style, sd_id=sd_id)
self.converter = time.gmtime
class AsyncIOHandler(logging.Handler):
"""
A logging handler similar to a standard `logging.handlers.SocketHandler` that utilizes `asyncio`.
It implements a queue for decoupling logging from networking. The networking is fully event-driven via `asyncio` mechanisms.
"""
def __init__(self, loop, family, sock_type, address, facility=logging.handlers.SysLogHandler.LOG_LOCAL1):
logging.Handler.__init__(self)
self._family = family
self._type = sock_type
self._address = address
self._loop = loop
self._socket = None
self._reset()
self._queue = queue.Queue()
self._loop.call_soon(self._connect, self._loop)
def _reset(self):
self._write_ready = False
if self._socket is not None:
self._loop.remove_writer(self._socket)
self._loop.remove_reader(self._socket)
self._socket.close()
self._socket = None
def _connect(self, loop):
self._reset()
try:
self._socket = socket.socket(self._family, self._type)
self._socket.setblocking(0)
self._socket.connect(self._address)
except Exception as e:
print("Error when opening syslog connection to '{}'".format(self._address), e, file=sys.stderr)
return
self._loop.add_writer(self._socket, self._on_write)
self._loop.add_reader(self._socket, self._on_read)
def _on_write(self):
self._write_ready = True
self._loop.remove_writer(self._socket)
while not self._queue.empty():
msg = self._queue.get_nowait()
try:
self._socket.sendall(msg)
except Exception as e:
# Contingency dump when the socket is not ready
print(msg.decode("utf-8"), file=sys.stderr)
print(
"Error when writing to syslog '{}': {}".format(self._address, e),
traceback.format_exc(),
sep="\n",
file=sys.stderr
)
def _on_read(self):
try:
_ = self._socket.recvfrom(1024)
# We receive "something"... let's ignore that!
return
except Exception as e:
print("Error on the syslog socket '{}'".format(self._address), e, file=sys.stderr)
# Close a socket - there is no reason for reading or socket is actually closed
self._reset()
def emit(self, record):
"""
This is the entry point for log entries.
"""
try:
msg = self.format(record).encode('utf-8')
if self._write_ready:
try:
self._socket.sendall(msg)
except Exception as e:
print("Error when writing to syslog '{}'".format(self._address), e, file=sys.stderr)
self._enqueue(msg)
else:
self._enqueue(msg)
except Exception as e:
print("Error when emit to syslog '{}'".format(self._address), e, file=sys.stderr)
self.handleError(record)
def _enqueue(self, record):
self._queue.put(record)
_RESET_SEQ = "\033[0m"
_COLOR_SEQ = "\033[1;%dm"
_BOLD_SEQ = "\033[1m" |
teskalabs__asab | storage.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | teskalabs__asab/old_docs/asab/storage.rst | [
"teskalabs__asab/asab/storage/mongodb.py",
"teskalabs__asab/asab/storage/upsertor.py",
"teskalabs__asab/asab/storage/service.py",
"teskalabs__asab/asab/storage/inmemory.py",
"teskalabs__asab/asab/storage/elasticsearch.py"
] | teskalabs__asab/asab/storage | Storage
The ASAB's Storage Service supports data storage in-memory or in
dedicated document databases, including MongoDB and ElasticSearch.
Configuration
First, specify the storage type in the configuration. The options for
the storage type are:
- `inmemory`: Collects data directly in memory
- `mongodb`: Collects data using a MongoDB database. Depends on the pymongo and motor libraries.
- `elasticsearch`: Collects data using an ElasticSearch database. Depends on the aiohttp library.
Storage Service provides a unified interface for accessing and
manipulating collections across multiple database technologies.
[asab:storage]
type=mongodb
To access the storage, simply add asab.storage.Module when initializing
the application and register the service.
class MyApplication(asab.Application):

    async def initialize(self):
        self.add_module(asab.storage.Module)

    async def main(self):
        storage = self.get_service("asab.StorageService")
Manipulation with databases
Upsertor
Upsertor is an object that works like a pointer to the specified
collection and optionally to an object ID. It is used for inserting new
objects, updating existing objects and deleting them.
u = storage.upsertor("test-collection")
The StorageService.upsertor() method creates an upsertor object
associated with the specified collection. It takes collection as an
argument and can have two parameters obj_id and version, which are used
for getting an existing object by its ID and version.
Inserting an object
For inserting an object to the collection, use the Upsertor.set()
method.
u.set("key", "value")
To execute these procedures, simply run the Upsertor.execute() coroutine
method, which commits the upsertor data to the storage and returns the
ID of the object. Since it is a coroutine, it must be awaited.
object_id = await u.execute()
The Upsertor.execute() method has optional parameters custom_data and
event_type, which are used for webhook requests.
object_id = await u.execute(
custom_data= {"foo": "bar"},
event_type="object_created"
)
Getting a single object
For getting a single object, use StorageService.get() coroutine method
that takes two arguments collection and obj_id and finds an object by
its ID in collection.
obj = await storage.get(collection="test-collection", obj_id=object_id)
print(obj)
When the requested object is not found in the collection, the method
raises KeyError. Remember to handle this exception properly when using
databases in your services and prevent them from crashing!
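A minimal sketch of such handling (the fallback behaviour is illustrative):
try:
    obj = await storage.get(collection="test-collection", obj_id=object_id)
except KeyError:
    # The object was not found; fall back instead of letting the service crash.
    obj = None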
Note
MongoDB storage service in addition provides a coroutine method get_by()
which is used for accessing an object by finding its key-value pair.
obj = await storage.get_by(database="test-collection", key="key", value="value")
Updating an object
For updating an object, first obtain the upsertor specifying its obj_id
and version.
u = storage.upsertor(
collection="test-collection",
obj_id=object_id,
version=obj['_v']
)
We strongly recommend reading the version from the object as shown above.
That creates a soft lock on the record. It means that if the object is
updated by another component in the meantime, your upsertor will fail and
you should retry the whole operation (see the sketch below). New objects
should have a version set to 0, which is the default.
After obtaining an upsertor, you can update the object via the
Upsertor.set() coroutine.
u.set("key", "new_value")
object_id = await u.execute()
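A sketch of such a retry loop follows. The retry limit is an arbitrary choice, and the assumption that a conflicting update surfaces as KeyError comes from the MongoDB upsertor shown later in this document:
for _ in range(3):  # illustrative retry limit
    obj = await storage.get(collection="test-collection", obj_id=object_id)
    u = storage.upsertor(
        collection="test-collection",
        obj_id=object_id,
        version=obj['_v']
    )
    u.set("key", "new_value")
    try:
        object_id = await u.execute()
        break
    except KeyError:
        # The object was modified in the meantime; reload it and retry.
        continue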
Deleting an object
For deleting an object from database, use the StorageService.delete()
coroutine method which takes arguments collection and obj_id, deletes
the object and returns its ID.
deleted_id = await storage.delete("test-collection", object_id)
Storing data in memory
If the option inmemory is set, ASAB will store data in its own memory.
In particular, asab.StorageService is initialized with an attribute
InMemoryCollections, which is a dictionary in which all the collections
are stored.
Note
You can go through all the databases directly by accessing
InMemoryCollections attribute, although we do not recommend that.
import pprint
storage = self.get_service("asab.StorageService")
pprint.pprint(storage.InMemoryCollections, indent=2)
Storing data in MongoDB
If the option mongodb is set, ASAB will store data in MongoDB database.
ASAB uses motor library which provides non-blocking MongoDB driver for
asyncio.
You can specify the database name and URL for MongoDB in config file
(the following example is the default configuration):
[asab:storage]
type=mongodb
mongodb_uri=mongodb://localhost:27017
mongodb_database=asabdb
You can use all the methods from the abstract class. MongoDB Storage
class provides in addition two methods, StorageService.get_by() and
StorageService.collection().
The method StorageService.get_by() is used in the same way as
StorageService.get() except that it takes the arguments key and value
instead of obj_id.
obj = await storage.get_by(database="test-collection", key="key", value="value")
The method collection() is used for accessing the database directly. It
takes collection as the argument and returns
motor.motor_asyncio.AsyncIOMotorCollection object, which can be used for
calling MongoDB directives.
collection = await storage.collection("test-collection")
cursor = collection.find({})
while await cursor.fetch_next:
    data = cursor.next_object()
    pprint.pprint(data)
The full list of methods suitable for this object is described in the
official documentation.
Storing data in ElasticSearch
When using ElasticSearch, add configurations for URL, username and
password.
[asab:storage]
type=elasticsearch
elasticsearch_url=http://localhost:9200/
elasticsearch_username=JohnDoe
elasticsearch_password=lorem_ipsum_dolor?sit_amet!2023
You can also specify the refreshing parameter and scroll timeout for
ElasticSearch Scroll API.
[asab:storage]
refresh=true
scroll_timeout=1m
ElasticSearch Storage provides in addition other methods for creating
index templates, mappings etc (see the Reference section).
Encryption and decryption
Data stored in the database can be encrypted using an algorithm that
adheres to the Advanced Encryption Standard (AES).
AES Key settings
In order to use encryption, first make sure you have the cryptography
package installed. Then specify the AES Key in the config file.
[asab:storage]
aes_key=random_key_string
Note
The AES Key is used as both an encryption and decryption key. It is
recommended to keep it in a separate configuration file that is not
exposed anywhere publicly.
The actual binary AES Key is obtained from the aes_key specified in the
config file by encoding and hashing it using the standard hashlib
algorithms, so do not worry about the length and type of the key.
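As a sketch of what such a derivation may look like (an illustration, not necessarily the exact ASAB implementation):
import hashlib

aes_key_config = "random_key_string"  # the aes_key value from the configuration above
aes_key = hashlib.sha256(aes_key_config.encode("utf-8")).digest()  # 32 bytes, usable as an AES-256 key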
Encrypting data
The Upsertor.set() method has an optional boolean parameter encrypt for
encrypting the data before they are stored. Only values of the type
bytes can be encrypted. If you want to encrypt other values, encode them
first.
message = "This is a super secret message!"
number = 2023
message_binary = message.encode("ascii")
number_binary = str(number).encode("ascii")
u.set("message", message_binary, encrypt=True)
u.set("number", number_binary, encrypt=True)
object_id = await u.execute()
Decrypting data
The StorageService.get() coroutine method has an optional parameter
decrypt which takes an iterable object (e.g. a list, tuple, set, ...)
with the names of keys whose values are to be decrypted.
data = await storage.get(
collection="test-collection",
obj_id=object_id,
decrypt=["message", "number"]
)
If some of the keys to be decrypted are missing in the required
document, the method will ignore them and continue.
Note
Data that has been encrypted can be identified by the prefix "$aes-cbc$"
and are stored in a binary format.
Under the hood
For encrypting data, we use the certified symmetric AES-CBC algorithm.
In fact, the abstract base class StorageServiceABC provides two methods
aes_encrypt() and aes_decrypt() that are called automatically in
Upsertor.set() and StorageService.get() methods when the parameter
encrypt or decrypt is specified.
AES-CBC is a mode of operation for the Advanced Encryption Standard
(AES) algorithm that provides confidentiality and integrity for data. In
AES-CBC, the plaintext is divided into blocks of fixed size (usually 128
bits), and each block is encrypted using the AES algorithm with a secret
key.
CBC stands for "Cipher Block Chaining" and it is a technique that adds
an extra step to the encryption process to ensure that each ciphertext
block depends on the previous one. This means that any modification to
the ciphertext will produce a completely different plaintext after
decryption.
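To make the mode of operation more concrete, here is a small, self-contained sketch of AES-CBC encryption and decryption using the cryptography package. It only illustrates the principle (including a naive padding scheme) and is not the ASAB implementation itself:
import os
import hashlib
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = hashlib.sha256(b"random_key_string").digest()  # 256-bit key derived from a passphrase
iv = os.urandom(16)                                  # fresh initialization vector per message

plaintext = b"This is a super secret message!"
padded = plaintext + b"\x00" * (-len(plaintext) % 16)  # naive zero padding, for illustration only

encryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
ciphertext = encryptor.update(padded) + encryptor.finalize()

decryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).decryptor()
recovered = decryptor.update(ciphertext) + decryptor.finalize()
assert recovered.rstrip(b"\x00") == plaintext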
The algorithm is a symmetric cipher, which is suitable for encrypting
large amounts of data. It requires much less computation power than
asymmetric ciphers and is much more useful for bulk encrypting large
amounts of data. | import datetime
import typing
import motor.motor_asyncio
import pymongo
import bson
import asab
from.exceptions import DuplicateError
from.service import StorageServiceABC
from.upsertor import UpsertorABC
asab.Config.add_defaults(
{
'asab:storage': {
'mongodb_uri':'mongodb://localhost:27017',
'mongodb_database': 'asabdb',
}
}
)
class StorageService(StorageServiceABC):
'''
StorageService for MongoDB. Depends on `pymongo` and `motor`.
'''
def __init__(self, app, service_name, config_section_name='asab:storage'):
super().__init__(app, service_name)
self.Client = motor.motor_asyncio.AsyncIOMotorClient(asab.Config.get(config_section_name,'mongodb_uri'))
self.Database = self.Client.get_database(
asab.Config.get(config_section_name,'mongodb_database'),
codec_options=bson.codec_options.CodecOptions(tz_aware=True, tzinfo=datetime.timezone.utc),
)
assert self.Database is not None
def upsertor(self, collection: str, obj_id=None, version=0):
return MongoDBUpsertor(self, collection, obj_id, version)
async def get(self, collection: str, obj_id, decrypt=None) -> dict:
coll = self.Database[collection]
ret = await coll.find_one({'_id': obj_id})
if ret is None:
raise KeyError("NOT-FOUND")
if decrypt is not None:
for field in decrypt:
if field in ret:
ret[field] = self.aes_decrypt(ret[field])
return ret
async def get_by(self, collection: str, key: str, value, decrypt=None) -> dict:
coll = self.Database[collection]
ret = await coll.find_one({key: value})
if ret is None:
raise KeyError("NOT-FOUND")
if decrypt is not None:
for field in decrypt:
if field in ret:
ret[field] = self.aes_decrypt(ret[field])
return ret
async def collection(self, collection: str) -> motor.motor_asyncio.AsyncIOMotorCollection:
"""
Get collection. Useful for custom operations.
Args:
collection: Collection to get.
Returns:
`AsyncIOMotorCollection` object connected to the queried database.
Examples:
>>> coll = await storage.collection("test-collection")
>>> cursor = coll.find({})
>>> while await cursor.fetch_next:
... obj = cursor.next_object()
... pprint.pprint(obj)
"""
return self.Database[collection]
async def delete(self, collection: str, obj_id):
coll = self.Database[collection]
ret = await coll.find_one_and_delete({'_id': obj_id})
if ret is None:
raise KeyError("NOT-FOUND")
return ret['_id']
class MongoDBUpsertor(UpsertorABC):
@classmethod
def generate_id(cls):
return bson.objectid.ObjectId()
async def execute(self, custom_data: typing.Optional[dict] = None, event_type: typing.Optional[str] = None):
id_name = self.get_id_name()
addobj = {}
if len(self.ModSet) > 0:
addobj['$set'] = self.ModSet
if len(self.ModInc) > 0:
addobj['$inc'] = self.ModInc
if len(self.ModPush) > 0:
addobj['$push'] = {k: {'$each': v} for k, v in self.ModPush.items()}
if len(self.ModUnset) > 0:
addobj['$unset'] = {k: "" for k in self.ModUnset}
filtr = {}
if self.ObjId is not None:
filtr[id_name] = self.ObjId
else:
# We are going to insert a new object without explicit Id
assert (self.Version == 0) or (self.Version is None)
if self.Version is not None:
filtr['_v'] = int(self.Version)
# First wave (adding stuff)
if len(addobj) > 0:
coll = self.Storage.Database[self.Collection]
try:
ret = await coll.find_one_and_update(
filtr,
update=addobj,
upsert=True if (self.Version == 0) or (self.Version is None) else False,
return_document=pymongo.collection.ReturnDocument.AFTER
)
except pymongo.errors.DuplicateKeyError as e:
if hasattr(e, "details"):
raise DuplicateError("Duplicate key error: {}".format(e), self.ObjId, key_value=e.details.get("keyValue"))
else:
raise DuplicateError("Duplicate key error: {}".format(e), self.ObjId)
if ret is None:
# Object might have been changed in the meantime
raise KeyError("NOT-FOUND")
self.ObjId = ret[id_name]
# for k, v in self.ModPull.items():
# o = obj.pop(k, None)
# if o is None: o = list()
# for x in v:
# try:
# o.remove(x)
# except ValueError:
# pass
# obj[k] = o
if self.Storage.WebhookURIs is not None:
webhook_data = {
"collection": self.Collection,
}
if custom_data is not None:
webhook_data["custom"] = custom_data
if event_type is not None:
webhook_data["event_type"] = event_type
# Add upsertor data; do not include fields that start with "__"
upsertor_data = {
"id_field_name": id_name,
"id": self.ObjId,
"_v": int(self.Version),
}
if len(self.ModSet) > 0:
upsertor_data["set"] = {k: v for k, v in self.ModSet.items() if not k.startswith("__")}
if len(self.ModInc) > 0:
upsertor_data["inc"] = {k: v for k, v in self.ModInc.items() if not k.startswith("__")}
if len(self.ModPush) > 0:
upsertor_data["push"] = {k: v for k, v in self.ModPush.items() if not k.startswith("__")}
if len(self.ModUnset) > 0:
upsertor_data["unset"] = {k: v for k, v in self.ModUnset.items() if not k.startswith("__")}
webhook_data["upsertor"] = upsertor_data
await self.webhook(webhook_data)
return self.ObjId
import abc
import json
import urllib.parse
import uuid
import hashlib
import datetime
import logging
import asab.web.rest.json
import http.client
import typing
#
L = logging.getLogger(__name__)
#
class UpsertorABC(abc.ABC):
def __init__(self, storage, collection, obj_id, version=None):
self.Storage = storage
self.Collection = collection
self.ObjId = obj_id
self.Version = version
now = datetime.datetime.now(datetime.timezone.utc)
self.ModSet = {
'_m': now, # Set the modification datetime
}
if version == 0:
self.ModSet['_c'] = now # Set the creation datetime
self.ModUnset = {}
self.ModInc = {
'_v': 1, # Increment '_v' at every change
}
self.ModPush = {}
self.ModPull = {}
self.WebhookResponseData = {}
def get_id_name(self):
return "_id"
@classmethod
def generate_id(cls) -> bytes:
"""
Generate a unique ID string using a combination of a random UUID and a SHA-256 hash.
Returns:
A string representation of the generated ID.
"""
m = hashlib.sha256()
m.update(uuid.uuid4().bytes)
return m.digest()
def set(self, objField, value, encrypt=False, encrypt_iv=None):
"""
Add key and value to the upsertor.
Args:
objField: Key of the object.
value: Value of the object.
encrypt: Allow encryption.
encrypt_iv: Custom initialization vector.
"""
if encrypt:
value = self.Storage.aes_encrypt(value, iv=encrypt_iv)
self.ModSet[objField] = value
def unset(self, obj_field):
'''
Scalar unset
'''
self.ModUnset[obj_field] = ""
def increment(self, field_name, amount=1):
'''
Scalar increment
'''
self.ModInc[field_name] = amount
def decrement(self, field_name, amount=1):
'''
Scalar decrement
'''
return self.increment(field_name, -amount)
def push(self, field_name, value):
'''
Push an item into a list
'''
if self.ModPush.get(field_name) is None:
self.ModPush[field_name] = []
self.ModPush[field_name].append(value)
def pull(self, field_name, value):
'''
Pull an item from a list
'''
if self.ModPull.get(field_name) is None:
self.ModPull[field_name] = []
self.ModPull[field_name].append(value)
@abc.abstractmethod
async def execute(self, custom_data: typing.Optional[dict] = None, event_type: typing.Optional[str] = None):
"""
Commit upsertor data to the storage. Afterwards, send a webhook request with upsertion details.
Args:
custom_data: Custom execution data. Included in webhook payload.
event_type: Event type included in webhook payload.
Raises:
DuplicateError: Raised if there is a colliding object already stored in a storage.
"""
pass
async def webhook(self, data: dict):
# TODO: add docstring
assert self.Storage.WebhookURIs is not None
json_dump = asab.web.rest.json.JSONDumper(pretty=False)(data)
for uri in self.Storage.WebhookURIs:
self.WebhookResponseData[uri] = await self.Storage.ProactorService.execute(
self._webhook, json_dump, uri, self.Storage.WebhookAuth)
def _webhook(self, data, uri, auth=None):
u = urllib.parse.urlparse(uri)
if u.scheme == "https":
conn = http.client.HTTPSConnection(u.netloc)
else:
conn = http.client.HTTPConnection(u.netloc)
headers = {"Content-Type": "application/json"}
if auth is not None:
headers["Authorization"] = auth
try:
conn.request("PUT", uri, data, headers)
response = conn.getresponse()
if response.status // 100 != 2:
text = response.read()
L.error(
"Webhook endpoint responded with {}: {}".format(response.status, text),
struct_data={"uri": uri})
return
self.WebhookResponseData = json.load(response)
except ConnectionRefusedError:
L.error("Webhook call failed: Connection refused.", struct_data={"uri": uri})
return
except json.decoder.JSONDecodeError as e:
L.error("Failed to decode JSON response from webhook: {}".format(str(e)), struct_data={"uri": uri})
except Exception as e:
L.error("Webhook call failed with {}: {}".format(type(e).__name__, str(e)), struct_data={"uri": uri})
finally:
conn.close()
import abc
import secrets
import hashlib
import logging
import asab
import re
try:
import cryptography.hazmat.primitives.ciphers
import cryptography.hazmat.primitives.ciphers.algorithms
import cryptography.hazmat.primitives.ciphers.modes
except ModuleNotFoundError:
cryptography = None
#
L = logging.getLogger(__name__)
#
ENCRYPTED_PREFIX = b"$aes-cbc$"
class StorageServiceABC(asab.Service):
"""
An abstract class for the Storage Service.
"""
def __init__(self, app, service_name):
super().__init__(app, service_name)
self.WebhookURIs = asab.Config.get("asab:storage:changestream", "webhook_uri", fallback="") or None
if self.WebhookURIs is not None:
self.WebhookURIs = [uri for uri in re.split(r"\s+", self.WebhookURIs) if len(uri) > 0]
try:
self.ProactorService = app.get_service("asab.ProactorService")
except KeyError as e:
raise Exception("Storage webhooks require ProactorService") from e
self.WebhookAuth = asab.Config.get("asab:storage:changestream", "webhook_auth", fallback="") or None
# Specify a non-empty AES key to enable AES encryption of selected fields
self._AESKey = asab.Config.get("asab:storage", "aes_key", fallback="")
if len(self._AESKey) > 0:
if cryptography is None:
raise ModuleNotFoundError(
"You are using storage encryption without 'cryptography' installed. "
"Please run 'pip install cryptography' "
"or install asab with'storage_encryption' optional dependency.")
self._AESKey = hashlib.sha256(self._AESKey.encode("utf-8")).digest()
else:
self._AESKey = None
@abc.abstractmethod
def upsertor(self, collection: str, obj_id=None, version: int = 0) -> None:
"""
Create an upsertor object for the specified collection.
If updating an existing object, please specify its `obj_id` and also `version` that you need to read from a storage upfront.
If `obj_id` is None, we assume that you want to insert a new object and generate its new `obj_id`, `version` should be set to 0 (default) in that case.
If you want to insert a new object with a specific `obj_id`, specify `obj_id` and set a version to 0.
- If there will be a colliding object already stored in a storage, `execute()` method will fail on `DuplicateError`.
Args:
collection: Name of collection to work with
obj_id: Primary identification of an object in the storage (e.g. primary key)
version: Specify a current version of the object and hence prevent byzantine faults. \
You should always read the version from the storage upfront, prior using an upsertor. \
That creates a soft lock on the record. It means that if the object is updated by other \
component in meanwhile, your upsertor will fail and you should retry the whole operation. \
The new objects should have a `version` set to 0.
"""
pass
@abc.abstractmethod
async def get(self, collection: str, obj_id, decrypt: bool = None) -> dict:
"""
Get object from collection by its ID.
Args:
collection: Collection to get from.
obj_id: Object identification.
decrypt: Set of fields to decrypt.
Returns:
The object retrieved from a storage.
Raises:
KeyError: Raised if `obj_id` is not found in `collection`.
"""
pass
@abc.abstractmethod
async def get_by(self, collection: str, key: str, value, decrypt=None) -> dict:
"""
Get object from collection by its key and value.
Args:
collection: Collection to get from
key: Key to filter on
value: Value to filter on
decrypt: Set of fields to decrypt
Returns:
The object retrieved from a storage.
Raises:
KeyError: If object {key: value} not found in `collection`
"""
pass
@abc.abstractmethod
async def delete(self, collection: str, obj_id):
"""
Delete object from collection.
Args:
collection: Collection to get from
obj_id: Object identification
Returns:
ID of the deleted object.
Raises:
KeyError: Raised when obj_id cannot be found in collection.
"""
pass
def aes_encrypt(self, raw: bytes, iv: bytes = None) -> bytes:
"""
Take an array of bytes and encrypt it using AES-CBC.
Args:
raw: The data to be encrypted.
iv: AES-CBC initialization vector, 16 bytes long. If left empty, a random 16-byte array will be used.
Returns:
The encrypted data.
Raises:
TypeError: The data are not in binary format.
"""
block_size = cryptography.hazmat.primitives.ciphers.algorithms.AES.block_size // 8
if self._AESKey is None:
raise RuntimeError(
"No aes_key specified in asab:storage configuration. "
"If you want to use encryption, specify a non-empty aes_key."
)
if not isinstance(raw, bytes):
if isinstance(raw, str):
raise TypeError("String objects must be encoded before encryption")
else:
raise TypeError("Only 'bytes' objects can be encrypted")
# Pad the text to fit the blocks
pad_length = -len(raw) % block_size
if pad_length != 0:
raw = raw + b"\00" * pad_length
if iv is None:
iv = secrets.token_bytes(block_size)
algorithm = cryptography.hazmat.primitives.ciphers.algorithms.AES(self._AESKey)
mode = cryptography.hazmat.primitives.ciphers.modes.CBC(iv)
cipher = cryptography.hazmat.primitives.ciphers.Cipher(algorithm, mode)
encryptor = cipher.encryptor()
encrypted = ENCRYPTED_PREFIX + iv + (encryptor.update(raw) + encryptor.finalize())
return encrypted
def aes_decrypt(self, encrypted: bytes) -> bytes:
"""
Decrypt encrypted data using AES-CBC.
Args:
encrypted: The encrypted data to decrypt. It must start with b"$aes-cbc$" prefix, followed by one-block-long initialization vector.
Returns:
The decrypted data.
"""
block_size = cryptography.hazmat.primitives.ciphers.algorithms.AES.block_size // 8
if self._AESKey is None:
raise RuntimeError(
"No aes_key specified in asab:storage configuration. "
"If you want to use encryption, specify a non-empty aes_key."
)
if not isinstance(encrypted, bytes):
raise TypeError("Only values of type 'bytes' can be decrypted")
# Strip the prefix
if not encrypted.startswith(ENCRYPTED_PREFIX):
raise ValueError("Encrypted data must start with {!r} prefix".format(ENCRYPTED_PREFIX))
encrypted = encrypted[len(ENCRYPTED_PREFIX):]
# Separate the initialization vector
iv, encrypted = encrypted[:block_size], encrypted[block_size:]
algorithm = cryptography.hazmat.primitives.ciphers.algorithms.AES(self._AESKey)
mode = cryptography.hazmat.primitives.ciphers.modes.CBC(iv)
cipher = cryptography.hazmat.primitives.ciphers.Cipher(algorithm, mode)
decryptor = cipher.decryptor()
raw = decryptor.update(encrypted) + decryptor.finalize()
# Strip padding
raw = raw.rstrip(b"\x00")
return raw
def encryption_enabled(self) -> bool:
"""
Check if AESKey is not empty.
Returns:
True if AESKey is not empty.
"""
return self._AESKey is not None
import typing
from .service import StorageServiceABC
from .upsertor import UpsertorABC
from .exceptions import DuplicateError
class InMemoryUpsertor(UpsertorABC):
def __init__(self, storage, collection, obj_id, version=None):
super().__init__(storage, collection, obj_id, version)
if self.ObjId is None:
# generate a random unique binary ID
self.ObjId = self.generate_id()
async def execute(self, custom_data: typing.Optional[dict] = None, event_type: typing.Optional[str] = None) -> typing.Union[str, bytes]:
"""Commit the changes prepared in upsertor.
:custom_data (dict, optional): Not implemented yet. Defaults to None.
:event_type (str, optional): Not implemented yet. Defaults to None.
Raises: :RuntimeError: Raised if the object ID was not found in the previous version.
Returns:
:str | bytes: ID of the created or updated document.
"""
# TODO: Implement webhook call
id_name = self.get_id_name()
# Get the object
if self.Version == 0:
obj = {
id_name: self.ObjId
}
self.Storage._set(self.Collection, self.ObjId, obj)
else:
obj = await self.Storage.get(self.Collection, self.ObjId)
if obj is None:
if self.Version is None:
obj = {
id_name: self.ObjId
}
self.Storage._set(self.Collection, self.ObjId, obj)
else:
raise RuntimeError("Previous version of '{}' not found".format(self.ObjId))
for k, v in self.ModSet.items():
obj[k] = v
for k, v in self.ModUnset.items():
obj.pop(k, None)
for k, v in self.ModInc.items():
o = obj.pop(k, 0)
obj[k] = o + v
for k, v in self.ModPush.items():
o = obj.pop(k, None)
if o is None:
o = list()
o.extend(v)
obj[k] = o
for k, v in self.ModPull.items():
o = obj.pop(k, None)
if o is None:
o = list()
for x in v:
try:
o.remove(x)
except ValueError:
pass
obj[k] = o
return self.ObjId
class StorageService(StorageServiceABC):
def __init__(self, app, service_name):
super().__init__(app, service_name)
self.InMemoryCollections = {}
def upsertor(self, collection: str, obj_id=None, version=0) -> InMemoryUpsertor:
"""Obtain an in-memory upsertor for given collection and possibly for the specified object.
:collection (str): The name of the collection.
:obj_id (_type_, optional): The ID of the document to retrieve. Defaults to None.
:version (int, optional): The version of the collection. Defaults to 0.
Returns:
:InMemoryUpsertor: Upsertor for given collection.
"""
return InMemoryUpsertor(self, collection, obj_id, version)
async def get(self, collection: str, obj_id: typing.Union[str, bytes], decrypt=None) -> dict:
"""Retrieve a document from an in-memory collection by its ID.
:collection (str): The name of the collection to retrieve the document from.
:obj_id (str | bytes): The ID of the document to retrieve.
:decrypt (_type_, optional): A list of field names to decrypt. Defaults to None.
Returns:
:dict: A dictionary representing the retrieved document. If `decrypt` is not None, the specified fields in the document are decrypted using the AES decryption algorithm.
"""
coll = self.InMemoryCollections[collection]
data = coll[obj_id]
if decrypt is not None:
for field in decrypt:
if field in data:
data[field] = self.aes_decrypt(data[field])
return data
async def get_by(self, collection: str, key: str, value, decrypt=None) -> dict:
"""
Retrieve a document from an in-memory collection by key and value. Not implemented yet.
Raises:
:NotImplementedError: Not implemented on InMemoryStorage
"""
raise NotImplementedError()
async def delete(self, collection: str, obj_id):
"""
Delete a document from an in-memory collection.
:param collection: Collection to delete from
:param obj_id: Object identification
Raises:
:KeyError: If `obj_id` not found in `collection`
"""
coll = self.InMemoryCollections[collection]
del coll[obj_id]
def _set(self, collection: str, obj_id, obj):
try:
coll = self.InMemoryCollections[collection]
except KeyError:
coll = {}
self.InMemoryCollections[collection] = coll
nobj = coll.setdefault(obj_id, obj)
if nobj != obj:
raise DuplicateError("Already exists", obj_id)
import time
import json
import aiohttp
import logging
import datetime
import urllib.parse
import typing
from .service import StorageServiceABC
from .upsertor import UpsertorABC
from ..config import Config
from ..tls import SSLContextBuilder
import ssl
#
L = logging.getLogger(__name__)
#
Config.add_defaults(
{
'asab:storage': {
# You may specify multiple ElasticSearch nodes by e.g. http://es01:9200,es02:9200,es03:9200/
'elasticsearch_url': 'http://localhost:9200/',
'elasticsearch_username': '',
'elasticsearch_password': '',
'elasticsearch_api_key': '',
# make the operation visible to search directly, options: true, false, wait_for
# see: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html
'refresh': 'true',
'scroll_timeout': '1m',
# For SSL options such as `cafile`, please refer to tls.py
}
}
)
class StorageService(StorageServiceABC):
"""
StorageService for Elastic Search. Depends on `aiohttp` library.
"""
def __init__(self, app, service_name, config_section_name='asab:storage'):
super().__init__(app, service_name)
self.Loop = app.Loop
self.URL = Config.get(config_section_name, 'elasticsearch_url')
parsed_url = urllib.parse.urlparse(self.URL)
self.ServerUrls = [
urllib.parse.urlunparse((parsed_url.scheme, netloc, parsed_url.path, None, None, None))
for netloc in parsed_url.netloc.split(',')
]
self.Refresh = Config.get(config_section_name, 'refresh')
self.ScrollTimeout = Config.get(config_section_name, 'scroll_timeout')
# Authorization: username or API-key
username = Config.get(config_section_name, 'elasticsearch_username')
api_key = Config.get(config_section_name, 'elasticsearch_api_key')
if username != '' and api_key != '':
L.warning("Both username and API key specified. ES Storage service may not function properly. Please choose one option.")
if username == '':
self._auth = None
else:
password = Config.get(config_section_name, 'elasticsearch_password')
self._auth = aiohttp.BasicAuth(login=username, password=password)
self._ClientSession = None
# Create headers for requests
self.Headers = {'Content-Type': 'application/json'}
if api_key != '':
self.Headers['Authorization'] = "ApiKey {}".format(api_key)
self.SSLContextBuilder = SSLContextBuilder(config_section_name)
async def finalize(self, app):
"""
Close the current client session.
"""
if self._ClientSession is not None and not self._ClientSession.closed:
await self._ClientSession.close()
self._ClientSession = None
def session(self):
"""
Get the current client session.
"""
if self._ClientSession is None:
self._ClientSession = aiohttp.ClientSession(auth=self._auth)
elif self._ClientSession.closed:
self._ClientSession = aiohttp.ClientSession(auth=self._auth)
return self._ClientSession
async def is_connected(self) -> bool:
"""Check if the service is connected to ElasticSearch cluster.
Raises:
ConnectionError: Connection failed.
Returns:
bool: True if the service is connected.
"""
for url in self.ServerUrls:
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
async with self.session().request(
method="GET",
url=url,
ssl=ssl_context,
headers=self.Headers,
) as resp:
await self.session().close()
if resp.status not in {200, 201}:
resp = await resp.json()
L.error("Failed to connect to ElasticSearch.", struct_data={
"code": resp.get("status"),
"reason": resp.get("error", {}).get("reason")
})
return False
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise ConnectionError("Failed to connect to '{}'.".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
L.info("Connected to ElasticSearch.", struct_data={"urls": self.ServerUrls})
return True
async def get(self, index: str, obj_id: str, decrypt=None) -> dict:
"""Get object by its index and object ID.
Args:
index (str): Index for the query.
obj_id (str): ID of the object.
decrypt (None): Not implemented yet. Defaults to None.
Raises:
NotImplementedError: Encryption and decryption has not yet been implemented for ECS.
ConnectionError: Connection failed.
ConnectionRefusedError: Authorization required.
KeyError: Object with the ID does not exist.
Returns:
The query result.
"""
if decrypt is not None:
raise NotImplementedError("AES encryption for ElasticSearch not implemented")
for url in self.ServerUrls:
request_url = "{}{}/_doc/{}".format(url, index, obj_id)
try:
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
async with self.session().request(
method="GET",
url=request_url,
ssl=ssl_context,
headers=self.Headers,
) as resp:
if resp.status == 401:
raise ConnectionRefusedError("Response code 401: Unauthorized. Provide authorization by specifying either user name and password or api key.")
elif resp.status not in {200, 201}:
resp = await resp.json()
raise ConnectionError("Failed to retrieve data from ElasticSearch. Got {}: {}".format(
resp.get("status"),
resp.get("error", {}).get("reason")
))
else:
obj = await resp.json()
if not obj.get("found"):
raise KeyError("No existing object with ID {}".format(obj_id))
ret = obj['_source']
ret['_v'] = obj['_version']
ret['_id'] = obj['_id']
return ret
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise ConnectionError("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def get_by(self, collection: str, key: str, value, decrypt=None):
raise NotImplementedError("get_by")
async def delete(self, index: str, _id=None) -> dict:
"""Delete an entire index or document from that index.
Args:
index: Index to delete.
_id: If specified, only document with the ID is deleted.
Raises:
ConnectionRefusedError: Authorization required (status 401)
KeyError: No existing object with ID
ConnectionError: Unexpected status code
Exception: ClientConnectorError
Returns:
The deleted document or message that the entire index was deleted.
"""
for url in self.ServerUrls:
try:
if _id:
request_url = "{}{}/_doc/{}?refresh={}".format(url, index, _id, self.Refresh)
else:
request_url = "{}{}".format(url, index)
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
async with self.session().request(
method="DELETE",
url=request_url,
ssl=ssl_context,
headers=self.Headers
) as resp:
if resp.status == 401:
raise ConnectionRefusedError("Response code 401: Unauthorized. Provide authorization by specifying either user name and password or api key.")
elif resp.status == 404:
raise KeyError("No existing object with ID {}".format(_id))
elif resp.status not in {200, 201}:
raise ConnectionError("Failed to retrieve data from ElasticSearch. Got {}: {}".format(
resp.get("status"),
resp.get("error", {}).get("reason")
))
else:
json_response = await resp.json()
if json_response.get("acknowledged", False):
return json_response
assert json_response["result"] == "deleted", "Document was not deleted"
await self.session().close()
return json_response
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def mapping(self, index: str) -> dict:
"""Retrieve mapping definitions for one index.
:param index: Specified index.
:type index: str
:raise Exception: Connection failed.
Returns:
dict: Mapping definitions for the index.
"""
for url in self.ServerUrls:
request_url = "{}{}/_mapping".format(url, index)
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
async with self.session().request(
method="GET",
url=request_url,
ssl=ssl_context,
headers=self.Headers
) as resp:
obj = await resp.json()
await self.session().close()
return obj
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise ConnectionError("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def get_index_template(self, template_name: str) -> dict:
"""Retrieve ECS Index template for the given template name.
:param template_name: The name of the ECS template to retrieve.
:type template_name: str
:raise Exception: Raised if connection to all server URLs fails.
:return: ElasticSearch Index template.
"""
for url in self.ServerUrls:
request_url = "{}_template/{}?format=json".format(url, template_name)
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
async with self.session().request(
method="GET",
url=request_url,
headers=self.Headers,
ssl=ssl_context,
) as resp:
assert resp.status == 200, "Unexpected response code: {}".format(resp.status)
content = await resp.json()
await self.session().close()
return content
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def put_index_template(self, template_name: str, template: dict) -> dict:
"""Create a new ECS index template.
:param template_name: The name of ECS template.
:param template: Body for the request.
:return: JSON response.
:raise Exception: Raised if connection to all server URLs fails.
"""
for url in self.ServerUrls:
request_url = "{}_template/{}?include_type_name".format(url, template_name)
L.warning("Posting index template into url: {}".format(request_url))
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
async with self.session().request(
method="POST",
url=request_url,
data=json.dumps(template),
headers=self.Headers,
ssl=ssl_context,
) as resp:
assert resp.status == 200, "Unexpected response code: {}".format(resp.status)
resp = await resp.json()
await self.session().close()
return resp
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
return {}
async def reindex(self, previous_index, new_index):
for url in self.ServerUrls:
try:
if url.endswith('/'):
request_url = "{}_reindex".format(url)
else:
request_url = "{}/_reindex".format(url)
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
async with self.session().request(
method="POST",
url=request_url,
headers=self.Headers,
ssl=ssl_context,
data=json.dumps({
"source": {
"index": previous_index,
},
"dest": {
"index": new_index,
}
})
) as resp:
if resp.status != 200:
raise AssertionError(
"Unexpected response code when reindexing: {}, {}".format(
resp.status, await resp.text()
)
)
resp = await resp.json()
await self.session().close()
return resp
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise ConnectionError("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def scroll(self, index: str, body: typing.Optional[dict] = None) -> dict:
"""Retrieve the next batch of results for a scrolling search.
:param index: The index name.
:type index: str
:param body: Custom body for the request. Defaults to None.
:type body: dict
:return: JSON response.
:raise Exception: Raised if connection to all server URLs fails.
"""
if body is None:
body = {
"query": {"bool": {"must": {"match_all": {}}}}
}
scroll_id = None
while True:
for url in self.ServerUrls:
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
if scroll_id is None:
path = "{}/_search?scroll={}".format(
index, self.ScrollTimeout
)
request_body = body
else:
path = "_search/scroll"
request_body = {
"scroll": self.ScrollTimeout,
"scroll_id": scroll_id,
}
request_url = "{}{}".format(url, path)
try:
async with self.session().request(
method="POST",
url=request_url,
json=request_body,
headers=self.Headers,
ssl=ssl_context,
) as resp:
if resp.status != 200:
data = await resp.text()
L.error(
"Failed to fetch data from ElasticSearch: {} from {}\n{}".format(
resp.status, url, data
)
)
break
response_json = await resp.json()
await self.session().close()
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception(
"Failed to connect to '{}'".format(
url
)
)
else:
L.warning(
"Failed to connect to '{}', iterating to another cluster node".format(
url
)
)
scroll_id = response_json.get("_scroll_id")
if scroll_id is None:
break
return response_json
def upsertor(self, index: str, obj_id=None, version: int = 0):
return ElasticSearchUpsertor(self, index, obj_id, version)
async def list(self, index: str, _from: int = 0, size: int = 10000, body: typing.Optional[dict] = None) -> dict:
"""List data matching the index.
:param index: Specified index.
:param _from: Starting document offset. Defaults to 0.
:type _from: int
:param size: The number of hits to return. Defaults to 10000.
:type size: int
:param body: An optional request body. Defaults to None.
:type body: dict
:return: The query search result.
:raise Exception: Raised if connection to all server URLs fails.
"""
if body is None:
body = {
'query': {
'bool': {
'must': {
'match_all': {}
}
}
}
}
for url in self.ServerUrls:
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
request_url = "{}{}/_search?size={}&from={}&version=true".format(url, index, size, _from)
async with self.session().request(
method="GET",
url=request_url,
json=body,
headers=self.Headers,
ssl=ssl_context,
) as resp:
assert resp.status == 200, "Unexpected response code: {}".format(resp.status)
content = await resp.json()
return content
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def count(self, index) -> int:
"""
Get the number of matches for a given index.
:param index: The specified index.
:return: The number of matches for a given index.
:raise Exception: Connection failed.
"""
for url in self.ServerUrls:
try:
count_url = "{}{}/_count".format(url, index)
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
async with self.session().request(
method="GET",
url=count_url,
ssl=ssl_context,
headers=self.Headers
) as resp:
assert resp.status == 200, "Unexpected response code: {}".format(resp.status)
total_count = await resp.json()
return total_count
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def indices(self, search_string=None):
"""
Return high-level information about indices in a cluster, including backing indices for data streams.
:param search_string: A search string. Default to None.
"""
for url in self.ServerUrls:
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
request_url = "{}_cat/indices/{}?format=json".format(url, search_string if search_string is not None else "*")
async with self.session().request(
method="GET",
url=request_url,
ssl=ssl_context,
headers=self.Headers
) as resp:
assert resp.status == 200, "Unexpected response code: {}".format(resp.status)
return await resp.json()
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
async def empty_index(self, index):
'''
Create an empty ECS index.
'''
# TODO: There is an option here to specify settings (e.g. shard number, replica number etc) and mappings here
for url in self.ServerUrls:
if url.startswith('https://'):
ssl_context = self.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
request_url = "{}{}".format(url, index)
async with self.session().request(
method="PUT",
url=request_url,
ssl=ssl_context,
headers=self.Headers
) as resp:
assert resp.status == 200, "Unexpected response code: {}".format(resp.status)
return await resp.json()
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
class ElasticSearchUpsertor(UpsertorABC):
def __init__(self, storage, collection, obj_id, version=None):
super().__init__(storage, collection, obj_id, version)
now = int(time.time())
self.ModSet['_m'] = now
if version == 0:
self.ModSet['_c'] = now # Set the creation timestamp
api_key = Config.get('asab:storage', 'elasticsearch_api_key')
self.Headers = {'Content-Type': 'application/json'}
if api_key != '':
self.Headers['Authorization'] = "ApiKey {}".format(api_key)
@classmethod
def generate_id(cls):
raise NotImplementedError("generate_id")
async def execute(self, custom_data: typing.Optional[dict] = None, event_type: typing.Optional[str] = None):
# TODO: Implement webhook call
if self.ObjId is None:
return await self._insert_new_object()
else:
return await self._update_existing_object()
async def _insert_new_object(self):
upsert_data = {}
if self.Version is None:
self.Version = 0
if len(self.ModSet) > 0:
for k, v in self.ModSet.items():
upsert_data[k] = serialize(self.ModSet[k])
if len(self.ModInc) > 0:
# addobj['$inc'] = self.ModInc
# raise NotImplementedError("yet")
pass
if len(self.ModPush) > 0:
# addobj['$push'] = {k: {'$each': v} for k, v in self.ModPush.items()}
raise NotImplementedError("yet")
# This is an insert of a new document; the ObjId will be generated by ElasticSearch
for url in self.Storage.ServerUrls:
request_url = "{}{}/_doc?refresh={}".format(
url, self.Collection, self.Storage.Refresh
)
if url.startswith('https://'):
ssl_context = self.Storage.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
async with self.Storage.session().request(
method="POST",
url=request_url,
headers=self.Headers,
json=upsert_data,
ssl=ssl_context
) as resp:
if resp.status == 401:
raise ConnectionRefusedError("Response code 401: Unauthorized. Provide authorization by specifying either user name and password or api key.")
elif resp.status not in {200, 201}:
raise ConnectionError("Unexpected response code: {}".format(resp.status))
else:
resp_json = await resp.json()
self.ObjId = resp_json['_id']
await self.Storage.session().close()
return self.ObjId
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.Storage.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
except aiohttp.client_exceptions.ServerDisconnectedError:
raise Exception("Failed to connect to '{}'".format(url))
except ValueError as err:
raise ConnectionError("Both username and API key specified. Please choose one option. {}".format(err))
# except Exception:
# raise Exception("Failed to connect to '{}'".format(url))
async def _update_existing_object(self):
upsert_data = {"doc": {}, "doc_as_upsert": True}
if len(self.ModSet) > 0:
for k, v in self.ModSet.items():
upsert_data["doc"][k] = serialize(self.ModSet[k])
for url in self.Storage.ServerUrls:
if url.startswith('https://'):
ssl_context = self.Storage.SSLContextBuilder.build(ssl.PROTOCOL_TLS_CLIENT)
else:
ssl_context = None
try:
request_url = "{}{}/_update/{}?refresh={}".format(url, self.Collection, self.ObjId, self.Storage.Refresh)
async with self.Storage.session().request(
method="POST",
url=request_url,
json=upsert_data,
headers=self.Headers,
ssl=ssl_context,
) as resp:
if resp.status == 401:
raise ConnectionRefusedError("Response code 401: Unauthorized. Provide authorization by specifying either user name and password or api key.")
elif resp.status not in {200, 201}:
raise ConnectionError("Unexpected response code: {}".format(resp.status))
else:
resp_json = await resp.json()
assert resp_json["result"] == "updated" or resp_json[
"result"] == "created", "Creating/updating was unsuccessful"
await self.Storage.session().close()
return self.ObjId
except aiohttp.client_exceptions.ClientConnectorError:
if url == self.Storage.ServerUrls[-1]:
raise Exception("Failed to connect to '{}'".format(url))
else:
L.warning("Failed to connect to '{}', iterating to another cluster node".format(url))
except aiohttp.client_exceptions.ServerDisconnectedError:
raise Exception("Failed to connect to '{}'".format(url))
def serialize(v):
if isinstance(v, datetime.datetime):
return v.timestamp()
else:
return v |
statsmodels__statsmodels | contingency_tables.rst | Module doc / Directory summarization | Generate documentation for this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/contingency_tables.rst | [
"statsmodels__statsmodels/statsmodels/stats/contingency_tables.py"
] | Contingency tables
Statsmodels supports a variety of approaches for analyzing contingency
tables, including methods for assessing independence, symmetry,
homogeneity, and methods for working with collections of tables from a
stratified population.
The methods described here are mainly for two-way tables. Multi-way
tables can be analyzed using log-linear models. Statsmodels does not
currently have a dedicated API for loglinear modeling, but Poisson
regression in statsmodels.genmod.GLM can be used for this purpose.
A contingency table is a multi-way table that describes a data set in
which each observation belongs to one category for each of several
variables. For example, if there are two variables, one with r levels
and one with c levels, then we have a r × c contingency table. The table
can be described in terms of the number of observations that fall into a
given cell of the table, e.g. T_(ij) is the number of observations that
have level i for the first variable and level j for the second variable.
Note that each variable must have a finite number of levels (or
categories), which can be either ordered or unordered. In different
contexts, the variables defining the axes of a contingency table may be
called categorical variables or factor variables. They may be either
nominal (if their levels are unordered) or ordinal (if their levels are
ordered).
The underlying population for a contingency table is described by a
distribution table P_(i, j). The elements of P are probabilities, and
the sum of all elements in P is 1. Methods for analyzing contingency
tables use the data in T to learn about properties of P.
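As a small worked illustration with made-up counts, a variable with r = 2 levels crossed with a variable with c = 3 levels gives a 2 × 3 table of counts T, and dividing by the total count gives an estimate of P:
import numpy as np
T = np.asarray([[29, 7, 7], [13, 7, 21]])  # hypothetical counts; rows index the first variable, columns the second
print(T[0, 2])      # T_(02): observations with level 0 on the first variable and level 2 on the second
P_hat = T / T.sum() # estimated version of the distribution table P
print(P_hat.sum())  # the estimated probabilities sum to 1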
The statsmodels.stats.Table is the most basic class for working with
contingency tables. We can create a Table object directly from any
rectangular array-like object containing the contingency table cell
counts:
import numpy as np
import pandas as pd
import statsmodels.api as sm
df = sm.datasets.get_rdataset("Arthritis", "vcd").data
tab = pd.crosstab(df['Treatment'], df['Improved'])
tab = tab.loc[:, ["None", "Some", "Marked"]]
table = sm.stats.Table(tab)
Alternatively, we can pass the raw data and let the Table class
construct the array of cell counts for us:
data = df[["Treatment", "Improved"]] table =
sm.stats.Table.from_data(data)
Independence
Independence is the property that the row and column factors occur
independently. Association is the lack of independence. If the joint
distribution is independent, it can be written as the outer product of
the row and column marginal distributions:
P_(ij) = ∑_(k)P_(ik) ⋅ ∑_(k)P_(kj) for all i, j
We can obtain the best-fitting independent distribution for our observed
data, and then view residuals which identify particular cells that most
strongly violate independence:
print(table.table_orig)
print(table.fittedvalues)
print(table.resid_pearson)
In this example, compared to a sample from a population in which the
rows and columns are independent, we have too many observations in the
placebo/no improvement and treatment/marked improvement cells, and too
few observations in the placebo/marked improvement and treated/no
improvement cells. This reflects the apparent benefits of the treatment.
If the rows and columns of a table are unordered (i.e. are nominal
factors), then the most common approach for formally assessing
independence is using Pearson's χ² statistic. It's often useful to look
at the cell-wise contributions to the χ² statistic to see where the
evidence for dependence is coming from.
rslt = table.test_nominal_association()
print(rslt.pvalue)
print(table.chi2_contribs)
For tables with ordered row and column factors, we can use the linear by
linear association test to obtain more power against alternative
hypotheses that respect the ordering. The test statistic for the linear
by linear association test is
∑_(ij)r_(i)c_(j)T_(ij)
where r_(i) and c_(j) are row and column scores. Often these scores are
set to the sequences 0, 1, .... This gives the 'Cochran-Armitage trend
test'.
rslt = table.test_ordinal_association()
print(rslt.pvalue)
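If the default equally spaced scores are not appropriate, custom scores can be passed through the row_scores and col_scores arguments of test_ordinal_association (the argument names come from the method signature; the score values below are made up purely for illustration):
rslt = table.test_ordinal_association(row_scores=np.r_[0, 1], col_scores=np.r_[0, 1, 4])
print(rslt.zscore)
print(rslt.pvalue)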
We can assess the association in an r × c table by constructing a series
of 2 × 2 tables and calculating their odds ratios. There are two ways to
do this. The local odds ratios construct 2 × 2 tables from adjacent row
and column categories.
print(table.local_oddsratios)
taloc = sm.stats.Table2x2(np.asarray([[7, 29], [21, 13]]))
print(taloc.oddsratio)
taloc = sm.stats.Table2x2(np.asarray([[29, 7], [13, 7]]))
print(taloc.oddsratio)
The cumulative odds ratios construct 2 × 2 tables by dichotomizing the
row and column factors at each possible point.
print(table.cumulative_oddsratios)
tab1 = np.asarray([[7, 29 + 7], [21, 13 + 7]])
tacum = sm.stats.Table2x2(tab1)
print(tacum.oddsratio)
tab1 = np.asarray([[7 + 29, 7], [21 + 13, 7]])
tacum = sm.stats.Table2x2(tab1)
print(tacum.oddsratio)
A mosaic plot is a graphical approach to informally assessing dependence
in two-way tables.
from statsmodels.graphics.mosaicplot import mosaic
fig, _ = mosaic(data, index=["Treatment", "Improved"])
Symmetry and homogeneity
Symmetry is the property that P_(i, j) = P_(j, i) for every i and j.
Homogeneity is the property that the marginal distribution of the row
factor and the column factor are identical, meaning that
∑_(j)P_(ij) = ∑_(j)P_(ji) for all i
Note that for these properties to be applicable the table P (and T) must
be square, and the row and column categories must be identical and must
occur in the same order.
To illustrate, we load a data set, create a contingency table, and
calculate the row and column margins. The Table class contains methods
for analyzing r × c contingency tables. The data set loaded below
contains assessments of visual acuity in people's left and right eyes.
We first load the data and create a contingency table.
df = sm.datasets.get_rdataset("VisualAcuity", "vcd").data df =
df.loc[df.gender == "female", :] tab = df.set_index(['left', 'right'])
del tab["gender"] tab = tab.unstack() tab.columns =
tab.columns.get_level_values(1) print(tab)
Next we create a SquareTable object from the contingency table.
sqtab = sm.stats.SquareTable(tab)
row, col = sqtab.marginal_probabilities
print(row)
print(col)
The summary method prints results for the symmetry and homogeneity
testing procedures.
print(sqtab.summary())
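The symmetry and homogeneity tests can also be run individually through the symmetry and homogeneity methods of SquareTable; each returns a bunch with statistic, pvalue and df attributes (a small sketch based on the class shown in the accompanying source):
sym = sqtab.symmetry()
print(sym.statistic, sym.pvalue, sym.df)
hom = sqtab.homogeneity()  # Stuart-Maxwell statistic by default
print(hom.statistic, hom.pvalue, hom.df)
print(sqtab.homogeneity(method="bhapkar").pvalue)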
If we had the individual case records in a dataframe called data, we
could also perform the same analysis by passing the raw data using the
SquareTable.from_data class method.
sqtab = sm.stats.SquareTable.from_data(data[['left', 'right']])
print(sqtab.summary())
A single 2x2 table
Several methods for working with individual 2x2 tables are provided in
the sm.stats.Table2x2 class. The summary method displays several
measures of association between the rows and columns of the table.
table = np.asarray([[35, 21], [25, 58]])
t22 = sm.stats.Table2x2(table)
print(t22.summary())
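Individual measures are also exposed as attributes, for example the odds ratio used above for the local and cumulative tables. The riskratio attribute and the oddsratio_confint method shown below are assumptions about the Table2x2 API; check the class reference for your statsmodels version:
print(t22.oddsratio)
print(t22.riskratio)                # assumed attribute name for the risk ratio
print(t22.oddsratio_confint(0.05))  # assumed method for a 95% confidence interval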
Note that the risk ratio is not symmetric so different results will be
obtained if the transposed table is analyzed.
table = np.asarray([[35, 21], [25, 58]])
t22 = sm.stats.Table2x2(table.T)
print(t22.summary())
Stratified 2x2 tables
Stratification occurs when we have a collection of contingency tables
defined by the same row and column factors. In the example below, we
have a collection of 2x2 tables reflecting the joint distribution of
smoking and lung cancer in each of several regions of China. It is
possible that the tables all have a common odds ratio, even while the
marginal probabilities vary among the strata. The 'Breslow-Day'
procedure tests whether the data are consistent with a common odds
ratio. It appears below as the Test of constant OR. The Mantel-Haenszel
procedure tests whether this common odds ratio is equal to one. It
appears below as the Test of OR=1. It is also possible to estimate the
common odds and risk ratios and obtain confidence intervals for them.
The summary method displays all of these results. Individual results can
be obtained from the class methods and attributes.
data = sm.datasets.china_smoking.load_pandas()
mat = np.asarray(data.data)
tables = [np.reshape(x.tolist(), (2, 2)) for x in mat]
st = sm.stats.StratifiedTable(tables)
print(st.summary()) | """
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of 2x2 contingency tables.
Also contains functions for conducting McNemar's test and Cochran's q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
from statsmodels.tools.decorators import cache_readonly
import numpy as np
from scipy import stats
import pandas as pd
import warnings
from statsmodels import iolib
from statsmodels.tools import sm_exceptions
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if not table.index.equals(table.columns):
ix = list(set(table.index) | set(table.columns))
ix.sort()
table = table.reindex(index=ix, columns=ix, fill_value=0)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch(object):
def __repr__(self):
return "<bunch containing results, print to see contents>"
def __str__(self):
ky = [k for k, _ in self.__dict__.items()]
ky.sort()
m = max([len(k) for k in ky])
tab = []
f = "{:" + str(m) + "} {}"
for k in ky:
tab.append(f.format(k, self.__dict__[k]))
return "\n".join(tab)
class Table(object):
"""
A two-way contingency table.
Parameters
----------
table : array_like
A contingency table.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Attributes
----------
table_orig : array_like
The original table is cached as `table_orig`.
See Also
--------
statsmodels.graphics.mosaicplot.mosaic
scipy.stats.chi2_contingency
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
References
----------
Definitions of residuals:
https://onlinecourses.science.psu.edu/stat504/node/86
"""
def __init__(self, table, shift_zeros=True):
self.table_orig = table
self.table = np.asarray(table, dtype=np.float64)
if shift_zeros and (self.table.min() == 0):
self.table[self.table == 0] = 0.5
def __str__(self):
s = ("A %dx%d contingency table with counts:\n" %
tuple(self.table.shape))
s += np.array_str(self.table)
return s
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array_like
The raw data, from which a contingency table is constructed
using the first two columns.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Returns
-------
A Table instance.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
def test_nominal_association(self):
"""
Assess independence for nominal factors.
Assessment of independence between rows and columns using
chi^2 testing. The rows and columns are treated as nominal
(unordered) categorical variables.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
df : integer
The degrees of freedom of the reference distribution
pvalue : float
The p-value for the test.
"""
statistic = np.asarray(self.chi2_contribs).sum()
df = np.prod(np.asarray(self.table.shape) - 1)
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.df = df
b.pvalue = pvalue
return b
def test_ordinal_association(self, row_scores=None, col_scores=None):
"""
Assess independence between two ordinal variables.
This is the 'linear by linear' association test, which uses
weights or scores to target the test to have more power
against ordered alternatives.
Parameters
----------
row_scores : array_like
An array of numeric row scores
col_scores : array_like
An array of numeric column scores
Returns
-------
A bunch with the following attributes:
statistic : float
The test statistic.
null_mean : float
The expected value of the test statistic under the null
hypothesis.
null_sd : float
The standard deviation of the test statistic under the
null hypothesis.
zscore : float
The Z-score for the test statistic.
pvalue : float
The p-value for the test.
Notes
-----
The scores define the trend to which the test is most sensitive.
Using the default row and column scores gives the
Cochran-Armitage trend test.
"""
if row_scores is None:
row_scores = np.arange(self.table.shape[0])
if col_scores is None:
col_scores = np.arange(self.table.shape[1])
if len(row_scores) != self.table.shape[0]:
msg = ("The length of `row_scores` must match the first " +
"dimension of `table`.")
raise ValueError(msg)
if len(col_scores) != self.table.shape[1]:
msg = ("The length of `col_scores` must match the second " +
"dimension of `table`.")
raise ValueError(msg)
# The test statistic
statistic = np.dot(row_scores, np.dot(self.table, col_scores))
# Some needed quantities
n_obs = self.table.sum()
rtot = self.table.sum(1)
um = np.dot(row_scores, rtot)
u2m = np.dot(row_scores**2, rtot)
ctot = self.table.sum(0)
vn = np.dot(col_scores, ctot)
v2n = np.dot(col_scores**2, ctot)
# The null mean and variance of the test statistic
e_stat = um * vn / n_obs
v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
sd_stat = np.sqrt(v_stat)
zscore = (statistic - e_stat) / sd_stat
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
b = _Bunch()
b.statistic = statistic
b.null_mean = e_stat
b.null_sd = sd_stat
b.zscore = zscore
b.pvalue = pvalue
return b
@cache_readonly
def marginal_probabilities(self):
"""
Estimate marginal probability distributions for the rows and columns.
Returns
-------
row : ndarray
Marginal row probabilities
col : ndarray
Marginal column probabilities
"""
n = self.table.sum()
row = self.table.sum(1) / n
col = self.table.sum(0) / n
if isinstance(self.table_orig, pd.DataFrame):
row = pd.Series(row, self.table_orig.index)
col = pd.Series(col, self.table_orig.columns)
return row, col
@cache_readonly
def independence_probabilities(self):
"""
Returns fitted joint probabilities under independence.
The returned table is outer(row, column), where row and
column are the estimated marginal distributions
of the rows and columns.
"""
row, col = self.marginal_probabilities
itab = np.outer(row, col)
if isinstance(self.table_orig, pd.DataFrame):
itab = pd.DataFrame(itab, self.table_orig.index,
self.table_orig.columns)
return itab
@cache_readonly
def fittedvalues(self):
"""
Returns fitted cell counts under independence.
The returned cell counts are estimates under a model
where the rows and columns of the table are independent.
"""
probs = self.independence_probabilities
fit = self.table.sum() * probs
return fit
@cache_readonly
def resid_pearson(self):
"""
Returns Pearson residuals.
The Pearson residuals are calculated under a model where
the rows and columns of the table are independent.
"""
fit = self.fittedvalues
resids = (self.table - fit) / np.sqrt(fit)
return resids
@cache_readonly
def standardized_resids(self):
"""
Returns standardized residuals under independence.
"""
row, col = self.marginal_probabilities
sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
return sresids
@cache_readonly
def chi2_contribs(self):
"""
Returns the contributions to the chi^2 statistic for independence.
The returned table contains the contribution of each cell to the chi^2
test statistic for the null hypothesis that the rows and columns
are independent.
"""
return self.resid_pearson**2
@cache_readonly
def local_log_oddsratios(self):
"""
Returns local log odds ratios.
The local log odds ratios are the log odds ratios
calculated for contiguous 2x2 sub-tables.
"""
ta = self.table.copy()
a = ta[0:-1, 0:-1]
b = ta[0:-1, 1:]
c = ta[1:, 0:-1]
d = ta[1:, 1:]
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def local_oddsratios(self):
"""
Returns local odds ratios.
See documentation for local_log_oddsratios.
"""
return np.exp(self.local_log_oddsratios)
@cache_readonly
def cumulative_log_oddsratios(self):
"""
Returns cumulative log odds ratios.
The cumulative log odds ratios for a contingency table
with ordered rows and columns are calculated by collapsing
all cells to the left/right and above/below a given point,
to obtain a 2x2 table from which a log odds ratio can be
calculated.
"""
ta = self.table.cumsum(0).cumsum(1)
a = ta[0:-1, 0:-1]
b = ta[0:-1, -1:] - a
c = ta[-1:, 0:-1] - a
d = ta[-1, -1] - (a + b + c)
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def cumulative_oddsratios(self):
"""
Returns the cumulative odds ratios for a contingency table.
See documentation for cumulative_log_oddsratio.
"""
return np.exp(self.cumulative_log_oddsratios)
class SquareTable(Table):
"""
Methods for analyzing a square contingency table.
Parameters
----------
table : array_like
A square contingency table, or DataFrame that is converted
to a square form.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
These methods should only be used when the rows and columns of the
table have the same categories. If `table` is provided as a
Pandas DataFrame, the row and column indices will be extended to
create a square table, inserting zeros where a row or column is
missing. Otherwise the table should be provided in a square form,
with the (implicit) row and column categories appearing in the
same order.
"""
def __init__(self, table, shift_zeros=True):
table = _make_df_square(table) # Non-pandas passes through
k1, k2 = table.shape
if k1 != k2:
raise ValueError('table must be square')
super(SquareTable, self).__init__(table, shift_zeros)
def symmetry(self, method="bowker"):
"""
Test for symmetry of a joint distribution.
This procedure tests the null hypothesis that the joint
distribution is symmetric around the main diagonal, that is
.. math::
p_{i, j} = p_{j, i} for all i, j
Returns
-------
A bunch with attributes:
statistic : float
chisquare test statistic
p-value : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
The implementation is based on the SAS documentation. R includes
it in `mcnemar.test` if the table is not 2 by 2. However a more
direct generalization of the McNemar test to larger tables is
provided by the homogeneity test (TableSymmetry.homogeneity).
The p-value is based on the chi-square distribution which requires
that the sample size is not very small to be a good approximation
of the true distribution. For 2x2 contingency tables the exact
distribution can be obtained with `mcnemar`
See Also
--------
mcnemar
homogeneity
"""
if method.lower() != "bowker":
raise ValueError("method for symmetry testing must be 'bowker'")
k = self.table.shape[0]
upp_idx = np.triu_indices(k, 1)
tril = self.table.T[upp_idx] # lower triangle in column order
triu = self.table[upp_idx] # upper triangle in row order
statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def homogeneity(self, method="stuart_maxwell"):
"""
Compare row and column marginal distributions.
Parameters
----------
method : string
Either 'stuart_maxwell' or 'bhapkar', leading to two different
estimates of the covariance matrix for the estimated
difference between the row margins and the column margins.
Returns a bunch with attributes:
statistic : float
The chi^2 test statistic
pvalue : float
The p-value of the test statistic
df : integer
The degrees of freedom of the reference distribution
Notes
-----
For a 2x2 table this is equivalent to McNemar's test. More
generally the procedure tests the null hypothesis that the
marginal distribution of the row factor is equal to the
marginal distribution of the column factor. For this to be
meaningful, the two factors must have the same sample space
(i.e. the same categories).
"""
if self.table.shape[0] < 1:
raise ValueError('table is empty')
elif self.table.shape[0] == 1:
b = _Bunch()
b.statistic = 0
b.pvalue = 1
b.df = 0
return b
method = method.lower()
if method not in ["bhapkar", "stuart_maxwell"]:
raise ValueError("method '%s' for homogeneity not known" % method)
n_obs = self.table.sum()
pr = self.table.astype(np.float64) / n_obs
# Compute margins, eliminate last row/column so there is no
# degeneracy
row = pr.sum(1)[0:-1]
col = pr.sum(0)[0:-1]
pr = pr[0:-1, 0:-1]
# The estimated difference between row and column margins.
d = col - row
# The degrees of freedom of the chi^2 reference distribution.
df = pr.shape[0]
if method == "bhapkar":
vmat = -(pr + pr.T) - np.outer(d, d)
dv = col + row - 2*np.diag(pr) - d**2
np.fill_diagonal(vmat, dv)
elif method == "stuart_maxwell":
vmat = -(pr + pr.T)
dv = row + col - 2*np.diag(pr)
np.fill_diagonal(vmat, dv)
try:
statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
except np.linalg.LinAlgError:
warnings.warn("Unable to invert covariance matrix",
sm_exceptions.SingularMatrixWarning)
b = _Bunch()
b.statistic = np.nan
b.pvalue = np.nan
b.df = df
return b
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def summary(self, alpha=0.05, float_format="%.3f"):
"""
Produce a summary of the analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the interval.
float_format : str
Used to format numeric values in the table.
method : str
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
fmt = float_format
headers = ["Statistic", "P-value", "DF"]
stubs = ["Symmetry", "Homogeneity"]
sy = self.symmetry()
hm = self.homogeneity()
data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
[fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
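# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch with synthetic counts of the Bowker symmetry test and the
# Stuart-Maxwell homogeneity test implemented above.
import numpy as np
from statsmodels.stats.contingency_tables import SquareTable

counts = np.asarray([[50, 10, 5],
                     [8, 60, 12],
                     [4, 14, 40]])
st = SquareTable(counts)
sym = st.symmetry()
hom = st.homogeneity()
print(sym.statistic, sym.pvalue, sym.df)
print(hom.statistic, hom.pvalue, hom.df)
print(st.summary())   # both tests in one table
# -----------------------------------------------------------------------------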
class Table2x2(SquareTable):
"""
Analyses that can be performed on a 2x2 contingency table.
Parameters
----------
table : array_like
A 2x2 contingency table
shift_zeros : boolean
If true, 0.5 is added to all cells of the table if any cell is
equal to zero.
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
Note that for the risk ratio, the analysis is not symmetric with
respect to the rows and columns of the contingency table. The two
rows define population subgroups, column 0 is the number of
'events', and column 1 is the number of 'non-events'.
"""
def __init__(self, table, shift_zeros=True):
if type(table) is list:
table = np.asarray(table)
if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
raise ValueError("Table2x2 takes a 2x2 table as input.")
super(Table2x2, self).__init__(table, shift_zeros)
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array_like
The raw data, the first column defines the rows and the
second column defines the columns.
shift_zeros : boolean
If True, and if there are any zeros in the contingency
table, add 0.5 to all four cells of the table.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
@cache_readonly
def log_oddsratio(self):
"""
Returns the log odds ratio for a 2x2 table.
"""
f = self.table.flatten()
return np.dot(np.log(f), np.r_[1, -1, -1, 1])
@cache_readonly
def oddsratio(self):
"""
Returns the odds ratio for a 2x2 table.
"""
return (self.table[0, 0] * self.table[1, 1] /
(self.table[0, 1] * self.table[1, 0]))
@cache_readonly
def log_oddsratio_se(self):
"""
Returns the standard error for the log odds ratio.
"""
return np.sqrt(np.sum(1 / self.table))
def oddsratio_pvalue(self, null=1):
"""
P-value for a hypothesis test about the odds ratio.
Parameters
----------
null : float
The null value of the odds ratio.
"""
return self.log_oddsratio_pvalue(np.log(null))
def log_oddsratio_pvalue(self, null=0):
"""
P-value for a hypothesis test about the log odds ratio.
Parameters
----------
null : float
The null value of the log odds ratio.
"""
zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence level for the log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lor = self.log_oddsratio
se = self.log_oddsratio_se
lcb = lor - f * se
ucb = lor + f * se
return lcb, ucb
def oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
@cache_readonly
def riskratio(self):
"""
Returns the risk ratio for a 2x2 table.
The risk ratio is calculated with respect to the rows.
"""
p = self.table[:, 0] / self.table.sum(1)
return p[0] / p[1]
@cache_readonly
def log_riskratio(self):
"""
Returns the log of the risk ratio.
"""
return np.log(self.riskratio)
@cache_readonly
def log_riskratio_se(self):
"""
Returns the standard error of the log of the risk ratio.
"""
n = self.table.sum(1)
p = self.table[:, 0] / n
va = np.sum((1 - p) / (n*p))
return np.sqrt(va)
def riskratio_pvalue(self, null=1):
"""
p-value for a hypothesis test about the risk ratio.
Parameters
----------
null : float
The null value of the risk ratio.
"""
return self.log_riskratio_pvalue(np.log(null))
def log_riskratio_pvalue(self, null=0):
"""
p-value for a hypothesis test about the log risk ratio.
Parameters
----------
null : float
The null value of the log risk ratio.
"""
zscore = (self.log_riskratio - null) / self.log_riskratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lrr = self.log_riskratio
se = self.log_riskratio_se
lcb = lrr - f * se
ucb = lrr + f * se
return lcb, ucb
def riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_riskratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
Summarizes results for a 2x2 table analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the confidence
intervals.
float_format : str
Used to format the numeric values in the table.
method : str
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if isinstance(x, str):
return x
return float_format % x
headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
stubs = ["Odds ratio", "Log odds ratio", "Risk ratio",
"Log risk ratio"]
lcb1, ucb1 = self.oddsratio_confint(alpha, method)
lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
lcb3, ucb3 = self.riskratio_confint(alpha, method)
lcb4, ucb4 = self.log_riskratio_confint(alpha, method)
data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1,
self.oddsratio_pvalue()]],
[fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se,
lcb2, ucb2, self.oddsratio_pvalue()]],
[fmt(x) for x in [self.riskratio, "", lcb3, ucb3,
self.riskratio_pvalue()]],
[fmt(x) for x in [self.log_riskratio, self.log_riskratio_se,
lcb4, ucb4, self.riskratio_pvalue()]]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
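# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch with a synthetic 2x2 table; rows are the two exposure
# groups and column 0 holds the 'events', matching the convention described
# in the class docstring above.
import numpy as np
from statsmodels.stats.contingency_tables import Table2x2

t22 = Table2x2(np.asarray([[30, 70],
                           [10, 90]]))
print(t22.oddsratio, t22.oddsratio_confint())
print(t22.riskratio, t22.riskratio_confint())
print(t22.oddsratio_pvalue())   # test of OR = 1
print(t22.summary())
# -----------------------------------------------------------------------------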
class StratifiedTable(object):
"""
Analyses for a collection of 2x2 contingency tables.
Such a collection may arise by stratifying a single 2x2 table with
respect to another factor. This class implements the
'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
analyzing collections of 2x2 contingency tables.
Parameters
----------
tables : list or ndarray
Either a list containing several 2x2 contingency tables, or
a 2x2xk ndarray in which each slice along the third axis is a
2x2 contingency table.
Notes
-----
These results are based on a sampling model in which the units are
independent both within and between strata.
"""
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = {}
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
@classmethod
def from_data(cls, var1, var2, strata, data):
"""
Construct a StratifiedTable object from data.
Parameters
----------
var1 : int or string
The column index or name of `data` specifying the variable
defining the rows of the contingency table. The variable
must have only two distinct values.
var2 : int or string
The column index or name of `data` specifying the variable
defining the columns of the contingency table. The variable
must have only two distinct values.
strata : int or string
The column index or name of `data` specifying the variable
defining the strata.
data : array_like
The raw data. A cross-table for analysis is constructed
from the first two columns.
Returns
-------
A StratifiedTable instance.
"""
if not isinstance(data, pd.DataFrame):
data1 = pd.DataFrame(index=np.arange(data.shape[0]),
columns=[var1, var2, strata])
data1.loc[:, var1] = data[:, var1]
data1.loc[:, var2] = data[:, var2]
data1.loc[:, strata] = data[:, strata]
else:
data1 = data[[var1, var2, strata]]
gb = data1.groupby(strata).groups
tables = []
for g in gb:
ii = gb[g]
tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
if (tab.shape != np.r_[2, 2]).any():
msg = "Invalid table dimensions"
raise ValueError(msg)
tables.append(np.asarray(tab))
return cls(tables)
def test_null_odds(self, correction=False):
"""
Test that all tables have odds ratio equal to 1.
This is the 'Mantel-Haenszel' test.
Parameters
----------
correction : boolean
If True, use the continuity correction when calculating the
test statistic.
Returns
-------
A bunch containing the chi^2 test statistic and p-value.
"""
statistic = np.sum(self.table[0, 0, :] -
self._apb * self._apc / self._n)
statistic = np.abs(statistic)
if correction:
statistic -= 0.5
statistic = statistic**2
denom = self._apb * self._apc * self._bpd * self._cpd
denom /= (self._n**2 * (self._n - 1))
denom = np.sum(denom)
statistic /= denom
# df is always 1
pvalue = 1 - stats.chi2.cdf(statistic, 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
@cache_readonly
def oddsratio_pooled(self):
"""
The pooled odds ratio.
The value is an estimate of a common odds ratio across all of the
stratified tables.
"""
odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
return odds_ratio
@cache_readonly
def logodds_pooled(self):
"""
Returns the logarithm of the pooled odds ratio.
See oddsratio_pooled for more information.
"""
return np.log(self.oddsratio_pooled)
@cache_readonly
def riskratio_pooled(self):
"""
Estimate of the pooled risk ratio.
"""
acd = self.table[0, 0, :] * self._cpd
cab = self.table[1, 0, :] * self._apb
rr = np.sum(acd / self._n) / np.sum(cab / self._n)
return rr
@cache_readonly
def risk_pooled(self):
# Deprecated due to name being misleading
msg = "'risk_pooled' is deprecated, use 'riskratio_pooled' instead"
warnings.warn(msg, DeprecationWarning)
return self.riskratio_pooled
@cache_readonly
def logodds_pooled_se(self):
"""
Estimated standard error of the pooled log odds ratio
References
----------
Robins, James, Norman Breslow, and Sander Greenland. "Estimators of
the Mantel-Haenszel Variance Consistent in Both Sparse Data and
Large-Strata Limiting Models." Biometrics 42, no. 2 (1986): 311-23.
"""
adns = np.sum(self._ad / self._n)
bcns = np.sum(self._bc / self._n)
lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
mid = self._apd * self._bc / self._n**2
mid += (1 - self._apd / self._n) * self._ad / self._n
mid = np.sum(mid)
mid /= (adns * bcns)
lor_va += mid
lor_va += np.sum((1 - self._apd / self._n) *
self._bc / self._n) / bcns**2
lor_va /= 2
lor_se = np.sqrt(lor_va)
return lor_se
def logodds_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lor = np.log(self.oddsratio_pooled)
lor_se = self.logodds_pooled_se
f = -stats.norm.ppf(alpha / 2)
lcb = lor - f * lor_se
ucb = lor + f * lor_se
return lcb, ucb
def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
lcb = np.exp(lcb)
ucb = np.exp(ucb)
return lcb, ucb
def test_equal_odds(self, adjust=False):
"""
Test that all odds ratios are identical.
This is the 'Breslow-Day' testing procedure.
Parameters
----------
adjust : boolean
Use the 'Tarone' adjustment to achieve the chi^2
asymptotic distribution.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
p-value : float
The p-value for the test.
"""
table = self.table
r = self.oddsratio_pooled
a = 1 - r
b = r * (self._apb + self._apc) + self._dma
c = -r * self._apb * self._apc
# Expected value of first cell
e11 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
# Variance of the first cell
v11 = (1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) +
1 / (self._dma + e11))
v11 = 1 / v11
statistic = np.sum((table[0, 0, :] - e11)**2 / v11)
if adjust:
adj = table[0, 0, :].sum() - e11.sum()
adj = adj**2
adj /= np.sum(v11)
statistic -= adj
pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
A summary of all the main results.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence intervals.
float_format : str
Used for formatting numeric values in the summary.
method : str
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if isinstance(x, str):
return x
return float_format % x
co_lcb, co_ucb = self.oddsratio_pooled_confint(
alpha=alpha, method=method)
clo_lcb, clo_ucb = self.logodds_pooled_confint(
alpha=alpha, method=method)
headers = ["Estimate", "LCB", "UCB"]
stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
[fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
[fmt(x) for x in [self.riskratio_pooled, "", ""]],
['', '', '']]
tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
headers = ["Statistic", "P-value", ""]
stubs = ["Test of OR=1", "Test constant OR"]
rslt1 = self.test_null_odds()
rslt2 = self.test_equal_odds()
data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
[fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab2)
headers = ["", "", ""]
stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
ss = self.table.sum(0).sum(0)
data = [["%d" % self.table.shape[2], '', ''],
["%d" % min(ss), '', ''],
["%d" % max(ss), '', ''],
["%.0f" % np.mean(ss), '', ''],
["%d" % sum(ss), '', '', '']]
tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab3)
return tab1
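# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch with three synthetic 2x2 strata, showing the pooled odds
# ratio, the Mantel-Haenszel test and the Breslow-Day test defined above.
import numpy as np
from statsmodels.stats.contingency_tables import StratifiedTable

tables = [np.asarray([[20, 30], [10, 40]]),
          np.asarray([[15, 25], [12, 38]]),
          np.asarray([[18, 22], [14, 36]])]
strat = StratifiedTable(tables)
print(strat.oddsratio_pooled)
print(strat.test_null_odds().pvalue)    # Mantel-Haenszel test of OR = 1
print(strat.test_equal_odds().pvalue)   # Breslow-Day test of a common OR
print(strat.summary())
# -----------------------------------------------------------------------------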
def mcnemar(table, exact=True, correction=True):
"""
McNemar test of homogeneity.
Parameters
----------
table : array_like
A square contingency table.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be
used, which is the approximation to the distribution of the
test statistic for large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false).
Returns
-------
A bunch with attributes:
statistic : float or int, array
The test statistic is the chisquare statistic if exact is
false. If the exact binomial distribution is used, then this
contains the min(n1, n2), where n1, n2 are cases that are zero
in one sample but one in the other sample.
pvalue : float or array
p-value of the null hypothesis of equal marginal distributions.
Notes
-----
This is a special case of Cochran's Q test, and of the homogeneity
test. The results when the chisquare distribution is used are
identical, except for continuity correction.
"""
table = _make_df_square(table)
table = np.asarray(table, dtype=np.float64)
n1, n2 = table[0, 1], table[1, 0]
if exact:
statistic = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pvalue = stats.binom.cdf(statistic, n1 + n2, 0.5) * 2
pvalue = np.minimum(pvalue, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
statistic = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
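# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch with a synthetic paired 2x2 table, comparing the exact
# binomial and asymptotic chi-square versions of the McNemar test above.
import numpy as np
from statsmodels.stats.contingency_tables import mcnemar

paired = np.asarray([[60, 5],
                     [15, 20]])
print(mcnemar(paired, exact=True).pvalue)
print(mcnemar(paired, exact=False, correction=True).pvalue)
# -----------------------------------------------------------------------------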
def cochrans_q(x, return_object=True):
"""
Cochran's Q test for identical binomial proportions.
Parameters
----------
x : array_like, 2d (N, k)
data with N cases and k variables
return_object : boolean
Return values as bunch instead of as individual values.
Returns
-------
Returns a bunch containing the following attributes, or the
individual values according to the value of `return_object`.
statistic : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
Cochran's Q is a k-sample extension of the McNemar test. If there
are only two groups, then Cochran's Q test and the McNemar test
are equivalent.
The procedure tests that the probability of success is the same
for every group. The alternative hypothesis is that at least two
groups have a different probability of success.
In Wikipedia terminology, rows are blocks and columns are
treatments. The number of rows N, should be large for the
chisquare distribution to be a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
https://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
"""
x = np.asarray(x, dtype=np.float64)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x == gruni[-1]).sum(1, float)
count_col_success = (x == gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss # just a calculation check
# From the SAS manual
q_stat = ((k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2)
/ (k * count_row_ss - np.sum(count_row_success**2)))
# Note: the denominator looks just like k times the variance of
# the columns
# Wikipedia uses a different, but equivalent expression
# q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2)
# / (k * count_col_ss - np.sum(count_col_success**2))
df = k - 1
pvalue = stats.chi2.sf(q_stat, df)
if return_object:
b = _Bunch()
b.statistic = q_stat
b.df = df
b.pvalue = pvalue
return b
return q_stat, pvalue, df |
|
statsmodels__statsmodels | discretemod.rst | Module doc / Directory summarization | Generate documentation for this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/discretemod.rst | [
"statsmodels__statsmodels/statsmodels/discrete/count_model.py",
"statsmodels__statsmodels/statsmodels/discrete/discrete_model.py"
] | Regression with Discrete Dependent Variable
Regression models for limited and qualitative dependent variables. The
module currently allows the estimation of models with binary (Logit,
Probit), nominal (MNLogit), or count (Poisson, NegativeBinomial) data.
Starting with version 0.9, this also includes new count models that are
still experimental in 0.9: NegativeBinomialP, GeneralizedPoisson and the
zero-inflated models ZeroInflatedPoisson, ZeroInflatedNegativeBinomialP
and ZeroInflatedGeneralizedPoisson.
Examples
# Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load_pandas()
spector_data.exog = sm.add_constant(spector_data.exog)
# Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
logit_res = logit_mod.fit()
print(logit_res.summary())
Technical Documentation
Currently all models are estimated by Maximum Likelihood and assume
independently and identically distributed errors.
All discrete regression models define the same methods and follow the
same structure, which is similar to the regression results but with some
methods specific to discrete models. Additionally some of them contain
additional model specific methods and attributes.
DiscreteModel is a superclass of all discrete regression models. The
estimation results are returned as an instance of one of the subclasses
of DiscreteResults. Each category of models (binary, count and
multinomial) has its own intermediate level of model and results
classes. These intermediate classes mostly facilitate the
implementation of the methods and attributes defined by DiscreteModel
and DiscreteResults. | __all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : string, 'logit' or 'probit'
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
exog_infl: array
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
raise ValueError('exog and exog_infl have different number of '
'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
--------
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
--------
.. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\ln(1-w_{i})+L_{main\\_model}
where P - pdf of main model, L - loglike function of main model.
for observations :math:`i=1,...,n`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean'):
"""
Predict response variable of a count model given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model
exog : array, optional
A reference to the exogenous design.
If not assigned, will be used exog from fitting.
exog_infl : array, optional
A reference to the zero-inflated exogenous design.
If not assigned, will be used exog from fitting.
offset : array, optional
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array, optional
Log(exposure) is added to the linear prediction with coefficient
equal to 1. If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
which : string, optional
Define values that will be predicted.
'mean', 'mean-main', 'linear', 'mean-nonzero', 'prob-zero', 'prob', 'prob-main'
Default is 'mean'.
Notes
-----
"""
if exog is None:
exog = self.exog
if exog_infl is None:
exog_infl = self.exog_infl
if exposure is None:
exposure = getattr(self, 'exposure', 0)
else:
exposure = np.log(exposure)
if offset is None:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', ['no'])
tmp_exposure = getattr(self.model_main, 'exposure', ['no'])
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
if len(tmp_offset) == 1 and tmp_offset[0] == 'no':
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
if len(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which =='mean':
return prob_main * np.exp(lin_pred)
elif which =='mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which =='mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure, offset)
else:
raise ValueError('which = %s is not available' % which)
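# --- Illustrative sketch (not part of the module) ----------------------------
# A minimal numpy illustration of the mixture log-likelihood computed by
# loglikeobs above, assuming a Poisson main model with mean `mu` and a
# constant mixing probability `w` (arbitrary illustrative values).
import numpy as np
from scipy import stats as _stats

y = np.array([0, 0, 1, 3, 0, 2])
mu, w = 1.2, 0.25
llf_main = _stats.poisson.logpmf(y, mu)
llf = np.where(y == 0,
               np.log(w + (1 - w) * np.exp(llf_main)),
               np.log(1 - w) + llf_main)
print(llf.sum())   # total log-likelihood of the zero-inflated mixture
# -----------------------------------------------------------------------------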
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
exog_infl: array
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(counts, mu, w)
return result[0] if transform else result
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
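# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch with synthetic data: fit a zero-inflated Poisson model
# with a constant-only inflation part and inspect predicted zero
# probabilities.  The data-generating values are arbitrary.
import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedPoisson

rng = np.random.RandomState(12345)
n = 500
exog = np.column_stack([np.ones(n), rng.normal(size=n)])
endog = rng.poisson(np.exp(np.dot(exog, np.r_[0.3, 0.5])))
endog[rng.uniform(size=n) < 0.2] = 0   # add excess zeros

res = ZeroInflatedPoisson(endog, exog).fit(disp=0)
print(res.summary())
print(res.predict(which='prob-zero')[:5])
# -----------------------------------------------------------------------------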
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
exog_infl: array
A reference to the zero-inflated exogenous design.
p: scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
return result[0] if transform else result
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
exog_infl: array
A reference to the zero-inflated exogenous design.
p: scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
counts = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
return result[0] if transform else result
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
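# --- Illustrative usage (not part of the module) ----------------------------
# A minimal sketch, analogous to the ZeroInflatedPoisson example above,
# assuming overdispersed synthetic counts; the parameter vector is ordered as
# inflation coefficients, then main-model coefficients, then the dispersion
# parameter alpha of the NB-2 main model (p=2).
import numpy as np
from statsmodels.discrete.count_model import ZeroInflatedNegativeBinomialP

rng = np.random.RandomState(0)
n = 500
exog = np.column_stack([np.ones(n), rng.normal(size=n)])
lam = np.exp(np.dot(exog, np.r_[0.2, 0.4])) * rng.gamma(2.0, 0.5, size=n)
endog = rng.poisson(lam)                 # Poisson-gamma mixture => NB counts
endog[rng.uniform(size=n) < 0.25] = 0    # add excess zeros

res = ZeroInflatedNegativeBinomialP(endog, exog, p=2).fit(disp=0, maxiter=500)
print(res.params)
# -----------------------------------------------------------------------------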
class ZeroInflatedPoissonResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Genaralized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
"""
Limited dependent variable and qualitative variables.
Includes binary outcomes, count data, (ordered) ordinal data and limited
dependent variables.
General References
--------------------
A.C. Cameron and P.K. Trivedi. `Regression Analysis of Count Data`.
Cambridge, 1998
G.S. Madalla. `Limited-Dependent and Qualitative Variables in Econometrics`.
Cambridge, 1983.
W. Greene. `Econometric Analysis`. Prentice Hall, 5th. edition. 2003.
"""
__all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial",
"GeneralizedPoisson", "NegativeBinomialP"]
from statsmodels.compat.python import range
from scipy.special import loggamma
import numpy as np
from pandas import get_dummies
from scipy.special import gammaln, digamma, polygamma
from scipy import stats, special
from scipy.stats import nbinom
import statsmodels.tools.tools as tools
from statsmodels.tools import data as data_tools
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import (PerfectSeparationError,
SpecificationWarning)
from statsmodels.tools.numdiff import approx_fprime_cs
import statsmodels.base.model as base
from statsmodels.base.data import handle_data # for mnlogit
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.base.l1_slsqp import fit_l1_slsqp
from statsmodels.distributions import genpoisson_p
try:
import cvxopt # noqa:F401
have_cvxopt = True
except ImportError:
have_cvxopt = False
import warnings
#TODO: When we eventually get user-settable precision, we need to change
# this
FLOAT_EPS = np.finfo(float).eps
#TODO: add options for the parameter covariance/variance
# ie., OIM, EIM, and BHHH see Green 21.4
_discrete_models_docs = """
"""
_discrete_results_docs = """
%(one_line_description)s
Parameters
----------
model : A DiscreteModel instance
params : array_like
The parameters of a fitted model.
hessian : array_like
The hessian of the fitted model.
scale : float
A scale parameter for the covariance matrix.
Attributes
----------
df_resid : float
See model definition.
df_model : float
See model definition.
llf : float
Value of the loglikelihood
%(extra_attr)s"""
_l1_results_attr = """ nnz_params : Integer
The number of nonzero parameters in the model. Train with
trim_params == True or else numerical error will distort this.
trimmed : Boolean array
trimmed[i] == True if the ith parameter was trimmed from the model."""
_get_start_params_null_docs = """
Compute one-step moment estimator for null (constant-only) model
This is a preliminary estimator used as start_params.
Returns
-------
params : ndarray
parameter estimate based one one-step moment matching
"""
# helper for MNLogit (will be generally useful later)
def _numpy_to_dummies(endog):
if endog.dtype.kind in ['S', 'O']:
endog_dummies, ynames = tools.categorical(endog, drop=True,
dictnames=True)
elif endog.ndim == 2:
endog_dummies = endog
ynames = range(endog.shape[1])
else:
endog_dummies, ynames = tools.categorical(endog, drop=True,
dictnames=True)
return endog_dummies, ynames
def _pandas_to_dummies(endog):
if endog.ndim == 2:
if endog.shape[1] == 1:
yname = endog.columns[0]
endog_dummies = get_dummies(endog.iloc[:, 0])
else: # series
yname = 'y'
endog_dummies = endog
else:
yname = endog.name
endog_dummies = get_dummies(endog)
ynames = endog_dummies.columns.tolist()
return endog_dummies, ynames, yname
def _validate_l1_method(method):
"""
As of 0.10.0, the supported values for `method` in `fit_regularized`
are "l1" and "l1_cvxopt_cp". If an invalid value is passed, raise
with a helpful error message
Parameters
----------
method : str
Raises
------
ValueError
"""
if method not in ['l1', 'l1_cvxopt_cp']:
raise ValueError('`method` = {method} is not supported, use either '
'"l1" or "l1_cvxopt_cp"'.format(method=method))
#### Private Model Classes ####
class DiscreteModel(base.LikelihoodModel):
"""
Abstract class for discrete choice models.
This class does not do anything itself but lays out the methods and
call signature expected of child classes in addition to those of
statsmodels.model.LikelihoodModel.
"""
def __init__(self, endog, exog, **kwargs):
super(DiscreteModel, self).__init__(endog, exog, **kwargs)
self.raise_on_perfect_prediction = True
def initialize(self):
"""
Initialize is called by
statsmodels.model.LikelihoodModel.__init__
and should contain any preprocessing that needs to be done for a model.
"""
# assumes constant
rank = np.linalg.matrix_rank(self.exog)
self.df_model = float(rank - 1)
self.df_resid = float(self.exog.shape[0] - rank)
def cdf(self, X):
"""
The cumulative distribution function of the model.
"""
raise NotImplementedError
def pdf(self, X):
"""
The probability density (mass) function of the model.
"""
raise NotImplementedError
def _check_perfect_pred(self, params, *args):
endog = self.endog
fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]]))
if (self.raise_on_perfect_prediction and
np.allclose(fittedvalues - endog, 0)):
msg = "Perfect separation detected, results not available"
raise PerfectSeparationError(msg)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
"""
Fit the model using maximum likelihood.
The rest of the docstring is from
statsmodels.base.model.LikelihoodModel.fit
"""
if callback is None:
callback = self._check_perfect_pred
else:
pass # make a function factory to have multiple call-backs
mlefit = super(DiscreteModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
return mlefit # up to subclasses to wrap results
fit.__doc__ += base.LikelihoodModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=True,
callback=None, alpha=0, trim_mode='auto',
auto_trim_tol=0.01, size_trim_tol=1e-4, qc_tol=0.03,
qc_verbose=False, **kwargs):
"""
Fit the model using a regularized maximum likelihood.
The regularization method AND the solver used is determined by the
argument method.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : 'l1' or 'l1_cvxopt_cp'
See notes for details.
maxiter : Integer or 'defined_by_method'
Maximum number of iterations to perform.
If 'defined_by_method', then use method defaults (see notes).
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
fargs : tuple
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
alpha : non-negative scalar or numpy array (same size as parameters)
The weight multiplying the l1 penalty term
        trim_mode : 'auto', 'size', or 'off'
            If not 'off', trim (set to zero) parameters that would have been
            zero if the solver reached the theoretical minimum.
            If 'auto', trim params using the Theory above.
            If 'size', trim params if they have very small absolute value.
        size_trim_tol : float or 'auto' (default = 'auto')
            For use when trim_mode == 'size'.
        auto_trim_tol : float
            For use when trim_mode == 'auto'.
        qc_tol : float
            Print warning and do not allow auto trim when (ii) (above) is
            violated by this much.
        qc_verbose : bool
            If True, print out a full QC report upon failure.
Notes
-----
Extra parameters are not penalized if alpha is given as a scalar.
An example is the shape parameter in NegativeBinomial `nb1` and `nb2`.
Optional arguments for the solvers (available in Results.mle_settings)::
'l1'
acc : float (default 1e-6)
Requested accuracy as used by slsqp
'l1_cvxopt_cp'
abstol : float
absolute accuracy (default: 1e-7).
reltol : float
relative accuracy (default: 1e-6).
feastol : float
tolerance for feasibility conditions (default: 1e-7).
refinement : int
number of iterative refinement steps when solving KKT
equations (default: 1).
Optimization methodology
With :math:`L` the negative log likelihood, we solve the convex but
non-smooth problem
.. math:: \\min_\\beta L(\\beta) + \\sum_k\\alpha_k |\\beta_k|
via the transformation to the smooth, convex, constrained problem
in twice as many variables (adding the "added variables" :math:`u_k`)
.. math:: \\min_{\\beta,u} L(\\beta) + \\sum_k\\alpha_k u_k,
subject to
.. math:: -u_k \\leq \\beta_k \\leq u_k.
With :math:`\\partial_k L` the derivative of :math:`L` in the
:math:`k^{th}` parameter direction, theory dictates that, at the
minimum, exactly one of two conditions holds:
(i) :math:`|\\partial_k L| = \\alpha_k` and :math:`\\beta_k \\neq 0`
(ii) :math:`|\\partial_k L| \\leq \\alpha_k` and :math:`\\beta_k = 0`
"""
_validate_l1_method(method)
# Set attributes based on method
cov_params_func = self.cov_params_func_l1
### Bundle up extra kwargs for the dictionary kwargs. These are
### passed through super(...).fit() as kwargs and unpacked at
### appropriate times
alpha = np.array(alpha)
assert alpha.min() >= 0
try:
kwargs['alpha'] = alpha
except TypeError:
kwargs = dict(alpha=alpha)
kwargs['alpha_rescaled'] = kwargs['alpha'] / float(self.endog.shape[0])
kwargs['trim_mode'] = trim_mode
kwargs['size_trim_tol'] = size_trim_tol
kwargs['auto_trim_tol'] = auto_trim_tol
kwargs['qc_tol'] = qc_tol
kwargs['qc_verbose'] = qc_verbose
### Define default keyword arguments to be passed to super(...).fit()
if maxiter == 'defined_by_method':
if method == 'l1':
maxiter = 1000
elif method == 'l1_cvxopt_cp':
maxiter = 70
## Parameters to pass to super(...).fit()
# For the 'extra' parameters, pass all that are available,
# even if we know (at this point) we will only use one.
extra_fit_funcs = {'l1': fit_l1_slsqp}
if have_cvxopt and method == 'l1_cvxopt_cp':
from statsmodels.base.l1_cvxopt import fit_l1_cvxopt_cp
extra_fit_funcs['l1_cvxopt_cp'] = fit_l1_cvxopt_cp
        elif method.lower() == 'l1_cvxopt_cp':
            raise ValueError("Attempt to use l1_cvxopt_cp failed since cvxopt "
                             "could not be imported")
if callback is None:
callback = self._check_perfect_pred
else:
pass # make a function factory to have multiple call-backs
mlefit = super(DiscreteModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, extra_fit_funcs=extra_fit_funcs,
cov_params_func=cov_params_func, **kwargs)
return mlefit # up to subclasses to wrap results
def cov_params_func_l1(self, likelihood_model, xopt, retvals):
"""
Computes cov_params on a reduced parameter space
corresponding to the nonzero parameters resulting from the
l1 regularized fit.
Returns a full cov_params matrix, with entries corresponding
to zero'd values set to np.nan.
"""
H = likelihood_model.hessian(xopt)
trimmed = retvals['trimmed']
nz_idx = np.nonzero(~trimmed)[0]
nnz_params = (~trimmed).sum()
if nnz_params > 0:
H_restricted = H[nz_idx[:, None], nz_idx]
# Covariance estimate for the nonzero params
H_restricted_inv = np.linalg.inv(-H_restricted)
else:
H_restricted_inv = np.zeros(0)
cov_params = np.nan * np.ones(H.shape)
cov_params[nz_idx[:, None], nz_idx] = H_restricted_inv
return cov_params
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
"""
raise NotImplementedError
def _derivative_exog(self, params, exog=None, dummy_idx=None,
count_idx=None):
"""
This should implement the derivative of the non-linear function
"""
raise NotImplementedError
def _derivative_exog_helper(self, margeff, params, exog, dummy_idx,
count_idx, transform):
"""
Helper for _derivative_exog to wrap results appropriately
"""
        from .discrete_margins import _get_count_effects, _get_dummy_effects
if count_idx is not None:
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
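# Minimal sketch (assumption, not library code): the objective described in the
# ``DiscreteModel.fit_regularized`` docstring is the negative log-likelihood plus
# a weighted L1 penalty; ``_example_l1_objective_sketch`` is a hypothetical
# helper that evaluates it for any concrete subclass with ``loglike`` defined.
def _example_l1_objective_sketch(model, params, alpha):
    # model: a concrete DiscreteModel subclass instance (e.g. Logit, Poisson below)
    # params: candidate parameter vector; alpha: scalar or per-parameter weights
    params = np.asarray(params, dtype=float)
    return -model.loglike(params) + np.sum(np.asarray(alpha) * np.abs(params))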
class BinaryModel(DiscreteModel):
def __init__(self, endog, exog, **kwargs):
super(BinaryModel, self).__init__(endog, exog, **kwargs)
if (not issubclass(self.__class__, MultinomialModel) and
not np.all((self.endog >= 0) & (self.endog <= 1))):
raise ValueError("endog must be in the unit interval.")
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array_like
Fitted parameters of the model.
exog : array_like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Returns
-------
array
Fitted values at exog.
"""
if exog is None:
exog = self.exog
if not linear:
return self.cdf(np.dot(exog, params))
else:
return np.dot(exog, params)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
bnryfit = super(BinaryModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1BinaryResults(self, bnryfit)
return L1BinaryResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predict.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
dF = self.pdf(np.dot(exog, params))[:,None] * exog
if 'ey' in transform:
dF /= self.predict(params, exog)[:,None]
return dF
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# Note: this form should be appropriate for
# group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
margeff = np.dot(self.pdf(np.dot(exog, params))[:, None],
params[None, :])
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
return self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
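# Sketch (assumption, illustration only): for a binary model the 'dydx' marginal
# effect at a single row x is pdf(x'b) * b, which is the per-row building block
# used by ``BinaryModel._derivative_exog``. Hypothetical helper for illustration.
def _example_binary_margeff_sketch(model, params, x_row):
    # model: a concrete BinaryModel subclass (Logit or Probit defined below)
    x_row = np.asarray(x_row, dtype=float)
    return model.pdf(np.dot(x_row, params)) * np.asarray(params)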
class MultinomialModel(BinaryModel):
def _handle_data(self, endog, exog, missing, hasconst, **kwargs):
if data_tools._is_using_ndarray_type(endog, None):
endog_dummies, ynames = _numpy_to_dummies(endog)
yname = 'y'
elif data_tools._is_using_pandas(endog, None):
endog_dummies, ynames, yname = _pandas_to_dummies(endog)
else:
endog = np.asarray(endog)
endog_dummies, ynames = _numpy_to_dummies(endog)
yname = 'y'
if not isinstance(ynames, dict):
ynames = dict(zip(range(endog_dummies.shape[1]), ynames))
self._ynames_map = ynames
data = handle_data(endog_dummies, exog, missing, hasconst, **kwargs)
data.ynames = yname # overwrite this to single endog name
data.orig_endog = endog
self.wendog = data.endog
# repeating from upstream...
for key in kwargs:
if key in ['design_info', 'formula']: # leave attached to data
continue
try:
setattr(self, key, data.__dict__.pop(key))
except KeyError:
pass
return data
def initialize(self):
"""
Preprocesses the data for MNLogit.
"""
super(MultinomialModel, self).initialize()
# This is also a "whiten" method in other models (eg regression)
self.endog = self.endog.argmax(1) # turn it into an array of col idx
self.J = self.wendog.shape[1]
self.K = self.exog.shape[1]
self.df_model *= (self.J-1) # for each J - 1 equation.
self.df_resid = self.exog.shape[0] - self.df_model - (self.J-1)
def predict(self, params, exog=None, linear=False):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array_like
2d array of fitted parameters of the model. Should be in the
order returned from the model.
exog : array_like
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used. If a 1d array is given
            it is assumed to be 1 row of exogenous variables. If you only have
one regressor and would like to do prediction, you must provide
a 2d array with shape[1] == 1.
linear : bool, optional
If True, returns the linear predictor dot(exog,params). Else,
returns the value of the cdf at the linear predictor.
Notes
-----
Column 0 is the base case, the rest conform to the rows of params
shifted up one for the base case.
"""
        if exog is None: # do here to accommodate user-given exog
exog = self.exog
if exog.ndim == 1:
exog = exog[None]
pred = super(MultinomialModel, self).predict(params, exog, linear)
if linear:
pred = np.column_stack((np.zeros(len(exog)), pred))
return pred
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
callback = lambda x : None # placeholder until check_perfect_pred
# skip calling super to handle results from LikelihoodModel
mnfit = base.LikelihoodModel.fit(self, start_params = start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = MultinomialResults(self, mnfit)
return MultinomialResultsWrapper(mnfit)
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
if start_params is None:
start_params = np.zeros((self.K * (self.J-1)))
else:
start_params = np.asarray(start_params)
mnfit = DiscreteModel.fit_regularized(
self, start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
mnfit.params = mnfit.params.reshape(self.K, -1, order='F')
mnfit = L1MultinomialResults(self, mnfit)
return L1MultinomialResultsWrapper(mnfit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predicted probabilities for each
choice. dFdparams is of shape nobs x (J*K) x (J-1)*K.
The zero derivatives for the base category are not included.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
if params.ndim == 1: # will get flatted from approx_fprime
params = params.reshape(self.K, self.J-1, order='F')
eXB = np.exp(np.dot(exog, params))
sum_eXB = (1 + eXB.sum(1))[:,None]
J = int(self.J)
K = int(self.K)
repeat_eXB = np.repeat(eXB, J, axis=1)
X = np.tile(exog, J-1)
# this is the derivative wrt the base level
F0 = -repeat_eXB * X / sum_eXB ** 2
# this is the derivative wrt the other levels when
# dF_j / dParams_j (ie., own equation)
#NOTE: this computes too much, any easy way to cut down?
F1 = eXB.T[:,:,None]*X * (sum_eXB - repeat_eXB) / (sum_eXB**2)
F1 = F1.transpose((1,0,2)) # put the nobs index first
# other equation index
other_idx = ~np.kron(np.eye(J-1), np.ones(K)).astype(bool)
F1[:, other_idx] = (-eXB.T[:,:,None]*X*repeat_eXB / \
(sum_eXB**2)).transpose((1,0,2))[:, other_idx]
dFdX = np.concatenate((F0[:, None,:], F1), axis=1)
if 'ey' in transform:
dFdX /= self.predict(params, exog)[:, :, None]
return dFdX
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
For Multinomial models the marginal effects are
P[j] * (params[j] - sum_k P[k]*params[k])
It is returned unshaped, so that each row contains each of the J
equations. This makes it easier to take derivatives of this for
standard errors. If you want average marginal effects you can do
        margeff.reshape(nobs, K, J, order='F').mean(0) and the marginal effects
for choice J are in column J
"""
J = int(self.J) # number of alternative choices
K = int(self.K) # number of variables
# Note: this form should be appropriate for
# group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
if params.ndim == 1: # will get flatted from approx_fprime
params = params.reshape(K, J-1, order='F')
zeroparams = np.c_[np.zeros(K), params] # add base in
cdf = self.cdf(np.dot(exog, params))
# TODO: meaningful interpretation for `iterm`?
iterm = np.array([cdf[:, [i]] * zeroparams[:, i]
for i in range(int(J))]).sum(0)
margeff = np.array([cdf[:, [j]] * (zeroparams[:, j] - iterm)
for j in range(J)])
# swap the axes to make sure margeff are in order nobs, K, J
margeff = np.transpose(margeff, (1, 2, 0))
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None,:]
margeff = self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
return margeff.reshape(len(exog), -1, order='F')
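# Sketch (assumption, illustration only): the multinomial marginal effect for one
# observation is P[j] * (params[:, j] - sum_k P[k] * params[:, k]), the formula
# quoted in ``MultinomialModel._derivative_exog``. Hypothetical helper.
def _example_mnl_margeff_sketch(probs_row, zeroparams):
    # probs_row: (J,) predicted probabilities for one observation
    # zeroparams: (K, J) coefficients with a leading column of zeros for the base
    probs_row = np.asarray(probs_row, dtype=float)
    zeroparams = np.asarray(zeroparams, dtype=float)
    weighted = np.dot(zeroparams, probs_row)             # (K,) = sum_k P[k] * params[:, k]
    return probs_row * (zeroparams - weighted[:, None])  # (K, J)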
class CountModel(DiscreteModel):
def __init__(self, endog, exog, offset=None, exposure=None, missing='none',
**kwargs):
super(CountModel, self).__init__(endog, exog, missing=missing,
offset=offset,
exposure=exposure, **kwargs)
if exposure is not None:
self.exposure = np.log(self.exposure)
self._check_inputs(self.offset, self.exposure, self.endog)
if offset is None:
delattr(self, 'offset')
if exposure is None:
delattr(self, 'exposure')
# promote dtype to float64 if needed
dt = np.promote_types(self.endog.dtype, np.float64)
self.endog = np.asarray(self.endog, dt)
dt = np.promote_types(self.exog.dtype, np.float64)
self.exog = np.asarray(self.exog, dt)
def _check_inputs(self, offset, exposure, endog):
        if offset is not None and offset.shape[0] != endog.shape[0]:
            raise ValueError("offset is not the same length as endog")
        if exposure is not None and exposure.shape[0] != endog.shape[0]:
raise ValueError("exposure is not the same length as endog")
def _get_init_kwds(self):
# this is a temporary fixup because exposure has been transformed
# see #1609
kwds = super(CountModel, self)._get_init_kwds()
if 'exposure' in kwds and kwds['exposure'] is not None:
kwds['exposure'] = np.exp(kwds['exposure'])
return kwds
def predict(self, params, exog=None, exposure=None, offset=None,
linear=False):
"""
Predict response variable of a count model given exogenous variables.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
# the following is copied from GLM predict (without family/link check)
# Use fit offset if appropriate
if offset is None and exog is None and hasattr(self, 'offset'):
offset = self.offset
elif offset is None:
offset = 0.
# Use fit exposure if appropriate
if exposure is None and exog is None and hasattr(self, 'exposure'):
# Already logged
exposure = self.exposure
elif exposure is None:
exposure = 0.
else:
exposure = np.log(exposure)
if exog is None:
exog = self.exog
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
if not linear:
return np.exp(linpred) # not cdf
else:
return linpred
def _derivative_predict(self, params, exog=None, transform='dydx'):
"""
For computing marginal effects standard errors.
This is used only in the case of discrete and count regressors to
get the variance-covariance of the marginal effects. It returns
[d F / d params] where F is the predict.
Transform can be 'dydx' or 'eydx'. Checking is done in margeff
computations for appropriate transform.
"""
if exog is None:
exog = self.exog
#NOTE: this handles offset and exposure
dF = self.predict(params, exog)[:,None] * exog
if 'ey' in transform:
dF /= self.predict(params, exog)[:,None]
return dF
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None):
"""
For computing marginal effects. These are the marginal effects
d F(XB) / dX
For the Poisson model F(XB) is the predicted counts rather than
the probabilities.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# group 3 poisson, nbreg, zip, zinb
if exog is None:
exog = self.exog
k_extra = getattr(self, 'k_extra', 0)
params_exog = params if k_extra == 0 else params[:-k_extra]
margeff = self.predict(params, exog)[:,None] * params_exog[None,:]
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:,None]
return self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
cntfit = super(CountModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
discretefit = CountResults(self, cntfit)
return CountResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1CountResults(self, cntfit)
return L1CountResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
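# Sketch (assumption, illustration only): mirrors what ``CountModel.predict``
# returns with linear=False, i.e. exp(X b + log(exposure) + offset).
# ``_example_count_predict_sketch`` is a hypothetical helper.
def _example_count_predict_sketch(exog, params, exposure=1.0, offset=0.0):
    exog = np.asarray(exog, dtype=float)
    linpred = np.dot(exog, params) + np.log(exposure) + offset
    return np.exp(linpred)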
class OrderedModel(DiscreteModel):
pass
#### Public Model Classes ####
class Poisson(CountModel):
__doc__ = """
Poisson model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' :
"""offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc}
@property
def family(self):
from statsmodels.genmod import families
return families.Poisson()
def cdf(self, X):
"""
Poisson model cumulative distribution function
Parameters
----------
X : array_like
`X` is the linear predictor of the model. See notes.
Returns
-------
The value of the Poisson CDF at each point.
Notes
-----
The CDF is defined as
.. math:: \\exp\\left(-\\lambda\\right)\\sum_{i=0}^{y}\\frac{\\lambda^{i}}{i!}
where :math:`\\lambda` assumes the loglinear model. I.e.,
.. math:: \\ln\\lambda_{i}=X\\beta
The parameter `X` is :math:`X\\beta` in the above formula.
"""
y = self.endog
return stats.poisson.cdf(y, np.exp(X))
def pdf(self, X):
"""
Poisson model probability mass function
Parameters
----------
X : array_like
`X` is the linear predictor of the model. See notes.
Returns
-------
pdf : ndarray
The value of the Poisson probability mass function, PMF, for each
point of X.
Notes
--------
The PMF is defined as
.. math:: \\frac{e^{-\\lambda_{i}}\\lambda_{i}^{y_{i}}}{y_{i}!}
where :math:`\\lambda` assumes the loglinear model. I.e.,
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
The parameter `X` is :math:`x_{i}\\beta` in the above formula.
"""
y = self.endog
return np.exp(stats.poisson.logpmf(y, np.exp(X)))
def loglike(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
XB = np.dot(self.exog, params) + offset + exposure
endog = self.endog
return np.sum(-np.exp(XB) + endog*XB - gammaln(endog+1))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
--------
.. math:: \\ln L_{i}=\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
for observations :math:`i=1,...,n`
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
XB = np.dot(self.exog, params) + offset + exposure
endog = self.endog
#np.sum(stats.poisson.logpmf(endog, np.exp(XB)))
return -np.exp(XB) + endog*XB - gammaln(endog+1)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
return params
_get_start_params_null.__doc__ = _get_start_params_null_docs
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
if start_params is None and self.data.const_idx is not None:
# k_params or k_exog not available?
start_params = 0.001 * np.ones(self.exog.shape[1])
start_params[self.data.const_idx] = self._get_start_params_null()[0]
cntfit = super(CountModel, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
if 'cov_type' in kwargs:
cov_kwds = kwargs.get('cov_kwds', {})
kwds = {'cov_type':kwargs['cov_type'], 'cov_kwds':cov_kwds}
else:
kwds = {}
discretefit = PoissonResults(self, cntfit, **kwds)
return PoissonResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1PoissonResults(self, cntfit)
return L1PoissonResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""fit the model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of
constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
constraints : formula expression or tuple
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
#constraints = (R, q)
# TODO: temporary trailing underscore to not overwrite the monkey
# patched version
# TODO: decide whether to move the imports
from patsy import DesignInfo
from statsmodels.base._constraints import fit_constrained
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
# TODO: add start_params option, need access to tranformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
#create dummy results Instance, TODO: wire up properly
res = self.fit(maxiter=0, method='nm', disp=0,
warn_convergence=False) # we get a wrapper back
res.mle_retvals['fcall'] = res_constr.mle_retvals.get('fcall', np.nan)
res.mle_retvals['iterations'] = res_constr.mle_retvals.get(
'iterations', np.nan)
res.mle_retvals['converged'] = res_constr.mle_retvals['converged']
res._results.params = params
res._results.cov_params_default = cov
cov_type = fit_kwds.get('cov_type', 'nonrobust')
        if cov_type != 'nonrobust':
res._results.normalized_cov_params = cov # assume scale=1
else:
res._results.normalized_cov_params = None
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = lc
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res
def score(self, params):
"""
Poisson model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\lambda_{i}\\right)x_{i}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return np.dot(self.endog - L, X)
def score_obs(self, params):
"""
Poisson model Jacobian of the log-likelihood for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : array_like
The score vector (nobs, k_vars) of the model evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)x_{i}
for observations :math:`i=1,...,n`
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return (self.endog - L)[:,None] * X
def score_factor(self, params):
"""
Poisson model score_factor for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : array_like
The score factor (nobs, ) of the model evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\lambda_{i}\\right)
for observations :math:`i=1,...,n`
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + offset + exposure)
return (self.endog - L)
def hessian(self, params):
"""
Poisson model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}x_{i}x_{i}^{\\prime}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + exposure + offset)
return -np.dot(L*X.T, X)
def hessian_factor(self, params):
"""
Poisson model Hessian factor
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (nobs,)
The Hessian factor, second derivative of loglikelihood function
with respect to the linear predictor evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i=1}^{n}\\lambda_{i}
where the loglinear model is assumed
.. math:: \\ln\\lambda_{i}=x_{i}\\beta
"""
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
X = self.exog
L = np.exp(np.dot(X,params) + exposure + offset)
return L
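# Hedged end-to-end sketch (not part of the library): simulate Poisson counts
# and recover the coefficients with the Poisson model defined above.
# ``_example_poisson_fit_sketch`` is a hypothetical helper for illustration.
def _example_poisson_fit_sketch(nobs=200, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.uniform(-1, 1, size=nobs)
    exog = np.column_stack([np.ones(nobs), x])
    endog = rng.poisson(np.exp(0.5 + 1.0 * x))
    res = Poisson(endog, exog).fit(disp=0)
    return res.params  # roughly [0.5, 1.0] for a large enough sample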
class GeneralizedPoisson(CountModel):
__doc__ = """
Generalized Poisson model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' :
"""
    p : scalar
        p denotes the parameterization used for GP regression: p=1 for GP-1 and
        p=2 for GP-2. Default is p=1.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc}
def __init__(self, endog, exog, p = 1, offset=None,
exposure=None, missing='none', **kwargs):
super(GeneralizedPoisson, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
self.parameterization = p - 1
self.exog_names.append('alpha')
self.k_extra = 1
self._transparams = False
def _get_init_kwds(self):
kwds = super(GeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.parameterization + 1
return kwds
def loglike(self, params):
"""
Loglikelihood of Generalized Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\mu_{i}+(y_{i}-1)*ln(\\mu_{i}+
\\alpha*\\mu_{i}^{p-1}*y_{i})-y_{i}*ln(1+\\alpha*\\mu_{i}^{p-1})-
ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha*\\mu_{i}^{p-1}*y_{i}}{1+\\alpha*
\\mu_{i}^{p-1}}\\right]
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generalized Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[\\mu_{i}+(y_{i}-1)*ln(\\mu_{i}+
\\alpha*\\mu_{i}^{p-1}*y_{i})-y_{i}*ln(1+\\alpha*\\mu_{i}^{p-1})-
ln(y_{i}!)-\\frac{\\mu_{i}+\\alpha*\\mu_{i}^{p-1}*y_{i}}{1+\\alpha*
\\mu_{i}^{p-1}}\\right]
for observations :math:`i=1,...,n`
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
endog = self.endog
mu = self.predict(params)
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + (a1 - 1) * endog
return (np.log(mu) + (endog - 1) * np.log(a2) - endog *
np.log(a1) - gammaln(endog + 1) - a2 / a1)
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
_get_start_params_null.__doc__ = _get_start_params_null_docs
def _estimate_dispersion(self, mu, resid, df_resid=None):
q = self.parameterization
if df_resid is None:
df_resid = resid.shape[0]
a = ((np.abs(resid) / np.sqrt(mu) - 1) * mu**(-q)).sum() / df_resid
return a
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None, use_transparams=False,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
# TODO: Fix doc string
"""
use_transparams : bool
            This parameter enables an internal transformation to impose
            non-negativity. True to enable. Default is False.
            use_transparams=True imposes the no-underdispersion (alpha > 0)
            constraint. If use_transparams=True and method is "newton" or
            "ncg", the transformation is ignored.
"""
if use_transparams and method not in ['newton', 'ncg']:
self._transparams = True
else:
if use_transparams:
warnings.warn('Parameter "use_transparams" is ignored',
RuntimeWarning)
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
            optim_kwds_prelim = {'disp': 0, 'skip_hessian': True,
'warn_convergence': False}
optim_kwds_prelim.update(kwargs.get('optim_kwds_prelim', {}))
mod_poi = Poisson(self.endog, self.exog, offset=offset)
res_poi = mod_poi.fit(**optim_kwds_prelim)
start_params = res_poi.params
a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
df_resid=res_poi.df_resid)
start_params = np.append(start_params, max(-0.1, a))
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GeneralizedPoisson, self).fit(start_params=start_params,
maxiter=maxiter, method=method, disp=disp,
full_output=full_output, callback=callback,
**kwargs)
if use_transparams and method not in ["newton", "ncg"]:
self._transparams = False
mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
gpfit = GeneralizedPoissonResults(self, mlefit._results)
result = GeneralizedPoissonResultsWrapper(gpfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
fit.__doc__ = DiscreteModel.fit.__doc__ + fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
        if np.size(alpha) == 1 and alpha != 0:
k_params = self.exog.shape[1] + self.k_extra
alpha = alpha * np.ones(k_params)
alpha[-1] = 0
alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
mod_poi = Poisson(self.endog, self.exog, offset=offset)
start_params = mod_poi.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(start_params, 0.1)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1GeneralizedPoissonResults(self, cntfit)
return L1GeneralizedPoissonResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def score_obs(self, params):
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
a3 = alpha * p * mu ** (p - 1)
a4 = a3 * y
dmudb = mu * exog
dalpha = (mu_p * (y * ((y - 1) / a2 - 2 / a1) + a2 / a1**2))
dparams = dmudb * (-a4 / a1 +
a3 * a2 / (a1 ** 2) +
(1 + a4) * ((y - 1) / a2 - 1 / a1) +
1 / mu)
return np.concatenate((dparams, np.atleast_2d(dalpha)),
axis=1)
def score(self, params):
score = np.sum(self.score_obs(params), axis=0)
if self._transparams:
            score[-1] = score[-1] ** 2
return score
else:
return score
def _score_p(self, params):
"""
Generalized Poisson model derivative of the log-likelihood by p-parameter
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
dldp : float
dldp is first derivative of the loglikelihood function,
evaluated at `p-parameter`.
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
dp = np.sum((np.log(mu) * ((a2 - mu) * ((y - 1) / a2 - 2 / a1) +
(a1 - 1) * a2 / a1 ** 2)))
return dp
def hessian(self, params):
"""
Generalized Poisson model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
mu_p = np.power(mu, p)
a1 = 1 + alpha * mu_p
a2 = mu + alpha * mu_p * y
a3 = alpha * p * mu ** (p - 1)
a4 = a3 * y
a5 = p * mu ** (p - 1)
dmudb = mu * exog
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
for i in range(dim):
for j in range(i + 1):
hess_arr[i,j] = np.sum(mu * exog[:,i,None] * exog[:,j,None] *
(mu * (a3 * a4 / a1**2 -
2 * a3**2 * a2 / a1**3 +
2 * a3 * (a4 + 1) / a1**2 -
a4 * p / (mu * a1) +
a3 * p * a2 / (mu * a1**2) +
(y - 1) * a4 * (p - 1) / (a2 * mu) -
(y - 1) * (1 + a4)**2 / a2**2 -
a4 * (p - 1) / (a1 * mu)) +
((y - 1) * (1 + a4) / a2 -
(1 + a4) / a1)), axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
dldpda = np.sum((2 * a4 * mu_p / a1**2 -
2 * a3 * mu_p * a2 / a1**3 -
mu_p * y * (y - 1) * (1 + a4) / a2**2 +
mu_p * (1 + a4) / a1**2 +
a5 * y * (y - 1) / a2 -
2 * a5 * y / a1 +
a5 * a2 / a1**2) * dmudb,
axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
# for dl/dalpha dalpha
dldada = mu_p**2 * (3 * y / a1**2 -
(y / a2)**2. * (y - 1) -
2 * a2 / a1**3)
hess_arr[-1,-1] = dldada.sum()
return hess_arr
def predict(self, params, exog=None, exposure=None, offset=None,
which='mean'):
"""
Predict response variable of a count model given exogenous variables.
Notes
-----
If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
"""
if exog is None:
exog = self.exog
if exposure is None:
exposure = getattr(self, 'exposure', 0)
        elif exposure != 0:
exposure = np.log(exposure)
if offset is None:
offset = getattr(self, 'offset', 0)
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
        if which == 'mean':
return np.exp(linpred)
elif which == 'linear':
return linpred
        elif which == 'prob':
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
mu = self.predict(params, exog=exog, exposure=exposure,
offset=offset)[:,None]
return genpoisson_p.pmf(counts, mu, params[-1],
self.parameterization + 1)
else:
raise ValueError('keyword \'which\' not recognized')
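# Sketch (illustration only): ``GeneralizedPoisson.predict`` supports
# which='mean', 'linear' and 'prob'; the mean is the exponential of the linear
# predictor, which this hypothetical helper checks.
def _example_gp_predict_identity_sketch(model, params):
    mean = model.predict(params, which='mean')
    linear = model.predict(params, which='linear')
    return np.allclose(mean, np.exp(linear))  # True by construction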
class Logit(BinaryModel):
__doc__ = """
Binary choice logit model
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def cdf(self, X):
"""
The logistic cumulative distribution function
Parameters
----------
X : array_like
`X` is the linear predictor of the logit model. See notes.
Returns
-------
1/(1 + exp(-X))
Notes
-----
In the logit model,
.. math:: \\Lambda\\left(x^{\\prime}\\beta\\right)=
\\text{Prob}\\left(Y=1|x\\right)=
\\frac{e^{x^{\\prime}\\beta}}{1+e^{x^{\\prime}\\beta}}
"""
X = np.asarray(X)
return 1/(1+np.exp(-X))
def pdf(self, X):
"""
The logistic probability density function
Parameters
----------
X : array_like
`X` is the linear predictor of the logit model. See notes.
Returns
-------
pdf : ndarray
            The value of the Logit probability density function, PDF, for each
            point of X. ``np.exp(-X)/(1+np.exp(-X))**2``
Notes
-----
In the logit model,
.. math:: \\lambda\\left(x^{\\prime}\\beta\\right)=\\frac{e^{-x^{\\prime}\\beta}}{\\left(1+e^{-x^{\\prime}\\beta}\\right)^{2}}
"""
X = np.asarray(X)
return np.exp(-X)/(1+np.exp(-X))**2
def loglike(self, params):
"""
Log-likelihood of logit model.
Parameters
----------
params : array_like
The parameters of the logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math::
\\ln L=\\sum_{i}\\ln\\Lambda
\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
Where :math:`q=2y-1`. This simplification comes from the fact that the
logistic distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.sum(np.log(self.cdf(q*np.dot(X,params))))
def loglikeobs(self, params):
"""
Log-likelihood of logit model for each observation.
Parameters
----------
params : array_like
The parameters of the logit model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math::
\\ln L=\\sum_{i}\\ln\\Lambda
\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
for observations :math:`i=1,...,n`
where :math:`q=2y-1`. This simplification comes from the fact that the
logistic distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.log(self.cdf(q*np.dot(X,params)))
def score(self, params):
"""
Logit model score (gradient) vector of the log-likelihood
Parameters
----------
params: array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
"""
y = self.endog
X = self.exog
L = self.cdf(np.dot(X,params))
return np.dot(y - L,X)
def score_obs(self, params):
"""
Logit model Jacobian of the log-likelihood for each observation
Parameters
----------
params: array_like
The parameters of the model
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left(y_{i}-\\Lambda_{i}\\right)x_{i}
for observations :math:`i=1,...,n`
"""
y = self.endog
X = self.exog
L = self.cdf(np.dot(X, params))
return (y - L)[:,None] * X
def hessian(self, params):
"""
Logit model Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\sum_{i}\\Lambda_{i}\\left(1-\\Lambda_{i}\\right)x_{i}x_{i}^{\\prime}
"""
X = self.exog
L = self.cdf(np.dot(X,params))
return -np.dot(L*(1-L)*X.T,X)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
bnryfit = super(Logit, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
discretefit = LogitResults(self, bnryfit)
return BinaryResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
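# Hedged sketch (not part of the library): simulate binary outcomes and fit the
# Logit model defined above. ``_example_logit_fit_sketch`` is a hypothetical
# helper for illustration.
def _example_logit_fit_sketch(nobs=500, seed=0):
    rng = np.random.RandomState(seed)
    x = rng.normal(size=nobs)
    exog = np.column_stack([np.ones(nobs), x])
    prob = 1.0 / (1.0 + np.exp(-(0.25 + 0.8 * x)))
    endog = rng.binomial(1, prob)
    res = Logit(endog, exog).fit(disp=0)
    return res.params  # roughly [0.25, 0.8] for a large enough sample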
class Probit(BinaryModel):
__doc__ = """
Binary choice Probit model
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : base._missing_param_doc}
def cdf(self, X):
"""
Probit (Normal) cumulative distribution function
Parameters
----------
X : array_like
The linear predictor of the model (XB).
Returns
-------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
This function is just an alias for scipy.stats.norm.cdf
"""
return stats.norm._cdf(X)
def pdf(self, X):
"""
Probit (Normal) probability density function
Parameters
----------
X : array_like
The linear predictor of the model (XB).
Returns
-------
pdf : ndarray
The value of the normal density function for each point of X.
Notes
-----
This function is just an alias for scipy.stats.norm.pdf
"""
X = np.asarray(X)
return stats.norm._pdf(X)
def loglike(self, params):
"""
Log-likelihood of probit model (i.e., the normal distribution).
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{i}\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.sum(np.log(np.clip(self.cdf(q*np.dot(X,params)),
FLOAT_EPS, 1)))
def loglikeobs(self, params):
"""
Log-likelihood of probit model for each observation
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)
for observations :math:`i=1,...,n`
where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
q = 2*self.endog - 1
X = self.exog
return np.log(np.clip(self.cdf(q*np.dot(X,params)), FLOAT_EPS, 1))
def score(self, params):
"""
Probit model score (gradient) vector
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta}=\\sum_{i=1}^{n}\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
y = self.endog
X = self.exog
XB = np.dot(X,params)
q = 2*y - 1
# clip to get rid of invalid divide complaint
L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
return np.dot(L,X)
def score_obs(self, params):
"""
Probit model Jacobian for each observation
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta}=\\left[\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}\\right]x_{i}
for observations :math:`i=1,...,n`
Where :math:`q=2y-1`. This simplification comes from the fact that the
normal distribution is symmetric.
"""
y = self.endog
X = self.exog
XB = np.dot(X,params)
q = 2*y - 1
# clip to get rid of invalid divide complaint
L = q*self.pdf(q*XB)/np.clip(self.cdf(q*XB), FLOAT_EPS, 1 - FLOAT_EPS)
return L[:,None] * X
def hessian(self, params):
"""
Probit model Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta\\partial\\beta^{\\prime}}=-\\lambda_{i}\\left(\\lambda_{i}+x_{i}^{\\prime}\\beta\\right)x_{i}x_{i}^{\\prime}
where
.. math:: \\lambda_{i}=\\frac{q_{i}\\phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}{\\Phi\\left(q_{i}x_{i}^{\\prime}\\beta\\right)}
and :math:`q=2y-1`
"""
X = self.exog
XB = np.dot(X,params)
q = 2*self.endog - 1
L = q*self.pdf(q*XB)/self.cdf(q*XB)
return np.dot(-L*(L+XB)*X.T,X)
def fit(self, start_params=None, method='newton', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
bnryfit = super(Probit, self).fit(start_params=start_params,
method=method, maxiter=maxiter, full_output=full_output,
disp=disp, callback=callback, **kwargs)
discretefit = ProbitResults(self, bnryfit)
return BinaryResultsWrapper(discretefit)
fit.__doc__ = DiscreteModel.fit.__doc__
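# Sketch (illustration only): Probit exposes the same fit() interface as Logit;
# only the cdf/pdf (the link) differ. Hypothetical helper comparing the two.
def _example_probit_vs_logit_sketch(endog, exog):
    logit_params = Logit(endog, exog).fit(disp=0).params
    probit_params = Probit(endog, exog).fit(disp=0).params
    return logit_params, probit_params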
class MNLogit(MultinomialModel):
__doc__ = """
Multinomial logit model
Parameters
----------
endog : array_like
`endog` is an 1-d vector of the endogenous response. `endog` can
contain strings, ints, or floats. Note that if it contains strings,
every distinct string will be a category. No stripping of whitespace
is done.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user. See `statsmodels.tools.add_constant`.
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
J : float
The number of choices for the endogenous variable. Note that this
is zero-indexed.
K : float
The actual number of parameters for the exogenous design. Includes
the constant if the design has one.
names : dict
A dictionary mapping the column number in `wendog` to the variables
in `endog`.
wendog : array
An n x j array where j is the number of unique categories in `endog`.
Each column of j is a dummy variable indicating the category of
each observation. See `names` for a dictionary mapping each column to
its category.
Notes
-----
See developer notes for further information on `MNLogit` internals.
""" % {'extra_params' : base._missing_param_doc}
def pdf(self, eXB):
"""
NotImplemented
"""
raise NotImplementedError
def cdf(self, X):
"""
Multinomial logit cumulative distribution function.
Parameters
----------
X : array
The linear predictor of the model XB.
Returns
-------
cdf : ndarray
The cdf evaluated at `X`.
Notes
-----
In the multinomial logit model.
.. math:: \\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}
"""
eXB = np.column_stack((np.ones(len(X)), np.exp(X)))
return eXB/eXB.sum(1)[:,None]
def loglike(self, params):
"""
Log-likelihood of the multinomial logit model.
Parameters
----------
params : array_like
The parameters of the multinomial logit model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math::
\\ln L=\\sum_{i=1}^{n}\\sum_{j=0}^{J}d_{ij}\\ln
\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
{\\sum_{k=0}^{J}
\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
params = params.reshape(self.K, -1, order='F')
d = self.wendog
logprob = np.log(self.cdf(np.dot(self.exog,params)))
return np.sum(d * logprob)
def loglikeobs(self, params):
"""
Log-likelihood of the multinomial logit model for each observation.
Parameters
----------
params : array_like
The parameters of the multinomial logit model.
Returns
-------
loglike : array_like
The log likelihood for each observation of the model evaluated
at `params`. See Notes
Notes
-----
.. math::
\\ln L_{i}=\\sum_{j=0}^{J}d_{ij}\\ln
\\left(\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}
{\\sum_{k=0}^{J}
\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)
for observations :math:`i=1,...,n`
where :math:`d_{ij}=1` if individual `i` chose alternative `j` and 0
if not.
"""
params = params.reshape(self.K, -1, order='F')
d = self.wendog
logprob = np.log(self.cdf(np.dot(self.exog,params)))
return d * logprob
def score(self, params):
"""
Score matrix for multinomial logit model log-likelihood
Parameters
----------
params : array
The parameters of the multinomial logit model.
Returns
-------
score : ndarray, (K * (J-1),)
The 2-d score vector, i.e. the first derivative of the
loglikelihood function, of the multinomial logit model evaluated at
`params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L}{\\partial\\beta_{j}}=\\sum_{i}\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`
In the multinomial model the score matrix is K x J-1 but is returned
as a flattened array to work with the solvers.
"""
params = params.reshape(self.K, -1, order='F')
firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
params))[:,1:]
#NOTE: might need to switch terms if params is reshaped
return np.dot(firstterm.T, self.exog).flatten()
def loglike_and_score(self, params):
"""
Returns log likelihood and score, efficiently reusing calculations.
Note that both of these returned quantities will need to be negated
before being minimized by the maximum likelihood fitting machinery.
"""
params = params.reshape(self.K, -1, order='F')
cdf_dot_exog_params = self.cdf(np.dot(self.exog, params))
loglike_value = np.sum(self.wendog * np.log(cdf_dot_exog_params))
firstterm = self.wendog[:, 1:] - cdf_dot_exog_params[:, 1:]
score_array = np.dot(firstterm.T, self.exog).flatten()
return loglike_value, score_array
def score_obs(self, params):
"""
Jacobian matrix for multinomial logit model log-likelihood
Parameters
----------
params : array
The parameters of the multinomial logit model.
Returns
-------
jac : array_like
The derivative of the loglikelihood for each observation evaluated
at `params`.
Notes
-----
.. math:: \\frac{\\partial\\ln L_{i}}{\\partial\\beta_{j}}=\\left(d_{ij}-\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right)x_{i}
for :math:`j=1,...,J`, for observations :math:`i=1,...,n`
In the multinomial model the score vector is K x (J-1) but is returned
as a flattened array. The Jacobian has the observations in rows and
        the flattened array of derivatives in columns.
"""
params = params.reshape(self.K, -1, order='F')
firstterm = self.wendog[:,1:] - self.cdf(np.dot(self.exog,
params))[:,1:]
#NOTE: might need to switch terms if params is reshaped
return (firstterm[:,:,None] * self.exog[:,None,:]).reshape(self.exog.shape[0], -1)
def hessian(self, params):
"""
Multinomial logit Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (J*K, J*K)
The Hessian, second derivative of loglikelihood function with
respect to the flattened parameters, evaluated at `params`
Notes
-----
.. math:: \\frac{\\partial^{2}\\ln L}{\\partial\\beta_{j}\\partial\\beta_{l}}=-\\sum_{i=1}^{n}\\frac{\\exp\\left(\\beta_{j}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\left[\\boldsymbol{1}\\left(j=l\\right)-\\frac{\\exp\\left(\\beta_{l}^{\\prime}x_{i}\\right)}{\\sum_{k=0}^{J}\\exp\\left(\\beta_{k}^{\\prime}x_{i}\\right)}\\right]x_{i}x_{l}^{\\prime}
where
:math:`\\boldsymbol{1}\\left(j=l\\right)` equals 1 if `j` = `l` and 0
otherwise.
The Hessian is block-structured with (J-1)**2 blocks of shape K x K. It
is reshaped to be square ((J-1)*K, (J-1)*K) so that the solvers can use it.
This implementation does not take advantage of the symmetry of
the Hessian and could probably be refactored for speed.
"""
params = params.reshape(self.K, -1, order='F')
X = self.exog
pr = self.cdf(np.dot(X,params))
partials = []
J = self.J
K = self.K
for i in range(J-1):
for j in range(J-1): # this loop assumes we drop the first col.
if i == j:
partials.append(\
-np.dot(((pr[:,i+1]*(1-pr[:,j+1]))[:,None]*X).T,X))
else:
partials.append(-np.dot(((pr[:,i+1]*-pr[:,j+1])[:,None]*X).T,X))
H = np.array(partials)
# the developer's notes on multinomial should clear this math up
H = np.transpose(H.reshape(J-1, J-1, K, K), (0, 2, 1, 3)).reshape((J-1)*K, (J-1)*K)
return H
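# Illustrative aside (not part of the original module): the loglike/score/
# hessian methods above implement the multinomial logit likelihood, which
# statsmodels exposes through the MNLogit model. A minimal, hedged usage
# sketch on synthetic data (names and data are assumptions for illustration):
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(0)
#     x = sm.add_constant(rng.normal(size=(500, 2)))
#     y = rng.integers(0, 3, size=500)      # 3 alternatives, so J = 3
#     res = sm.MNLogit(y, x).fit(disp=0)
#     print(res.params.shape)               # K x (J - 1) coefficient matrix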
#TODO: Weibull can be replaced by a survival analysis function
# like Stata's streg (the Cox model as well)
#class Weibull(DiscreteModel):
# """
# Binary choice Weibull model
#
# Notes
# ------
# This is unfinished and untested.
# """
##TODO: add analytic hessian for Weibull
# def initialize(self):
# pass
#
# def cdf(self, X):
# """
# Gumbell (Log Weibull) cumulative distribution function
# """
## return np.exp(-np.exp(-X))
# return stats.gumbel_r.cdf(X)
# # these two are equivalent.
# # Greene table and discussion is incorrect.
#
# def pdf(self, X):
# """
# Gumbell (LogWeibull) probability distribution function
# """
# return stats.gumbel_r.pdf(X)
#
# def loglike(self, params):
# """
# Loglikelihood of Weibull distribution
# """
# X = self.exog
# cdf = self.cdf(np.dot(X,params))
# y = self.endog
# return np.sum(y*np.log(cdf) + (1-y)*np.log(1-cdf))
#
# def score(self, params):
# y = self.endog
# X = self.exog
# F = self.cdf(np.dot(X,params))
# f = self.pdf(np.dot(X,params))
# term = (y*f/F + (1 - y)*-f/(1-F))
# return np.dot(term,X)
#
# def hessian(self, params):
# hess = nd.Jacobian(self.score)
# return hess(params)
#
# def fit(self, start_params=None, method='newton', maxiter=35, tol=1e-08):
## The example had problems with all zero start values, Hessian = 0
# if start_params is None:
# start_params = OLS(self.endog, self.exog).fit().params
# mlefit = super(Weibull, self).fit(start_params=start_params,
# method=method, maxiter=maxiter, tol=tol)
# return mlefit
#
class NegativeBinomial(CountModel):
__doc__ = """
Negative Binomial Model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
References
----------
Greene, W. 2008. "Functional forms for the negative binomial model
for count data". Economics Letters. Volume 99, Number 3, pp.585-590.
Hilbe, J.M. 2011. "Negative binomial regression". Cambridge University
Press.
""" % {'params': base._model_params_doc,
'extra_params':
"""loglike_method : string
Log-likelihood type. 'nb2','nb1', or 'geometric'.
Fitted value :math:`\\mu`
Heterogeneity parameter :math:`\\alpha`
- nb2: Variance equal to :math:`\\mu + \\alpha\\mu^2` (most common)
- nb1: Variance equal to :math:`\\mu + \\alpha\\mu`
- geometric: Variance equal to :math:`\\mu + \\mu^2`
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc}
def __init__(self, endog, exog, loglike_method='nb2', offset=None,
exposure=None, missing='none', **kwargs):
super(NegativeBinomial, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
self.loglike_method = loglike_method
self._initialize()
if loglike_method in ['nb2', 'nb1']:
self.exog_names.append('alpha')
self.k_extra = 1
else:
self.k_extra = 0
# store keys for extras if we need to recreate model instance
# we need to append keys that don't go to super
self._init_keys.append('loglike_method')
def _initialize(self):
if self.loglike_method == 'nb2':
self.hessian = self._hessian_nb2
self.score = self._score_nbin
self.loglikeobs = self._ll_nb2
self._transparams = True # transform lnalpha -> alpha in fit
elif self.loglike_method == 'nb1':
self.hessian = self._hessian_nb1
self.score = self._score_nb1
self.loglikeobs = self._ll_nb1
self._transparams = True # transform lnalpha -> alpha in fit
elif self.loglike_method == 'geometric':
self.hessian = self._hessian_geom
self.score = self._score_geom
self.loglikeobs = self._ll_geometric
else:
raise ValueError('Likelihood type must be "nb1", "nb2" '
'or "geometric"')
# Workaround to pickle instance methods
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
del odict['hessian']
del odict['score']
del odict['loglikeobs']
return odict
def __setstate__(self, indict):
self.__dict__.update(indict)
self._initialize()
def _ll_nbin(self, params, alpha, Q=0):
if np.any(np.iscomplex(params)) or np.iscomplex(alpha):
gamma_ln = loggamma
else:
gamma_ln = gammaln
endog = self.endog
mu = self.predict(params)
size = 1/alpha * mu**Q
prob = size/(size+mu)
coeff = (gamma_ln(size+endog) - gamma_ln(endog+1) -
gamma_ln(size))
llf = coeff + size*np.log(prob) + endog*np.log(1-prob)
return llf
def _ll_nb2(self, params):
if self._transparams: # got lnalpha during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
return self._ll_nbin(params[:-1], alpha, Q=0)
def _ll_nb1(self, params):
if self._transparams: # got lnalpha during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
return self._ll_nbin(params[:-1], alpha, Q=1)
def _ll_geometric(self, params):
# the geometric model is the NB2 model with the dispersion alpha fixed at 1
return self._ll_nbin(params, 1, 0)
def loglike(self, params):
r"""
Loglikelihood for negative binomial model
Parameters
----------
params : array_like
The parameters of the model. If `loglike_method` is nb1 or
nb2, then the ancillary parameter is expected to be the
last element.
Returns
-------
llf : float
The loglikelihood value at `params`
Notes
-----
Following notation in Greene (2008), with negative binomial
heterogeneity parameter :math:`\alpha`:
.. math::
\lambda_i &= exp(X\beta) \\
\theta &= 1 / \alpha \\
g_i &= \theta \lambda_i^Q \\
w_i &= g_i/(g_i + \lambda_i) \\
r_i &= \theta / (\theta+\lambda_i) \\
ln \mathcal{L}_i &= ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)
where :math:`Q=0` for NB2 and geometric and :math:`Q=1` for NB1. For
the geometric model, :math:`\alpha` is fixed at 1.
"""
llf = np.sum(self.loglikeobs(params))
return llf
def _score_geom(self, params):
exog = self.exog
y = self.endog[:, None]
mu = self.predict(params)[:, None]
dparams = exog * (y-mu)/(mu+1)
return dparams.sum(0)
def _score_nbin(self, params, Q=0):
"""
Score vector for NB2 model
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
a1 = 1/alpha * mu**Q
prob = a1 / (a1 + mu) # a1 aka "size" in _ll_nbin
if Q == 1: # nb1
# Q == 1 --> a1 = mu / alpha --> prob = 1 / (alpha + 1)
dgpart = digamma(y + a1) - digamma(a1)
dparams = exog * a1 * (np.log(prob) +
dgpart)
dalpha = ((alpha * (y - mu * np.log(prob) -
mu*(dgpart + 1)) -
mu * (np.log(prob) +
dgpart))/
(alpha**2*(alpha + 1))).sum()
elif Q == 0: # nb2
dgpart = digamma(y + a1) - digamma(a1)
dparams = exog*a1 * (y-mu)/(mu+a1)
da1 = -alpha**-2
dalpha = (dgpart + np.log(a1)
- np.log(a1+mu) - (y-mu)/(a1+mu)).sum() * da1
#multiply above by constant outside sum to reduce rounding error
if self._transparams:
return np.r_[dparams.sum(0), dalpha*alpha]
else:
return np.r_[dparams.sum(0), dalpha]
def _score_nb1(self, params):
return self._score_nbin(params, Q=1)
def _hessian_geom(self, params):
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim, dim))
const_arr = mu*(1+y)/(mu+1)**2
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
const_arr, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def _hessian_nb1(self, params):
"""
Hessian of NB1 model.
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
a1 = mu/alpha
dgpart = digamma(y + a1) - digamma(a1)
prob = 1 / (1 + alpha) # equiv: a1 / (a1 + mu)
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
#const_arr = a1*mu*(a1+y)/(mu+a1)**2
# not all of dparams
dparams = exog / alpha * (np.log(prob) +
dgpart)
dmudb = exog*mu
xmu_alpha = exog * a1
trigamma = (special.polygamma(1, a1 + y) -
special.polygamma(1, a1))
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(dparams[:,i,None] * dmudb[:,j,None] +
xmu_alpha[:,i,None] * xmu_alpha[:,j,None] *
trigamma, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
da1 = -alpha**-2
dldpda = np.sum(-a1 * dparams + exog * a1 *
(-trigamma*mu/alpha**2 - prob), axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
log_alpha = np.log(prob)
alpha3 = alpha**3
alpha2 = alpha**2
mu2 = mu**2
dada = ((alpha3*mu*(2*log_alpha + 2*dgpart + 3) -
2*alpha3*y +
4*alpha2*mu*(log_alpha + dgpart) +
alpha2 * (2*mu - y) +
2*alpha*mu2*trigamma + mu2 * trigamma + alpha2 * mu2 * trigamma +
2*alpha*mu*(log_alpha + dgpart)
)/(alpha**4*(alpha2 + 2*alpha + 1)))
hess_arr[-1,-1] = dada.sum()
return hess_arr
def _hessian_nb2(self, params):
"""
Hessian of NB2 model.
"""
if self._transparams: # lnalpha came in during fit
alpha = np.exp(params[-1])
else:
alpha = params[-1]
a1 = 1/alpha
params = params[:-1]
exog = self.exog
y = self.endog[:,None]
mu = self.predict(params)[:,None]
prob = a1 / (a1 + mu)
dgpart = digamma(a1 + y) - digamma(a1)
# for dl/dparams dparams
dim = exog.shape[1]
hess_arr = np.empty((dim+1,dim+1))
const_arr = a1*mu*(a1+y)/(mu+a1)**2
for i in range(dim):
for j in range(dim):
if j > i:
continue
hess_arr[i,j] = np.sum(-exog[:,i,None] * exog[:,j,None] *
const_arr, axis=0)
tri_idx = np.triu_indices(dim, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
# for dl/dparams dalpha
da1 = -alpha**-2
dldpda = -np.sum(mu*exog*(y-mu)*a1**2/(mu+a1)**2, axis=0)
hess_arr[-1,:-1] = dldpda
hess_arr[:-1,-1] = dldpda
# for dl/dalpha dalpha
#NOTE: polygamma(1,x) is the trigamma function
da2 = 2*alpha**-3
dalpha = da1 * (dgpart +
np.log(prob) - (y - mu)/(a1+mu))
dada = (da2 * dalpha/da1 + da1**2 * (special.polygamma(1, a1+y) -
special.polygamma(1, a1) + 1/a1 - 1/(a1 + mu) +
(y - mu)/(mu + a1)**2)).sum()
hess_arr[-1,-1] = dada
return hess_arr
#TODO: replace this with analytic where is it used?
def score_obs(self, params):
sc = approx_fprime_cs(params, self.loglikeobs)
return sc
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
_get_start_params_null.__doc__ = _get_start_params_null_docs
def _estimate_dispersion(self, mu, resid, df_resid=None):
if df_resid is None:
df_resid = resid.shape[0]
if self.loglike_method == 'nb2':
#params.append(np.linalg.pinv(mu[:,None]).dot(resid**2 / mu - 1))
a = ((resid**2 / mu - 1) / mu).sum() / df_resid
else: #self.loglike_method == 'nb1':
a = (resid**2 / mu - 1).sum() / df_resid
return a
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
# Note: don't let super handle robust covariance because it has
# transformed params
self._transparams = False # always define attribute
if self.loglike_method.startswith('nb') and method not in ['newton',
'ncg']:
self._transparams = True # in case same Model instance is refit
elif self.loglike_method.startswith('nb'): # method is newton/ncg
self._transparams = False # because we need to step in alpha space
if start_params is None:
# Use poisson fit as first guess.
#TODO, Warning: this assumes exposure is logged
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
optim_kwds_prelim = {'disp': 0,'skip_hessian': True,
'warn_convergence': False}
optim_kwds_prelim.update(kwargs.get('optim_kwds_prelim', {}))
mod_poi = Poisson(self.endog, self.exog, offset=offset)
res_poi = mod_poi.fit(**optim_kwds_prelim)
start_params = res_poi.params
if self.loglike_method.startswith('nb'):
a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
df_resid=res_poi.df_resid)
start_params = np.append(start_params, max(0.05, a))
else:
if self._transparams is True:
# transform user provided start_params dispersion, see #3918
start_params = np.array(start_params, copy=True)
start_params[-1] = np.log(start_params[-1])
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(NegativeBinomial, self).fit(start_params=start_params,
maxiter=maxiter, method=method, disp=disp,
full_output=full_output, callback=callback,
**kwargs)
# TODO: Fix NBin _check_perfect_pred
if self.loglike_method.startswith('nb'):
# mlefit is a wrapped counts results
self._transparams = False # don't need to transform anymore now
# change from lnalpha to alpha
if method not in ["newton", "ncg"]:
mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
nbinfit = NegativeBinomialResults(self, mlefit._results)
result = NegativeBinomialResultsWrapper(nbinfit)
else:
result = mlefit
if cov_kwds is None:
cov_kwds = {} #TODO: make this unnecessary?
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if self.loglike_method.startswith('nb') and (np.size(alpha) == 1 and
alpha!= 0):
# don't penalize alpha if alpha is scalar
k_params = self.exog.shape[1] + self.k_extra
alpha = alpha * np.ones(k_params)
alpha[-1] = 0
# alpha for regularized poisson to get starting values
alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
self._transparams = False
if start_params is None:
# Use poisson fit as first guess.
#TODO, Warning: this assumes exposure is logged
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
mod_poi = Poisson(self.endog, self.exog, offset=offset)
start_params = mod_poi.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
if self.loglike_method.startswith('nb'):
start_params = np.append(start_params, 0.1)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1NegativeBinomialResults(self, cntfit)
return L1NegativeBinomialResultsWrapper(discretefit)
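# Illustrative aside (not part of the original module): a hedged usage sketch
# of the NegativeBinomial model above, which supports the 'nb2', 'nb1' and
# 'geometric' variance forms described in its docstring. The simulated data
# below are an assumption for illustration; the dispersion alpha is reported
# as the last fitted parameter:
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(42)
#     x = sm.add_constant(rng.normal(size=(1000, 1)))
#     mu = np.exp(0.5 + 0.3 * x[:, 1])
#     y = rng.negative_binomial(2.0, 2.0 / (2.0 + mu))   # NB2 data, alpha = 0.5
#     res = sm.NegativeBinomial(y, x, loglike_method='nb2').fit(disp=0)
#     print(res.params[-1])                              # estimate of alpha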
class NegativeBinomialP(CountModel):
__doc__ = """
Generalized Negative Binomial (NB-P) model for count data
%(params)s
%(extra_params)s
Attributes
----------
endog : array
A reference to the endogenous response variable
exog : array
A reference to the exogenous design.
p : scalar
P denotes parameterizations for NB-P regression. p=1 for NB-1 and
p=2 for NB-2. Default is p=2.
""" % {'params' : base._model_params_doc,
'extra_params' :
"""p: scalar
P denotes parameterizations for NB regression. p=1 for NB-1 and
p=2 for NB-2. Default is p=2.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
""" + base._missing_param_doc}
def __init__(self, endog, exog, p=2, offset=None,
exposure=None, missing='none', **kwargs):
super(NegativeBinomialP, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
self.parameterization = p
self.exog_names.append('alpha')
self.k_extra = 1
self._transparams = False
def _get_init_kwds(self):
kwds = super(NegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.parameterization
return kwds
def loglike(self, params):
"""
Loglikelihood of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generalized Negative Binomial (NB-P) model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = self.parameterization
y = self.endog
mu = self.predict(params)
mu_p = mu**(2 - p)
a1 = mu_p / alpha
a2 = mu + a1
llf = (gammaln(y + a1) - gammaln(y + 1) - gammaln(a1) +
a1 * np.log(a1) + y * np.log(mu) -
(y + a1) * np.log(a2))
return llf
def score_obs(self, params):
"""
Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood for each observation.
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 2-D
The score (first derivative of the loglikelihood function) for each
observation, evaluated at `params`
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = 2 - self.parameterization
y = self.endog
mu = self.predict(params)
mu_p = mu**p
a1 = mu_p / alpha
a2 = mu + a1
a3 = y + a1
a4 = p * a1 / mu
dgpart = digamma(a3) - digamma(a1)
dgterm = dgpart + np.log(a1 / a2) + 1 - a3 / a2
# TODO: better name/interpretation for dgterm?
dparams = (a4 * dgterm -
a3 / a2 +
y / mu)
dparams = (self.exog.T * mu * dparams).T
dalpha = -a1 / alpha * dgterm
return np.concatenate((dparams, np.atleast_2d(dalpha).T),
axis=1)
def score(self, params):
"""
Generalized Negative Binomial (NB-P) model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
score = np.sum(self.score_obs(params), axis=0)
if self._transparams:
# NOTE: `==` below is a comparison, not an assignment, so this line is a
# no-op; presumably a chain-rule adjustment for the log(alpha)
# transformation (multiplying the last score entry by alpha) was intended.
score[-1] == score[-1] ** 2
return score
else:
return score
def hessian(self, params):
"""
Generalized Negative Binomial (NB-P) model Hessian matrix of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hessian : ndarray, 2-D
The hessian matrix of the model.
"""
if self._transparams:
alpha = np.exp(params[-1])
else:
alpha = params[-1]
params = params[:-1]
p = 2 - self.parameterization
y = self.endog
exog = self.exog
mu = self.predict(params)
mu_p = mu**p
a1 = mu_p / alpha
a2 = mu + a1
a3 = y + a1
a4 = p * a1 / mu
prob = a1 / a2
lprob = np.log(prob)
dgpart = digamma(a3) - digamma(a1)
pgpart = polygamma(1, a3) - polygamma(1, a1)
dim = exog.shape[1]
hess_arr = np.zeros((dim + 1, dim + 1))
coeff = mu**2 * (((1 + a4)**2 * a3 / a2**2 -
a3 / a2 * (p - 1) * a4 / mu -
y / mu**2 -
2 * a4 * (1 + a4) / a2 +
p * a4 / mu * (lprob + dgpart + 2) -
a4 / mu * (lprob + dgpart + 1) +
a4**2 * pgpart) +
(-(1 + a4) * a3 / a2 +
y / mu +
a4 * (lprob + dgpart + 1)) / mu)
for i in range(dim):
hess_arr[i, :-1] = np.sum(self.exog[:, :].T * self.exog[:, i] * coeff, axis=1)
hess_arr[-1,:-1] = (self.exog[:, :].T * mu * a1 *
((1 + a4) * (1 - a3 / a2) / a2 -
p * (lprob + dgpart + 2) / mu +
p / mu * (a3 + p * a1) / a2 -
a4 * pgpart) / alpha).sum(axis=1)
da2 = (a1 * (2 * lprob +
2 * dgpart + 3 -
2 * a3 / a2
+ a1 * pgpart
- 2 * prob +
prob * a3 / a2) / alpha**2)
hess_arr[-1, -1] = da2.sum()
tri_idx = np.triu_indices(dim + 1, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def _get_start_params_null(self):
offset = getattr(self, "offset", 0)
exposure = getattr(self, "exposure", 0)
q = self.parameterization - 1
const = (self.endog / np.exp(offset + exposure)).mean()
params = [np.log(const)]
mu = const * np.exp(offset + exposure)
resid = self.endog - mu
a = self._estimate_dispersion(mu, resid, df_resid=resid.shape[0] - 1)
params.append(a)
return np.array(params)
_get_start_params_null.__doc__ = _get_start_params_null_docs
def _estimate_dispersion(self, mu, resid, df_resid=None):
q = self.parameterization - 1
if df_resid is None:
df_resid = resid.shape[0]
a = ((resid**2 / mu - 1) * mu**(-q)).sum() / df_resid
return a
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None, use_transparams=False,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
# TODO: Fix doc string
"""
use_transparams : bool
This parameter enables an internal transformation to impose
non-negativity. True to enable. Default is False.
use_transparams=True imposes the no-underdispersion (alpha > 0)
constraint. If use_transparams=True and method is "newton" or
"ncg", the transformation is ignored.
"""
if use_transparams and method not in ['newton', 'ncg']:
self._transparams = True
else:
if use_transparams:
warnings.warn('Parameter "use_transparams" is ignored',
RuntimeWarning)
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
optim_kwds_prelim = {'disp': 0,'skip_hessian': True,
'warn_convergence': False}
optim_kwds_prelim.update(kwargs.get('optim_kwds_prelim', {}))
mod_poi = Poisson(self.endog, self.exog, offset=offset)
res_poi = mod_poi.fit(**optim_kwds_prelim)
start_params = res_poi.params
a = self._estimate_dispersion(res_poi.predict(), res_poi.resid,
df_resid=res_poi.df_resid)
start_params = np.append(start_params, max(0.05, a))
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(NegativeBinomialP, self).fit(start_params=start_params,
maxiter=maxiter, method=method, disp=disp,
full_output=full_output, callback=callback,
**kwargs)
if use_transparams and method not in ["newton", "ncg"]:
self._transparams = False
mlefit._results.params[-1] = np.exp(mlefit._results.params[-1])
nbinfit = NegativeBinomialResults(self, mlefit._results)
result = NegativeBinomialResultsWrapper(nbinfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
fit.__doc__ += DiscreteModel.fit.__doc__
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha!= 0:
k_params = self.exog.shape[1] + self.k_extra
alpha = alpha * np.ones(k_params)
alpha[-1] = 0
alpha_p = alpha[:-1] if (self.k_extra and np.size(alpha) > 1) else alpha
self._transparams = False
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
mod_poi = Poisson(self.endog, self.exog, offset=offset)
start_params = mod_poi.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(start_params, 0.1)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = L1NegativeBinomialResults(self, cntfit)
return L1NegativeBinomialResultsWrapper(discretefit)
fit_regularized.__doc__ = DiscreteModel.fit_regularized.__doc__
def predict(self, params, exog=None, exposure=None, offset=None,
which='mean'):
"""
Predict response variable of a model given exogenous variables.
Parameters
----------
params : array_like
2d array of fitted parameters of the model. Should be in the
order returned from the model.
exog : array_like, optional
1d or 2d array of exogenous values. If not supplied, the
whole exog attribute of the model is used. If a 1d array is given
it assumed to be 1 row of exogenous variables. If you only have
one regressor and would like to do prediction, you must provide
a 2d array with shape[1] == 1.
offset : array_like, optional
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like, optional
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
which : 'mean', 'linear', 'prob', optional
'mean' returns the exponential of the linear predictor,
exp(dot(exog, params)).
'linear' returns the linear predictor, dot(exog, params).
'prob' returns probabilities for counts from 0 to max(endog).
Default is 'mean'.
Notes
-----
"""
if exog is None:
exog = self.exog
if exposure is None:
exposure = getattr(self, 'exposure', 0)
elif exposure!= 0:
exposure = np.log(exposure)
if offset is None:
offset = getattr(self, 'offset', 0)
fitted = np.dot(exog, params[:exog.shape[1]])
linpred = fitted + exposure + offset
if which =='mean':
return np.exp(linpred)
elif which == 'linear':
return linpred
elif which =='prob':
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
mu = self.predict(params, exog, exposure, offset)
size, prob = self.convert_params(params, mu)
return nbinom.pmf(counts, size[:,None], prob[:,None])
else:
raise ValueError('keyword "which" = %s not recognized' % which)
def convert_params(self, params, mu):
alpha = params[-1]
p = 2 - self.parameterization
size = 1. / alpha * mu**p
prob = size / (size + mu)
return (size, prob)
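# Illustrative aside (not part of the original module): NegativeBinomialP
# generalizes NB-1/NB-2 through the `p` argument, and `predict` can return
# the full predicted count distribution via which='prob' (see the predict
# docstring above). A hedged sketch on simulated data:
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(0)
#     x = sm.add_constant(rng.normal(size=(500, 1)))
#     mu = np.exp(0.2 + 0.4 * x[:, 1])
#     y = rng.negative_binomial(1.0, 1.0 / (1.0 + mu))   # overdispersed counts
#     res = sm.NegativeBinomialP(y, x, p=2).fit(disp=0)
#     probs = res.predict(which='prob')   # nobs x (max(y) + 1) probabilities
#     print(probs.shape)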
### Results Class ###
class DiscreteResults(base.LikelihoodModelResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for the discrete dependent variable models.",
"extra_attr" : ""}
def __init__(self, model, mlefit, cov_type='nonrobust', cov_kwds=None,
use_t=None):
#super(DiscreteResults, self).__init__(model, params,
# np.linalg.inv(-hessian), scale=1.)
self.model = model
self.df_model = model.df_model
self.df_resid = model.df_resid
self._cache = {}
self.nobs = model.exog.shape[0]
self.__dict__.update(mlefit.__dict__)
if not hasattr(self, 'cov_type'):
# do this only if super, i.e. mlefit didn't already add cov_type
# robust covariance
if use_t is not None:
self.use_t = use_t
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description' : 'Standard Errors assume that the '+
'covariance matrix of the errors is correctly '+
'specified.'}
else:
if cov_kwds is None:
cov_kwds = {}
from statsmodels.base.covtype import get_robustcov_results
get_robustcov_results(self, cov_type=cov_type, use_self=True,
**cov_kwds)
def __getstate__(self):
# remove unpicklable methods
mle_settings = getattr(self,'mle_settings', None)
if mle_settings is not None:
if 'callback' in mle_settings:
mle_settings['callback'] = None
if 'cov_params_func' in mle_settings:
mle_settings['cov_params_func'] = None
return self.__dict__
@cache_readonly
def prsquared(self):
"""
McFadden's pseudo-R-squared. `1 - (llf / llnull)`
"""
return 1 - self.llf/self.llnull
@cache_readonly
def llr(self):
"""
Likelihood ratio chi-squared statistic; `-2*(llnull - llf)`
"""
return -2*(self.llnull - self.llf)
@cache_readonly
def llr_pvalue(self):
"""
The chi-squared probability of getting a log-likelihood ratio
statistic greater than llr. llr has a chi-squared distribution
with degrees of freedom `df_model`.
"""
return stats.distributions.chi2.sf(self.llr, self.df_model)
def set_null_options(self, llnull=None, attach_results=True, **kwds):
"""set fit options for Null (constant-only) model
This resets the cache for related attributes which is potentially
fragile. This only sets the option, the null model is estimated
when llnull is accessed, if llnull is not yet in cache.
Parameters
----------
llnull : None or float
If llnull is not None, then the value will be directly assigned to
the cached attribute "llnull".
attach_results : bool
Sets an internal flag whether the results instance of the null
model should be attached. By default without calling this method,
the null model results are not attached and only the loglikelihood
value llnull is stored.
kwds : keyword arguments
`kwds` are directly used as fit keyword arguments for the null
model, overriding any provided defaults.
Returns
-------
no returns, modifies attributes of this instance
"""
# reset cache, note we need to add here anything that depends on
# llnull or the null model. If something is missing, then the attribute
# might be incorrect.
self._cache.pop('llnull', None)
self._cache.pop('llr', None)
self._cache.pop('llr_pvalue', None)
self._cache.pop('prsquared', None)
if hasattr(self,'res_null'):
del self.res_null
if llnull is not None:
self._cache['llnull'] = llnull
self._attach_nullmodel = attach_results
self._optim_kwds_null = kwds
@cache_readonly
def llnull(self):
"""
Value of the constant-only loglikelihood
"""
model = self.model
kwds = model._get_init_kwds().copy()
for key in getattr(model, '_null_drop_keys', []):
del kwds[key]
# TODO: what parameters to pass to fit?
mod_null = model.__class__(model.endog, np.ones(self.nobs), **kwds)
# TODO: consider catching and warning on convergence failure?
# in the meantime, try hard to converge. see
# TestPoissonConstrained1a.test_smoke
optim_kwds = getattr(self, '_optim_kwds_null', {}).copy()
if'start_params' in optim_kwds:
# user provided
sp_null = optim_kwds.pop('start_params')
elif hasattr(model, '_get_start_params_null'):
# get moment estimates if available
sp_null = model._get_start_params_null()
else:
sp_null = None
opt_kwds = dict(method='bfgs', warn_convergence=False, maxiter=10000,
disp=0)
opt_kwds.update(optim_kwds)
if optim_kwds:
res_null = mod_null.fit(start_params=sp_null, **opt_kwds)
else:
# this should be a reasonably robust default across versions
res_null = mod_null.fit(start_params=sp_null, method='nm',
warn_convergence=False,
maxiter=10000, disp=0)
res_null = mod_null.fit(start_params=res_null.params, method='bfgs',
warn_convergence=False,
maxiter=10000, disp=0)
if getattr(self, '_attach_nullmodel', False) is not False:
self.res_null = res_null
return res_null.llf
@cache_readonly
def fittedvalues(self):
"""
Linear predictor XB.
"""
return np.dot(self.model.exog, self.params[:self.model.exog.shape[1]])
@cache_readonly
def resid_response(self):
"""
Response residuals. The response residuals are defined as
`endog - fittedvalues`
"""
return self.model.endog - self.predict()
@cache_readonly
def aic(self):
"""
Akaike information criterion. `-2*(llf - p)` where `p` is the number
of regressors including the intercept.
"""
return -2*(self.llf - (self.df_model+1))
@cache_readonly
def bic(self):
"""
Bayesian information criterion. `-2*llf + ln(nobs)*p` where `p` is the
number of regressors including the intercept.
"""
return -2*self.llf + np.log(self.nobs)*(self.df_model+1)
def _get_endog_name(self, yname, yname_list):
if yname is None:
yname = self.model.endog_names
if yname_list is None:
yname_list = self.model.endog_names
return yname, yname_list
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available from the returned object.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
- 'eydx' - estimate semielasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the
zero-indexed column number as the key and the value at which that
variable should be held as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
DiscreteMargins : marginal effects instance
Returns an object that holds the marginal effects, standard
errors, confidence intervals, etc. See
`statsmodels.discrete.discrete_margins.DiscreteMargins` for more
information.
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
"""
from statsmodels.discrete.discrete_margins import DiscreteMargins
return DiscreteMargins(self, (at, method, atexog, dummy, count))
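# Illustrative aside (not part of the original class): marginal effects are
# typically inspected through the summary of the returned DiscreteMargins
# object. A hedged, self-contained sketch on simulated data:
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(0)
#     x = sm.add_constant(rng.normal(size=(300, 2)))
#     p = 1 / (1 + np.exp(-(x[:, 1] - x[:, 2])))
#     y = (rng.random(300) < p).astype(float)
#     res = sm.Probit(y, x).fit(disp=0)
#     mfx = res.get_margeff(at='overall', method='dydx')
#     print(mfx.summary())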
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables, default is "var_xx".
Must match the number of parameters in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', [self.model.__class__.__name__]),
('Method:', ['MLE']),
('Date:', None),
('Time:', None),
('converged:', ["%s" % self.mle_retvals['converged']]),
]
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Pseudo R-squ.:', ["%#6.4g" % self.prsquared]),
('Log-Likelihood:', None),
('LL-Null:', ["%#8.5g" % self.llnull]),
('LLR p-value:', ["%#6.4g" % self.llr_pvalue])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# boiler plate
from statsmodels.iolib.summary import Summary
smry = Summary()
yname, yname_list = self._get_endog_name(yname, yname_list)
# for top of table
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
# for parameters, etc
smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
List of strings of length equal to the number of parameters
Names of the independent variables (optional)
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
if hasattr(self, 'constraints'):
smry.add_text('Model has been estimated subject to linear '
'equality constraints.')
return smry
class CountResults(DiscreteResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for count data",
"extra_attr": ""}
@cache_readonly
def resid(self):
"""
Residuals
Notes
-----
The residuals for Count models are defined as
.. math:: y - p
where :math:`p = \\exp(X\\beta)`. Any exposure and offset variables
are also handled.
"""
return self.model.endog - self.predict()
class NegativeBinomialResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for NegativeBinomial 1 and 2",
"extra_attr": ""}
@cache_readonly
def lnalpha(self):
"""Natural log of alpha"""
return np.log(self.params[-1])
@cache_readonly
def lnalpha_std_err(self):
"""Natural log of standardized error"""
return self.bse[-1] / self.params[-1]
@cache_readonly
def aic(self):
# + 1 because we estimate alpha
k_extra = getattr(self.model, 'k_extra', 0)
return -2*(self.llf - (self.df_model + self.k_constant + k_extra))
@cache_readonly
def bic(self):
# + 1 because we estimate alpha
k_extra = getattr(self.model, 'k_extra', 0)
return -2*self.llf + np.log(self.nobs)*(self.df_model +
self.k_constant + k_extra)
class GeneralizedPoissonResults(NegativeBinomialResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = getattr(self.model, 'parameterization', 0)
mu = self.predict()
return (1 + self.params[-1] * mu**p)**2
class L1CountResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for count data fit by l1 regularization",
"extra_attr" : _l1_results_attr}
def __init__(self, model, cntfit):
super(L1CountResults, self).__init__(model, cntfit)
# self.trimmed is a boolean array with T/F telling whether or not that
# entry in params has been set zero'd out.
self.trimmed = cntfit.mle_retvals['trimmed']
self.nnz_params = (~self.trimmed).sum()
# Set degrees of freedom. In doing so,
# adjust for extra parameter in NegativeBinomial nb1 and nb2
# extra parameter is not included in df_model
k_extra = getattr(self.model, 'k_extra', 0)
self.df_model = self.nnz_params - 1 - k_extra
self.df_resid = float(self.model.endog.shape[0] - self.nnz_params) + k_extra
class PoissonResults(CountResults):
def predict_prob(self, n=None, exog=None, exposure=None, offset=None,
transform=True):
"""
Return predicted probability of each count level for each observation
Parameters
----------
n : array_like or int
The counts for which you want the probabilities. If n is None
then the probabilities for each count from 0 to max(y) are
given.
Returns
-------
ndarray
A nobs x len(`n`) array where the columns are indexed by the counts
in `n`. If n is None, then column 0 is the probability that each
observation is 0, column 1 is the probability that each
observation is 1, etc.
"""
if n is not None:
counts = np.atleast_2d(n)
else:
counts = np.atleast_2d(np.arange(0, np.max(self.model.endog)+1))
mu = self.predict(exog=exog, exposure=exposure, offset=offset,
transform=transform, linear=False)[:,None]
# uses broadcasting
return stats.poisson.pmf(counts, mu)
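# Illustrative aside (not part of the original class): predict_prob returns a
# nobs x (max(endog) + 1) matrix of predicted count probabilities; each row
# sums to at most one because counts above max(endog) are not included.
# A hedged sketch on simulated data:
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(0)
#     x = sm.add_constant(rng.normal(size=(200, 1)))
#     y = rng.poisson(np.exp(0.3 + 0.5 * x[:, 1]))
#     res = sm.Poisson(y, x).fit(disp=0)
#     probs = res.predict_prob()
#     print(probs.shape, probs.sum(1).max() <= 1 + 1e-8)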
@property
def resid_pearson(self):
"""
Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
# Pearson residuals
p = self.predict() # fittedvalues is still linear
return (self.model.endog - p)/np.sqrt(p)
class L1PoissonResults(L1CountResults, PoissonResults):
pass
class L1NegativeBinomialResults(L1CountResults, NegativeBinomialResults):
pass
class L1GeneralizedPoissonResults(L1CountResults, GeneralizedPoissonResults):
pass
class OrderedResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" : "A results class for ordered discrete data.", "extra_attr" : ""}
pass
class BinaryResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" : "A results class for binary data", "extra_attr" : ""}
def pred_table(self, threshold=.5):
"""
Prediction table
Parameters
----------
threshold : scalar
Number between 0 and 1. Threshold above which a prediction is
considered 1 and below which a prediction is considered 0.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal.
"""
model = self.model
actual = model.endog
pred = np.array(self.predict() > threshold, dtype=float)
bins = np.array([0, 0.5, 1])
return np.histogram2d(actual, pred, bins=bins)[0]
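# Illustrative aside (not part of the original class): pred_table returns a
# 2 x 2 table with observed outcomes in rows and thresholded predictions in
# columns, so the diagonal gives correct classifications. A hedged sketch:
#
#     import numpy as np
#     import statsmodels.api as sm
#
#     rng = np.random.default_rng(0)
#     x = sm.add_constant(rng.normal(size=(200, 1)))
#     y = (rng.random(200) < 1 / (1 + np.exp(-x[:, 1]))).astype(float)
#     res = sm.Logit(y, x).fit(disp=0)
#     tbl = res.pred_table()
#     print(np.trace(tbl) / tbl.sum())   # overall classification accuracy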
def summary(self, yname=None, xname=None, title=None, alpha=.05,
yname_list=None):
smry = super(BinaryResults, self).summary(yname, xname, title, alpha,
yname_list)
fittedvalues = self.model.cdf(self.fittedvalues)
absprederror = np.abs(self.model.endog - fittedvalues)
predclose_sum = (absprederror < 1e-4).sum()
predclose_frac = predclose_sum / len(fittedvalues)
# add warnings/notes
etext = []
if predclose_sum == len(fittedvalues): # TODO: nobs?
wstr = "Complete Separation: The results show that there is"
wstr += "complete separation.\n"
wstr += "In this case the Maximum Likelihood Estimator does "
wstr += "not exist and the parameters\n"
wstr += "are not identified."
etext.append(wstr)
elif predclose_frac > 0.1: # TODO: get better diagnosis
wstr = "Possibly complete quasi-separation: A fraction "
wstr += "%4.2f of observations can be\n" % predclose_frac
wstr += "perfectly predicted. This might indicate that there "
wstr += "is complete\nquasi-separation. In this case some "
wstr += "parameters will not be identified."
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
summary.__doc__ = DiscreteResults.summary.__doc__
@cache_readonly
def resid_dev(self):
"""
Deviance residuals
Notes
-----
Deviance residuals are defined
.. math:: d_j = \\pm\\left(2\\left[Y_j\\ln\\left(\\frac{Y_j}{M_jp_j}\\right) + (M_j - Y_j)\\ln\\left(\\frac{M_j-Y_j}{M_j(1-p_j)} \\right) \\right] \\right)^{1/2}
where
:math:`p_j = cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
#These are the deviance residuals
#model = self.model
endog = self.model.endog
#exog = model.exog
# M = # of individuals that share a covariate pattern
# so M[i] = 2 for i = two share a covariate pattern
M = 1
p = self.predict()
#Y_0 = np.where(exog == 0)
#Y_M = np.where(exog == M)
#NOTE: Common covariate patterns are not yet handled
res = -(1-endog)*np.sqrt(2*M*np.abs(np.log(1-p))) + \
endog*np.sqrt(2*M*np.abs(np.log(p)))
return res
@cache_readonly
def resid_pearson(self):
"""
Pearson residuals
Notes
-----
Pearson residuals are defined to be
.. math:: r_j = \\frac{(y - M_jp_j)}{\\sqrt{M_jp_j(1-p_j)}}
where :math:`p_j=cdf(X\\beta)` and :math:`M_j` is the total number of
observations sharing the covariate pattern :math:`j`.
For now :math:`M_j` is always set to 1.
"""
# Pearson residuals
#model = self.model
endog = self.model.endog
#exog = model.exog
# M = # of individuals that share a covariate pattern
# so M[i] = 2 for i = two share a covariate pattern
# use unique row pattern?
M = 1
p = self.predict()
return (endog - M*p)/np.sqrt(M*p*(1-p))
@cache_readonly
def resid_response(self):
"""
The response residuals
Notes
-----
Response residuals are defined to be
.. math:: y - p
where :math:`p=cdf(X\\beta)`.
"""
return self.model.endog - self.predict()
class LogitResults(BinaryResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Logit Model",
"extra_attr": ""}
@cache_readonly
def resid_generalized(self):
"""
Generalized residuals
Notes
-----
The generalized residuals for the Logit model are defined
.. math:: y - p
where :math:`p=cdf(X\\beta)`. This is the same as the `resid_response`
for the Logit model.
"""
# Generalized residuals
return self.model.endog - self.predict()
class ProbitResults(BinaryResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Probit Model",
"extra_attr": ""}
@cache_readonly
def resid_generalized(self):
"""
Generalized residuals
Notes
-----
The generalized residuals for the Probit model are defined
.. math:: y\\frac{\\phi(X\\beta)}{\\Phi(X\\beta)}-(1-y)\\frac{\\phi(X\\beta)}{1-\\Phi(X\\beta)}
"""
# generalized residuals
model = self.model
endog = model.endog
XB = self.predict(linear=True)
pdf = model.pdf(XB)
cdf = model.cdf(XB)
return endog * pdf/cdf - (1-endog)*pdf/(1-cdf)
class L1BinaryResults(BinaryResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"Results instance for binary data fit by l1 regularization",
"extra_attr" : _l1_results_attr}
def __init__(self, model, bnryfit):
super(L1BinaryResults, self).__init__(model, bnryfit)
# self.trimmed is a boolean array with T/F telling whether or not that
# entry in params has been set zero'd out.
self.trimmed = bnryfit.mle_retvals['trimmed']
self.nnz_params = (~self.trimmed).sum()
self.df_model = self.nnz_params - 1
self.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
class MultinomialResults(DiscreteResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for multinomial data", "extra_attr" : ""}
def __init__(self, model, mlefit):
super(MultinomialResults, self).__init__(model, mlefit)
self.J = model.J
self.K = model.K
def _maybe_convert_ynames_int(self, ynames):
# see if they're integers
issue_warning = False
msg = ('endog contains values that are not int-like. Using the string '
'representation of the values. Use integer-valued endog to '
'suppress this warning.')
for i in ynames:
try:
if ynames[i] % 1 == 0:
ynames[i] = str(int(ynames[i]))
else:
issue_warning = True
ynames[i] = str(ynames[i])
except TypeError:
ynames[i] = str(ynames[i])
if issue_warning:
import warnings
warnings.warn(msg, SpecificationWarning)
return ynames
def _get_endog_name(self, yname, yname_list, all=False):
"""
If all is False, the first variable name is dropped
"""
model = self.model
if yname is None:
yname = model.endog_names
if yname_list is None:
ynames = model._ynames_map
ynames = self._maybe_convert_ynames_int(ynames)
# use range below to ensure sortedness
ynames = [ynames[key] for key in range(int(model.J))]
ynames = ['='.join([yname, name]) for name in ynames]
if not all:
yname_list = ynames[1:] # assumes first variable is dropped
else:
yname_list = ynames
return yname, yname_list
def pred_table(self):
"""
Returns the J x J prediction table.
Notes
-----
pred_table[i,j] refers to the number of times "i" was observed and
the model predicted "j". Correct predictions are along the diagonal.
"""
ju = self.model.J - 1 # highest index
# these are the actual, predicted indices
#idx = lzip(self.model.endog, self.predict().argmax(1))
bins = np.concatenate(([0], np.linspace(0.5, ju - 0.5, ju), [ju]))
return np.histogram2d(self.model.endog, self.predict().argmax(1),
bins=bins)[0]
@cache_readonly
def bse(self):
bse = np.sqrt(np.diag(self.cov_params()))
return bse.reshape(self.params.shape, order='F')
@cache_readonly
def aic(self):
return -2*(self.llf - (self.df_model+self.model.J-1))
@cache_readonly
def bic(self):
return -2*self.llf + np.log(self.nobs)*(self.df_model+self.model.J-1)
def conf_int(self, alpha=.05, cols=None):
confint = super(DiscreteResults, self).conf_int(alpha=alpha,
cols=cols)
return confint.transpose(2,0,1)
def margeff(self):
raise NotImplementedError("Use get_margeff instead")
@cache_readonly
def resid_misclassified(self):
"""
Residuals indicating which observations are misclassified.
Notes
-----
The residuals for the multinomial model are defined as
.. math:: argmax(y_i) \\neq argmax(p_i)
where :math:`argmax(y_i)` is the index of the category for the
endogenous variable and :math:`argmax(p_i)` is the index of the
predicted probabilities for each category. That is, the residual
is a binary indicator that is 0 if the category with the highest
predicted probability is the same as that of the observed variable
and 1 otherwise.
"""
# it's 0 or 1 - 0 for correct prediction and 1 for a missed one
return (self.model.wendog.argmax(1)!=
self.predict().argmax(1)).astype(float)
def summary2(self, alpha=0.05, float_format="%.4f"):
"""Experimental function to summarize regression results
Parameters
----------
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_dict(summary2.summary_model(self))
# One data frame per value of endog
eqn = self.params.shape[1]
confint = self.conf_int(alpha)
for i in range(eqn):
coefs = summary2.summary_params((self, self.params[:, i],
self.bse[:, i],
self.tvalues[:, i],
self.pvalues[:, i],
confint[i]),
alpha=alpha)
# Header must show value of endog
level_str = self.model.endog_names +'='+ str(i)
coefs[level_str] = coefs.index
coefs = coefs.iloc[:, [-1, 0, 1, 2, 3, 4, 5]]
smry.add_df(coefs, index=False, header=True,
float_format=float_format)
smry.add_title(results=self)
return smry
class L1MultinomialResults(MultinomialResults):
__doc__ = _discrete_results_docs % {"one_line_description" :
"A results class for multinomial data fit by l1 regularization",
"extra_attr" : _l1_results_attr}
def __init__(self, model, mlefit):
super(L1MultinomialResults, self).__init__(model, mlefit)
# self.trimmed is a boolean array with T/F telling whether or not that
# entry in params has been set zero'd out.
self.trimmed = mlefit.mle_retvals['trimmed']
self.nnz_params = (~self.trimmed).sum()
# Note: J-1 constants
self.df_model = self.nnz_params - (self.model.J - 1)
self.df_resid = float(self.model.endog.shape[0] - self.nnz_params)
#### Results Wrappers ####
class OrderedResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(OrderedResultsWrapper, OrderedResults)
class CountResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(CountResultsWrapper, CountResults)
class NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(NegativeBinomialResultsWrapper,
NegativeBinomialResults)
class GeneralizedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(GeneralizedPoissonResultsWrapper,
GeneralizedPoissonResults)
class PoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
#_methods = {
# "predict_prob" : "rows",
# }
#_wrap_methods = lm.wrap.union_dicts(
# lm.RegressionResultsWrapper._wrap_methods,
# _methods)
wrap.populate_wrapper(PoissonResultsWrapper, PoissonResults)
class L1CountResultsWrapper(lm.RegressionResultsWrapper):
pass
class L1PoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
#_methods = {
# "predict_prob" : "rows",
# }
#_wrap_methods = lm.wrap.union_dicts(
# lm.RegressionResultsWrapper._wrap_methods,
# _methods)
wrap.populate_wrapper(L1PoissonResultsWrapper, L1PoissonResults)
class L1NegativeBinomialResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1NegativeBinomialResultsWrapper,
L1NegativeBinomialResults)
class L1GeneralizedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1GeneralizedPoissonResultsWrapper,
L1GeneralizedPoissonResults)
class BinaryResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {"resid_dev" : "rows",
"resid_generalized" : "rows",
"resid_pearson" : "rows",
"resid_response" : "rows"
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(BinaryResultsWrapper, BinaryResults)
class L1BinaryResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1BinaryResultsWrapper, L1BinaryResults)
class MultinomialResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {"resid_misclassified" : "rows"}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(MultinomialResultsWrapper, MultinomialResults)
class L1MultinomialResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1MultinomialResultsWrapper, L1MultinomialResults) |
|
statsmodels__statsmodels | duration.rst | Module doc / Directory summarization | Generate documentation for this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/duration.rst | [
"statsmodels__statsmodels/statsmodels/duration/survfunc.py",
"statsmodels__statsmodels/statsmodels/duration/hazard_regression.py"
] | statsmodels__statsmodels/statsmodels/duration | Methods for Survival and Duration Analysis
statsmodels.duration implements several standard methods for working
with censored data. These methods are most commonly used when the data
consist of durations between an origin time point and the time at which
some event of interest occurred. A typical example is a medical study in
which the origin is the time at which a subject is diagnosed with some
condition, and the event of interest is death (or disease progression,
recovery, etc.).
Currently only right-censoring is handled. Right censoring occurs when
we know that an event occurred after a given time t, but we do not know
the exact event time.
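As a small hedged sketch (not taken from the original text), right-censored data are usually represented as a duration paired with a 0/1 status indicator, which is exactly the form consumed by the estimators below:
import numpy as np
# durations (e.g. days of follow-up); status 1 = event observed, 0 = censored
time = np.asarray([5, 12, 12, 30, 45])
status = np.asarray([1, 0, 1, 1, 0])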
Survival function estimation and inference
The statsmodels.api.SurvfuncRight class can be used to estimate a
survival function using data that may be right censored. SurvfuncRight
implements several inference procedures including confidence intervals
for survival distribution quantiles, pointwise and simultaneous
confidence bands for the survival function, and plotting procedures. The
duration.survdiff function provides testing procedures for comparing
survival distributions.
Examples
Here we create a SurvfuncRight object using data from the flchain study,
which is available through the R datasets repository. We fit the
survival distribution only for the female subjects.
import statsmodels.api as sm
data = sm.datasets.get_rdataset("flchain", "survival").data
df = data.loc[data.sex == "F", :]
sf = sm.SurvfuncRight(df["futime"], df["death"])
The main features of the fitted survival distribution can be seen by
calling the summary method:
sf.summary().head()
We can obtain point estimates and confidence intervals for quantiles of
the survival distribution. Since only around 30% of the subjects died
during this study, we can only estimate quantiles below the 0.3
probability point:
sf.quantile(0.25)
sf.quantile_ci(0.25)
To plot a single survival function, call the plot method:
sf.plot()
Since this is a large dataset with a lot of censoring, we may wish not
to plot the censoring symbols:
fig = sf.plot()
ax = fig.get_axes()[0]
pt = ax.get_lines()[1]
pt.set_visible(False)
We can also add a 95% simultaneous confidence band to the plot.
Typically these bands are only plotted for the central part of the distribution.
fig = sf.plot()
lcb, ucb = sf.simultaneous_cb()
ax = fig.get_axes()[0]
ax.fill_between(sf.surv_times, lcb, ucb, color='lightgrey')
ax.set_xlim(365, 365*10)
ax.set_ylim(0.7, 1)
ax.set_ylabel("Proportion alive")
ax.set_xlabel("Days since enrollment")
Here we plot survival functions for two groups (females and males) on
the same axes:
gb = data.groupby("sex")
ax = plt.axes()
sexes = []
for g in gb:
sexes.append(g[0])
sf = sm.SurvfuncRight(g[1]["futime"], g[1]["death"])
sf.plot(ax)
li = ax.get_lines()
li[1].set_visible(False)
li[3].set_visible(False)
plt.figlegend((li[0], li[2]), sexes, "center right")
plt.ylim(0.6, 1)
ax.set_ylabel("Proportion alive")
ax.set_xlabel("Days since enrollment")
We can formally compare two survival distributions with survdiff, which
implements several standard nonparametric procedures. The default
procedure is the logrank test:
stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex)
Here are some of the other testing procedures implemented by survdiff:
# Fleming-Harrington with p=1, i.e. weight by pooled survival time
stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='fh', fh_p=1)
# Gehan-Breslow, weight by number at risk
stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='gb')
# Tarone-Ware, weight by the square root of the number at risk
stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='tw')
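survdiff also accepts a strata argument for stratified tests. The sketch below is illustrative only; using the sample-collection year as the stratification variable is an assumption added here, not a recommendation from the original text:
# Stratified logrank test (illustrative choice of strata)
stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, strata=data["sample.yr"])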
Regression methods
Proportional hazard regression models ("Cox models") are a regression
technique for censored data. They allow variation in the time to an
event to be explained in terms of covariates, similar to what is done in
a linear or generalized linear regression model. These models express
the covariate effects in terms of "hazard ratios", meaning that the
hazard (instantaneous event rate) is multiplied by a given factor
depending on the value of the covariates.
Examples
import statsmodels.api as sm
import statsmodels.formula.api as smf
data = sm.datasets.get_rdataset("flchain", "survival").data
del data["chapter"]
data = data.dropna()
data["lam"] = data["lambda"]
data["female"] = (data["sex"] == "F").astype(int)
data["year"] = data["sample.yr"] - min(data["sample.yr"])
status = data["death"].values
mod = smf.phreg("futime ~ 0 + age + female + creatinine + "
"np.sqrt(kappa) + np.sqrt(lam) + year + mgus",
data, status=status, ties="efron")
rslt = mod.fit()
print(rslt.summary())
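Because the fitted coefficients are log hazard ratios, exponentiating them yields the hazard ratios described above. This short addition is illustrative:
import numpy as np
hazard_ratios = np.exp(rslt.params)
print(hazard_ratios)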
See statsmodels-examples for more detailed examples.
There are some notebook examples on the Wiki: Wiki notebooks for PHReg
and Survival Analysis | import numpy as np
import pandas as pd
from scipy.stats.distributions import chi2, norm
from statsmodels.graphics import utils
def _calc_survfunc_right(time, status, weights=None, entry=None, compress=True,
retall=True):
"""
Calculate the survival function and its standard error for a single
group.
"""
# Convert the unique times to ranks (0, 1, 2,...)
if entry is None:
utime, rtime = np.unique(time, return_inverse=True)
else:
tx = np.concatenate((time, entry))
utime, rtime = np.unique(tx, return_inverse=True)
rtime = rtime[0:len(time)]
# Number of deaths at each unique time.
ml = len(utime)
if weights is None:
d = np.bincount(rtime, weights=status, minlength=ml)
else:
d = np.bincount(rtime, weights=status*weights, minlength=ml)
# Size of risk set just prior to each event time.
if weights is None:
n = np.bincount(rtime, minlength=ml)
else:
n = np.bincount(rtime, weights=weights, minlength=ml)
if entry is not None:
n = np.cumsum(n) - n
rentry = np.searchsorted(utime, entry, side='left')
if weights is None:
n0 = np.bincount(rentry, minlength=ml)
else:
n0 = np.bincount(rentry, weights=weights, minlength=ml)
n0 = np.cumsum(n0) - n0
n = n0 - n
else:
n = np.cumsum(n[::-1])[::-1]
# Only retain times where an event occurred.
if compress:
ii = np.flatnonzero(d > 0)
d = d[ii]
n = n[ii]
utime = utime[ii]
# The survival function probabilities.
sp = 1 - d / n.astype(np.float64)
ii = sp < 1e-16
sp[ii] = 1e-16
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
sp[ii] = 0
if not retall:
return sp, utime, rtime, n, d
# Standard errors
if weights is None:
# Greenwood's formula
denom = n * (n - d)
denom = np.clip(denom, 1e-12, np.inf)
se = d / denom.astype(np.float64)
se[(n == d) | (n == 0)] = np.nan
se = np.cumsum(se)
se = np.sqrt(se)
locs = np.isfinite(se) | (sp!= 0)
se[locs] *= sp[locs]
se[~locs] = np.nan
else:
# Tsiatis' (1981) formula
se = d / (n * n).astype(np.float64)
se = np.cumsum(se)
se = np.sqrt(se)
return sp, se, utime, rtime, n, d
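# Added illustrative check (not part of the original module): the
# product-limit estimate computed above can be verified by hand on a
# tiny, hypothetical sample.
#
#     time = [2, 3, 3, 5], status = [1, 1, 0, 1]
#     S(2) = 1 - 1/4 = 0.75
#     S(3) = 0.75 * (1 - 1/3) = 0.50
#     S(5) = 0.50 * (1 - 1/1) = 0.00
#
# sp, se, utime, rtime, n, d = _calc_survfunc_right(
#     np.array([2., 3., 3., 5.]), np.array([1., 1., 0., 1.]))
# np.allclose(sp, [0.75, 0.5, 0.0])  # expected to be True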
def _calc_incidence_right(time, status, weights=None):
"""
Calculate the cumulative incidence function and its standard error.
"""
# Calculate the all-cause survival function.
status0 = (status >= 1).astype(np.float64)
sp, utime, rtime, n, d = _calc_survfunc_right(time, status0, weights,
compress=False, retall=False)
ngrp = int(status.max())
# Number of cause-specific deaths at each unique time.
d = []
for k in range(ngrp):
status0 = (status == k + 1).astype(np.float64)
if weights is None:
d0 = np.bincount(rtime, weights=status0, minlength=len(utime))
else:
d0 = np.bincount(rtime, weights=status0*weights,
minlength=len(utime))
d.append(d0)
# The cumulative incidence function probabilities.
ip = []
sp0 = np.r_[1, sp[:-1]] / n
for k in range(ngrp):
ip0 = np.cumsum(sp0 * d[k])
ip.append(ip0)
# The standard error of the cumulative incidence function.
if weights is not None:
return ip, None, utime
se = []
da = sum(d)
for k in range(ngrp):
ra = da / (n * (n - da))
v = ip[k]**2 * np.cumsum(ra)
v -= 2 * ip[k] * np.cumsum(ip[k] * ra)
v += np.cumsum(ip[k]**2 * ra)
ra = (n - d[k]) * d[k] / n
v += np.cumsum(sp0**2 * ra)
ra = sp0 * d[k] / n
v -= 2 * ip[k] * np.cumsum(ra)
v += 2 * np.cumsum(ip[k] * ra)
se.append(np.sqrt(v))
return ip, se, utime
def _checkargs(time, status, entry, freq_weights, exog):
if len(time)!= len(status):
raise ValueError("time and status must have the same length")
if entry is not None and (len(entry)!= len(time)):
msg = "entry times and event times must have the same length"
raise ValueError(msg)
if entry is not None and np.any(entry >= time):
msg = "Entry times must not occur on or after event times"
raise ValueError(msg)
if freq_weights is not None and (len(freq_weights)!= len(time)):
raise ValueError("weights, time and status must have the same length")
if exog is not None and (exog.shape[0]!= len(time)):
raise ValueError("the rows of exog should align with time")
class CumIncidenceRight(object):
"""
Estimation and inference for a cumulative incidence function.
If J = 1, 2,... indicates the event type, the cumulative
incidence function for cause j is:
I(t, j) = P(T <= t and J=j)
Only right censoring is supported. If frequency weights are provided,
the point estimate is returned without a standard error.
Parameters
----------
time : array_like
An array of times (censoring times or event times)
status : array_like
Values >= 1 indicate which event occurred at time t. If
status = 0, the subject was censored at time t.
title : string
Optional title used for plots and summary output.
freq_weights : array_like
Optional frequency weights
exog : array_like
Optional, if present used to account for violation of
independent censoring.
bw_factor : float
Band-width multiplier for kernel-based estimation. Only
used if exog is provided.
dimred : boolean
If True, proportional hazards regression models are used to
reduce exog to two columns by predicting overall events and
censoring in two separate models. If False, exog is used
directly for calculating kernel weights without dimension
reduction.
Attributes
----------
times : array_like
The distinct times at which the incidence rates are estimated
cinc : list of arrays
cinc[k-1] contains the estimated cumulative incidence rates
for outcome k=1,2,...
cinc_se : list of arrays
The standard errors for the values in `cinc`. Not available when
exog and/or frequency weights are provided.
Notes
-----
When exog is provided, a local estimate of the cumulative incidence
rate around each point is constructed, and these are averaged to
produce an estimate of the marginal cumulative incidence
functions. The procedure is analogous to that described in Zeng
(2004) for estimation of the marginal survival function. The
approach removes bias resulting from dependent censoring when the
censoring becomes independent conditioned on the columns of exog.
References
----------
The Stata stcompet procedure:
http://www.stata-journal.com/sjpdf.html?articlenum=st0059
Dinse, G. E. and M. G. Larson. 1986. A note on semi-Markov models
for partially censored data. Biometrika 73: 379-386.
Marubini, E. and M. G. Valsecchi. 1995. Analysing Survival Data
from Clinical Trials and Observational Studies. Chichester, UK:
John Wiley & Sons.
D. Zeng (2004). Estimating marginal survival function by
adjusting for dependent censoring using many covariates. Annals
of Statistics 32:4.
https://arxiv.org/pdf/math/0409180.pdf
"""
def __init__(self, time, status, title=None, freq_weights=None,
exog=None, bw_factor=1., dimred=True):
_checkargs(time, status, None, freq_weights, None)
time = self.time = np.asarray(time)
status = self.status = np.asarray(status)
if freq_weights is not None:
freq_weights = self.freq_weights = np.asarray(freq_weights)
if exog is not None:
from ._kernel_estimates import _kernel_cumincidence
exog = self.exog = np.asarray(exog)
nobs = exog.shape[0]
kw = nobs**(-1/3.0) * bw_factor
kfunc = lambda x: np.exp(-x**2 / kw**2).sum(1)
x = _kernel_cumincidence(time, status, exog, kfunc, freq_weights,
dimred)
self.times = x[0]
self.cinc = x[1]
return
x = _calc_incidence_right(time, status, freq_weights)
self.cinc = x[0]
self.cinc_se = x[1]
self.times = x[2]
self.title = "" if not title else title
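# Illustrative usage (added, hypothetical arrays): status values 1 and 2
# identify two competing event types and 0 indicates censoring.
#
#     ci = CumIncidenceRight(time, status)
#     ci.times                  # distinct event times
#     ci.cinc[0], ci.cinc[1]    # cumulative incidence for causes 1 and 2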
class SurvfuncRight(object):
"""
Estimation and inference for a survival function.
The survival function S(t) = P(T > t) is the probability that an
event time T is greater than t.
This class currently only supports right censoring.
Parameters
----------
time : array_like
An array of times (censoring times or event times)
status : array_like
Status at the event time, status==1 is the 'event'
(e.g. death, failure), meaning that the event
occurs at the given value in `time`; status==0
indicates that censoring has occurred, meaning that
the event occurs after the given value in `time`.
entry : array_like, optional
An array of entry times for handling
left truncation (the subject is not in the risk set on or
before the entry time)
title : string
Optional title used for plots and summary output.
freq_weights : array_like
Optional frequency weights
exog : array_like
Optional, if present used to account for violation of
independent censoring.
bw_factor : float
Band-width multiplier for kernel-based estimation. Only used
if exog is provided.
Attributes
----------
surv_prob : array_like
The estimated value of the survivor function at each time
point in `surv_times`.
surv_prob_se : array_like
The standard errors for the values in `surv_prob`. Not available
if exog is provided.
surv_times : array_like
The points where the survival function changes.
n_risk : array_like
The number of subjects at risk just before each time value in
`surv_times`. Not available if exog is provided.
n_events : array_like
The number of events (e.g. deaths) that occur at each point
in `surv_times`. Not available if exog is provided.
Notes
-----
If exog is None, the standard Kaplan-Meier estimator is used. If
exog is not None, a local estimate of the marginal survival
function around each point is constructed, and these are then
averaged. This procedure gives an estimate of the marginal
survival function that accounts for dependent censoring as long as
the censoring becomes independent when conditioning on the
covariates in exog. See Zeng et al. (2004) for details.
References
----------
D. Zeng (2004). Estimating marginal survival function by
adjusting for dependent censoring using many covariates. Annals
of Statistics 32:4.
https://arxiv.org/pdf/math/0409180.pdf
"""
def __init__(self, time, status, entry=None, title=None,
freq_weights=None, exog=None, bw_factor=1.):
_checkargs(time, status, entry, freq_weights, exog)
time = self.time = np.asarray(time)
status = self.status = np.asarray(status)
if freq_weights is not None:
freq_weights = self.freq_weights = np.asarray(freq_weights)
if entry is not None:
entry = self.entry = np.asarray(entry)
if exog is not None:
if entry is not None:
raise ValueError("exog and entry cannot both be present")
from ._kernel_estimates import _kernel_survfunc
exog = self.exog = np.asarray(exog)
nobs = exog.shape[0]
kw = nobs**(-1/3.0) * bw_factor
kfunc = lambda x: np.exp(-x**2 / kw**2).sum(1)
x = _kernel_survfunc(time, status, exog, kfunc, freq_weights)
self.surv_prob = x[0]
self.surv_times = x[1]
return
x = _calc_survfunc_right(time, status, weights=freq_weights,
entry=entry)
self.surv_prob = x[0]
self.surv_prob_se = x[1]
self.surv_times = x[2]
self.n_risk = x[4]
self.n_events = x[5]
self.title = "" if not title else title
def plot(self, ax=None):
"""
Plot the survival function.
Examples
--------
Change the line color:
>>> import statsmodels.api as sm
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf = sm.SurvfuncRight(df["futime"], df["death"])
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[0].set_color('purple')
>>> li[1].set_color('purple')
Don't show the censoring points:
>>> fig = sf.plot()
>>> ax = fig.get_axes()[0]
>>> li = ax.get_lines()
>>> li[1].set_visible(False)
"""
return plot_survfunc(self, ax)
def quantile(self, p):
"""
Estimated quantile of a survival distribution.
Parameters
----------
p : float
The probability point at which the quantile
is determined.
Returns the estimated quantile.
"""
# SAS uses a strict inequality here.
ii = np.flatnonzero(self.surv_prob < 1 - p)
if len(ii) == 0:
return np.nan
return self.surv_times[ii[0]]
def quantile_ci(self, p, alpha=0.05, method='cloglog'):
"""
Returns a confidence interval for a survival quantile.
Parameters
----------
p : float
The probability point for which a confidence interval is
determined.
alpha : float
The confidence interval has nominal coverage probability
1 - `alpha`.
method : string
Function to use for the g-transformation; must be one of
'cloglog' (the default), 'linear', 'log', 'logit', or 'asinsqrt'.
Returns
-------
lb : float
The lower confidence limit.
ub : float
The upper confidence limit.
Notes
-----
The confidence interval is obtained by inverting Z-tests. The
limits of the confidence interval will always be observed
event times.
References
----------
The method is based on the approach used in SAS, documented here:
http://support.sas.com/documentation/cdl/en/statug/68162/HTML/default/viewer.htm#statug_lifetest_details03.htm
"""
tr = norm.ppf(1 - alpha / 2)
method = method.lower()
if method == "cloglog":
g = lambda x: np.log(-np.log(x))
gprime = lambda x: -1 / (x * np.log(x))
elif method == "linear":
g = lambda x: x
gprime = lambda x: 1
elif method == "log":
g = lambda x: np.log(x)
gprime = lambda x: 1 / x
elif method == "logit":
g = lambda x: np.log(x / (1 - x))
gprime = lambda x: 1 / (x * (1 - x))
elif method == "asinsqrt":
g = lambda x: np.arcsin(np.sqrt(x))
gprime = lambda x: 1 / (2 * np.sqrt(x) * np.sqrt(1 - x))
else:
raise ValueError("unknown method")
r = g(self.surv_prob) - g(1 - p)
r /= (gprime(self.surv_prob) * self.surv_prob_se)
ii = np.flatnonzero(np.abs(r) <= tr)
if len(ii) == 0:
return np.nan, np.nan
lb = self.surv_times[ii[0]]
if ii[-1] == len(self.surv_times) - 1:
ub = np.inf
else:
ub = self.surv_times[ii[-1] + 1]
return lb, ub
def summary(self):
"""
Return a summary of the estimated survival function.
The summary is a dataframe containing the unique event times,
estimated survival function values, and related quantities.
"""
df = pd.DataFrame(index=self.surv_times)
df.index.name = "Time"
df["Surv prob"] = self.surv_prob
df["Surv prob SE"] = self.surv_prob_se
df["num at risk"] = self.n_risk
df["num events"] = self.n_events
return df
def simultaneous_cb(self, alpha=0.05, method="hw", transform="log"):
"""
Returns a simultaneous confidence band for the survival function.
Parameters
----------
alpha : float
`1 - alpha` is the desired simultaneous coverage
probability for the confidence region. Currently alpha
must be set to 0.05, giving 95% simultaneous intervals.
method : string
The method used to produce the simultaneous confidence
band. Only the Hall-Wellner (hw) method is currently
implemented.
transform : string
The transform used to produce the interval (note that the returned
interval is on the survival probability scale regardless
of which transform is used). Only `log` and `arcsin` are
implemented.
Returns
-------
lcb : array_like
The lower confidence limits corresponding to the points
in `surv_times`.
ucb : array_like
The upper confidence limits corresponding to the points
in `surv_times`.
"""
method = method.lower()
if method!= "hw":
msg = "only the Hall-Wellner (hw) method is implemented"
raise ValueError(msg)
if alpha!= 0.05:
raise ValueError("alpha must be set to 0.05")
transform = transform.lower()
s2 = self.surv_prob_se**2 / self.surv_prob**2
nn = self.n_risk
if transform == "log":
denom = np.sqrt(nn) * np.log(self.surv_prob)
theta = 1.3581 * (1 + nn * s2) / denom
theta = np.exp(theta)
lcb = self.surv_prob**(1/theta)
ucb = self.surv_prob**theta
elif transform == "arcsin":
k = 1.3581
k *= (1 + nn * s2) / (2 * np.sqrt(nn))
k *= np.sqrt(self.surv_prob / (1 - self.surv_prob))
f = np.arcsin(np.sqrt(self.surv_prob))
v = np.clip(f - k, 0, np.inf)
lcb = np.sin(v)**2
v = np.clip(f + k, -np.inf, np.pi/2)
ucb = np.sin(v)**2
else:
raise ValueError("Unknown transform")
return lcb, ucb
def survdiff(time, status, group, weight_type=None, strata=None,
entry=None, **kwargs):
"""
Test for the equality of two survival distributions.
Parameters
----------
time : array_like
The event or censoring times.
status : array_like
The censoring status variable, status=1 indicates that the
event occurred, status=0 indicates that the
censored.
group : array_like
Indicators of the two groups
weight_type : string
The following weight types are implemented:
None (default) : logrank test
fh : Fleming-Harrington, weights by S^(fh_p),
requires exponent fh_p to be provided as keyword
argument; the weights are derived from S defined at
the previous event time, and the first weight is
always 1.
gb : Gehan-Breslow, weights by the number at risk
tw : Tarone-Ware, weights by the square root of the number
at risk
strata : array_like
Optional stratum indicators for a stratified test
entry : array_like
Entry times to handle left truncation. The subject is not in
the risk set on or before the entry time.
Returns
-------
chisq : The chi-square (1 degree of freedom) distributed test
statistic value
pvalue : The p-value for the chi^2 test
"""
# TODO: extend to handle more than two groups
time = np.asarray(time)
status = np.asarray(status)
group = np.asarray(group)
gr = np.unique(group)
if len(gr)!= 2:
raise ValueError("logrank only supports two groups")
if strata is None:
obs, var = _survdiff(time, status, group, weight_type, gr,
entry, **kwargs)
else:
strata = np.asarray(strata)
stu = np.unique(strata)
obs, var = 0., 0.
for st in stu:
# could be more efficient?
ii = (strata == st)
obs1, var1 = _survdiff(time[ii], status[ii], group[ii],
weight_type, gr, entry, **kwargs)
obs += obs1
var += var1
zstat = obs / np.sqrt(var)
# The chi^2 test statistic and p-value.
chisq = zstat**2
pvalue = 1 - chi2.cdf(chisq, 1)
return chisq, pvalue
def _survdiff(time, status, group, weight_type, gr, entry=None,
**kwargs):
# logrank test for one stratum
# Get the unique times.
if entry is None:
utimes, rtimes = np.unique(time, return_inverse=True)
else:
utimes, rtimes = np.unique(np.concatenate((time, entry)),
return_inverse=True)
rtimes = rtimes[0:len(time)]
# Split entry times by group if present (should use pandas groupby)
tse = [(gr[0], None), (gr[1], None)]
if entry is not None:
for k in 0, 1:
ii = (group == gr[k])
entry1 = entry[ii]
tse[k] = (gr[k], entry1)
# Event count and risk set size at each time point, per group and overall.
# TODO: should use Pandas groupby
nrisk, obsv = [], []
ml = len(utimes)
for g, entry0 in tse:
mk = (group == g)
n = np.bincount(rtimes, weights=mk, minlength=ml)
ob = np.bincount(rtimes, weights=status*mk, minlength=ml)
obsv.append(ob)
if entry is not None:
n = np.cumsum(n) - n
rentry = np.searchsorted(utimes, entry0, side='left')
n0 = np.bincount(rentry, minlength=ml)
n0 = np.cumsum(n0) - n0
nr = n0 - n
else:
nr = np.cumsum(n[::-1])[::-1]
nrisk.append(nr)
obs = sum(obsv)
nrisk_tot = sum(nrisk)
# The variance of event counts in the first group.
r = nrisk[0] / np.clip(nrisk_tot, 1e-10, np.inf)
denom = nrisk_tot - 1
denom = np.clip(denom, 1e-10, np.inf)
var = obs * r * (1 - r) * (nrisk_tot - obs) / denom
# The expected number of events in the first group.
exp1 = obs * r
weights = None
if weight_type is not None:
weight_type = weight_type.lower()
if weight_type == "gb":
weights = nrisk_tot
elif weight_type == "tw":
weights = np.sqrt(nrisk_tot)
elif weight_type == "fh":
if "fh_p" not in kwargs:
msg = "weight_type type 'fh' requires specification of fh_p"
raise ValueError(msg)
fh_p = kwargs["fh_p"]
# Calculate the survivor function directly to avoid the
# overhead of creating a SurvfuncRight object
sp = 1 - obs / nrisk_tot.astype(np.float64)
sp = np.log(sp)
sp = np.cumsum(sp)
sp = np.exp(sp)
weights = sp**fh_p
weights = np.roll(weights, 1)
weights[0] = 1
else:
raise ValueError("weight_type not implemented")
# The Z-scale test statistic (compare to normal reference
# distribution).
ix = np.flatnonzero(nrisk_tot > 1)
if weights is None:
obs = np.sum(obsv[0][ix] - exp1[ix])
var = np.sum(var[ix])
else:
obs = np.dot(weights[ix], obsv[0][ix] - exp1[ix])
var = np.dot(weights[ix]**2, var[ix])
return obs, var
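# Added note (sketch): in the unweighted case the quantities returned
# here combine into the classical logrank statistic,
#
#     Z = sum_t (O_1t - E_1t) / sqrt(sum_t V_t),    chi^2 = Z**2,
#
# where O_1t, E_1t and V_t are the observed events, expected events and
# hypergeometric variance for the first group at event time t.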
def plot_survfunc(survfuncs, ax=None):
"""
Plot one or more survivor functions.
Parameters
----------
survfuncs : object or array_like
A single SurvfuncRight object, or a list of SurvfuncRight
objects that are plotted together.
Returns
-------
A figure instance on which the plot was drawn.
Examples
--------
Add a legend:
>>> import statsmodels.api as sm
>>> from statsmodels.duration.survfunc import plot_survfunc
>>> data = sm.datasets.get_rdataset("flchain", "survival").data
>>> df = data.loc[data.sex == "F", :]
>>> sf0 = sm.SurvfuncRight(df["futime"], df["death"])
>>> sf1 = sm.SurvfuncRight(3.0 * df["futime"], df["death"])
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> leg = fig.legend((ha[0], ha[1]), (lb[0], lb[1]), 'center right')
Change the line colors:
>>> fig = plot_survfunc([sf0, sf1])
>>> ax = fig.get_axes()[0]
>>> ax.set_position([0.1, 0.1, 0.64, 0.8])
>>> ha, lb = ax.get_legend_handles_labels()
>>> ha[0].set_color('purple')
>>> ha[1].set_color('orange')
"""
fig, ax = utils.create_mpl_ax(ax)
# If we have only a single survival function to plot, put it into
# a list.
try:
assert(type(survfuncs[0]) is SurvfuncRight)
except:
survfuncs = [survfuncs]
for gx, sf in enumerate(survfuncs):
# The estimated survival function does not include a point at
# time 0, include it here for plotting.
surv_times = np.concatenate(([0], sf.surv_times))
surv_prob = np.concatenate(([1], sf.surv_prob))
# If the final times are censoring times they are not included
# in the survival function so we add them here
mxt = max(sf.time)
if mxt > surv_times[-1]:
surv_times = np.concatenate((surv_times, [mxt]))
surv_prob = np.concatenate((surv_prob, [surv_prob[-1]]))
label = getattr(sf, "title", "Group %d" % (gx + 1))
li, = ax.step(surv_times, surv_prob, '-', label=label, lw=2,
where='post')
# Plot the censored points.
ii = np.flatnonzero(np.logical_not(sf.status))
ti = sf.time[ii]
jj = np.searchsorted(surv_times, ti) - 1
sp = surv_prob[jj]
ax.plot(ti, sp, '+', ms=12, color=li.get_color(),
label=label + " points")
ax.set_ylim(0, 1.01)
return fig
"""
Implementation of proportional hazards regression models for duration
data that may be censored ("Cox models").
References
----------
T Therneau (1996). Extending the Cox model. Technical report.
http://www.mayo.edu/research/documents/biostat-58pdf/DOC-10027288
G Rodriguez (2005). Non-parametric estimation in survival models.
http://data.princeton.edu/pop509/NonParametricSurvival.pdf
B Gillespie (2006). Checking the assumptions in the Cox proportional
hazards model.
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf
"""
import numpy as np
from statsmodels.base import model
import statsmodels.base.model as base
from statsmodels.tools.decorators import cache_readonly
_predict_docstring = """
Returns predicted values from the proportional hazards
regression model.
Parameters
----------%(params_doc)s
exog : array_like
Data to use as `exog` in forming predictions. If not
provided, the `exog` values from the model used to fit the
data are used.%(cov_params_doc)s
endog : array_like
Duration (time) values at which the predictions are made.
Only used if pred_type is either 'cumhaz' or 'surv'. If
using model `exog`, defaults to model `endog` (time), but
may be provided explicitly to make predictions at
alternative times.
strata : array_like
A vector of stratum values used to form the predictions.
Not used (may be 'None') if pred_type is 'lhr' or 'hr'.
If `exog` is None, the model stratum values are used. If
`exog` is not None and pred_type is 'surv' or 'cumhaz',
stratum values must be provided (unless there is only one
stratum).
offset : array_like
Offset values used to create the predicted values.
pred_type : string
If 'lhr', returns log hazard ratios, if 'hr' returns
hazard ratios, if 'surv' returns the survival function, if
'cumhaz' returns the cumulative hazard function.
Returns
-------
A bunch containing two fields: `predicted_values` and
`standard_errors`.
Notes
-----
Standard errors are only returned when predicting the log
hazard ratio (pred_type is 'lhr').
Types `surv` and `cumhaz` require estimation of the cumulative
hazard function.
"""
_predict_params_doc = """
params : array_like
The proportional hazards model parameters."""
_predict_cov_params_docstring = """
cov_params : array_like
The covariance matrix of the estimated `params` vector,
used to obtain prediction errors if pred_type='lhr',
otherwise optional."""
class PHSurvivalTime(object):
def __init__(self, time, status, exog, strata=None, entry=None,
offset=None):
"""
Represent a collection of survival times with possible
stratification and left truncation.
Parameters
----------
time : array_like
The times at which either the event (failure) occurs or
the observation is censored.
status : array_like
Indicates whether the event (failure) occurs at `time`
(`status` is 1), or if `time` is a censoring time (`status`
is 0).
exog : array_like
The exogeneous (covariate) data matrix, cases are rows and
variables are columns.
strata : array_like
Grouping variable defining the strata. If None, all
observations are in a single stratum.
entry : array_like
Entry (left truncation) times. The observation is not
part of the risk set for times before the entry time. If
None, the entry time is treated as being zero, which
gives no left truncation. The entry time must be less
than or equal to `time`.
offset : array_like
An optional array of offsets
"""
# Default strata
if strata is None:
strata = np.zeros(len(time), dtype=np.int32)
# Default entry times
if entry is None:
entry = np.zeros(len(time))
# Parameter validity checks.
n1, n2, n3, n4 = len(time), len(status), len(strata),\
len(entry)
nv = [n1, n2, n3, n4]
if max(nv)!= min(nv):
raise ValueError("endog, status, strata, and " +
"entry must all have the same length")
if min(time) < 0:
raise ValueError("endog must be non-negative")
if min(entry) < 0:
raise ValueError("entry time must be non-negative")
# In Stata, this is entry >= time, in R it is >.
if np.any(entry > time):
raise ValueError("entry times may not occur " +
"after event or censoring times")
# Get the row indices for the cases in each stratum
stu = np.unique(strata)
#sth = {x: [] for x in stu} # needs >=2.7
sth = dict([(x, []) for x in stu])
for i,k in enumerate(strata):
sth[k].append(i)
stratum_rows = [np.asarray(sth[k], dtype=np.int32) for k in stu]
stratum_names = stu
# Remove strata with no events
ix = [i for i,ix in enumerate(stratum_rows) if status[ix].sum() > 0]
self.nstrat_orig = len(stratum_rows)
stratum_rows = [stratum_rows[i] for i in ix]
stratum_names = [stratum_names[i] for i in ix]
# The number of strata
nstrat = len(stratum_rows)
self.nstrat = nstrat
# Remove subjects whose entry time occurs after the last event
# in their stratum.
for stx,ix in enumerate(stratum_rows):
last_failure = max(time[ix][status[ix] == 1])
# Stata uses < here, R uses <=
ii = [i for i,t in enumerate(entry[ix]) if
t <= last_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Remove subjects who are censored before the first event in
# their stratum.
for stx,ix in enumerate(stratum_rows):
first_failure = min(time[ix][status[ix] == 1])
ii = [i for i,t in enumerate(time[ix]) if
t >= first_failure]
stratum_rows[stx] = stratum_rows[stx][ii]
# Order by time within each stratum
for stx,ix in enumerate(stratum_rows):
ii = np.argsort(time[ix])
stratum_rows[stx] = stratum_rows[stx][ii]
if offset is not None:
self.offset_s = []
for stx in range(nstrat):
self.offset_s.append(offset[stratum_rows[stx]])
else:
self.offset_s = None
# Number of informative subjects
self.n_obs = sum([len(ix) for ix in stratum_rows])
# Split everything by stratum
self.time_s = []
self.exog_s = []
self.status_s = []
self.entry_s = []
for ix in stratum_rows:
self.time_s.append(time[ix])
self.exog_s.append(exog[ix,:])
self.status_s.append(status[ix])
self.entry_s.append(entry[ix])
self.stratum_rows = stratum_rows
self.stratum_names = stratum_names
# Precalculate some indices needed to fit Cox models.
# Distinct failure times within a stratum are always taken to
# be sorted in ascending order.
#
# ufailt_ix[stx][k] is a list of indices for subjects who fail
# at the k^th sorted unique failure time in stratum stx
#
# risk_enter[stx][k] is a list of indices for subjects who
# enter the risk set at the k^th sorted unique failure time in
# stratum stx
#
# risk_exit[stx][k] is a list of indices for subjects who exit
# the risk set at the k^th sorted unique failure time in
# stratum stx
self.ufailt_ix, self.risk_enter, self.risk_exit, self.ufailt =\
[], [], [], []
for stx in range(self.nstrat):
# All failure times
ift = np.flatnonzero(self.status_s[stx] == 1)
ft = self.time_s[stx][ift]
# Unique failure times
uft = np.unique(ft)
nuft = len(uft)
# Indices of cases that fail at each unique failure time
#uft_map = {x:i for i,x in enumerate(uft)} # requires >=2.7
uft_map = dict([(x, i) for i,x in enumerate(uft)]) # 2.6
uft_ix = [[] for k in range(nuft)]
for ix,ti in zip(ift,ft):
uft_ix[uft_map[ti]].append(ix)
# Indices of cases (failed or censored) that enter the
# risk set at each unique failure time.
risk_enter1 = [[] for k in range(nuft)]
for i,t in enumerate(self.time_s[stx]):
ix = np.searchsorted(uft, t, "right") - 1
if ix >= 0:
risk_enter1[ix].append(i)
# Indices of cases (failed or censored) that exit the
# risk set at each unique failure time.
risk_exit1 = [[] for k in range(nuft)]
for i,t in enumerate(self.entry_s[stx]):
ix = np.searchsorted(uft, t)
risk_exit1[ix].append(i)
self.ufailt.append(uft)
self.ufailt_ix.append([np.asarray(x, dtype=np.int32) for x in uft_ix])
self.risk_enter.append([np.asarray(x, dtype=np.int32) for x in risk_enter1])
self.risk_exit.append([np.asarray(x, dtype=np.int32) for x in risk_exit1])
class PHReg(model.LikelihoodModel):
"""
Fit the Cox proportional hazards regression model for right
censored data.
Parameters
----------
endog : array_like
The observed times (event or censoring)
exog : 2D array_like
The covariates or exogeneous variables
status : array_like
The censoring status values; status=1 indicates that an
event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array_like
The entry times, if left truncation occurs
strata : array_like
Stratum labels. If None, all observations are taken to be
in a single stratum.
ties : string
The method used to handle tied times, must be either 'breslow'
or 'efron'.
offset : array_like
Array of offset values
missing : string
The method used to handle missing data
Notes
-----
Proportional hazards regression models should not include an
explicit or implicit intercept. The effect of an intercept is
not identified using the partial likelihood approach.
`endog`, `event`, `strata`, `entry`, and the first dimension
of `exog` all must have the same length
"""
def __init__(self, endog, exog, status=None, entry=None,
strata=None, offset=None, ties='breslow',
missing='drop', **kwargs):
# Default is no censoring
if status is None:
status = np.ones(len(endog))
super(PHReg, self).__init__(endog, exog, status=status,
entry=entry, strata=strata,
offset=offset, missing=missing,
**kwargs)
# endog and exog are automatically converted, but these are
# not
if self.status is not None:
self.status = np.asarray(self.status)
if self.entry is not None:
self.entry = np.asarray(self.entry)
if self.strata is not None:
self.strata = np.asarray(self.strata)
if self.offset is not None:
self.offset = np.asarray(self.offset)
self.surv = PHSurvivalTime(self.endog, self.status,
self.exog, self.strata,
self.entry, self.offset)
self.nobs = len(self.endog)
self.groups = None
# TODO: not used?
self.missing = missing
self.df_resid = (np.float(self.exog.shape[0] -
np.linalg.matrix_rank(self.exog)))
self.df_model = np.float(np.linalg.matrix_rank(self.exog))
ties = ties.lower()
if ties not in ("efron", "breslow"):
raise ValueError("`ties` must be either `efron` or " +
"`breslow`")
self.ties = ties
@classmethod
def from_formula(cls, formula, data, status=None, entry=None,
strata=None, offset=None, subset=None,
ties='breslow', missing='drop', *args, **kwargs):
"""
Create a proportional hazards regression model from a formula
and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array_like
The data for the model. See Notes.
status : array_like
The censoring status values; status=1 indicates that an
event occurred (e.g. failure or death), status=0 indicates
that the observation was right censored. If None, defaults
to status=1 for all cases.
entry : array_like
The entry times, if left truncation occurs
strata : array_like
Stratum labels. If None, all observations are taken to be
in a single stratum.
offset : array_like
Array of offset values
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
ties : string
The method used to handle tied times, must be either 'breslow'
or 'efron'.
missing : string
The method used to handle missing data
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : PHReg model instance
"""
# Allow array arguments to be passed by column name.
if isinstance(status, str):
status = data[status]
if isinstance(entry, str):
entry = data[entry]
if isinstance(strata, str):
strata = data[strata]
if isinstance(offset, str):
offset = data[offset]
import re
terms = re.split(r"[+\-~]", formula)
for term in terms:
term = term.strip()
if term in ("0", "1"):
import warnings
warnings.warn("PHReg formulas should not include any '0' or '1' terms")
mod = super(PHReg, cls).from_formula(formula, data,
status=status, entry=entry, strata=strata,
offset=offset, subset=subset, ties=ties,
missing=missing, drop_cols=["Intercept"], *args,
**kwargs)
return mod
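# Illustrative usage (added): status, entry, strata and offset may be
# given as column names when `data` is a DataFrame.  The formula and
# column names below are hypothetical.
#
#     mod = PHReg.from_formula("futime ~ age + sex", data,
#                              status="death", ties="efron")
#     rslt = mod.fit()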
def fit(self, groups=None, **args):
"""
Fit a proportional hazards regression model.
Parameters
----------
groups : array_like
Labels indicating groups of observations that may be
dependent. If present, the standard errors account for
this dependence. Does not affect fitted values.
Returns a PHregResults instance.
"""
# TODO process for missing values
if groups is not None:
if len(groups)!= len(self.endog):
msg = ("len(groups) = %d and len(endog) = %d differ" %
(len(groups), len(self.endog)))
raise ValueError(msg)
self.groups = np.asarray(groups)
else:
self.groups = None
if 'disp' not in args:
args['disp'] = False
fit_rslts = super(PHReg, self).fit(**args)
if self.groups is None:
cov_params = fit_rslts.cov_params()
else:
cov_params = self.robust_covariance(fit_rslts.params)
results = PHRegResults(self, fit_rslts.params, cov_params)
return results
def fit_regularized(self, method="elastic_net", alpha=0.,
start_params=None, refit=False, **kwargs):
r"""
Return a regularized fit to a linear regression model.
Parameters
----------
method :
Only the `elastic_net` approach is currently implemented.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
start_params : array_like
Starting values for `params`.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
Returns
-------
A results object.
Notes
-----
The penalty is the ``elastic net`` penalty, which is a
combination of L1 and L2 penalties.
The function that is minimized is:
.. math::
-loglike/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
L1_wt : float
Must be in [0, 1]. The L1 penalty has weight L1_wt and the
L2 penalty has weight 1 - L1_wt.
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
"""
from statsmodels.base.elastic_net import fit_elasticnet
if method!= "elastic_net":
raise ValueError("method for fit_regularied must be elastic_net")
defaults = {"maxiter" : 50, "L1_wt" : 1, "cnvrg_tol" : 1e-10,
"zero_tol" : 1e-10}
defaults.update(kwargs)
return fit_elasticnet(self, method=method,
alpha=alpha,
start_params=start_params,
refit=refit,
**defaults)
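# Illustrative usage (added): a lasso-type regularized fit; with the
# default L1_wt=1 the penalty is pure L1.  Here `mod` denotes an
# existing PHReg instance and alpha=0.1 is an arbitrary choice.
#
#     rslt = mod.fit_regularized(alpha=0.1)
#     print(rslt.params)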
def loglike(self, params):
"""
Returns the log partial likelihood function evaluated at
`params`.
"""
if self.ties == "breslow":
return self.breslow_loglike(params)
elif self.ties == "efron":
return self.efron_loglike(params)
def score(self, params):
"""
Returns the score function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_gradient(params)
elif self.ties == "efron":
return self.efron_gradient(params)
def hessian(self, params):
"""
Returns the Hessian matrix of the log partial likelihood
function evaluated at `params`.
"""
if self.ties == "breslow":
return self.breslow_hessian(params)
else:
return self.efron_hessian(params)
def breslow_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Breslow method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += (linpred[ix] - np.log(xp0)).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like
def efron_loglike(self, params):
"""
Returns the value of the log partial likelihood function
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
like = 0.
# Loop over strata
for stx in range(surv.nstrat):
# exog and linear predictor for this stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0 = 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp0f = e_linpred[uft_ix[i]].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
like += linpred[ix].sum()
m = len(ix)
J = np.arange(m, dtype=np.float64) / m
like -= np.log(xp0 - J*xp0f).sum()
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
return like
def breslow_gradient(self, params):
"""
Returns the gradient of the log partial likelihood, using the
Breslow method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of subjects in the stratum
strat_ix = surv.stratum_rows[stx]
# Unique failure times in the stratum
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
# exog and linear predictor for the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
# Account for all cases that fail at this point.
ix = uft_ix[i]
grad += (exog_s[ix,:] - xp1 / xp0).sum(0)
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad
def efron_gradient(self, params):
"""
Returns the gradient of the log partial likelihood evaluated
at `params`, using the Efron method to handle tied times.
"""
surv = self.surv
grad = 0.
# Loop over strata
for stx in range(surv.nstrat):
# Indices of cases in the stratum
strat_ix = surv.stratum_rows[stx]
# exog and linear predictor of the stratum
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1 = 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 += e_linpred[ix].sum()
xp1 += (e_linpred[ix][:,None] * v).sum(0)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
# Consider all cases that fail at this point.
grad += v.sum(0)
m = len(ixf)
J = np.arange(m, dtype=np.float64) / m
numer = xp1 - np.outer(J, xp1f)
denom = xp0 - np.outer(J, xp0f)
ratio = numer / denom
rsum = ratio.sum(0)
grad -= rsum
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
v = exog_s[ix,:]
xp0 -= e_linpred[ix].sum()
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
return grad
def breslow_hessian(self, params):
"""
Returns the Hessian of the log partial likelihood evaluated at
`params`, using the Breslow method to handle tied times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 += np.einsum("ij,ik,i->jk", v, v, elx)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
hess += m*(xp2 / xp0 - np.outer(xp1, xp1) / xp0**2)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 -= np.einsum("ij,ik,i->jk", v, v, elx)
return -hess
def efron_hessian(self, params):
"""
Returns the Hessian matrix of the partial log-likelihood
evaluated at `params`, using the Efron method to handle tied
times.
"""
surv = self.surv
hess = 0.
# Loop over strata
for stx in range(surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
xp0, xp1, xp2 = 0., 0., 0.
# Iterate backward through the unique failure times.
uft_ix = surv.ufailt_ix[stx]
nuft = len(uft_ix)
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
if len(ix) > 0:
xp0 += e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 += (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 += np.einsum("ij,ik,i->jk", v, v, elx)
ixf = uft_ix[i]
if len(ixf) > 0:
v = exog_s[ixf,:]
xp0f = e_linpred[ixf].sum()
xp1f = (e_linpred[ixf][:,None] * v).sum(0)
elx = e_linpred[ixf]
xp2f = np.einsum("ij,ik,i->jk", v, v, elx)
# Account for all cases that fail at this point.
m = len(uft_ix[i])
J = np.arange(m, dtype=np.float64) / m
c0 = xp0 - J*xp0f
hess += xp2 * np.sum(1 / c0)
hess -= xp2f * np.sum(J / c0)
mat = (xp1[None, :] - np.outer(J, xp1f)) / c0[:, None]
hess -= np.einsum("ij,ik->jk", mat, mat)
# Update for new cases entering the risk set.
ix = surv.risk_exit[stx][i]
if len(ix) > 0:
xp0 -= e_linpred[ix].sum()
v = exog_s[ix,:]
xp1 -= (e_linpred[ix][:,None] * v).sum(0)
elx = e_linpred[ix]
xp2 -= np.einsum("ij,ik,i->jk", v, v, elx)
return -hess
def robust_covariance(self, params):
"""
Returns a covariance matrix for the proportional hazards model
regression coefficient estimates that is robust to certain
forms of model misspecification.
Parameters
----------
params : ndarray
The parameter vector at which the covariance matrix is
calculated.
Returns
-------
The robust covariance matrix as a square ndarray.
Notes
-----
This function uses the `groups` argument to determine groups
within which observations may be dependent. The covariance
matrix is calculated using the Huber-White "sandwich" approach.
"""
if self.groups is None:
raise ValueError("`groups` must be specified to calculate the robust covariance matrix")
hess = self.hessian(params)
score_obs = self.score_residuals(params)
# Collapse
grads = {}
for i,g in enumerate(self.groups):
if g not in grads:
grads[g] = 0.
grads[g] += score_obs[i, :]
grads = np.asarray(list(grads.values()))
mat = grads[None, :, :]
mat = mat.T * mat
mat = mat.sum(1)
hess_inv = np.linalg.inv(hess)
cmat = np.dot(hess_inv, np.dot(mat, hess_inv))
return cmat
def score_residuals(self, params):
"""
Returns the score residuals calculated at a given vector of
parameters.
Parameters
----------
params : ndarray
The parameter vector at which the score residuals are
calculated.
Returns
-------
The score residuals, returned as a ndarray having the same
shape as `exog`.
Notes
-----
Observations in a stratum with no observed events have undefined
score residuals, and contain NaN in the returned matrix.
"""
surv = self.surv
score_resid = np.zeros(self.exog.shape, dtype=np.float64)
# Use to set undefined values to NaN.
mask = np.zeros(self.exog.shape[0], dtype=np.int32)
w_avg = self.weighted_covariate_averages(params)
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
strat_ix = surv.stratum_rows[stx]
xp0 = 0.
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
at_risk_ix = set([])
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
at_risk_ix |= set(ix)
xp0 += e_linpred[ix].sum()
atr_ix = list(at_risk_ix)
leverage = exog_s[atr_ix, :] - w_avg[stx][i, :]
# Event indicators
d = np.zeros(exog_s.shape[0])
d[uft_ix[i]] = 1
# The increment in the cumulative hazard
dchaz = len(uft_ix[i]) / xp0
# Piece of the martingale residual
mrp = d[atr_ix] - e_linpred[atr_ix] * dchaz
# Update the score residuals
ii = strat_ix[atr_ix]
score_resid[ii,:] += leverage * mrp[:, None]
mask[ii] = 1
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
at_risk_ix -= set(ix)
xp0 -= e_linpred[ix].sum()
jj = np.flatnonzero(mask == 0)
if len(jj) > 0:
score_resid[jj, :] = np.nan
return score_resid
def weighted_covariate_averages(self, params):
"""
Returns the hazard-weighted average of covariate values for
subjects who are at-risk at a particular time.
Parameters
----------
params : ndarray
Parameter vector
Returns
-------
averages : list of ndarrays
averages[stx][i,:] is a row vector containing the weighted
average values (for all the covariates) of at-risk
subjects at the i^th largest observed failure time in
stratum `stx`, using the hazard multipliers as weights.
Notes
-----
Used to calculate leverages and score residuals.
"""
surv = self.surv
averages = []
xp0, xp1 = 0., 0.
# Loop over strata
for stx in range(surv.nstrat):
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
average_s = np.zeros((len(uft_ix), exog_s.shape[1]),
dtype=np.float64)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
linpred -= linpred.max()
e_linpred = np.exp(linpred)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
xp1 += np.dot(e_linpred[ix], exog_s[ix, :])
average_s[i, :] = xp1 / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
xp1 -= np.dot(e_linpred[ix], exog_s[ix, :])
averages.append(average_s)
return averages
def baseline_cumulative_hazard(self, params):
"""
Estimate the baseline cumulative hazard and survival
functions.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A list of triples (time, hazard, survival) containing the time
values and corresponding cumulative hazard and survival
function values for each stratum.
Notes
-----
Uses the Nelson-Aalen estimator.
"""
# TODO: some disagreements with R, not the same algorithm but
# hard to deduce what R is doing. Our results are reasonable.
surv = self.surv
rslt = []
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
uft_ix = surv.ufailt_ix[stx]
exog_s = surv.exog_s[stx]
nuft = len(uft_ix)
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
xp0 = 0.
h0 = np.zeros(nuft, dtype=np.float64)
# Iterate backward through the unique failure times.
for i in range(nuft)[::-1]:
# Update for new cases entering the risk set.
ix = surv.risk_enter[stx][i]
xp0 += e_linpred[ix].sum()
# Account for all cases that fail at this point.
ix = uft_ix[i]
h0[i] = len(ix) / xp0
# Update for cases leaving the risk set.
ix = surv.risk_exit[stx][i]
xp0 -= e_linpred[ix].sum()
cumhaz = np.cumsum(h0) - h0
current_strata_surv = np.exp(-cumhaz)
rslt.append([uft, cumhaz, current_strata_surv])
return rslt
def baseline_cumulative_hazard_function(self, params):
"""
Returns a function that calculates the baseline cumulative
hazard function for each stratum.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
A dict mapping stratum names to the estimated baseline
cumulative hazard function.
"""
from scipy.interpolate import interp1d
surv = self.surv
base = self.baseline_cumulative_hazard(params)
cumhaz_f = {}
for stx in range(surv.nstrat):
time_h = base[stx][0]
cumhaz = base[stx][1]
time_h = np.r_[-np.inf, time_h, np.inf]
cumhaz = np.r_[cumhaz[0], cumhaz, cumhaz[-1]]
func = interp1d(time_h, cumhaz, kind='zero')
cumhaz_f[self.surv.stratum_names[stx]] = func
return cumhaz_f
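# Illustrative usage (added): evaluate the estimated baseline cumulative
# hazard of one stratum at a few (hypothetical) time points.
#
#     chf = mod.baseline_cumulative_hazard_function(rslt.params)
#     key = mod.surv.stratum_names[0]
#     chf[key]([100., 500., 1000.])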
def predict(self, params, exog=None, cov_params=None, endog=None,
strata=None, offset=None, pred_type="lhr"):
# docstring attached below
pred_type = pred_type.lower()
if pred_type not in ["lhr", "hr", "surv", "cumhaz"]:
msg = "Type %s not allowed for prediction" % pred_type
raise ValueError(msg)
class bunch:
predicted_values = None
standard_errors = None
ret_val = bunch()
# Don't do anything with offset here because we want to allow
# different offsets to be specified even if exog is the model
# exog.
exog_provided = True
if exog is None:
exog = self.exog
exog_provided = False
lhr = np.dot(exog, params)
if offset is not None:
lhr += offset
# Never use self.offset unless we are also using self.exog
elif self.offset is not None and not exog_provided:
lhr += self.offset
# Handle lhr and hr prediction first, since they don't make
# use of the hazard function.
if pred_type == "lhr":
ret_val.predicted_values = lhr
if cov_params is not None:
mat = np.dot(exog, cov_params)
va = (mat * exog).sum(1)
ret_val.standard_errors = np.sqrt(va)
return ret_val
hr = np.exp(lhr)
if pred_type == "hr":
ret_val.predicted_values = hr
return ret_val
# Makes sure endog is defined
if endog is None and exog_provided:
msg = "If `exog` is provided `endog` must be provided."
raise ValueError(msg)
# Use model endog if using model exog
elif endog is None and not exog_provided:
endog = self.endog
# Make sure strata is defined
if strata is None:
if exog_provided and self.surv.nstrat > 1:
raise ValueError("`strata` must be provided")
if self.strata is None:
strata = [self.surv.stratum_names[0],] * len(endog)
else:
strata = self.strata
cumhaz = np.nan * np.ones(len(endog), dtype=np.float64)
stv = np.unique(strata)
bhaz = self.baseline_cumulative_hazard_function(params)
for stx in stv:
ix = np.flatnonzero(strata == stx)
func = bhaz[stx]
cumhaz[ix] = func(endog[ix]) * hr[ix]
if pred_type == "cumhaz":
ret_val.predicted_values = cumhaz
elif pred_type == "surv":
ret_val.predicted_values = np.exp(-cumhaz)
return ret_val
predict.__doc__ = _predict_docstring % {'params_doc': _predict_params_doc,
'cov_params_doc': _predict_cov_params_docstring}
def get_distribution(self, params):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Parameters
----------
params : array_like
The proportional hazards model parameters.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
failure times within a stratum.
"""
# TODO: this returns a Python list of rv_discrete objects, so
# nothing can be vectorized. It appears that rv_discrete does
# not allow vectorization.
surv = self.surv
bhaz = self.baseline_cumulative_hazard(params)
# The arguments to rv_discrete_float, first obtained by
# stratum
pk, xk = [], []
for stx in range(self.surv.nstrat):
exog_s = surv.exog_s[stx]
linpred = np.dot(exog_s, params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
# The unique failure times for this stratum (the support
# of the distribution).
pts = bhaz[stx][0]
# The individual cumulative hazards for everyone in this
# stratum.
ichaz = np.outer(e_linpred, bhaz[stx][1])
# The individual survival functions.
usurv = np.exp(-ichaz)
z = np.zeros((usurv.shape[0], 1))
usurv = np.concatenate((usurv, z), axis=1)
# The individual survival probability masses.
probs = -np.diff(usurv, 1)
pk.append(probs)
xk.append(np.outer(np.ones(probs.shape[0]), pts))
# Pad to make all strata have the same shape
mxc = max([x.shape[1] for x in xk])
for k in range(self.surv.nstrat):
if xk[k].shape[1] < mxc:
xk1 = np.zeros((xk[k].shape[0], mxc))
pk1 = np.zeros((pk[k].shape[0], mxc))
xk1[:, 0:xk[k].shape[1]] = xk[k]
pk1[:, 0:pk[k].shape[1]] = pk[k]
xk[k], pk[k] = xk1, pk1
# Put the support points and probabilities into single matrices
xka = np.nan * np.ones((len(self.endog), mxc))
pka = np.ones((len(self.endog), mxc), dtype=np.float64) / mxc
for stx in range(self.surv.nstrat):
ix = self.surv.stratum_rows[stx]
xka[ix, :] = xk[stx]
pka[ix, :] = pk[stx]
dist = rv_discrete_float(xka, pka)
return dist
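# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original statsmodels source): one way
# the ``predict`` and ``get_distribution`` methods above might be exercised.
# The data is simulated and the helper name ``_example_phreg_usage`` is
# hypothetical.
def _example_phreg_usage():
    import numpy as np
    import statsmodels.api as sm

    rng = np.random.default_rng(0)
    times = rng.exponential(size=200)
    status = rng.integers(0, 2, size=200)   # 1 = event observed, 0 = censored
    exog = rng.normal(size=(200, 2))

    model = sm.PHReg(times, exog, status=status)
    result = model.fit()

    # predicted survival probabilities at the observed durations
    surv = result.predict(pred_type="surv").predicted_values

    # per-observation discrete distributions of the failure times
    dist = model.get_distribution(result.params)
    return surv, dist.rvs(), dist.mean()
# ---------------------------------------------------------------------------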
class PHRegResults(base.LikelihoodModelResults):
'''
Class to contain results of fitting a Cox proportional hazards
survival model.
    PHRegResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Attributes
----------
model : class instance
PHreg model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
params : array
The coefficients of the fitted model. Each coefficient is the
log hazard ratio corresponding to a 1 unit difference in a
single covariate while holding the other covariates fixed.
bse : array
The standard errors of the fitted parameters.
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params, scale=1., covariance_type="naive"):
# There is no scale parameter, but we need it for
# meta-procedures that work with results.
self.covariance_type = covariance_type
self.df_resid = model.df_resid
self.df_model = model.df_model
super(PHRegResults, self).__init__(model, params, scale=1.,
normalized_cov_params=cov_params)
@cache_readonly
def standard_errors(self):
"""
Returns the standard errors of the parameter estimates.
"""
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def bse(self):
"""
Returns the standard errors of the parameter estimates.
"""
return self.standard_errors
def get_distribution(self):
"""
Returns a scipy distribution object corresponding to the
distribution of uncensored endog (duration) values for each
case.
Returns
-------
A list of objects of type scipy.stats.distributions.rv_discrete
Notes
-----
The distributions are obtained from a simple discrete estimate
of the survivor function that puts all mass on the observed
        failure times within a stratum.
"""
return self.model.get_distribution(self.params)
def predict(self, endog=None, exog=None, strata=None,
offset=None, transform=True, pred_type="lhr"):
# docstring attached below
return super(PHRegResults, self).predict(exog=exog,
transform=transform,
cov_params=self.cov_params(),
endog=endog,
strata=strata,
offset=offset,
pred_type=pred_type)
predict.__doc__ = _predict_docstring % {'params_doc': '',
'cov_params_doc': ''}
def _group_stats(self, groups):
"""
Descriptive statistics of the groups.
"""
gsizes = np.unique(groups, return_counts=True)
gsizes = gsizes[1]
return gsizes.min(), gsizes.max(), gsizes.mean(), len(gsizes)
@cache_readonly
def weighted_covariate_averages(self):
"""
The average covariate values within the at-risk set at each
event time point, weighted by hazard.
"""
return self.model.weighted_covariate_averages(self.params)
@cache_readonly
def score_residuals(self):
"""
A matrix containing the score residuals.
"""
return self.model.score_residuals(self.params)
@cache_readonly
def baseline_cumulative_hazard(self):
"""
A list (corresponding to the strata) containing the baseline
cumulative hazard function evaluated at the event points.
"""
return self.model.baseline_cumulative_hazard(self.params)
@cache_readonly
def baseline_cumulative_hazard_function(self):
"""
A list (corresponding to the strata) containing function
objects that calculate the cumulative hazard function.
"""
return self.model.baseline_cumulative_hazard_function(self.params)
@cache_readonly
def schoenfeld_residuals(self):
"""
A matrix containing the Schoenfeld residuals.
Notes
-----
        Schoenfeld residuals for censored observations are set to nan.
"""
surv = self.model.surv
w_avg = self.weighted_covariate_averages
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
sch_resid = np.nan*np.ones(self.model.exog.shape, dtype=np.float64)
# Loop over strata
for stx in range(surv.nstrat):
uft = surv.ufailt[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
strat_ix = surv.stratum_rows[stx]
ii = np.searchsorted(uft, time_s)
# These subjects are censored after the last event in
# their stratum, so have empty risk sets and undefined
# residuals.
jj = np.flatnonzero(ii < len(uft))
sch_resid[strat_ix[jj], :] = exog_s[jj, :] - w_avg[stx][ii[jj], :]
jj = np.flatnonzero(self.model.status == 0)
sch_resid[jj, :] = np.nan
return sch_resid
@cache_readonly
def martingale_residuals(self):
"""
The martingale residuals.
"""
surv = self.model.surv
# Initialize at NaN since rows that belong to strata with no
# events have undefined residuals.
mart_resid = np.nan*np.ones(len(self.model.endog), dtype=np.float64)
cumhaz_f_list = self.baseline_cumulative_hazard_function
# Loop over strata
for stx in range(surv.nstrat):
cumhaz_f = cumhaz_f_list[stx]
exog_s = surv.exog_s[stx]
time_s = surv.time_s[stx]
linpred = np.dot(exog_s, self.params)
if surv.offset_s is not None:
linpred += surv.offset_s[stx]
e_linpred = np.exp(linpred)
ii = surv.stratum_rows[stx]
chaz = cumhaz_f(time_s)
mart_resid[ii] = self.model.status[ii] - e_linpred * chaz
return mart_resid
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the proportional hazards regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
            Names for the exogenous variables, default is `x#` where # is
            the index of the regressor. Must match the number of parameters
            in the model.
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
from collections import OrderedDict
smry = summary2.Summary()
float_format = "%8.3f"
info = OrderedDict()
info["Model:"] = "PH Reg"
if yname is None:
yname = self.model.endog_names
info["Dependent variable:"] = yname
info["Ties:"] = self.model.ties.capitalize()
info["Sample size:"] = str(self.model.surv.n_obs)
info["Num. events:"] = str(int(sum(self.model.status)))
if self.model.groups is not None:
mn, mx, avg, num = self._group_stats(self.model.groups)
info["Num groups:"] = "%.0f" % num
info["Min group size:"] = "%.0f" % mn
info["Max group size:"] = "%.0f" % mx
info["Avg group size:"] = "%.1f" % avg
if self.model.strata is not None:
mn, mx, avg, num = self._group_stats(self.model.strata)
info["Num strata:"] = "%.0f" % num
info["Min stratum size:"] = "%.0f" % mn
info["Max stratum size:"] = "%.0f" % mx
info["Avg stratum size:"] = "%.1f" % avg
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param = param.rename(columns={"Coef.": "log HR",
"Std.Err.": "log HR SE"})
param.insert(2, "HR", np.exp(param["log HR"]))
a = "[%.3f" % (alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
a = "%.3f]" % (1 - alpha / 2)
param.loc[:, a] = np.exp(param.loc[:, a])
if xname is not None:
param.index = xname
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
smry.add_text("Confidence intervals are for the hazard ratios")
dstrat = self.model.surv.nstrat_orig - self.model.surv.nstrat
if dstrat > 0:
if dstrat == 1:
smry.add_text("1 stratum dropped for having no events")
else:
smry.add_text("%d strata dropped for having no events" % dstrat)
if self.model.entry is not None:
            n_entry = sum(self.model.entry != 0)
if n_entry == 1:
smry.add_text("1 observation has a positive entry time")
else:
smry.add_text("%d observations have positive entry times" % n_entry)
if self.model.groups is not None:
smry.add_text("Standard errors account for dependence within groups")
if hasattr(self, "regularized"):
smry.add_text("Standard errors do not account for the regularization")
return smry
class rv_discrete_float(object):
"""
A class representing a collection of discrete distributions.
Parameters
----------
xk : 2d array_like
The support points, should be non-decreasing within each
row.
pk : 2d array_like
The probabilities, should sum to one within each row.
Notes
-----
Each row of `xk`, and the corresponding row of `pk` describe a
discrete distribution.
`xk` and `pk` should both be two-dimensional ndarrays. Each row
of `pk` should sum to 1.
This class is used as a substitute for scipy.distributions.
rv_discrete, since that class does not allow non-integer support
points, or vectorized operations.
Only a limited number of methods are implemented here compared to
the other scipy distribution classes.
"""
def __init__(self, xk, pk):
self.xk = xk
self.pk = pk
self.cpk = np.cumsum(self.pk, axis=1)
def rvs(self):
"""
Returns a random sample from the discrete distribution.
A vector is returned containing a single draw from each row of
`xk`, using the probabilities of the corresponding row of `pk`
"""
n = self.xk.shape[0]
u = np.random.uniform(size=n)
ix = (self.cpk < u[:, None]).sum(1)
ii = np.arange(n, dtype=np.int32)
return self.xk[(ii,ix)]
def mean(self):
"""
Returns a vector containing the mean values of the discrete
distributions.
A vector is returned containing the mean value of each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
return (self.xk * self.pk).sum(1)
def var(self):
"""
Returns a vector containing the variances of the discrete
distributions.
A vector is returned containing the variance for each row of
`xk`, using the probabilities in the corresponding row of
`pk`.
"""
mn = self.mean()
xkc = self.xk - mn[:, None]
        # probability-weighted average of squared deviations from the mean
        return (self.pk * xkc**2).sum(1)
def std(self):
"""
Returns a vector containing the standard deviations of the
discrete distributions.
A vector is returned containing the standard deviation for
each row of `xk`, using the probabilities in the corresponding
row of `pk`.
"""
return np.sqrt(self.var()) |
statsmodels__statsmodels | gam.rst | Example / Description | Generate example for this code | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/gam.rst | [
"statsmodels__statsmodels/statsmodels/gam/api.py",
"statsmodels__statsmodels/statsmodels/gam/smooth_basis.py"
] | Generalized Additive Models (GAM)
Generalized Additive Models allow for penalized estimation of smooth
terms in generalized linear models.
See Module Reference for commands and arguments.
Examples
The following illustrates a Gaussian and a Poisson regression where
categorical variables are treated as linear terms and the effect of two
explanatory variables is captured by penalized B-splines. The data is
from the automobile dataset
https://archive.ics.uci.edu/ml/datasets/automobile
We can load a
dataframe with selected columns from the unit test module.
import numpy as np
import statsmodels.api as sm
from statsmodels.gam.api import GLMGam, BSplines

# import data
from statsmodels.gam.tests.test_penalized import df_autos

# create spline basis for weight and hp
x_spline = df_autos[['weight', 'hp']]
bs = BSplines(x_spline, df=[12, 10], degree=[3, 3])

# penalization weight
alpha = np.array([21833888.8, 6460.38479])

gam_bs = GLMGam.from_formula('city_mpg ~ fuel + drive', data=df_autos,
                             smoother=bs, alpha=alpha)
res_bs = gam_bs.fit()
print(res_bs.summary())

# plot smooth components
res_bs.plot_partial(0, cpr=True)
res_bs.plot_partial(1, cpr=True)

alpha = np.array([8283989284.5829611, 14628207.58927821])
gam_bs = GLMGam.from_formula('city_mpg ~ fuel + drive', data=df_autos,
                             smoother=bs, alpha=alpha,
                             family=sm.families.Poisson())
res_bs = gam_bs.fit()
print(res_bs.summary())

# Optimal penalization weights alpha can be obtained through generalized
# cross-validation or k-fold cross-validation.
# The alpha above are from the unit tests against the R mgcv package.
gam_bs.select_penweight()[0]
gam_bs.select_penweight_kfold()[0] | from .generalized_additive_model import GLMGam  # noqa:F401
from .gam_cross_validation.gam_cross_validation import MultivariateGAMCVPath  # noqa:F401,E501
from .smooth_basis import BSplines, CyclicCubicSplines  # noqa:F401
# -*- coding: utf-8 -*-
"""
Spline and other smoother classes for Generalized Additive Models
Author: Luca Puggini
Author: Josef Perktold
Created on Fri Jun 5 16:32:00 2015
"""
# import useful only for development
from abc import ABCMeta, abstractmethod
from statsmodels.compat.python import with_metaclass
import numpy as np
import pandas as pd
from patsy import dmatrix
from patsy.mgcv_cubic_splines import _get_all_sorted_knots
from statsmodels.tools.linalg import transf_constraints
# Obtain b splines from patsy
def _equally_spaced_knots(x, df):
n_knots = df - 2
x_min = x.min()
x_max = x.max()
knots = np.linspace(x_min, x_max, n_knots)
return knots
def _R_compat_quantile(x, probs):
# return np.percentile(x, 100 * np.asarray(probs))
probs = np.asarray(probs)
quantiles = np.asarray([np.percentile(x, 100 * prob)
for prob in probs.ravel(order="C")])
return quantiles.reshape(probs.shape, order="C")
# FIXME: is this copy/pasted? If so, why do we need it? If not, get
# rid of the try/except for scipy import
# from patsy splines.py
def _eval_bspline_basis(x, knots, degree, deriv='all', include_intercept=True):
try:
from scipy.interpolate import splev
except ImportError:
raise ImportError("spline functionality requires scipy")
# 'knots' are assumed to be already pre-processed. E.g. usually you
# want to include duplicate copies of boundary knots; you should do
# that *before* calling this constructor.
knots = np.atleast_1d(np.asarray(knots, dtype=float))
assert knots.ndim == 1
knots.sort()
degree = int(degree)
x = np.atleast_1d(x)
if x.ndim == 2 and x.shape[1] == 1:
x = x[:, 0]
assert x.ndim == 1
# XX FIXME: when points fall outside of the boundaries, splev and R seem
# to handle them differently. I don't know why yet. So until we understand
# this and decide what to do with it, I'm going to play it safe and
# disallow such points.
if np.min(x) < np.min(knots) or np.max(x) > np.max(knots):
raise NotImplementedError("some data points fall outside the "
"outermost knots, and I'm not sure how "
"to handle them. (Patches accepted!)")
# Thanks to Charles Harris for explaining splev. It's not well
# documented, but basically it computes an arbitrary b-spline basis
    # given knots and degree on some specified points (or derivatives
# thereof, but we don't use that functionality), and then returns some
# linear combination of these basis functions. To get out the basis
# functions themselves, we use linear combinations like [1, 0, 0], [0,
# 1, 0], [0, 0, 1].
# NB: This probably makes it rather inefficient (though I haven't checked
# to be sure -- maybe the fortran code actually skips computing the basis
# function for coefficients that are zero).
# Note: the order of a spline is the same as its degree + 1.
# Note: there are (len(knots) - order) basis functions.
k_const = 1 - int(include_intercept)
n_bases = len(knots) - (degree + 1) - k_const
if deriv in ['all', 0]:
basis = np.empty((x.shape[0], n_bases), dtype=float)
ret = basis
if deriv in ['all', 1]:
der1_basis = np.empty((x.shape[0], n_bases), dtype=float)
ret = der1_basis
if deriv in ['all', 2]:
der2_basis = np.empty((x.shape[0], n_bases), dtype=float)
ret = der2_basis
for i in range(n_bases):
coefs = np.zeros((n_bases + k_const,))
# we are skipping the first column of the basis to drop constant
coefs[i + k_const] = 1
ii = i
if deriv in ['all', 0]:
basis[:, ii] = splev(x, (knots, coefs, degree))
if deriv in ['all', 1]:
der1_basis[:, ii] = splev(x, (knots, coefs, degree), der=1)
if deriv in ['all', 2]:
der2_basis[:, ii] = splev(x, (knots, coefs, degree), der=2)
if deriv == 'all':
return basis, der1_basis, der2_basis
else:
return ret
def compute_all_knots(x, df, degree):
order = degree + 1
n_inner_knots = df - order
lower_bound = np.min(x)
upper_bound = np.max(x)
knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1]
inner_knots = _R_compat_quantile(x, knot_quantiles)
all_knots = np.concatenate(([lower_bound, upper_bound] * order,
inner_knots))
return all_knots, lower_bound, upper_bound, inner_knots
def make_bsplines_basis(x, df, degree):
''' make a spline basis for x '''
all_knots, _, _, _ = compute_all_knots(x, df, degree)
basis, der_basis, der2_basis = _eval_bspline_basis(x, all_knots, degree)
return basis, der_basis, der2_basis
def get_knots_bsplines(x=None, df=None, knots=None, degree=3,
spacing='quantile', lower_bound=None,
upper_bound=None, all_knots=None):
"""knots for use in B-splines
There are two main options for the knot placement
- quantile spacing with multiplicity of boundary knots
- equal spacing extended to boundary or exterior knots
    The first corresponds to splines as used by patsy. The second is the
knot spacing for P-Splines.
"""
# based on patsy memorize_finish
if all_knots is not None:
return all_knots
x_min = x.min()
x_max = x.max()
if degree < 0:
raise ValueError("degree must be greater than 0 (not %r)"
% (degree,))
    if int(degree) != degree:
raise ValueError("degree must be an integer (not %r)"
% (degree,))
# These are guaranteed to all be 1d vectors by the code above
# x = np.concatenate(tmp["xs"])
if df is None and knots is None:
raise ValueError("must specify either df or knots")
order = degree + 1
if df is not None:
n_inner_knots = df - order
if n_inner_knots < 0:
raise ValueError("df=%r is too small for degree=%r; must be >= %s"
% (df, degree,
# We know that n_inner_knots is negative;
# if df were that much larger, it would
# have been zero, and things would work.
df - n_inner_knots))
if knots is not None:
            if len(knots) != n_inner_knots:
raise ValueError("df=%s with degree=%r implies %s knots, "
"but %s knots were provided"
% (df, degree,
n_inner_knots, len(knots)))
elif spacing == 'quantile':
# Need to compute inner knots
knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1]
inner_knots = _R_compat_quantile(x, knot_quantiles)
elif spacing == 'equal':
# Need to compute inner knots
grid = np.linspace(0, 1, n_inner_knots + 2)[1:-1]
inner_knots = x_min + grid * (x_max - x_min)
diff_knots = inner_knots[1] - inner_knots[0]
else:
raise ValueError("incorrect option for spacing")
if knots is not None:
inner_knots = knots
if lower_bound is None:
lower_bound = np.min(x)
if upper_bound is None:
upper_bound = np.max(x)
if lower_bound > upper_bound:
raise ValueError("lower_bound > upper_bound (%r > %r)"
% (lower_bound, upper_bound))
inner_knots = np.asarray(inner_knots)
if inner_knots.ndim > 1:
raise ValueError("knots must be 1 dimensional")
if np.any(inner_knots < lower_bound):
raise ValueError("some knot values (%s) fall below lower bound "
"(%r)"
% (inner_knots[inner_knots < lower_bound],
lower_bound))
if np.any(inner_knots > upper_bound):
raise ValueError("some knot values (%s) fall above upper bound "
"(%r)"
% (inner_knots[inner_knots > upper_bound],
upper_bound))
if spacing == "equal":
diffs = np.arange(1, order + 1) * diff_knots
lower_knots = inner_knots[0] - diffs[::-1]
upper_knots = inner_knots[-1] + diffs
all_knots = np.concatenate((lower_knots, inner_knots, upper_knots))
else:
all_knots = np.concatenate(([lower_bound, upper_bound] * order,
inner_knots))
all_knots.sort()
return all_knots
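# Illustrative sketch (not part of the original module): comparing the two
# knot-spacing options described in the ``get_knots_bsplines`` docstring.
# The data and the helper name are made up for the example.
def _example_knot_spacing():
    import numpy as np

    x = np.linspace(0., 10., 200)
    knots_quantile = get_knots_bsplines(x, df=8, degree=3, spacing='quantile')
    knots_equal = get_knots_bsplines(x, df=8, degree=3, spacing='equal')

    # quantile spacing repeats the boundary knots, while equal spacing
    # extends the equally spaced grid beyond the data range
    return knots_quantile, knots_equal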
def _get_integration_points(knots, k_points=3):
"""add points to each subinterval defined by knots
inserts k_points between each two consecutive knots
"""
k_points = k_points + 1
knots = np.unique(knots)
dxi = np.arange(k_points) / k_points
dxk = np.diff(knots)
dx = dxk[:, None] * dxi
x = np.concatenate(((knots[:-1, None] + dx).ravel(), [knots[-1]]))
return x
def get_covder2(smoother, k_points=4, integration_points=None,
skip_ctransf=False, deriv=2):
"""
Approximate integral of cross product of second derivative of smoother
This uses scipy.integrate simps to compute an approximation to the
integral of the smoother derivative cross-product at knots plus k_points
in between knots.
"""
from scipy.integrate import simps
knots = smoother.knots
    x = _get_integration_points(knots, k_points=3)
    if integration_points is not None:
        # a user-supplied integration grid overrides the default one
        x = integration_points
    d2 = smoother.transform(x, deriv=deriv, skip_ctransf=skip_ctransf)
    covd2 = simps(d2[:, :, None] * d2[:, None, :], x, axis=0)
return covd2
# TODO: this function should be deleted
def make_poly_basis(x, degree, intercept=True):
'''
given a vector x returns poly=(1, x, x^2,..., x^degree)
and its first and second derivative
'''
if intercept:
start = 0
else:
start = 1
nobs = len(x)
basis = np.zeros(shape=(nobs, degree + 1 - start))
der_basis = np.zeros(shape=(nobs, degree + 1 - start))
der2_basis = np.zeros(shape=(nobs, degree + 1 - start))
for i in range(start, degree + 1):
basis[:, i - start] = x ** i
der_basis[:, i - start] = i * x ** (i - 1)
der2_basis[:, i - start] = i * (i - 1) * x ** (i - 2)
return basis, der_basis, der2_basis
# TODO: try to include other kinds of splines from patsy
# x = np.linspace(0, 1, 30)
# df = 10
# degree = 3
# from patsy.mgcv_cubic_splines import cc, cr, te
# all_knots, lower, upper, inner = compute_all_knots(x, df, degree)
# result = cc(x, df=df, knots=all_knots, lower_bound=lower, upper_bound=upper,
# constraints=None)
#
# import matplotlib.pyplot as plt
#
# result = np.array(result)
# print(result.shape)
# plt.plot(result.T)
# plt.show()
class UnivariateGamSmoother(with_metaclass(ABCMeta)):
"""Base Class for single smooth component
"""
def __init__(self, x, constraints=None, variable_name='x'):
self.x = x
self.constraints = constraints
self.variable_name = variable_name
self.nobs, self.k_variables = len(x), 1
base4 = self._smooth_basis_for_single_variable()
if constraints == 'center':
constraints = base4[0].mean(0)[None, :]
if constraints is not None and not isinstance(constraints, str):
ctransf = transf_constraints(constraints)
self.ctransf = ctransf
else:
# subclasses might set ctransf directly
# only used if constraints is None
if not hasattr(self, 'ctransf'):
self.ctransf = None
self.basis, self.der_basis, self.der2_basis, self.cov_der2 = base4
if self.ctransf is not None:
ctransf = self.ctransf
# transform attributes that are not None
if base4[0] is not None:
self.basis = base4[0].dot(ctransf)
if base4[1] is not None:
self.der_basis = base4[1].dot(ctransf)
if base4[2] is not None:
self.der2_basis = base4[2].dot(ctransf)
if base4[3] is not None:
self.cov_der2 = ctransf.T.dot(base4[3]).dot(ctransf)
self.dim_basis = self.basis.shape[1]
self.col_names = [self.variable_name + "_s" + str(i)
for i in range(self.dim_basis)]
@abstractmethod
def _smooth_basis_for_single_variable(self):
return
class UnivariateGenericSmoother(UnivariateGamSmoother):
"""Generic single smooth component
"""
def __init__(self, x, basis, der_basis, der2_basis, cov_der2,
variable_name='x'):
self.basis = basis
self.der_basis = der_basis
self.der2_basis = der2_basis
self.cov_der2 = cov_der2
super(UnivariateGenericSmoother, self).__init__(
x, variable_name=variable_name)
def _smooth_basis_for_single_variable(self):
return self.basis, self.der_basis, self.der2_basis, self.cov_der2
class UnivariatePolynomialSmoother(UnivariateGamSmoother):
"""polynomial single smooth component
"""
def __init__(self, x, degree, variable_name='x'):
self.degree = degree
super(UnivariatePolynomialSmoother, self).__init__(
x, variable_name=variable_name)
def _smooth_basis_for_single_variable(self):
# TODO: unclear description
"""
given a vector x returns poly=(1, x, x^2,..., x^degree)
and its first and second derivative
"""
basis = np.zeros(shape=(self.nobs, self.degree))
der_basis = np.zeros(shape=(self.nobs, self.degree))
der2_basis = np.zeros(shape=(self.nobs, self.degree))
for i in range(self.degree):
dg = i + 1
basis[:, i] = self.x ** dg
der_basis[:, i] = dg * self.x ** (dg - 1)
if dg > 1:
der2_basis[:, i] = dg * (dg - 1) * self.x ** (dg - 2)
else:
der2_basis[:, i] = 0
cov_der2 = np.dot(der2_basis.T, der2_basis)
return basis, der_basis, der2_basis, cov_der2
class UnivariateBSplines(UnivariateGamSmoother):
"""B-Spline single smooth component
This creates and holds the B-Spline basis function for one
component.
Parameters
----------
x : array, 1-D
underlying explanatory variable for smooth terms.
df : int
        number of basis functions or degrees of freedom
degree : int
degree of the spline
include_intercept : bool
If False, then the basis functions are transformed so that they
do not include a constant. This avoids perfect collinearity if
a constant or several components are included in the model.
constraints : None, string or array
Constraints are used to transform the basis functions to satisfy
those constraints.
`constraints = 'center'` applies a linear transform to remove the
constant and center the basis functions.
variable_name : None or str
        The name for the underlying explanatory variable, x, used for
creating the column and parameter names for the basis functions.
covder2_kwds : None or dict
options for computing the penalty matrix from the second derivative
of the spline.
knot_kwds : None or list of dict
option for the knot selection.
By default knots are selected in the same way as in patsy, however the
number of knots is independent of keeping or removing the constant.
Interior knot selection is based on quantiles of the data and is the
same in patsy and mgcv. Boundary points are at the limits of the data
range.
The available options use with `get_knots_bsplines` are
- knots : None or array
interior knots
- spacing : 'quantile' or 'equal'
- lower_bound : None or float
location of lower boundary knots, all boundary knots are at the same
point
- upper_bound : None or float
location of upper boundary knots, all boundary knots are at the same
point
- all_knots : None or array
If all knots are provided, then those will be taken as given and
all other options will be ignored.
"""
def __init__(self, x, df, degree=3, include_intercept=False,
constraints=None, variable_name='x',
covder2_kwds=None, **knot_kwds):
self.degree = degree
self.df = df
self.include_intercept = include_intercept
self.knots = get_knots_bsplines(x, degree=degree, df=df, **knot_kwds)
self.covder2_kwds = (covder2_kwds if covder2_kwds is not None
else {})
super(UnivariateBSplines, self).__init__(
x, constraints=constraints, variable_name=variable_name)
def _smooth_basis_for_single_variable(self):
basis, der_basis, der2_basis = _eval_bspline_basis(
self.x, self.knots, self.degree,
include_intercept=self.include_intercept)
# cov_der2 = np.dot(der2_basis.T, der2_basis)
cov_der2 = get_covder2(self, skip_ctransf=True,
**self.covder2_kwds)
return basis, der_basis, der2_basis, cov_der2
def transform(self, x_new, deriv=0, skip_ctransf=False):
"""create the spline basis for new observations
The main use of this stateful transformation is for prediction
using the same specification of the spline basis.
Parameters
----------
x_new : array
observations of the underlying explanatory variable
deriv : int
which derivative of the spline basis to compute
            This is an option for internal computation.
skip_ctransf : bool
whether to skip the constraint transform
            This is an option for internal computation.
Returns
-------
basis : ndarray
design matrix for the spline basis for given ``x_new``
"""
if x_new is None:
x_new = self.x
exog = _eval_bspline_basis(x_new, self.knots, self.degree,
deriv=deriv,
include_intercept=self.include_intercept)
# ctransf does not exist yet when cov_der2 is computed
ctransf = getattr(self, 'ctransf', None)
if ctransf is not None and not skip_ctransf:
exog = exog.dot(self.ctransf)
return exog
class UnivariateCubicSplines(UnivariateGamSmoother):
"""Cubic Spline single smooth component
    Cubic splines as described in Wood's book, chapter 3
"""
def __init__(self, x, df, constraints=None, transform='domain',
variable_name='x'):
self.degree = 3
self.df = df
self.transform_data_method = transform
self.x = x = self.transform_data(x, initialize=True)
self.knots = _equally_spaced_knots(x, df)
super(UnivariateCubicSplines, self).__init__(
x, constraints=constraints, variable_name=variable_name)
def transform_data(self, x, initialize=False):
tm = self.transform_data_method
if tm is None:
return x
if initialize is True:
if tm == 'domain':
self.domain_low = x.min(0)
self.domain_upp = x.max(0)
elif isinstance(tm, tuple):
self.domain_low = tm[0]
self.domain_upp = tm[1]
self.transform_data_method = 'domain'
else:
raise ValueError("transform should be None, 'domain' "
"or a tuple")
self.domain_diff = self.domain_upp - self.domain_low
if self.transform_data_method == 'domain':
x = (x - self.domain_low) / self.domain_diff
return x
else:
raise ValueError("incorrect transform_data_method")
def _smooth_basis_for_single_variable(self):
basis = self._splines_x()[:, :-1]
# demean except for constant, does not affect derivatives
if not self.constraints == 'none':
self.transf_mean = basis[:, 1:].mean(0)
basis[:, 1:] -= self.transf_mean
else:
self.transf_mean = np.zeros(basis.shape[1])
s = self._splines_s()[:-1, :-1]
if not self.constraints == 'none':
ctransf = np.diag(1/np.max(np.abs(basis), axis=0))
else:
ctransf = np.eye(basis.shape[1])
# use np.eye to avoid rescaling
# ctransf = np.eye(basis.shape[1])
if self.constraints == 'no-const':
ctransf = ctransf[1:]
self.ctransf = ctransf
return basis, None, None, s
def _rk(self, x, z):
p1 = ((z - 1 / 2) ** 2 - 1 / 12) * ((x - 1 / 2) ** 2 - 1 / 12) / 4
p2 = ((np.abs(z - x) - 1 / 2) ** 4 -
1 / 2 * (np.abs(z - x) - 1 / 2) ** 2 +
7 / 240) / 24.
return p1 - p2
def _splines_x(self, x=None):
if x is None:
x = self.x
n_columns = len(self.knots) + 2
nobs = x.shape[0]
basis = np.ones(shape=(nobs, n_columns))
basis[:, 1] = x
# for loop equivalent to outer(x, xk, fun=rk)
for i, xi in enumerate(x):
for j, xkj in enumerate(self.knots):
s_ij = self._rk(xi, xkj)
basis[i, j + 2] = s_ij
return basis
def _splines_s(self):
q = len(self.knots) + 2
s = np.zeros(shape=(q, q))
for i, x1 in enumerate(self.knots):
for j, x2 in enumerate(self.knots):
s[i + 2, j + 2] = self._rk(x1, x2)
return s
def transform(self, x_new):
x_new = self.transform_data(x_new, initialize=False)
exog = self._splines_x(x_new)
exog[:, 1:] -= self.transf_mean
if self.ctransf is not None:
exog = exog.dot(self.ctransf)
return exog
class UnivariateCubicCyclicSplines(UnivariateGamSmoother):
"""cyclic cubic regression spline single smooth component
This creates and holds the Cyclic CubicSpline basis function for one
component.
Parameters
----------
x : array, 1-D
underlying explanatory variable for smooth terms.
df : int
        number of basis functions or degrees of freedom
degree : int
degree of the spline
include_intercept : bool
If False, then the basis functions are transformed so that they
do not include a constant. This avoids perfect collinearity if
a constant or several components are included in the model.
constraints : None, string or array
Constraints are used to transform the basis functions to satisfy
those constraints.
`constraints = 'center'` applies a linear transform to remove the
constant and center the basis functions.
variable_name : None or str
        The name for the underlying explanatory variable, x, used for
creating the column and parameter names for the basis functions.
"""
def __init__(self, x, df, constraints=None, variable_name='x'):
self.degree = 3
self.df = df
self.x = x
self.knots = _equally_spaced_knots(x, df)
super(UnivariateCubicCyclicSplines, self).__init__(
x, constraints=constraints, variable_name=variable_name)
def _smooth_basis_for_single_variable(self):
basis = dmatrix("cc(x, df=" + str(self.df) + ") - 1", {"x": self.x})
self.design_info = basis.design_info
n_inner_knots = self.df - 2 + 1 # +n_constraints
# TODO: from CubicRegressionSplines class
all_knots = _get_all_sorted_knots(self.x, n_inner_knots=n_inner_knots,
inner_knots=None,
lower_bound=None, upper_bound=None)
b, d = self._get_b_and_d(all_knots)
s = self._get_s(b, d)
return basis, None, None, s
def _get_b_and_d(self, knots):
"""Returns mapping of cyclic cubic spline values to 2nd derivatives.
.. note:: See 'Generalized Additive Models', Simon N. Wood, 2006,
pp 146-147
Parameters
----------
knots : ndarray
The 1-d array knots used for cubic spline parametrization,
must be sorted in ascending order.
Returns
-------
b, d: ndarrays
arrays for mapping cyclic cubic spline values at knots to
second derivatives.
penalty matrix is equal to ``s = d.T.dot(b^-1).dot(d)``
"""
h = knots[1:] - knots[:-1]
n = knots.size - 1
# b and d are defined such that the penalty matrix is equivalent to:
# s = d.T.dot(b^-1).dot(d)
# reference in particular to pag 146 of Wood's book
b = np.zeros((n, n)) # the b matrix on page 146 of Wood's book
d = np.zeros((n, n)) # the d matrix on page 146 of Wood's book
b[0, 0] = (h[n - 1] + h[0]) / 3.
b[0, n - 1] = h[n - 1] / 6.
b[n - 1, 0] = h[n - 1] / 6.
d[0, 0] = -1. / h[0] - 1. / h[n - 1]
d[0, n - 1] = 1. / h[n - 1]
d[n - 1, 0] = 1. / h[n - 1]
for i in range(1, n):
b[i, i] = (h[i - 1] + h[i]) / 3.
b[i, i - 1] = h[i - 1] / 6.
b[i - 1, i] = h[i - 1] / 6.
d[i, i] = -1. / h[i - 1] - 1. / h[i]
d[i, i - 1] = 1. / h[i - 1]
d[i - 1, i] = 1. / h[i - 1]
return b, d
def _get_s(self, b, d):
return d.T.dot(np.linalg.inv(b)).dot(d)
def transform(self, x_new):
exog = dmatrix(self.design_info, {"x": x_new})
if self.ctransf is not None:
exog = exog.dot(self.ctransf)
return exog
class AdditiveGamSmoother(with_metaclass(ABCMeta)):
"""Base class for additive smooth components
"""
def __init__(self, x, variable_names=None, include_intercept=False,
**kwargs):
# get pandas names before using asarray
if isinstance(x, pd.DataFrame):
data_names = x.columns.tolist()
elif isinstance(x, pd.Series):
data_names = [x.name]
else:
data_names = None
x = np.asarray(x)
if x.ndim == 1:
self.x = x.copy()
self.x.shape = (len(x), 1)
else:
self.x = x
self.nobs, self.k_variables = self.x.shape
if isinstance(include_intercept, bool):
self.include_intercept = [include_intercept] * self.k_variables
else:
self.include_intercept = include_intercept
if variable_names is None:
if data_names is not None:
self.variable_names = data_names
else:
self.variable_names = ['x' + str(i)
for i in range(self.k_variables)]
else:
self.variable_names = variable_names
self.smoothers = self._make_smoothers_list()
self.basis = np.hstack(list(smoother.basis
for smoother in self.smoothers))
self.dim_basis = self.basis.shape[1]
self.penalty_matrices = [smoother.cov_der2
for smoother in self.smoothers]
self.col_names = []
for smoother in self.smoothers:
self.col_names.extend(smoother.col_names)
self.mask = []
last_column = 0
for smoother in self.smoothers:
mask = np.array([False] * self.dim_basis)
mask[last_column:smoother.dim_basis + last_column] = True
last_column = last_column + smoother.dim_basis
self.mask.append(mask)
@abstractmethod
def _make_smoothers_list(self):
pass
def transform(self, x_new):
"""create the spline basis for new observations
The main use of this stateful transformation is for prediction
using the same specification of the spline basis.
Parameters
----------
x_new: array
observations of the underlying explanatory variable
Returns
-------
basis : ndarray
design matrix for the spline basis for given ``x_new``.
"""
exog = np.hstack(list(self.smoothers[i].transform(x_new[:, i])
for i in range(self.k_variables)))
return exog
class GenericSmoothers(AdditiveGamSmoother):
"""generic class for additive smooth components for GAM
"""
def __init__(self, x, smoothers):
self.smoothers = smoothers
super(GenericSmoothers, self).__init__(x, variable_names=None)
def _make_smoothers_list(self):
return self.smoothers
class PolynomialSmoother(AdditiveGamSmoother):
"""additive polynomial components for GAM
"""
def __init__(self, x, degrees, variable_names=None):
self.degrees = degrees
super(PolynomialSmoother, self).__init__(x,
variable_names=variable_names)
def _make_smoothers_list(self):
smoothers = []
for v in range(self.k_variables):
uv_smoother = UnivariatePolynomialSmoother(
self.x[:, v],
degree=self.degrees[v],
variable_name=self.variable_names[v])
smoothers.append(uv_smoother)
return smoothers
class BSplines(AdditiveGamSmoother):
"""additive smooth components using B-Splines
This creates and holds the B-Spline basis function for several
components.
Parameters
----------
x : array_like, 1-D or 2-D
underlying explanatory variable for smooth terms.
If 2-dimensional, then observations should be in rows and
explanatory variables in columns.
df : int
        number of basis functions or degrees of freedom
degree : int
degree of the spline
include_intercept : bool
If False, then the basis functions are transformed so that they
do not include a constant. This avoids perfect collinearity if
a constant or several components are included in the model.
constraints : None, string or array
Constraints are used to transform the basis functions to satisfy
those constraints.
`constraints = 'center'` applies a linear transform to remove the
constant and center the basis functions.
variable_names : None or list of strings
        The names for the underlying explanatory variables, x, used for
creating the column and parameter names for the basis functions.
If ``x`` is a pandas object, then the names will be taken from it.
knot_kwds : None or list of dict
option for the knot selection.
By default knots are selected in the same way as in patsy, however the
number of knots is independent of keeping or removing the constant.
Interior knot selection is based on quantiles of the data and is the
same in patsy and mgcv. Boundary points are at the limits of the data
range.
The available options use with `get_knots_bsplines` are
- knots : None or array
interior knots
- spacing : 'quantile' or 'equal'
- lower_bound : None or float
location of lower boundary knots, all boundary knots are at the same
point
- upper_bound : None or float
location of upper boundary knots, all boundary knots are at the same
point
- all_knots : None or array
If all knots are provided, then those will be taken as given and
all other options will be ignored.
Attributes
----------
smoothers : list of univariate smooth component instances
basis : design matrix, array of spline bases columns for all components
penalty_matrices : list of penalty matrices, one for each smooth term
dim_basis : number of columns in the basis
k_variables : number of smooth components
col_names : created names for the basis columns
There are additional attributes about the specification of the splines
and some attributes mainly for internal use.
Notes
-----
A constant in the spline basis function can be removed in two different
ways.
The first is by dropping one basis column and normalizing the
remaining columns. This is obtained by the default
``include_intercept=False, constraints=None``
The second option is by using the centering transform which is a linear
transformation of all basis functions. As a consequence of the
transformation, the B-spline basis functions do not have locally bounded
support anymore. This is obtained ``constraints='center'``. In this case
``include_intercept`` will be automatically set to True to avoid
dropping an additional column.
"""
def __init__(self, x, df, degree, include_intercept=False,
constraints=None, variable_names=None, knot_kwds=None):
self.degrees = degree
self.dfs = df
self.knot_kwds = knot_kwds
# TODO: move attaching constraints to super call
self.constraints = constraints
if constraints == 'center':
include_intercept = True
super(BSplines, self).__init__(x, include_intercept=include_intercept,
variable_names=variable_names)
def _make_smoothers_list(self):
smoothers = []
for v in range(self.k_variables):
kwds = self.knot_kwds[v] if self.knot_kwds else {}
uv_smoother = UnivariateBSplines(
self.x[:, v],
df=self.dfs[v], degree=self.degrees[v],
include_intercept=self.include_intercept[v],
constraints=self.constraints,
variable_name=self.variable_names[v], **kwds)
smoothers.append(uv_smoother)
return smoothers
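# Illustrative sketch (not part of the original module): building a BSplines
# basis with the centering constraint discussed in the class docstring and
# reusing it for new observations via ``transform``.  The data and the helper
# name are made up for the example.
def _example_bsplines_centered():
    import numpy as np

    x = np.column_stack([np.linspace(0., 1., 100),
                         np.linspace(0., 5., 100)])
    bs = BSplines(x, df=[6, 6], degree=[3, 3], constraints='center')

    # evaluate the same basis specification at new points inside the
    # original data range
    x_new = np.column_stack([np.linspace(0.1, 0.9, 10),
                             np.linspace(0.5, 4.5, 10)])
    basis_new = bs.transform(x_new)
    return bs.basis.shape, basis_new.shape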
class CubicSplines(AdditiveGamSmoother):
"""additive smooth components using cubic splines as in Wood 2006.
Note, these splines do NOT use the same spline basis as
``Cubic Regression Splines``.
"""
def __init__(self, x, df, constraints='center', transform='domain',
variable_names=None):
self.dfs = df
self.constraints = constraints
self.transform = transform
super(CubicSplines, self).__init__(x, constraints=constraints,
variable_names=variable_names)
def _make_smoothers_list(self):
smoothers = []
for v in range(self.k_variables):
uv_smoother = UnivariateCubicSplines(
self.x[:, v], df=self.dfs[v],
constraints=self.constraints,
transform=self.transform,
variable_name=self.variable_names[v])
smoothers.append(uv_smoother)
return smoothers
class CyclicCubicSplines(AdditiveGamSmoother):
"""additive smooth components using cyclic cubic regression splines
This spline basis is the same as in patsy.
Parameters
----------
x : array_like, 1-D or 2-D
underlying explanatory variable for smooth terms.
If 2-dimensional, then observations should be in rows and
explanatory variables in columns.
df : int
        number of basis functions or degrees of freedom
constraints : None, string or array
Constraints are used to transform the basis functions to satisfy
those constraints.
variable_names : None or list of strings
        The names for the underlying explanatory variables, x, used for
creating the column and parameter names for the basis functions.
If ``x`` is a pandas object, then the names will be taken from it.
"""
def __init__(self, x, df, constraints=None, variable_names=None):
self.dfs = df
# TODO: move attaching constraints to super call
self.constraints = constraints
super(CyclicCubicSplines, self).__init__(x,
variable_names=variable_names)
def _make_smoothers_list(self):
smoothers = []
for v in range(self.k_variables):
uv_smoother = UnivariateCubicCyclicSplines(
self.x[:, v],
df=self.dfs[v], constraints=self.constraints,
variable_name=self.variable_names[v])
smoothers.append(uv_smoother)
return smoothers
# class CubicRegressionSplines(BaseCubicSplines):
# # TODO: this class is still not tested
#
# def __init__(self, x, df=10):
# import warnings
# warnings.warn("This class is still not tested and it is probably"
# " not working properly. "
# "I suggest to use another smoother", Warning)
#
# super(CubicRegressionSplines, self).__init__(x, df)
#
# self.basis = dmatrix("cc(x, df=" + str(df) + ") - 1", {"x": x})
# n_inner_knots = df - 2 + 1 # +n_constraints
# # TODO: According to CubicRegressionSplines class this should be
# # n_inner_knots = df - 2
# all_knots = _get_all_sorted_knots(x, n_inner_knots=n_inner_knots,
# inner_knots=None,
# lower_bound=None, upper_bound=None)
#
# b, d = self._get_b_and_d(all_knots)
# self.s = self._get_s(b, d)
#
# self.dim_basis = self.basis.shape[1]
#
# def _get_b_and_d(self, knots):
#
# h = knots[1:] - knots[:-1]
# n = knots.size - 1
#
# # b and d are defined such that the penalty matrix is equivalent to:
# # s = d.T.dot(b^-1).dot(d)
# # reference in particular to pag 146 of Wood's book
# b = np.zeros((n, n)) # the b matrix on page 146 of Wood's book
# d = np.zeros((n, n)) # the d matrix on page 146 of Wood's book
#
# for i in range(n-2):
# d[i, i] = 1/h[i]
# d[i, i+1] = -1/h[i] - 1/h[i+1]
# d[i, i+2] = 1/h[i+1]
#
# b[i, i] = (h[i] + h[i+1])/3
#
# for i in range(n-3):
# b[i, i+1] = h[i+1]/6
# b[i+1, i] = h[i+1]/6
#
# return b, d
#
# def _get_s(self, b, d):
#
# return d.T.dot(np.linalg.pinv(b)).dot(d) |
|
statsmodels__statsmodels | gee.rst | Example / Description | Generate example for this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/gee.rst | [
"statsmodels__statsmodels/statsmodels/genmod/families/family.py",
"statsmodels__statsmodels/statsmodels/genmod/qif.py",
"statsmodels__statsmodels/statsmodels/genmod/families/links.py",
"statsmodels__statsmodels/statsmodels/genmod/cov_struct.py",
"statsmodels__statsmodels/statsmodels/genmod/generalized_estimating_equations.py"
] | Generalized Estimating Equations
Generalized Estimating Equations estimate generalized linear models for
panel, cluster or repeated measures data when the observations are
possibly correlated withing a cluster but uncorrelated across clusters.
It supports estimation of the same one-parameter exponential families as
Generalized Linear models (GLM).
See Module Reference for commands and arguments.
Examples
The following illustrates a Poisson regression with exchangeable
correlation within clusters using data on epilepsy seizures.
import statsmodels.api as sm
import statsmodels.formula.api as smf

data = sm.datasets.get_rdataset('epil', package='MASS').data

fam = sm.families.Poisson()
ind = sm.cov_struct.Exchangeable()
mod = smf.gee("y ~ age + trt + base", "subject", data,
              cov_struct=ind, family=fam)
res = mod.fit()
print(res.summary())
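As a rough additional sketch (not taken from the original documentation), the
same model could be refit with a different working covariance structure, for
example an independence structure, to check how sensitive the estimates are to
that choice. The variable names below reuse those from the example above.

ind2 = sm.cov_struct.Independence()
mod2 = smf.gee("y ~ age + trt + base", "subject", data,
               cov_struct=ind2, family=fam)
res2 = mod2.fit()
print(res2.summary())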
Several notebook examples of the use of GEE can be found on the Wiki:
Wiki notebooks for GEE
Link Functions
The link functions are the same as for GLM, currently implemented are
the following. Not all link functions are available for each
distribution family. The list of available link functions can be
obtained by
>>> sm.families.family.<familyname>.links | '''
The one parameter exponential family distributions used by GLM.
'''
# TODO: quasi, quasibinomial, quasipoisson
# see
# http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import warnings
import inspect
import numpy as np
from scipy import special
from . import links as L
from . import varfuncs as V
FLOAT_EPS = np.finfo(float).eps
class Family(object):
"""
The parent class for one-parameter exponential families.
Parameters
----------
link : a link function instance
Link is the linear transformation function.
See the individual families for available links.
variance : a variance function
Measures the variance as a function of the mean probabilities.
See the individual families for the default variance function.
See Also
--------
:ref:`links`
"""
# TODO: change these class attributes, use valid somewhere...
valid = [-np.inf, np.inf]
links = []
def _setlink(self, link):
"""
Helper method to set the link for a family.
Raises a ``ValueError`` exception if the link is not available. Note
that the error message might not be that informative because it tells
you that the link should be in the base class for the link function.
See statsmodels.genmod.generalized_linear_model.GLM for a list of
appropriate links for each family but note that not all of these are
currently available.
"""
# TODO: change the links class attribute in the families to hold
# meaningful information instead of a list of links instances such as
# [<statsmodels.family.links.Log object at 0x9a4240c>,
# <statsmodels.family.links.Power object at 0x9a423ec>,
# <statsmodels.family.links.Power object at 0x9a4236c>]
# for Poisson...
self._link = link
if not isinstance(link, L.Link):
raise TypeError("The input should be a valid Link object.")
if hasattr(self, "links"):
validlink = max([isinstance(link, _) for _ in self.links])
if not validlink:
errmsg = "Invalid link for family, should be in %s. (got %s)"
raise ValueError(errmsg % (repr(self.links), link))
def _getlink(self):
"""
Helper method to get the link for a family.
"""
return self._link
# link property for each family is a pointer to link instance
link = property(_getlink, _setlink, doc="Link function for family")
def __init__(self, link, variance):
if inspect.isclass(link):
warnmssg = "Calling Family(..) with a link class as argument "
warnmssg += "is deprecated.\n"
warnmssg += "Use an instance of a link class instead."
lvl = 2 if type(self) is Family else 3
warnings.warn(warnmssg,
category=DeprecationWarning, stacklevel=lvl)
self.link = link()
else:
self.link = link
self.variance = variance
def starting_mu(self, y):
r"""
Starting value for mu in the IRLS algorithm.
Parameters
----------
y : array
The untransformed response variable.
Returns
-------
mu_0 : array
The first guess on the transformed response variable.
Notes
-----
.. math::
\mu_0 = (Y + \overline{Y})/2
Only the Binomial family takes a different initial value.
"""
return (y + y.mean())/2.
def weights(self, mu):
r"""
Weights for IRLS steps
Parameters
----------
mu : array_like
The transformed mean response variable in the exponential family
Returns
-------
w : array
The weights for the IRLS steps
Notes
-----
.. math::
w = 1 / (g'(\mu)^2 * Var(\mu))
"""
return 1. / (self.link.deriv(mu)**2 * self.variance(mu))
def deviance(self, endog, mu, var_weights=1., freq_weights=1., scale=1.):
r"""
The deviance function evaluated at (endog, mu, var_weights,
freq_weights, scale) for the distribution.
Deviance is usually defined as twice the loglikelihood ratio.
Parameters
----------
endog : array_like
The endogenous response variable
mu : array_like
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
freq_weights : array_like
1d array of frequency weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
-------
Deviance : array
The value of deviance function defined below.
Notes
-----
Deviance is defined
.. math::
D = 2\sum_i (freq\_weights_i * var\_weights *
(llf(endog_i, endog_i) - llf(endog_i, \mu_i)))
where y is the endogenous variable. The deviance functions are
analytically defined for each family.
Internally, we calculate deviance as:
.. math::
D = \sum_i freq\_weights_i * var\_weights * resid\_dev_i / scale
"""
resid_dev = self._resid_dev(endog, mu)
return np.sum(resid_dev * freq_weights * var_weights / scale)
def resid_dev(self, endog, mu, var_weights=1., scale=1.):
r"""
The deviance residuals
Parameters
----------
endog : array_like
The endogenous response variable
mu : array_like
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
The deviance residuals are defined by the contribution D_i of
observation i to the deviance as
.. math::
resid\_dev_i = sign(y_i-\mu_i) \sqrt{D_i}
D_i is calculated from the _resid_dev method in each family.
Distribution-specific documentation of the calculation is available
there.
"""
resid_dev = self._resid_dev(endog, mu)
resid_dev *= var_weights / scale
return np.sign(endog - mu) * np.sqrt(np.clip(resid_dev, 0., np.inf))
def fitted(self, lin_pred):
r"""
Fitted values based on linear predictors lin_pred.
Parameters
----------
lin_pred : array
Values of the linear predictor of the model.
:math:`X \cdot \beta` in a classical linear model.
Returns
-------
mu : array
The mean response variables given by the inverse of the link
function.
"""
fits = self.link.inverse(lin_pred)
return fits
def predict(self, mu):
"""
Linear predictors based on given mu values.
Parameters
----------
mu : array
The mean response variables
Returns
-------
lin_pred : array
Linear predictors based on the mean response variables. The value
of the link function at the given mu.
"""
return self.link(mu)
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
This is defined for each family. endog and mu are not restricted to
``endog`` and ``mu`` respectively. For instance, you could call
both ``loglike(endog, endog)`` and ``loglike(endog, mu)`` to get the
log-likelihood ratio.
"""
raise NotImplementedError
def loglike(self, endog, mu, var_weights=1., freq_weights=1., scale=1.):
r"""
The log-likelihood function in terms of the fitted mean response.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
freq_weights : array_like
1d array of frequency weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, freq_weights, scale) as defined below.
Notes
-----
Where :math:`ll_i` is the by-observation log-likelihood:
.. math::
ll = \sum(ll_i * freq\_weights_i)
``ll_i`` is defined for each family. endog and mu are not restricted
to ``endog`` and ``mu`` respectively. For instance, you could call
both ``loglike(endog, endog)`` and ``loglike(endog, mu)`` to get the
log-likelihood ratio.
"""
ll_obs = self.loglike_obs(endog, mu, var_weights, scale)
return np.sum(ll_obs * freq_weights)
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
See Also
--------
statsmodels.genmod.families.family.Family : `resid_anscombe` for the
individual families for more information
Notes
-----
Anscombe residuals are defined by
.. math::
resid\_anscombe_i = \frac{A(y)-A(\mu)}{A'(\mu)\sqrt{Var[\mu]}} *
\sqrt(var\_weights)
where :math:`A'(y)=v(y)^{-\frac{1}{3}}` and :math:`v(\mu)` is the
variance function :math:`Var[y]=\frac{\phi}{w}v(mu)`.
        The transformation :math:`A(y)` makes the residuals more normally
distributed.
"""
raise NotImplementedError
def _clean(self, x):
"""
Helper function to trim the data so that it is in (0,inf)
Notes
-----
The need for this function was discovered through usage and its
possible that other families might need a check for validity of the
domain.
"""
return np.clip(x, FLOAT_EPS, np.inf)
class Poisson(Family):
"""
Poisson exponential family.
Parameters
----------
link : a link instance, optional
The default link for the Poisson family is the log link. Available
links are log, identity, and sqrt. See statsmodels.families.links for
more information.
Attributes
----------
Poisson.link : a link instance
The link function of the Poisson instance.
Poisson.variance : varfuncs instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.mu
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
"""
links = [L.log, L.identity, L.sqrt]
variance = V.mu
valid = [0, np.inf]
safe_links = [L.Log, ]
def __init__(self, link=None):
if link is None:
link = L.log()
super(Poisson, self).__init__(link=link, variance=Poisson.variance)
def _resid_dev(self, endog, mu):
r"""
Poisson deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
.. math::
resid\_dev_i = 2 * (endog_i * \ln(endog_i / \mu_i) -
(endog_i - \mu_i))
"""
endog_mu = self._clean(endog / mu)
resid_dev = endog * np.log(endog_mu) - (endog - mu)
return 2 * resid_dev
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Poisson distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
.. math::
ll_i = var\_weights_i / scale * (endog_i * \ln(\mu_i) - \mu_i -
\ln \Gamma(endog_i + 1))
"""
return var_weights / scale * (endog * np.log(mu) - mu -
special.gammaln(endog + 1))
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Poisson family as defined below
Notes
-----
.. math::
resid\_anscombe_i = (3/2) * (endog_i^{2/3} - \mu_i^{2/3}) /
\mu_i^{1/6} * \sqrt(var\_weights)
"""
resid = ((3 / 2.) * (endog**(2 / 3.) - mu**(2 / 3.)) /
(mu ** (1 / 6.) * scale ** 0.5))
resid *= np.sqrt(var_weights)
return resid
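# Usage sketch added by the editor (not part of the original statsmodels
# source): evaluating the Poisson family directly. The counts and fitted
# means below are illustrative assumptions only.
def _example_poisson_family():
    fam = Poisson()                           # default log link
    endog = np.array([0., 1., 3., 5.])        # observed counts
    mu = np.array([0.5, 1.2, 2.8, 4.9])       # fitted means
    llf = np.sum(fam.loglike_obs(endog, mu))  # total log-likelihood
    dev = np.sum(fam._resid_dev(endog, mu))   # total deviance contribution
    resid = fam.resid_anscombe(endog, mu)     # approximately normal residuals
    return llf, dev, resid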
class Gaussian(Family):
"""
Gaussian exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gaussian family is the identity link.
Available links are log, identity, and inverse.
See statsmodels.genmod.families.links for more information.
Attributes
----------
Gaussian.link : a link instance
The link function of the Gaussian instance
Gaussian.variance : varfunc instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.constant
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.constant
safe_links = links
def __init__(self, link=None):
if link is None:
link = L.identity()
super(Gaussian, self).__init__(link=link, variance=Gaussian.variance)
def _resid_dev(self, endog, mu):
r"""
Gaussian deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
--------
.. math::
resid\_dev_i = (endog_i - \mu_i) ** 2
"""
return (endog - mu) ** 2
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Gaussian distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
If the link is the identity link function then the
loglikelihood function is the same as the classical OLS model.
.. math::
llf = -nobs / 2 * (\log(SSR) + (1 + \log(2 \pi / nobs)))
where
.. math::
SSR = \sum_i (Y_i - g^{-1}(\mu_i))^2
If the link is not the identity link then the loglikelihood
function is defined as
.. math::
ll_i = -1 / 2 * (var\_weights_i * (Y_i - \mu_i)^2 / scale +
\log(2 * \pi * scale / var\_weights_i))
"""
ll_obs = -var_weights * (endog - mu) ** 2 / scale
ll_obs += -np.log(scale / var_weights) - np.log(2 * np.pi)
ll_obs /= 2
return ll_obs
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gaussian family defined below
Notes
-----
For the Gaussian distribution, Anscombe residuals are the same as
deviance residuals.
.. math::
resid\_anscombe_i = (Y_i - \mu_i) / \sqrt{scale} *
\sqrt(var\_weights)
"""
resid = (endog - mu) / scale ** 0.5
resid *= np.sqrt(var_weights)
return resid
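# Usage sketch added by the editor (not part of the original source): for
# the Gaussian family the deviance is the residual sum of squares and the
# Anscombe residuals equal the ordinary residuals, as the docstrings above
# state. The data are illustrative assumptions only.
def _example_gaussian_family():
    fam = Gaussian()                          # default identity link
    endog = np.array([1.0, 2.0, 3.5])
    mu = np.array([1.2, 1.8, 3.3])
    ssr = np.sum(fam._resid_dev(endog, mu))   # sum((endog - mu) ** 2)
    anscombe = fam.resid_anscombe(endog, mu)  # equals endog - mu when scale=1
    return ssr, anscombe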
class Gamma(Family):
"""
Gamma exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Gamma family is the inverse link.
Available links are log, identity, and inverse.
See statsmodels.genmod.families.links for more information.
Attributes
----------
Gamma.link : a link instance
The link function of the Gamma instance
Gamma.variance : varfunc instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.mu_squared
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
"""
links = [L.log, L.identity, L.inverse_power]
variance = V.mu_squared
safe_links = [L.Log, ]
def __init__(self, link=None):
if link is None:
link = L.inverse_power()
super(Gamma, self).__init__(link=link, variance=Gamma.variance)
def _resid_dev(self, endog, mu):
r"""
Gamma deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
.. math::
resid\_dev_i = 2 * ((endog_i - \mu_i) / \mu_i -
\log(endog_i / \mu_i))
"""
endog_mu = self._clean(endog / mu)
resid_dev = -np.log(endog_mu) + (endog - mu) / mu
return 2 * resid_dev
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Gamma distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
.. math::
ll_i = var\_weights_i / scale * (\ln(var\_weights_i * endog_i /
(scale * \mu_i)) - (var\_weights_i * endog_i) /
(scale * \mu_i)) - \ln \Gamma(var\_weights_i / scale) - \ln(\mu_i)
"""
endog_mu = self._clean(endog / mu)
weight_scale = var_weights / scale
ll_obs = weight_scale * np.log(weight_scale * endog_mu)
ll_obs -= weight_scale * endog_mu
ll_obs -= special.gammaln(weight_scale) + np.log(endog)
return ll_obs
# in Stata scale is set to equal 1 for reporting llf
# in R it's the dispersion, though there is a loss of precision vs.
# our results due to an assumed difference in implementation
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals for the Gamma family defined below
Notes
-----
.. math::
resid\_anscombe_i = 3 * (endog_i^{1/3} - \mu_i^{1/3}) / \mu_i^{1/3}
/ \sqrt{scale} * \sqrt(var\_weights)
"""
resid = 3 * (endog**(1/3.) - mu**(1/3.)) / mu**(1/3.) / scale ** 0.5
resid *= np.sqrt(var_weights)
return resid
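# Usage sketch added by the editor (not part of the original source): the
# Gamma family with its default inverse-power link. The linear predictor
# and responses are illustrative assumptions only.
def _example_gamma_family():
    fam = Gamma()                             # default inverse_power link
    endog = np.array([0.5, 1.5, 4.0])
    lin_pred = np.array([1.2, 0.8, 0.3])
    mu = fam.link.inverse(lin_pred)           # mean implied by the predictor
    dev = np.sum(fam._resid_dev(endog, mu))
    llf = np.sum(fam.loglike_obs(endog, mu, scale=0.5))
    return mu, dev, llf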
class Binomial(Family):
"""
Binomial exponential family distribution.
Parameters
----------
link : a link instance, optional
The default link for the Binomial family is the logit link.
Available links are logit, probit, cauchy, log, and cloglog.
See statsmodels.genmod.families.links for more information.
Attributes
----------
Binomial.link : a link instance
The link function of the Binomial instance
Binomial.variance : varfunc instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.binary
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
endog for Binomial can be specified in one of three ways:
A 1d array of 0 or 1 values, indicating failure or success
respectively.
A 2d array, with two columns. The first column represents the
success count and the second column represents the failure
count.
A 1d array of proportions, indicating the proportion of
successes, with parameter `var_weights` containing the
number of trials for each row.
"""
links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog, L.identity]
variance = V.binary # this is not used below in an effort to include n
# Other safe links, e.g. cloglog and probit are subclasses
safe_links = [L.Logit, L.CDFLink]
def __init__(self, link=None): #, n=1.):
if link is None:
link = L.logit()
# TODO: it *should* work for a constant n>1 actually, if freq_weights
# is equal to n
self.n = 1
# overwritten by initialize if needed but always used to initialize
# variance since endog is assumed/forced to be (0,1)
super(Binomial, self).__init__(link=link,
variance=V.Binomial(n=self.n))
def starting_mu(self, y):
r"""
The starting values for the IRLS algorithm for the Binomial family.
A good choice for the binomial family is :math:`\mu_0 = (Y_i + 0.5)/2`
"""
return (y + .5) / 2
def initialize(self, endog, freq_weights):
'''
Initialize the response variable.
Parameters
----------
endog : array
Endogenous response variable
freq_weights : array
1d array of frequency weights
Returns
-------
If `endog` is binary, returns `endog` together with an array of ones.
If `endog` is a 2d array, then the input is assumed to be in the format
(successes, failures); successes/(successes + failures) is returned,
and n is set to successes + failures.
'''
# if not np.all(np.asarray(freq_weights) == 1):
# self.variance = V.Binomial(n=freq_weights)
if (endog.ndim > 1 and endog.shape[1] > 1):
y = endog[:, 0]
# overwrite self.freq_weights for deviance below
self.n = endog.sum(1)
return y*1./self.n, self.n
else:
return endog, np.ones(endog.shape[0])
def _resid_dev(self, endog, mu):
r"""
Binomial deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
.. math::
resid\_dev_i = 2 * n * (endog_i * \ln(endog_i /\mu_i) +
(1 - endog_i) * \ln((1 - endog_i) / (1 - \mu_i)))
"""
endog_mu = self._clean(endog / mu)
n_endog_mu = self._clean((1. - endog) / (1. - mu))
resid_dev = endog * np.log(endog_mu) + (1 - endog) * np.log(n_endog_mu)
return 2 * self.n * resid_dev
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Binomial distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
If the endogenous variable is binary:
.. math::
ll_i = (y_i * \log(\mu_i/(1-\mu_i)) + \log(1-\mu_i)) *
var\_weights_i
If the endogenous variable is binomial:
.. math::
ll_i = var\_weights_i * (\ln \Gamma(n_i+1) -
\ln \Gamma(y_i + 1) - \ln \Gamma(n_i - y_i + 1) + y_i *
\log(\mu_i / (1 - \mu_i)) + n_i * \log(1 - \mu_i))
where :math:`y_i = Y_i * n_i` with :math:`Y_i` and :math:`n_i` as
defined in Binomial initialize. This simply makes :math:`y_i` the
original number of successes.
"""
n = self.n # Number of trials
y = endog * n # Number of successes
# note that mu is still in (0,1), i.e. not converted back
return (special.gammaln(n + 1) - special.gammaln(y + 1) -
special.gammaln(n - y + 1) + y * np.log(mu / (1 - mu)) +
n * np.log(1 - mu)) * var_weights
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r'''
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
.. math::
n^{2/3}*(cox\_snell(endog)-cox\_snell(mu)) /
(mu*(1-mu/n)*scale^3)^{1/6} * \sqrt(var\_weights)
where cox_snell is defined as
cox_snell(x) = betainc(2/3., 2/3., x) * beta(2/3., 2/3.)
where betainc is the incomplete beta function as defined in scipy,
which uses a regularized version (with the unregularized version, one
would just have :math:`cox_snell(x) = Betainc(2/3., 2/3., x)`).
The name 'cox_snell' is idiosyncratic and is simply used for
convenience following the approach suggested in Cox and Snell (1968).
Further note that
:math:`cox\_snell(x) = \frac{3}{2}*x^{2/3} *
hyp2f1(2/3.,1/3.,5/3.,x)`
where hyp2f1 is the hypergeometric 2f1 function. The Anscombe
residuals are sometimes defined in the literature using the
hyp2f1 formulation. Both betainc and hyp2f1 can be found in scipy.
References
----------
Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
paper." Journal of the Royal Statistical Society B. 15, 229-30.
Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
Journal of the Royal Statistical Society B. 30, 248-75.
'''
endog = endog * self.n # convert back to successes
mu = mu * self.n # convert back to successes
def cox_snell(x):
return special.betainc(2/3., 2/3., x) * special.beta(2/3., 2/3.)
resid = (self.n ** (2/3.) * (cox_snell(endog * 1. / self.n) -
cox_snell(mu * 1. / self.n)) /
(mu * (1 - mu * 1. / self.n) * scale ** 3) ** (1 / 6.))
resid *= np.sqrt(var_weights)
return resid
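# Usage sketch added by the editor (not part of the original source): the
# Binomial family accepts a two-column (successes, failures) response;
# ``initialize`` converts it to proportions and stores the trial counts in
# ``self.n``. The counts below are illustrative assumptions only.
def _example_binomial_initialize():
    fam = Binomial()                          # default logit link
    counts = np.array([[3., 7.], [5., 5.], [9., 1.]])
    prop, trials = fam.initialize(counts, freq_weights=np.ones(3))
    mu = np.array([0.4, 0.5, 0.8])            # fitted success probabilities
    llf = np.sum(fam.loglike_obs(prop, mu))
    return prop, trials, llf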
class InverseGaussian(Family):
"""
InverseGaussian exponential family.
Parameters
----------
link : a link instance, optional
The default link for the inverse Gaussian family is the
inverse squared link.
Available links are inverse_squared, inverse, log, and identity.
See statsmodels.genmod.families.links for more information.
Attributes
----------
InverseGaussian.link : a link instance
The link function of the inverse Gaussian instance
InverseGaussian.variance : varfunc instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.mu_cubed
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
The inverse Gaussian distribution is sometimes referred to in the
literature as the Wald distribution.
"""
links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
variance = V.mu_cubed
safe_links = [L.inverse_squared, L.Log, ]
def __init__(self, link=None):
if link is None:
link = L.inverse_squared()
super(InverseGaussian, self).__init__(
link=link, variance=InverseGaussian.variance)
def _resid_dev(self, endog, mu):
r"""
Inverse Gaussian deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
.. math::
resid\_dev_i = 1 / (endog_i * \mu_i^2) * (endog_i - \mu_i)^2
"""
return 1. / (endog * mu ** 2) * (endog - mu) ** 2
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Inverse Gaussian distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
.. math::
ll_i = -1/2 * (var\_weights_i * (endog_i - \mu_i)^2 /
(scale * endog_i * \mu_i^2) + \ln(scale * endog_i^3 /
var\_weights_i) + \ln(2 * \pi))
"""
ll_obs = -var_weights * (endog - mu) ** 2 / (scale * endog * mu ** 2)
ll_obs += -np.log(scale * endog ** 3 / var_weights) - np.log(2 * np.pi)
ll_obs /= 2
return ll_obs
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals for the inverse Gaussian distribution as
defined below
Notes
-----
.. math::
resid\_anscombe_i = \log(Y_i / \mu_i) / \sqrt{\mu_i * scale} *
\sqrt(var\_weights)
"""
resid = np.log(endog / mu) / np.sqrt(mu * scale)
resid *= np.sqrt(var_weights)
return resid
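# Usage sketch added by the editor (not part of the original source): the
# inverse Gaussian (Wald) family; values are illustrative assumptions only.
def _example_inverse_gaussian_family():
    fam = InverseGaussian()                   # default inverse_squared link
    endog = np.array([0.8, 1.6, 2.4])
    mu = np.array([1.0, 1.5, 2.0])
    dev = np.sum(fam._resid_dev(endog, mu))
    resid = fam.resid_anscombe(endog, mu)
    return dev, resid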
class NegativeBinomial(Family):
r"""
Negative Binomial exponential family.
Parameters
----------
link : a link instance, optional
The default link for the negative binomial family is the log link.
Available links are log, cloglog, identity, nbinom and power.
See statsmodels.genmod.families.links for more information.
alpha : float, optional
The ancillary parameter for the negative binomial distribution.
For now ``alpha`` is assumed to be nonstochastic. The default value
is 1. Permissible values are usually assumed to be between 0.01 and 2.
Attributes
----------
NegativeBinomial.link : a link instance
The link function of the negative binomial instance
NegativeBinomial.variance : varfunc instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.nbinom
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
Power link functions are not yet supported.
Parameterization for :math:`y=0, 1, 2, \ldots` is
.. math::
f(y) = \frac{\Gamma(y+\frac{1}{\alpha})}{y!\Gamma(\frac{1}{\alpha})}
\left(\frac{1}{1+\alpha\mu}\right)^{\frac{1}{\alpha}}
\left(\frac{\alpha\mu}{1+\alpha\mu}\right)^y
with :math:`E[Y]=\mu\,` and :math:`Var[Y]=\mu+\alpha\mu^2`.
"""
links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
# TODO: add the ability to use the power links with an if test
# similar to below
variance = V.nbinom
safe_links = [L.Log, ]
def __init__(self, link=None, alpha=1.):
self.alpha = 1. * alpha # make it at least float
if link is None:
link = L.log()
super(NegativeBinomial, self).__init__(
link=link, variance=V.NegativeBinomial(alpha=self.alpha))
def _resid_dev(self, endog, mu):
r"""
Negative Binomial deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
.. math::
resid_dev_i = 2 * (endog_i * \ln(endog_i /
\mu_i) - (endog_i + 1 / \alpha) * \ln((endog_i + 1 / \alpha) /
(\mu_i + 1 / \alpha)))
"""
endog_mu = self._clean(endog / mu)
endog_alpha = endog + 1 / self.alpha
mu_alpha = mu + 1 / self.alpha
resid_dev = endog * np.log(endog_mu)
resid_dev -= endog_alpha * np.log(endog_alpha / mu_alpha)
return 2 * resid_dev
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Negative Binomial distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
Defined as:
.. math::
llf = \sum_i var\_weights_i / scale * (Y_i * \log{(\alpha * \mu_i /
(1 + \alpha * \mu_i))} - \log{(1 + \alpha * \mu_i)}/
\alpha + Constant)
where :math:`Constant` is defined as:
.. math::
Constant = \ln \Gamma{(Y_i + 1/ \alpha )} - \ln \Gamma(Y_i + 1) -
\ln \Gamma{(1/ \alpha )}
constant = (special.gammaln(endog + 1 / self.alpha) -
special.gammaln(endog+1)-special.gammaln(1/self.alpha))
return (endog * np.log(self.alpha * mu / (1 + self.alpha * mu)) -
np.log(1 + self.alpha * mu) / self.alpha +
constant) * var_weights / scale
"""
ll_obs = endog * np.log(self.alpha * mu)
ll_obs -= (endog + 1 / self.alpha) * np.log(1 + self.alpha * mu)
ll_obs += special.gammaln(endog + 1 / self.alpha)
ll_obs -= special.gammaln(1 / self.alpha)
ll_obs -= special.gammaln(endog + 1)
return var_weights / scale * ll_obs
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
Anscombe residuals for Negative Binomial are the same as for Binomial
upon setting :math:`n=-\frac{1}{\alpha}`. Due to the negative value of
:math:`-\alpha*Y` the representation with the hypergeometric function
:math:`H2F1(x) = hyp2f1(2/3.,1/3.,5/3.,x)` is advantageous
.. math::
resid\_anscombe_i = \frac{3}{2} *
(Y_i^(2/3)*H2F1(-\alpha*Y_i) - \mu_i^(2/3)*H2F1(-\alpha*\mu_i))
/ (\mu_i * (1+\alpha*\mu_i) * scale^3)^(1/6) * \sqrt(var\_weights)
Note that for the (unregularized) Beta function, one has
:math:`Beta(z,a,b) = z^a/a * H2F1(a,1-b,a+1,z)`
"""
def hyp2f1(x):
return special.hyp2f1(2 / 3., 1 / 3., 5 / 3., x)
resid = (3 / 2. * (endog ** (2 / 3.) * hyp2f1(-self.alpha * endog) -
mu ** (2 / 3.) * hyp2f1(-self.alpha * mu)) /
(mu * (1 + self.alpha * mu) *
scale ** 3) ** (1 / 6.))
resid *= np.sqrt(var_weights)
return resid
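# Usage sketch added by the editor (not part of the original source): the
# negative binomial family with a chosen ancillary parameter; its variance
# function is mu + alpha * mu**2. Values are illustrative assumptions only.
def _example_negative_binomial_family():
    fam = NegativeBinomial(alpha=0.5)         # default log link
    endog = np.array([0., 2., 6.])
    mu = np.array([1.0, 2.5, 4.0])
    var = fam.variance(mu)                    # mu + 0.5 * mu**2
    llf = np.sum(fam.loglike_obs(endog, mu))
    return var, llf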
class Tweedie(Family):
"""
Tweedie family.
Parameters
----------
link : a link instance, optional
The default link for the Tweedie family is the log link.
Available links are log and Power.
See statsmodels.genmod.families.links for more information.
var_power : float, optional
The variance power. The default is 1.
eql : bool
If True, the Extended Quasi-Likelihood is used, else the
likelihood is used (however the latter is not implemented).
If eql is True, var_power must be between 1 and 2.
Attributes
----------
Tweedie.link : a link instance
The link function of the Tweedie instance
Tweedie.variance : varfunc instance
``variance`` is an instance of
statsmodels.genmod.families.varfuncs.Power
Tweedie.var_power : float
The power of the variance function.
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`links`
Notes
-----
The loglikelihood function is not implemented because of the complexity of
calculating an infinite series of summations. The variance power can be
estimated using the ``estimate_tweedie_power`` function that is part of the
statsmodels.genmod.generalized_linear_model.GLM class.
"""
links = [L.log, L.Power]
variance = V.Power(power=1.5)
safe_links = [L.log, L.Power]
def __init__(self, link=None, var_power=1., eql=False):
self.var_power = var_power
self.eql = eql
if eql and (var_power < 1 or var_power > 2):
raise ValueError("Tweedie: if EQL=True then var_power must fall "
"between 1 and 2")
if link is None:
link = L.log()
super(Tweedie, self).__init__(
link=link, variance=V.Power(power=var_power * 1.))
def _resid_dev(self, endog, mu):
r"""
Tweedie deviance residuals
Parameters
----------
endog : array
The endogenous response variable.
mu : array
The inverse of the link function at the linear predicted values.
Returns
-------
resid_dev : float
Deviance residuals as defined below.
Notes
-----
When :math:`p = 1`,
.. math::
dev_i = \mu_i
when :math:`endog_i = 0` and
.. math::
dev_i = endog_i * \log(endog_i / \mu_i) + (\mu_i - endog_i)
otherwise.
When :math:`p = 2`,
.. math::
dev_i = (endog_i - \mu_i) / \mu_i - \log(endog_i / \mu_i)
For all other p,
.. math::
dev_i = endog_i^{2 - p} / ((1 - p) * (2 - p)) -
endog_i * \mu_i^{1 - p} / (1 - p) + \mu_i^{2 - p} /
(2 - p)
The deviance residual is then
.. math::
resid\_dev_i = 2 * dev_i
"""
p = self.var_power
if p == 1:
dev = np.where(endog == 0,
mu,
endog * np.log(endog / mu) + (mu - endog))
elif p == 2:
endog1 = self._clean(endog)
dev = ((endog - mu) / mu) - np.log(endog1 / mu)
else:
dev = (endog ** (2 - p) / ((1 - p) * (2 - p)) -
endog * mu ** (1-p) / (1 - p) + mu ** (2 - p) / (2 - p))
return 2 * dev
def loglike_obs(self, endog, mu, var_weights=1., scale=1.):
r"""
The log-likelihood function for each observation in terms of the fitted
mean response for the Tweedie distribution.
Parameters
----------
endog : array
Usually the endogenous response variable.
mu : array
Usually but not always the fitted mean response variable.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float
The scale parameter. The default is 1.
Returns
-------
ll_i : float
The value of the loglikelihood evaluated at
(endog, mu, var_weights, scale) as defined below.
Notes
-----
If eql is True, the Extended Quasi-Likelihood is used. At present,
this method returns NaN if eql is False. When the actual likelihood
is implemented, it will be accessible by setting eql to False.
References
----------
JA Nelder, D Pregibon (1987). An extended quasi-likelihood function.
Biometrika 74:2, pp 221-232. https://www.jstor.org/stable/2336136
"""
if not self.eql:
# We have not yet implemented the actual likelihood
return np.nan
# Equations 9-10 of Nelder and Pregibon
p = self.var_power
llf = np.log(2 * np.pi * scale) + p * np.log(mu) - np.log(var_weights)
llf /= -2
if p == 1:
u = endog * np.log(endog / mu) - (endog - mu)
u *= var_weights / scale
elif p == 2:
yr = endog / mu
u = yr - np.log(yr) - 1
u *= var_weights / scale
else:
u = (endog ** (2 - p)
- (2 - p) * endog * mu ** (1 - p)
+ (1 - p) * mu ** (2 - p))
u *= var_weights / (scale * (1 - p) * (2 - p))
llf -= u
return llf
def resid_anscombe(self, endog, mu, var_weights=1., scale=1.):
r"""
The Anscombe residuals
Parameters
----------
endog : array
The endogenous response variable
mu : array
The inverse of the link function at the linear predicted values.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
scale : float, optional
An optional argument to divide the residuals by sqrt(scale).
The default is 1.
Returns
-------
resid_anscombe : array
The Anscombe residuals as defined below.
Notes
-----
When :math:`p = 3`, then
.. math::
resid\_anscombe_i = \log(endog_i / \mu_i) / \sqrt{\mu_i * scale} *
\sqrt(var\_weights)
Otherwise,
.. math::
c = (3 - p) / 3
.. math::
resid\_anscombe_i = (1 / c) * (endog_i^c - \mu_i^c) / \mu_i^{p / 6}
/ \sqrt{scale} * \sqrt(var\_weights)
"""
if self.var_power == 3:
resid = np.log(endog / mu) / np.sqrt(mu * scale)
else:
c = (3. - self.var_power) / 3.
resid = ((1. / c) * (endog ** c - mu ** c) /
mu ** (self.var_power / 6.)) / scale ** 0.5
resid *= np.sqrt(var_weights)
return resid
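# Usage sketch added by the editor (not part of the original source): a
# Tweedie family with variance power between 1 and 2; with ``eql=True`` the
# extended quasi-likelihood above is returned, otherwise ``loglike_obs``
# yields NaN. Values are illustrative assumptions only.
def _example_tweedie_family():
    fam = Tweedie(var_power=1.5, eql=True)    # default log link
    endog = np.array([0., 1.2, 3.4])          # non-negative with exact zeros
    mu = np.array([0.8, 1.5, 2.9])
    eql = np.sum(fam.loglike_obs(endog, mu))
    dev = np.sum(fam._resid_dev(endog, mu))
    return eql, dev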
import numpy as np
from collections import defaultdict
import statsmodels.base.model as base
from statsmodels.genmod import families
from statsmodels.genmod.families import links
from statsmodels.genmod.families import varfuncs
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.tools.decorators import cache_readonly
class QIFCovariance(object):
"""
A covariance model for quadratic inference function regression.
The mat method returns a basis matrix B such that the inverse
of the working covariance lies in the linear span of the
basis matrices.
Subclasses should set the number of basis matrices `num_terms`,
so that `mat(d, j)` for j=0,..., num_terms-1 gives the basis
of dimension d.
"""
def mat(self, dim, term):
"""
Returns the term'th basis matrix, which is a dim x dim
matrix.
"""
raise NotImplementedError
class QIFIndependence(QIFCovariance):
"""
Independent working covariance for QIF regression. This covariance
model gives identical results to GEE with the independence working
covariance. When using QIFIndependence as the working covariance,
the QIF value will be zero, and cannot be used for chi^2 testing, or
for model selection using AIC, BIC, etc.
"""
def __init__(self):
self.num_terms = 1
def mat(self, dim, term):
if term == 0:
return np.eye(dim)
else:
return None
class QIFExchangeable(QIFCovariance):
"""
Exchangeable working covariance for QIF regression.
"""
def __init__(self):
self.num_terms = 2
def mat(self, dim, term):
if term == 0:
return np.eye(dim)
elif term == 1:
return np.ones((dim, dim))
else:
return None
class QIFAutoregressive(QIFCovariance):
"""
Autoregressive working covariance for QIF regression.
"""
def __init__(self):
self.num_terms = 3
def mat(self, dim, term):
if dim < 3:
msg = ("Groups must have size at least 3 for " +
"autoregressive covariance.")
raise ValueError(msg)
if term == 0:
return np.eye(dim)
elif term == 1:
mat = np.zeros((dim, dim))
mat.flat[1::(dim+1)] = 1
mat += mat.T
return mat
elif term == 2:
mat = np.zeros((dim, dim))
mat[0, 0] = 1
mat[dim-1, dim-1] = 1
return mat
else:
return None
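# Illustration added by the editor (not part of the original source): the
# QIFCovariance classes only supply basis matrices; QIF combines them
# implicitly. The cluster size below is an arbitrary assumption.
def _example_qif_basis_matrices():
    cov = QIFExchangeable()
    dim = 4
    bases = [cov.mat(dim, j) for j in range(cov.num_terms)]
    # bases[0] is the identity and bases[1] the all-ones matrix; the inverse
    # of an exchangeable working correlation lies in their linear span.
    return bases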
class QIF(base.Model):
"""
Fit a regression model using quadratic inference functions (QIF).
QIF is an alternative to GEE that can be more efficient, and that
offers different approaches for model selection and inference.
Parameters
----------
endog : array_like
The dependent variables of the regression.
exog : array_like
The independent variables of the regression.
groups : array_like
Labels indicating which group each observation belongs to.
Observations in different groups should be independent.
family : genmod family
An instance of a GLM family.
cov_struct : QIFCovariance instance
An instance of a QIFCovariance.
References
----------
A. Qu, B. Lindsay, B. Li (2000). Improving Generalized Estimating
Equations using Quadratic Inference Functions, Biometrika 87:4.
www.jstor.org/stable/2673612
"""
def __init__(self, endog, exog, groups, family=None,
cov_struct=None, missing='none', **kwargs):
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("QIF: `family` must be a genmod "
"family instance")
self.family = family
self._fit_history = defaultdict(list)
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = QIFIndependence()
else:
if not isinstance(cov_struct, QIFCovariance):
raise ValueError(
"QIF: `cov_struct` must be a QIFCovariance instance")
self.cov_struct = cov_struct
groups = np.asarray(groups)
super(QIF, self).__init__(endog, exog, groups=groups,
missing=missing, **kwargs)
self.group_names = list(set(groups))
self.nobs = len(self.endog)
groups_ix = defaultdict(list)
for i, g in enumerate(groups):
groups_ix[g].append(i)
self.groups_ix = [groups_ix[na] for na in self.group_names]
self._check_args(groups)
def _check_args(self, groups):
if len(groups) != len(self.endog):
msg = "QIF: groups and endog should have the same length"
raise ValueError(msg)
if len(self.endog) != self.exog.shape[0]:
msg = ("QIF: the length of endog should be equal to the "
"number of rows of exog.")
raise ValueError(msg)
def objective(self, params):
"""
Calculate the QIF objective function, its gradient, and
intermediate quantities.
Parameters
----------
params : array_like
The model parameters at which the objective is evaluated.
Returns
-------
qif : float
The value of the QIF objective function.
grad : array_like
The gradient vector of the QIF objective function.
cmat : array_like
The empirical covariance matrix of the estimating equations.
gn : array_like
The averaged estimating equations.
gn_deriv : array_like
The gradients of each estimating equation with
respect to the parameters.
"""
endog = self.endog
exog = self.exog
lpr = np.dot(exog, params)
mean = self.family.link.inverse(lpr)
va = self.family.variance(mean)
# Mean derivative
idl = self.family.link.inverse_deriv(lpr)
idl2 = self.family.link.inverse_deriv2(lpr)
vd = self.family.variance.deriv(mean)
m = self.cov_struct.num_terms
p = exog.shape[1]
d = p * m
gn = np.zeros(d)
gi = np.zeros(d)
gi_deriv = np.zeros((d, p))
gn_deriv = np.zeros((d, p))
cn_deriv = [0] * p
cmat = np.zeros((d, d))
fastvar = self.family.variance is varfuncs.constant
fastlink = isinstance(self.family.link, links.identity)
for ix in self.groups_ix:
sd = np.sqrt(va[ix])
resid = endog[ix] - mean[ix]
sresid = resid / sd
deriv = exog[ix, :] * idl[ix, None]
jj = 0
for j in range(m):
# The derivative of each term in (5) of Qu et al.
# There are four terms involving beta in a product.
# Iterated application of the product rule gives
# the gradient as a sum of four terms.
c = self.cov_struct.mat(len(ix), j)
crs1 = np.dot(c, sresid) / sd
gi[jj:jj+p] = np.dot(deriv.T, crs1)
crs2 = np.dot(c, -deriv / sd[:, None]) / sd[:, None]
gi_deriv[jj:jj+p, :] = np.dot(deriv.T, crs2)
if not (fastlink and fastvar):
for k in range(p):
m1 = np.dot(exog[ix, :].T,
idl2[ix] * exog[ix, k] * crs1)
if not fastvar:
vx = -0.5 * vd[ix] * deriv[:, k] / va[ix]**1.5
m2 = np.dot(deriv.T, vx * np.dot(c, sresid))
m3 = np.dot(deriv.T, np.dot(c, vx * resid) / sd)
else:
m2, m3 = 0, 0
gi_deriv[jj:jj+p, k] += m1 + m2 + m3
jj += p
for j in range(p):
u = np.outer(gi, gi_deriv[:, j])
cn_deriv[j] += u + u.T
gn += gi
gn_deriv += gi_deriv
cmat += np.outer(gi, gi)
ngrp = len(self.groups_ix)
gn /= ngrp
gn_deriv /= ngrp
cmat /= ngrp**2
qif = np.dot(gn, np.linalg.solve(cmat, gn))
gcg = np.zeros(p)
for j in range(p):
cn_deriv[j] /= len(self.groups_ix)**2
u = np.linalg.solve(cmat, cn_deriv[j]).T
u = np.linalg.solve(cmat, u)
gcg[j] = np.dot(gn, np.dot(u, gn))
grad = 2 * np.dot(gn_deriv.T, np.linalg.solve(cmat, gn)) - gcg
return qif, grad, cmat, gn, gn_deriv
def estimate_scale(self, params):
"""
Estimate the dispersion/scale.
The scale parameter for binomial and Poisson families is
fixed at 1, otherwise it is estimated from the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson)):
return 1.
if hasattr(self, "ddof_scale"):
ddof_scale = self.ddof_scale
else:
ddof_scale = self.exog.shape[1]
lpr = np.dot(self.exog, params)
mean = self.family.link.inverse(lpr)
resid = self.endog - mean
scale = np.sum(resid**2) / (self.nobs - ddof_scale)
return scale
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
*args, **kwargs):
"""
Create a QIF model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array_like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array_like
The data for the model.
subset : array_like
An array_like object of booleans, integers, or index
values that indicate the subset of the data to use when
fitting the model.
Returns
-------
model : QIF model instance
"""
if isinstance(groups, str):
groups = data[groups]
model = super(QIF, cls).from_formula(
formula, data=data, subset=subset,
groups=groups, *args, **kwargs)
return model
def fit(self, maxiter=100, start_params=None, tol=1e-6, gtol=1e-4,
ddof_scale=None):
"""
Fit a GLM to correlated data using QIF.
Parameters
----------
maxiter : integer
Maximum number of iterations.
start_params : array_like, optional
Starting values
tol : float
Convergence threshold for difference of successive
estimates.
gtol : float
Convergence threshold for gradient.
ddof_scale : int, optional
Degrees of freedom for the scale parameter
Returns
-------
QIFResults object
"""
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
self.ddof_scale = ddof_scale
if start_params is None:
params = np.zeros(self.exog.shape[1])
else:
params = start_params
for _ in range(maxiter):
qif, grad, cmat, _, gn_deriv = self.objective(params)
gnorm = np.sqrt(np.sum(grad * grad))
self._fit_history["qif"].append(qif)
self._fit_history["gradnorm"].append(gnorm)
if gnorm < gtol:
break
cjac = 2 * np.dot(gn_deriv.T, np.linalg.solve(cmat, gn_deriv))
step = np.linalg.solve(cjac, grad)
snorm = np.sqrt(np.sum(step * step))
self._fit_history["stepnorm"].append(snorm)
if snorm < tol:
break
params -= step
vcov = np.dot(gn_deriv.T, np.linalg.solve(cmat, gn_deriv))
vcov = np.linalg.inv(vcov)
scale = self.estimate_scale(params)
rslt = QIFResults(self, params, vcov / scale, scale)
rslt.fit_history = self._fit_history
self._fit_history = defaultdict(list)
return QIFResultsWrapper(rslt)
class QIFResults(base.LikelihoodModelResults):
"""Results class for QIF Regression"""
def __init__(self, model, params, cov_params, scale,
use_t=False, **kwds):
super(QIFResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
self.qif, _, _, _, _ = self.model.objective(params)
@cache_readonly
def aic(self):
"""
An AIC-like statistic for models fit using QIF.
"""
if isinstance(self.model.cov_struct, QIFIndependence):
msg = "AIC not available with QIFIndependence covariance"
raise ValueError(msg)
df = self.model.exog.shape[1]
return self.qif + 2*df
@cache_readonly
def bic(self):
"""
A BIC-like statistic for models fit using QIF.
"""
if isinstance(self.model.cov_struct, QIFIndependence):
msg = "BIC not available with QIFIndependence covariance"
raise ValueError(msg)
df = self.model.exog.shape[1]
return self.qif + np.log(self.model.nobs)*df
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(
np.dot(self.model.exog, self.params))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the QIF regression results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables, default is `var_##` for ## in
the number of regressors. Must match the number of parameters in
the model
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Method:', ['QIF']),
('Family:', [self.model.family.__class__.__name__]),
('Covariance structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Time:', None),
]
NY = [len(y) for y in self.model.groups_ix]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(NY)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Scale:', ["%.3f" % self.scale]),
]
if title is None:
title = self.model.__class__.__name__ + ' ' + \
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
return smry
class QIFResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(QIFResultsWrapper, QIFResults)
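# Usage sketch added by the editor (not part of the original source):
# fitting a QIF model on synthetic clustered data. The seed, group sizes
# and coefficients are illustrative assumptions only.
def _example_qif_fit():
    rng = np.random.RandomState(0)
    n_groups, group_size = 50, 4
    nobs = n_groups * group_size
    groups = np.repeat(np.arange(n_groups), group_size)
    exog = np.column_stack((np.ones(nobs), rng.normal(size=nobs)))
    endog = np.dot(exog, [1.0, 0.5]) + rng.normal(size=nobs)
    model = QIF(endog, exog, groups=groups,
                family=families.Gaussian(),
                cov_struct=QIFExchangeable())
    result = model.fit()
    return result.params, result.bse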
'''
Defines the link functions to be used with GLM and GEE families.
'''
import numpy as np
import scipy.stats
FLOAT_EPS = np.finfo(float).eps
class Link(object):
"""
A generic link function for one-parameter exponential family.
`Link` does nothing, but lays out the methods expected of any subclass.
"""
def __call__(self, p):
"""
Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g(p) : array_like
The value of the link function g(p) = z
"""
raise NotImplementedError
def inverse(self, z):
"""
Inverse of the link function. Just a placeholder.
Parameters
----------
z : array_like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : array
The value of the inverse of the link function g^(-1)(z) = p
"""
raise NotImplementedError
def deriv(self, p):
"""
Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array_like
Returns
-------
g'(p) : array
The value of the derivative of the link function g'(p)
"""
raise NotImplementedError
def deriv2(self, p):
"""Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime_cs
# TODO: workaround problem with numdiff for 1d
return np.diag(approx_fprime_cs(p, self.deriv))
def inverse_deriv(self, z):
"""
Derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the derivative of the inverse of the link function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
"""
return 1 / self.deriv(self.inverse(z))
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the second derivative of the inverse of the link
function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
"""
iz = self.inverse(z)
return -self.deriv2(iz) / self.deriv(iz)**3
class Logit(Link):
"""
The logit transform
Notes
-----
call and derivative use a private method _clean to trim p by
machine epsilon so that p is in (0, 1)
Alias of Logit:
logit = Logit()
"""
def _clean(self, p):
"""
Clip logistic values to range (eps, 1-eps)
Parameters
----------
p : array_like
Probabilities
Returns
-------
pclip : array
Clipped probabilities
"""
return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)
def __call__(self, p):
"""
The logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
z : array
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p))
"""
p = self._clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array_like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
def deriv(self, p):
"""
Derivative of the logit transform
Parameters
----------
p: array_like
Probabilities
Returns
-------
g'(p) : array
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit()
"""
p = self._clean(p)
return 1. / (p * (1 - p))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the logit transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the derivative of the inverse of the logit function
"""
t = np.exp(z)
return t/(1 + t)**2
def deriv2(self, p):
"""
Second derivative of the logit function.
Parameters
----------
p : array_like
probabilities
Returns
-------
g''(z) : array
The value of the second derivative of the logit function
"""
v = p * (1 - p)
return (2*p - 1) / v**2
class logit(Logit):
pass
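# Usage sketch added by the editor (not part of the original source): the
# logit link and its inverse are mutual inverses on (0, 1); the
# probabilities below are arbitrary.
def _example_logit_link():
    link = Logit()
    p = np.array([0.1, 0.5, 0.9])
    z = link(p)                               # log-odds
    back = link.inverse(z)                    # recovers p (up to clipping)
    slope = link.inverse_deriv(z)             # equals p * (1 - p)
    return z, back, slope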
class Power(Link):
"""
The power transform
Parameters
----------
power : float
The exponent of the power transform
Notes
-----
Aliases of Power:
inverse = Power(power=-1)
sqrt = Power(power=.5)
inverse_squared = Power(power=-2.)
identity = Power(power=1.)
"""
def __init__(self, power=1.):
self.power = power
def __call__(self, p):
"""
Power transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array_like
Power transform of `p`
Notes
-----
g(p) = p**self.power
"""
if self.power == 1:
return p
else:
return np.power(p, self.power)
def inverse(self, z):
"""
Inverse of the power transform link function
Parameters
----------
`z` : array_like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : array
Mean parameters
Notes
-----
g^(-1)(`z`) = `z`**(1/`power`)
"""
if self.power == 1:
return z
else:
return np.power(z, 1. / self.power)
def deriv(self, p):
"""
Derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : array
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
"""
if self.power == 1:
return np.ones_like(p)
else:
return self.power * np.power(p, self.power - 1)
def deriv2(self, p):
"""
Second derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : array
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
"""
if self.power == 1:
return np.zeros_like(p)
else:
return self.power * (self.power - 1) * np.power(p, self.power - 2)
def inverse_deriv(self, z):
"""
Derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the power transform
function
"""
if self.power == 1:
return np.ones_like(z)
else:
return np.power(z, (1 - self.power)/self.power) / self.power
def inverse_deriv2(self, z):
"""
Second derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the power transform
function
"""
if self.power == 1:
return np.zeros_like(z)
else:
return ((1 - self.power) *
np.power(z, (1 - 2*self.power)/self.power) / self.power**2)
class inverse_power(Power):
"""
The inverse transform
Notes
-----
g(p) = 1/p
Alias of statsmodels.family.links.Power(power=-1.)
"""
def __init__(self):
super(inverse_power, self).__init__(power=-1.)
class sqrt(Power):
"""
The square-root transform
Notes
-----
g(`p`) = sqrt(`p`)
Alias of statsmodels.family.links.Power(power=.5)
"""
def __init__(self):
super(sqrt, self).__init__(power=.5)
class inverse_squared(Power):
r"""
The inverse squared transform
Notes
-----
g(`p`) = 1/(`p`\*\*2)
Alias of statsmodels.family.links.Power(power=-2.)
"""
def __init__(self):
super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
"""
The identity transform
Notes
-----
g(`p`) = `p`
Alias of statsmodels.family.links.Power(power=1.)
"""
def __init__(self):
super(identity, self).__init__(power=1.)
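# Illustration added by the editor (not part of the original source): the
# aliases defined above are just Power instances with fixed exponents.
def _example_power_aliases():
    p = np.array([0.25, 1.0, 4.0])
    assert np.allclose(sqrt()(p), Power(power=0.5)(p))
    assert np.allclose(inverse_power()(p), 1.0 / p)
    assert np.allclose(identity()(p), p)
    return inverse_squared()(p)               # 1 / p**2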
class Log(Link):
"""
The log transform
Notes
-----
call and derivative call a private method _clean to trim the data by
machine epsilon so that p is in (0, inf). log is an alias of Log.
"""
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p, **extra):
"""
Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array
log(p)
Notes
-----
g(p) = log(p)
"""
x = self._clean(p)
return np.log(x)
def inverse(self, z):
"""
Inverse of log transform link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
p : array
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z)
"""
return np.exp(z)
def deriv(self, p):
"""
Derivative of log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : array
derivative of log transform of x
Notes
-----
g'(x) = 1/x
"""
p = self._clean(p)
return 1. / p
def deriv2(self, p):
"""
Second derivative of the log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : array
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2
"""
p = self._clean(p)
return -1. / p**2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the log transform link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the log function,
the exponential function
"""
return np.exp(z)
class log(Log):
"""
The log transform
Notes
-----
log is an alias of Log.
"""
pass
# TODO: the CDFLink is untested
class CDFLink(Logit):
"""
Use the CDF of a scipy.stats distribution as the link function.
CDFLink is a subclass of Logit in order to use its _clean method
for the link and its derivative.
Parameters
----------
dbn : scipy.stats distribution
Default is dbn=scipy.stats.norm
Notes
-----
The CDF link is untested.
"""
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
"""
CDF link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`)
"""
p = self._clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
"""
The inverse of the CDF link
Parameters
----------
z : array_like
The value of the inverse of the link function at `p`
Returns
-------
p : array
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`)
"""
return self.dbn.cdf(z)
def deriv(self, p):
"""
Derivative of CDF link
Parameters
----------
p : array_like
mean parameters
Returns
-------
g'(p) : array
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
"""
p = self._clean(p)
return 1. / self.dbn.pdf(self.dbn.ppf(p))
def deriv2(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime
p = np.atleast_1d(p)
# Note: special function for norm.ppf does not support complex
return np.diag(approx_fprime(p, self.deriv, centered=True))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the CDF transformation link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the CDF link function
"""
return 1/self.deriv(self.inverse(z))
class probit(CDFLink):
"""
The probit (standard normal CDF) transform
Notes
-----
g(p) = scipy.stats.norm.ppf(p)
probit is an alias of CDFLink.
"""
pass
class cauchy(CDFLink):
"""
The Cauchy (standard Cauchy CDF) transform
Notes
-----
g(p) = scipy.stats.cauchy.ppf(p)
cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
"""
def __init__(self):
super(cauchy, self).__init__(dbn=scipy.stats.cauchy)
def deriv2(self, p):
"""
Second derivative of the Cauchy link function.
Parameters
----------
p: array_like
Probabilities
Returns
-------
g''(p) : array
Value of the second derivative of Cauchy link function at `p`
"""
a = np.pi * (p - 0.5)
d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3
return d2
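# Usage sketch added by the editor (not part of the original source):
# probit and cauchy are CDFLink instances backed by scipy.stats
# distributions. The probabilities below are arbitrary.
def _example_cdf_links():
    p = np.array([0.05, 0.5, 0.95])
    z = probit()(p)                           # scipy.stats.norm.ppf(p)
    back = probit().inverse(z)                # scipy.stats.norm.cdf(z) == p
    heavy = cauchy()(p)                       # heavier-tailed quantiles
    return z, back, heavy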
class CLogLog(Logit):
"""
The complementary log-log transform
CLogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
Notes
-----
CLogLog is untested.
"""
def __call__(self, p):
"""
C-Log-Log transform link function
Parameters
----------
p : array
Mean parameters
Returns
-------
z : array
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p))
"""
p = self._clean(p)
return np.log(-np.log(1 - p))
def inverse(self, z):
"""
Inverse of C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : array
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`))
"""
return 1 - np.exp(-np.exp(z))
def deriv(self, p):
"""
Derivative of C-Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : array
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = 1 / ((p - 1) * log(1 - p))
"""
p = self._clean(p)
return 1. / ((p - 1) * (np.log(1 - p)))
def deriv2(self, p):
"""
Second derivative of the C-Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the CLogLog link function
"""
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p)**2 * fl)
d2 *= 1 + 1 / fl
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : array
The derivative of the inverse of the CLogLog link function
"""
return np.exp(z - np.exp(z))
class cloglog(CLogLog):
"""
The CLogLog transform link function.
Notes
-----
g(`p`) = log(-log(1-`p`))
cloglog is an alias for CLogLog
cloglog = CLogLog()
"""
pass
class NegativeBinomial(Link):
'''
The negative binomial link function
Parameters
----------
alpha : float, optional
Alpha is the ancillary parameter of the Negative Binomial link
function. It is assumed to be nonstochastic. The default value is 1.
Permissible values are usually assumed to be in (.01, 2).
'''
def __init__(self, alpha=1.):
self.alpha = alpha
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p):
'''
Negative Binomial transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha))
'''
p = self._clean(p)
return np.log(p/(p + 1/self.alpha))
def inverse(self, z):
'''
Inverse of the negative binomial transform
Parameters
----------
z : array_like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : array
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))
'''
return -1/(self.alpha * (1 - np.exp(-z)))
def deriv(self, p):
'''
Derivative of the negative binomial transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : array
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2)
'''
return 1/(p + self.alpha * p**2)
def deriv2(self, p):
'''
Second derivative of the negative binomial link function.
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2
'''
numer = -(1 + 2 * self.alpha * p)
denom = (p + self.alpha * p**2)**2
return numer / denom
def inverse_deriv(self, z):
'''
Derivative of the inverse of the negative binomial transform
Parameters
----------
z : array_like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the negative
binomial link
'''
t = np.exp(z)
return t / (self.alpha * (1-t)**2)
class nbinom(NegativeBinomial):
"""
The negative binomial link function.
Notes
-----
g(p) = log(p/(p + 1/alpha))
nbinom is an alias of NegativeBinomial.
nbinom = NegativeBinomial(alpha=1.)
"""
pass
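# Usage sketch added by the editor (not part of the original source): the
# negative binomial link with ancillary parameter alpha; the means below
# are arbitrary.
def _example_negative_binomial_link():
    link = NegativeBinomial(alpha=0.5)
    mu = np.array([0.5, 2.0, 10.0])
    z = link(mu)                              # log(mu / (mu + 1/alpha))
    back = link.inverse(z)                    # recovers mu
    return z, back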
"""
Covariance models and estimators for GEE.
Some details for the covariance calculations can be found in the Stata
docs:
http://www.stata.com/manuals13/xtxtgee.pdf
"""
from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
import pandas as pd
from scipy import linalg as spl
from collections import defaultdict
from statsmodels.tools.sm_exceptions import (ConvergenceWarning, OutputWarning,
NotImplementedWarning)
import warnings
class CovStruct(object):
"""
Base class for correlation and covariance structures.
An implementation of this class takes the residuals from a
regression model that has been fit to grouped data, and uses
them to estimate the within-group dependence structure of the
random errors in the model.
The current state of the covariance structure is represented
through the value of the `dep_params` attribute.
The default state of a newly-created instance should always be
the identity correlation matrix.
"""
def __init__(self, cov_nearest_method="clipped"):
# Parameters describing the dependency structure
self.dep_params = None
# Keep track of the number of times that the covariance was
# adjusted.
self.cov_adjust = []
# Method for projecting the covariance matrix if it is not
# PSD.
self.cov_nearest_method = cov_nearest_method
def initialize(self, model):
"""
Called by GEE, used by implementations that need additional
setup prior to running `fit`.
Parameters
----------
model : GEE class
A reference to the parent GEE class instance.
"""
self.model = model
def update(self, params):
"""
Update the association parameter values based on the current
regression coefficients.
Parameters
----------
params : array_like
Working values for the regression parameters.
"""
raise NotImplementedError
def covariance_matrix(self, endog_expval, index):
"""
Returns the working covariance or correlation matrix for a
given cluster of data.
Parameters
----------
endog_expval: array_like
The expected values of endog for the cluster for which the
covariance or correlation matrix will be returned
index: integer
The index of the cluster for which the covariance or
correlation matrix will be returned
Returns
-------
M: matrix
The covariance or correlation matrix of endog
is_cor: bool
True if M is a correlation matrix, False if M is a
covariance matrix
"""
raise NotImplementedError
def covariance_matrix_solve(self, expval, index, stdev, rhs):
"""
Solves matrix equations of the form `covmat * soln = rhs` and
returns the values of `soln`, where `covmat` is the covariance
matrix represented by this class.
Parameters
----------
expval: array_like
The expected value of endog for each observed value in the
group.
index: integer
The group index.
stdev : array_like
The standard deviation of endog for each observation in
the group.
rhs : list/tuple of array_like
A set of right-hand sides; each defines a matrix equation
to be solved.
Returns
-------
soln : list/tuple of array_like
The solutions to the matrix equations.
Notes
-----
Returns None if the solver fails.
Some dependence structures do not use `expval` and/or `index`
to determine the correlation matrix. Some families
(e.g. binomial) do not use the `stdev` parameter when forming
the covariance matrix.
If the covariance matrix is singular or not SPD, it is
projected to the nearest such matrix. These projection events
are recorded in the fit_history attribute of the GEE model.
Systems of linear equations with the covariance matrix as the
left hand side (LHS) are solved for different right hand sides
(RHS); the LHS is only factorized once to save time.
This is a default implementation, it can be reimplemented in
subclasses to optimize the linear algebra according to the
structure of the covariance matrix.
"""
vmat, is_cor = self.covariance_matrix(expval, index)
if is_cor:
vmat *= np.outer(stdev, stdev)
# Factor the covariance matrix. If the factorization fails,
# attempt to condition it into a factorizable matrix.
threshold = 1e-2
success = False
cov_adjust = 0
for itr in range(20):
try:
vco = spl.cho_factor(vmat)
success = True
break
except np.linalg.LinAlgError:
vmat = cov_nearest(vmat, method=self.cov_nearest_method,
threshold=threshold)
threshold *= 2
cov_adjust += 1
msg = "At least one covariance matrix was not PSD "
msg += "and required projection."
warnings.warn(msg)
self.cov_adjust.append(cov_adjust)
# Last resort if we still can't factor the covariance matrix.
if not success:
warnings.warn(
"Unable to condition covariance matrix to an SPD "
"matrix using cov_nearest", ConvergenceWarning)
vmat = np.diag(np.diag(vmat))
vco = spl.cho_factor(vmat)
soln = [spl.cho_solve(vco, x) for x in rhs]
return soln
def summary(self):
"""
Returns a text summary of the current estimate of the
dependence structure.
"""
raise NotImplementedError
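# --- Example (standalone sketch, synthetic numbers): the pattern used by
# the default covariance_matrix_solve above. A correlation matrix is scaled
# to a covariance matrix, factorized once with a Cholesky decomposition,
# and the factor is reused for several right-hand sides.
import numpy as np
from scipy import linalg as spl
rng = np.random.default_rng(0)
k = 4
corr = 0.3 * np.ones((k, k)) + 0.7 * np.eye(k)     # a valid correlation matrix
stdev = rng.uniform(0.5, 2.0, size=k)
vmat = corr * np.outer(stdev, stdev)               # working covariance
vco = spl.cho_factor(vmat)                         # factorize once
rhs = [rng.standard_normal(k), rng.standard_normal((k, 2))]
soln = [spl.cho_solve(vco, x) for x in rhs]        # reuse the factor
for x, s in zip(rhs, soln):
    assert np.allclose(s, np.linalg.solve(vmat, x))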
class Independence(CovStruct):
"""
An independence working dependence structure.
"""
# Nothing to update
def update(self, params):
return
def covariance_matrix(self, expval, index):
dim = len(expval)
return np.eye(dim, dtype=np.float64), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
v = stdev ** 2
rslt = []
for x in rhs:
if x.ndim == 1:
rslt.append(x / v)
else:
rslt.append(x / v[:, None])
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Observations within a cluster are modeled "
"as being independent.")
class Exchangeable(CovStruct):
"""
An exchangeable working dependence structure.
"""
def __init__(self):
super(Exchangeable, self).__init__()
# The correlation between any two values in the same cluster
self.dep_params = 0.
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
varfunc = self.model.family.variance
cached_means = self.model.cached_means
has_weights = self.model.weights is not None
weights_li = self.model.weights
residsq_sum, scale = 0, 0
fsum1, fsum2, n_pairs = 0., 0., 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
f = weights_li[i] if has_weights else 1.
ssr = np.sum(resid * resid)
scale += f * ssr
fsum1 += f * len(endog[i])
residsq_sum += f * (resid.sum() ** 2 - ssr) / 2
ngrp = len(resid)
npr = 0.5 * ngrp * (ngrp - 1)
fsum2 += f * npr
n_pairs += npr
ddof = self.model.ddof_scale
scale /= (fsum1 * (nobs - ddof) / float(nobs))
residsq_sum /= scale
self.dep_params = residsq_sum / \
(fsum2 * (n_pairs - ddof) / float(n_pairs))
def covariance_matrix(self, expval, index):
dim = len(expval)
dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
np.fill_diagonal(dp, 1)
return dp, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
k = len(expval)
c = self.dep_params / (1. - self.dep_params)
c /= 1. + self.dep_params * (k - 1)
rslt = []
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
y = x1 / (1. - self.dep_params)
y -= c * sum(x1)
y /= stdev
else:
x1 = x / stdev[:, None]
y = x1 / (1. - self.dep_params)
y -= c * x1.sum(0)
y /= stdev[:, None]
rslt.append(y)
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("The correlation between two observations in the " +
"same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
"""
A nested working dependence structure.
A working dependence structure that captures a nested hierarchy of
groups. Each level of grouping contributes to the random error
structure of the model.
When using this working covariance structure, `dep_data` of the
GEE instance should contain a n_obs x k matrix of 0/1 indicators,
corresponding to the k subgroups nested under the top-level
`groups` of the GEE instance. These subgroups should be nested
from left to right, so that two observations with the same value
for column j of `dep_data` should also have the same value for all
columns j' < j (this only applies to observations in the same
top-level cluster given by the `groups` argument to GEE).
Examples
--------
Suppose our data are student test scores, and the students are in
classrooms, nested in schools, nested in school districts. The
school district is the highest level of grouping, so the school
district id would be provided to GEE as `groups`, and the school
and classroom id's would be provided to the Nested class as the
`dep_data` argument, e.g.
0 0 # School 0, classroom 0, student 0
0 0 # School 0, classroom 0, student 1
0 1 # School 0, classroom 1, student 0
0 1 # School 0, classroom 1, student 1
1 0 # School 1, classroom 0, student 0
1 0 # School 1, classroom 0, student 1
1 1 # School 1, classroom 1, student 0
1 1 # School 1, classroom 1, student 1
Labels lower in the hierarchy are recycled, so that student 0 in
classroom 0 is different from student 0 in classroom 1, etc.
Notes
-----
The calculations for this dependence structure involve all pairs
of observations within a group (that is, within the top level
`group` structure passed to GEE). Large group sizes will result
in slow iterations.
The variance components are estimated using least squares
regression of the products r*r', for standardized residuals r and
r' in the same group, on a matrix of indicators defining which
variance components are shared by r and r'.
"""
def initialize(self, model):
"""
Called on the first call to update
`ilabels` is a list of n_i x n_i matrices containing integer
labels that correspond to specific correlation parameters.
Two elements of ilabels[i] with the same label share identical
variance components.
`designx` is a matrix, with each row containing dummy
variables indicating which variance components are associated
with the corresponding element of QY.
"""
super(Nested, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for nested cov_struct, "
"using unweighted covariance estimate",
NotImplementedWarning)
# A bit of processing of the nest data
id_matrix = np.asarray(self.model.dep_data)
if id_matrix.ndim == 1:
id_matrix = id_matrix[:, None]
self.id_matrix = id_matrix
endog = self.model.endog_li
designx, ilabels = [], []
# The number of layers of nesting
n_nest = self.id_matrix.shape[1]
for i in range(self.model.num_group):
ngrp = len(endog[i])
glab = self.model.group_labels[i]
rix = self.model.group_indices[glab]
# Determine the number of common variance components
# shared by each pair of observations.
ix1, ix2 = np.tril_indices(ngrp, -1)
ncm = (self.id_matrix[rix[ix1], :] ==
self.id_matrix[rix[ix2], :]).sum(1)
# This is used to construct the working correlation
# matrix.
ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
ilabel[(ix1, ix2)] = ncm + 1
ilabel[(ix2, ix1)] = ncm + 1
ilabels.append(ilabel)
# This is used to estimate the variance components.
dsx = np.zeros((len(ix1), n_nest + 1), dtype=np.float64)
dsx[:, 0] = 1
for k in np.unique(ncm):
ii = np.flatnonzero(ncm == k)
dsx[ii, 1:k + 1] = 1
designx.append(dsx)
self.designx = np.concatenate(designx, axis=0)
self.ilabels = ilabels
svd = np.linalg.svd(self.designx, 0)
self.designx_u = svd[0]
self.designx_s = svd[1]
self.designx_v = svd[2].T
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
dim = len(params)
if self.designx is None:
self._compute_design(self.model)
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dvmat = []
scale = 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
ix1, ix2 = np.tril_indices(len(resid), -1)
dvmat.append(resid[ix1] * resid[ix2])
scale += np.sum(resid ** 2)
dvmat = np.concatenate(dvmat)
scale /= (nobs - dim)
# Use least squares regression to estimate the variance
# components
vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
dvmat) / self.designx_s)
self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
self.scale = scale
self.dep_params = self.vcomp_coeff.copy()
def covariance_matrix(self, expval, index):
dim = len(expval)
# First iteration
if self.dep_params is None:
return np.eye(dim, dtype=np.float64), True
ilabel = self.ilabels[index]
c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
vmat = c[ilabel]
vmat /= self.scale
return vmat, True
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
"""
Returns a summary string describing the state of the
dependence structure.
"""
dep_names = ["Groups"]
if hasattr(self.model, "_dep_data_names"):
dep_names.extend(self.model._dep_data_names)
else:
dep_names.extend(["Component %d:" % (k + 1) for k in range(len(self.vcomp_coeff) - 1)])
if hasattr(self.model, "_groups_name"):
dep_names[0] = self.model._groups_name
dep_names.append("Residual")
vc = self.vcomp_coeff.tolist()
vc.append(self.scale - np.sum(vc))
smry = pd.DataFrame({"Variance": vc}, index=dep_names)
return smry
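# --- Example (usage sketch with synthetic data; the names `district`,
# `school` and `classroom` are illustrative assumptions): the school /
# classroom layout described in the Nested docstring. The top level goes in
# `groups`, the nested levels go in `dep_data`, ordered from outermost to
# innermost. Classroom labels are built so that, within a district, equal
# classroom values imply equal school values.
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(2)
n = 200
district = rng.integers(0, 5, size=n)               # top-level grouping
school = rng.integers(0, 3, size=n)                 # nested under district
classroom = 2 * school + rng.integers(0, 2, size=n) # nested under school
x = rng.standard_normal(n)
y = 1.0 + 0.5 * x + rng.standard_normal(n)
dep_data = np.column_stack((school, classroom))     # left-to-right nesting
model = sm.GEE(y, sm.add_constant(x), groups=district,
               cov_struct=sm.cov_struct.Nested(), dep_data=dep_data)
result = model.fit()
print(model.cov_struct.summary())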
class Stationary(CovStruct):
"""
A stationary covariance structure.
The correlation between two observations is an arbitrary function
of the distance between them. Distances up to a given maximum
value are included in the covariance model.
Parameters
----------
max_lag : float
The largest distance that is included in the covariance model.
grid : bool
If True, the index positions in the data (after dropping missing
values) are used to define distances, and the `time` variable is
ignored.
"""
def __init__(self, max_lag=1, grid=False):
super(Stationary, self).__init__()
self.max_lag = max_lag
self.grid = grid
self.dep_params = np.zeros(max_lag + 1)
def initialize(self, model):
super(Stationary, self).initialize(model)
# Time used as an index needs to be integer type.
if not self.grid:
time = self.model.time[:, 0].astype(np.int32)
self.time = self.model.cluster_list(time)
def update(self, params):
if self.grid:
self.update_grid(params)
else:
self.update_nogrid(params)
def update_grid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
dep_params[0] += np.sum(resid * resid) / len(resid)
for j in range(1, self.max_lag + 1):
v = resid[j:]
dep_params[j] += np.sum(resid[0:-j] * v) / len(v)
dep_params /= dep_params[0]
self.dep_params = dep_params
def update_nogrid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
dn = np.zeros(self.max_lag + 1)
resid_ssq = 0
resid_ssq_n = 0
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
j1, j2 = np.tril_indices(len(expval), -1)
dx = np.abs(self.time[i][j1] - self.time[i][j2])
ii = np.flatnonzero(dx <= self.max_lag)
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
vs = np.bincount(dx, weights=resid[j1] * resid[j2],
minlength=self.max_lag + 1)
vd = np.bincount(dx, minlength=self.max_lag + 1)
resid_ssq += np.sum(resid**2)
resid_ssq_n += len(resid)
ii = np.flatnonzero(vd > 0)
if len(ii) > 0:
dn[ii] += 1
dep_params[ii] += vs[ii] / vd[ii]
i0 = np.flatnonzero(dn > 0)
dep_params[i0] /= dn[i0]
resid_msq = resid_ssq / resid_ssq_n
dep_params /= resid_msq
self.dep_params = dep_params
def covariance_matrix(self, endog_expval, index):
if self.grid:
return self.covariance_matrix_grid(endog_expval, index)
j1, j2 = np.tril_indices(len(endog_expval), -1)
dx = np.abs(self.time[index][j1] - self.time[index][j2])
ii = np.flatnonzero(dx <= self.max_lag)
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
cmat = np.eye(len(endog_expval))
cmat[j1, j2] = self.dep_params[dx]
cmat[j2, j1] = self.dep_params[dx]
return cmat, True
def covariance_matrix_grid(self, endog_expval, index):
from scipy.linalg import toeplitz
r = np.zeros(len(endog_expval))
r[0] = 1
r[1:self.max_lag + 1] = self.dep_params[1:]
return toeplitz(r), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
if not self.grid:
return super(Stationary, self).covariance_matrix_solve(
expval, index, stdev, rhs)
from statsmodels.tools.linalg import stationary_solve
r = np.zeros(len(expval))
r[0:self.max_lag] = self.dep_params[1:]
return [stationary_solve(r, x) for x in rhs]
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
lag = np.arange(self.max_lag + 1)
return pd.DataFrame({"Lag": lag, "Cov": self.dep_params})
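# --- Example (standalone sketch, assumed lag parameters): the banded
# working correlation used by Stationary in grid mode. Lag-k correlations
# up to `max_lag` are placed on a Toeplitz matrix and are zero beyond the
# maximum lag, mirroring covariance_matrix_grid above.
import numpy as np
from scipy.linalg import toeplitz
max_lag = 2
dep_params = np.array([1.0, 0.5, 0.2])   # assumed correlations at lags 0, 1, 2
n = 6                                    # cluster size
r = np.zeros(n)
r[0] = 1.0
r[1:max_lag + 1] = dep_params[1:]
print(toeplitz(r))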
class Autoregressive(CovStruct):
"""
A first-order autoregressive working dependence structure.
The dependence is defined in terms of the `time` component of the
parent GEE class, which defaults to the index position of each
value within its cluster, based on the order of values in the
input data set. Time represents a potentially multidimensional
index from which distances between pairs of observations can be
determined.
The correlation between two observations in the same cluster is
dep_params^distance, where `dep_params` contains the (scalar)
autocorrelation parameter to be estimated, and `distance` is the
distance between the two observations, calculated from their
corresponding time values. `time` is stored as an n_obs x k
matrix, where `k` represents the number of dimensions in the time
index.
The autocorrelation parameter is estimated using weighted
nonlinear least squares, regressing each value within a cluster on
each preceding value in the same cluster.
Parameters
----------
dist_func: function from R^k x R^k to R^+, optional
A function that computes the distance between the two
observations based on their `time` values.
References
----------
B Rosner, A Munoz. Autoregressive modeling for the analysis of
longitudinal data with unequally spaced examinations. Statistics
in medicine. Vol 7, 59-71, 1988.
"""
def __init__(self, dist_func=None):
super(Autoregressive, self).__init__()
# The function for determining distances based on time
if dist_func is None:
self.dist_func = lambda x, y: np.abs(x - y).sum()
else:
self.dist_func = dist_func
self.designx = None
# The autocorrelation parameter
self.dep_params = 0.
def update(self, params):
if self.model.weights is not None:
warnings.warn("weights not implemented for autoregressive "
"cov_struct, using unweighted covariance estimate",
NotImplementedWarning)
endog = self.model.endog_li
time = self.model.time_li
# Only need to compute this once
if self.designx is not None:
designx = self.designx
else:
designx = []
for i in range(self.model.num_group):
ngrp = len(endog[i])
if ngrp == 0:
continue
# Loop over pairs of observations within a cluster
for j1 in range(ngrp):
for j2 in range(j1):
designx.append(self.dist_func(time[i][j1, :],
time[i][j2, :]))
designx = np.array(designx)
self.designx = designx
scale = self.model.estimate_scale()
varfunc = self.model.family.variance
cached_means = self.model.cached_means
# Weights
var = 1. - self.dep_params ** (2 * designx)
var /= 1. - self.dep_params ** 2
wts = 1. / var
wts /= wts.sum()
residmat = []
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(scale * varfunc(expval))
resid = (endog[i] - expval) / stdev
ngrp = len(resid)
for j1 in range(ngrp):
for j2 in range(j1):
residmat.append([resid[j1], resid[j2]])
residmat = np.array(residmat)
# Need to minimize this
def fitfunc(a):
dif = residmat[:, 0] - (a ** designx) * residmat[:, 1]
return np.dot(dif ** 2, wts)
# Left bracket point
b_lft, f_lft = 0., fitfunc(0.)
# Center bracket point
b_ctr, f_ctr = 0.5, fitfunc(0.5)
while f_ctr > f_lft:
b_ctr /= 2
f_ctr = fitfunc(b_ctr)
if b_ctr < 1e-8:
self.dep_params = 0
return
# Right bracket point
b_rgt, f_rgt = 0.75, fitfunc(0.75)
while f_rgt < f_ctr:
b_rgt = b_rgt + (1. - b_rgt) / 2
f_rgt = fitfunc(b_rgt)
if b_rgt > 1. - 1e-6:
raise ValueError(
"Autoregressive: unable to find right bracket")
from scipy.optimize import brent
self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])
def covariance_matrix(self, endog_expval, index):
ngrp = len(endog_expval)
if self.dep_params == 0:
return np.eye(ngrp, dtype=np.float64), True
idx = np.arange(ngrp)
cmat = self.dep_params ** np.abs(idx[:, None] - idx[None, :])
return cmat, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
# The inverse of an AR(1) covariance matrix is tri-diagonal.
k = len(expval)
soln = []
# LHS has 1 column
if k == 1:
return [x / stdev ** 2 for x in rhs]
# LHS has 2 columns
if k == 2:
mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
mat /= (1. - self.dep_params ** 2)
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
else:
x1 = x / stdev[:, None]
x1 = np.dot(mat, x1)
if x.ndim == 1:
x1 /= stdev
else:
x1 /= stdev[:, None]
soln.append(x1)
return soln
# LHS has >= 3 columns: values c0, c1, c2 defined below give
# the inverse. c0 is on the diagonal, except for the first
# and last position. c1 is on the first and last position of
# the diagonal. c2 is on the sub/super diagonal.
c0 = (1. + self.dep_params ** 2) / (1. - self.dep_params ** 2)
c1 = 1. / (1. - self.dep_params ** 2)
c2 = -self.dep_params / (1. - self.dep_params ** 2)
soln = []
for x in rhs:
flatten = False
if x.ndim == 1:
x = x[:, None]
flatten = True
x1 = x / stdev[:, None]
z0 = np.zeros((1, x.shape[1]))
rhs1 = np.concatenate((x[1:, :], z0), axis=0)
rhs2 = np.concatenate((z0, x[0:-1, :]), axis=0)
y = c0 * x + c2 * rhs1 + c2 * rhs2
y[0, :] = c1 * x[0, :] + c2 * x[1, :]
y[-1, :] = c1 * x[-1, :] + c2 * x[-2, :]
y /= stdev[:, None]
if flatten:
y = np.squeeze(y)
soln.append(y)
return soln
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Autoregressive(1) dependence parameter: %.3f\n" %
self.dep_params)
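# --- Example (standalone sketch, synthetic numbers): a check of the
# closed-form AR(1) solve above. The tri-diagonal inverse built from
# c0, c1, c2 agrees with a direct solve against the correlation matrix
# r ** |i - j|.
import numpy as np
r, k = 0.6, 6
idx = np.arange(k)
R = r ** np.abs(idx[:, None] - idx[None, :])
c0 = (1.0 + r ** 2) / (1.0 - r ** 2)
c1 = 1.0 / (1.0 - r ** 2)
c2 = -r / (1.0 - r ** 2)
Rinv = np.zeros((k, k))
np.fill_diagonal(Rinv, c0)
Rinv[0, 0] = Rinv[-1, -1] = c1
Rinv[idx[:-1], idx[1:]] = c2
Rinv[idx[1:], idx[:-1]] = c2
x = np.linspace(1.0, 2.0, k)
assert np.allclose(Rinv @ x, np.linalg.solve(R, x))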
class CategoricalCovStruct(CovStruct):
"""
Parent class for covariance structures for categorical data models.
Attributes
----------
nlevel : int
The number of distinct levels for the outcome variable.
ibd : list
A list whose i^th element ibd[i] is an array whose rows
contain integer pairs (a,b), where endog_li[i][a:b] is the
subvector of binary indicators derived from the same ordinal
value.
"""
def initialize(self, model):
super(CategoricalCovStruct, self).initialize(model)
self.nlevel = len(model.endog_values)
self._ncut = self.nlevel - 1
from numpy.lib.stride_tricks import as_strided
b = np.dtype(np.int64).itemsize
ibd = []
for v in model.endog_li:
jj = np.arange(0, len(v) + 1, self._ncut, dtype=np.int64)
jj = as_strided(jj, shape=(len(jj) - 1, 2), strides=(b, b))
ibd.append(jj)
self.ibd = ibd
class GlobalOddsRatio(CategoricalCovStruct):
"""
Estimate the global odds ratio for a GEE with ordinal or nominal
data.
References
----------
PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
Ordinal Measurements". Journal of the American Statistical
Association Vol. 91, Issue 435 (1996).
Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
A Note on Working Correlation Structures. Biometrics Vol. 52,
No. 1 (Mar., 1996), pp. 354-361
http://www.jstor.org/stable/2533173
Notes
-----
The following data structures are calculated in the class:
'ibd' is a list whose i^th element ibd[i] is a sequence of integer
pairs (a,b), where endog_li[i][a:b] is the subvector of binary
indicators derived from the same ordinal value.
`cpp` is a dictionary where cpp[group] is a map from cut-point
pairs (c,c') to the indices of all between-subject pairs derived
from the given cut points.
"""
def __init__(self, endog_type):
super(GlobalOddsRatio, self).__init__()
self.endog_type = endog_type
self.dep_params = 0.
def initialize(self, model):
super(GlobalOddsRatio, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for GlobalOddsRatio "
"cov_struct, using unweighted covariance estimate",
NotImplementedWarning)
# Need to restrict to between-subject pairs
cpp = []
for v in model.endog_li:
# Number of subjects in this group
m = int(len(v) / self._ncut)
i1, i2 = np.tril_indices(m, -1)
cpp1 = {}
for k1 in range(self._ncut):
for k2 in range(k1 + 1):
jj = np.zeros((len(i1), 2), dtype=np.int64)
jj[:, 0] = i1 * self._ncut + k1
jj[:, 1] = i2 * self._ncut + k2
cpp1[(k2, k1)] = jj
cpp.append(cpp1)
self.cpp = cpp
# Initialize the dependence parameters
self.crude_or = self.observed_crude_oddsratio()
if self.model.update_dep:
self.dep_params = self.crude_or
def pooled_odds_ratio(self, tables):
"""
Returns the pooled odds ratio for a list of 2x2 tables.
The pooled odds ratio is the exponential of the inverse variance
weighted average of the sample log odds ratios of the tables.
"""
if len(tables) == 0:
return 1.
# Get the sampled odds ratios and variances
log_oddsratio, var = [], []
for table in tables:
lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
np.log(table[0, 1]) - np.log(table[1, 0])
log_oddsratio.append(lor)
var.append((1 / table.astype(np.float64)).sum())
# Calculate the inverse variance weighted average
wts = [1 / v for v in var]
wtsum = sum(wts)
wts = [w / wtsum for w in wts]
log_pooled_or = sum([w * e for w, e in zip(wts, log_oddsratio)])
return np.exp(log_pooled_or)
def covariance_matrix(self, expected_value, index):
vmat = self.get_eyy(expected_value, index)
vmat -= np.outer(expected_value, expected_value)
return vmat, False
def observed_crude_oddsratio(self):
"""
To obtain the crude (global) odds ratio, first pool all binary
indicators corresponding to a given pair of cut points (c,c'),
then calculate the odds ratio for this 2x2 table. The crude
odds ratio is the inverse variance weighted average of these
odds ratios. Since the covariate effects are ignored, this OR
will generally be greater than the stratified OR.
"""
cpp = self.cpp
endog = self.model.endog_li
# Storage for the contingency tables for each (c,c')
tables = {}
for ii in iterkeys(cpp[0]):
tables[ii] = np.zeros((2, 2), dtype=np.float64)
# Get the observed crude OR
for i in range(len(endog)):
# The observed joint values for the current cluster
yvec = endog[i]
endog_11 = np.outer(yvec, yvec)
endog_10 = np.outer(yvec, 1. - yvec)
endog_01 = np.outer(1. - yvec, yvec)
endog_00 = np.outer(1. - yvec, 1. - yvec)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()
return self.pooled_odds_ratio(list(itervalues(tables)))
def get_eyy(self, endog_expval, index):
"""
Returns a matrix V such that V[i,j] is the joint probability
that endog[i] = 1 and endog[j] = 1, based on the marginal
probabilities of endog and the global odds ratio `current_or`.
"""
current_or = self.dep_params
ibd = self.ibd[index]
# The between-observation joint probabilities
if current_or == 1.0:
vmat = np.outer(endog_expval, endog_expval)
else:
psum = endog_expval[:, None] + endog_expval[None, :]
pprod = endog_expval[:, None] * endog_expval[None, :]
pfac = np.sqrt((1. + psum * (current_or - 1.)) ** 2 +
4 * current_or * (1. - current_or) * pprod)
vmat = 1. + psum * (current_or - 1.) - pfac
vmat /= 2. * (current_or - 1)
# Fix E[YY'] for elements that belong to same observation
for bdl in ibd:
evy = endog_expval[bdl[0]:bdl[1]]
if self.endog_type == "ordinal":
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(evy, evy)
else:
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)
return vmat
def update(self, params):
"""
Update the global odds ratio based on the current value of
params.
"""
cpp = self.cpp
cached_means = self.model.cached_means
# This will happen if all the clusters have only
# one observation
if len(cpp[0]) == 0:
return
tables = {}
for ii in cpp[0]:
tables[ii] = np.zeros((2, 2), dtype=np.float64)
for i in range(self.model.num_group):
endog_expval, _ = cached_means[i]
emat_11 = self.get_eyy(endog_expval, i)
emat_10 = endog_expval[:, None] - emat_11
emat_01 = -emat_11 + endog_expval
emat_00 = 1. - (emat_11 + emat_10 + emat_01)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()
cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))
self.dep_params *= self.crude_or / cor_expval
if not np.isfinite(self.dep_params):
self.dep_params = 1.
warnings.warn("dep_params became inf, resetting to 1",
ConvergenceWarning)
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
return "Global odds ratio: %.3f\n" % self.dep_params
class OrdinalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for ordinal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(ev, ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
class NominalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for nominal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.diag(ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
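# --- Example (standalone sketch, assumed marginal probabilities): the
# within-observation covariance block used by OrdinalIndependence. For
# cumulative indicators with marginal means ev, the covariance is
# min(ev_i, ev_j) - ev_i * ev_j; blocks for different observations are
# zero. NominalIndependence uses diag(ev) - ev ev' with category
# probabilities instead.
import numpy as np
ev = np.array([0.7, 0.4, 0.1])   # assumed P(Y >= c) for three cut points
print(np.minimum.outer(ev, ev) - np.outer(ev, ev))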
class Equivalence(CovStruct):
"""
A covariance structure defined in terms of equivalence classes.
An 'equivalence class' is a set of pairs of observations such that
the covariance of every pair within the equivalence class has a
common value.
Parameters
----------
pairs : dict-like
A dictionary of dictionaries, where `pairs[group][label]`
provides the indices of all pairs of observations in the group
that have the same covariance value. Specifically,
`pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2`
are integer arrays of the same length. `j1[i], j2[i]` is one
index pair that belongs to the `label` equivalence class. Only
one triangle of each covariance matrix should be included.
Positions where j1 and j2 have the same value are variance
parameters.
labels : array_like
An array of labels such that every distinct pair of labels
defines an equivalence class. Either `labels` or `pairs` must
be provided. When the two labels in a pair are equal, two
equivalence classes are defined: one for the diagonal elements
(corresponding to variances) and one for the off-diagonal
elements (corresponding to covariances).
return_cov : boolean
If True, `covariance_matrix` returns an estimate of the
covariance matrix, otherwise returns an estimate of the
correlation matrix.
Notes
-----
Using `labels` to define the class is much easier than using
`pairs`, but is less general.
Any pair of values not contained in `pairs` will be assigned zero
covariance.
The index values in `pairs` are row indices into the `exog`
matrix. They are not updated if missing data are present. When
using this covariance structure, missing data should be removed
before constructing the model.
If using `labels`, after a model is defined using the covariance
structure it is possible to remove a label pair from the second
level of the `pairs` dictionary to force the corresponding
covariance to be zero.
Examples
--------
The following sets up the `pairs` dictionary for a model with two
groups, equal variance for all observations, and constant
covariance for all pairs of observations within each group.
>> pairs = {0: {}, 1: {}}
>> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2])
>> pairs[0][1] = np.tril_indices(3, -1)
>> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5])
>> pairs[1][2] = 3 + np.tril_indices(3, -1)
"""
def __init__(self, pairs=None, labels=None, return_cov=False):
super(Equivalence, self).__init__()
if (pairs is None) and (labels is None):
raise ValueError(
"Equivalence cov_struct requires either `pairs` or `labels`")
if (pairs is not None) and (labels is not None):
raise ValueError(
"Equivalence cov_struct accepts only one of `pairs` "
"and `labels`")
if pairs is not None:
import copy
self.pairs = copy.deepcopy(pairs)
if labels is not None:
self.labels = np.asarray(labels)
self.return_cov = return_cov
def _make_pairs(self, i, j):
"""
Create arrays containing all unique ordered pairs of i, j.
The arrays i and j must be one-dimensional containing non-negative
integers.
"""
mat = np.zeros((len(i) * len(j), 2), dtype=np.int32)
# Create the pairs and order them
f = np.ones(len(j))
mat[:, 0] = np.kron(f, i).astype(np.int32)
f = np.ones(len(i))
mat[:, 1] = np.kron(j, f).astype(np.int32)
mat.sort(1)
# Remove repeated rows
try:
dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1]))
bmat = np.ascontiguousarray(mat).view(dtype)
_, idx = np.unique(bmat, return_index=True)
except TypeError:
# workaround for old numpy that can't call unique with complex
# dtypes
rs = np.random.RandomState(4234)
bmat = np.dot(mat, rs.uniform(size=mat.shape[1]))
_, idx = np.unique(bmat, return_index=True)
mat = mat[idx, :]
return mat[:, 0], mat[:, 1]
def _pairs_from_labels(self):
from collections import defaultdict
pairs = defaultdict(lambda: defaultdict(lambda: None))
model = self.model
df = pd.DataFrame({"labels": self.labels, "groups": model.groups})
gb = df.groupby(["groups", "labels"])
ulabels = np.unique(self.labels)
for g_ix, g_lb in enumerate(model.group_labels):
# Loop over label pairs
for lx1 in range(len(ulabels)):
for lx2 in range(lx1 + 1):
lb1 = ulabels[lx1]
lb2 = ulabels[lx2]
try:
i1 = gb.groups[(g_lb, lb1)]
i2 = gb.groups[(g_lb, lb2)]
except KeyError:
continue
i1, i2 = self._make_pairs(i1, i2)
clabel = str(lb1) + "/" + str(lb2)
# Variance parameters belong in their own equiv class.
jj = np.flatnonzero(i1 == i2)
if len(jj) > 0:
clabelv = clabel + "/v"
pairs[g_lb][clabelv] = (i1[jj], i2[jj])
# Covariance parameters
jj = np.flatnonzero(i1 != i2)
if len(jj) > 0:
i1 = i1[jj]
i2 = i2[jj]
pairs[g_lb][clabel] = (i1, i2)
self.pairs = pairs
def initialize(self, model):
super(Equivalence, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for equalence cov_struct, "
"using unweighted covariance estimate",
NotImplementedWarning)
if not hasattr(self, 'pairs'):
self._pairs_from_labels()
# Initialize so that any equivalence class containing a
# variance parameter has value 1.
self.dep_params = defaultdict(lambda: 0.)
self._var_classes = set([])
for gp in self.model.group_labels:
for lb in self.pairs[gp]:
j1, j2 = self.pairs[gp][lb]
if np.any(j1 == j2):
if not np.all(j1 == j2):
warnings.warn(
"equivalence class contains both variance "
"and covariance parameters", OutputWarning)
self._var_classes.add(lb)
self.dep_params[lb] = 1
# Need to start indexing at 0 within each group.
# rx maps old indices to new indices
rx = -1 * np.ones(len(self.model.endog), dtype=np.int32)
for g_ix, g_lb in enumerate(self.model.group_labels):
ii = self.model.group_indices[g_lb]
rx[ii] = np.arange(len(ii), dtype=np.int32)
# Reindex
for gp in self.model.group_labels:
for lb in self.pairs[gp].keys():
a, b = self.pairs[gp][lb]
self.pairs[gp][lb] = (rx[a], rx[b])
def update(self, params):
endog = self.model.endog_li
varfunc = self.model.family.variance
cached_means = self.model.cached_means
dep_params = defaultdict(lambda: [0., 0., 0.])
n_pairs = defaultdict(lambda: 0)
dim = len(params)
for k, gp in enumerate(self.model.group_labels):
expval, _ = cached_means[k]
stdev = np.sqrt(varfunc(expval))
resid = (endog[k] - expval) / stdev
for lb in self.pairs[gp].keys():
if (not self.return_cov) and lb in self._var_classes:
continue
jj = self.pairs[gp][lb]
dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]])
if not self.return_cov:
dep_params[lb][1] += np.sum(resid[jj[0]] ** 2)
dep_params[lb][2] += np.sum(resid[jj[1]] ** 2)
n_pairs[lb] += len(jj[0])
if self.return_cov:
for lb in dep_params.keys():
dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim)
else:
for lb in dep_params.keys():
den = np.sqrt(dep_params[lb][1] * dep_params[lb][2])
dep_params[lb] = dep_params[lb][0] / den
for lb in self._var_classes:
dep_params[lb] = 1.
self.dep_params = dep_params
self.n_pairs = n_pairs
def covariance_matrix(self, expval, index):
dim = len(expval)
cmat = np.zeros((dim, dim))
g_lb = self.model.group_labels[index]
for lb in self.pairs[g_lb].keys():
j1, j2 = self.pairs[g_lb][lb]
cmat[j1, j2] = self.dep_params[lb]
cmat = cmat + cmat.T
np.fill_diagonal(cmat, cmat.diagonal() / 2)
return cmat, not self.return_cov
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
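# --- Example (usage sketch with synthetic data; the two-label scheme is an
# illustrative assumption): the `labels` form of Equivalence. Every distinct
# pair of labels defines one working-correlation parameter, plus one class
# per label for the (fixed) variances.
import numpy as np
import statsmodels.api as sm
rng = np.random.default_rng(3)
n = 120
groups = np.repeat(np.arange(30), 4)
labels = np.tile([0, 0, 1, 1], 30)
x = rng.standard_normal(n)
y = 0.5 * x + rng.standard_normal(n)
cov = sm.cov_struct.Equivalence(labels=labels)
model = sm.GEE(y, sm.add_constant(x), groups=groups, cov_struct=cov)
result = model.fit()
print(dict(model.cov_struct.dep_params))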
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
KY Liang and S Zeger. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
S Zeger and KY Liang. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
A Rotnitzky and NP Jewell (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
LA Mancl, TA DeRouen (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from statsmodels.compat.python import range, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array_like
The exogenous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array_like
The exogenous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
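# --- Example (standalone sketch, synthetic L and R): the reparameterization
# performed by ParameterConstraint above. param0 is one particular solution
# of L param = R, the columns of lhs0 span the null space of L, and any
# param0 + lhs0 @ b also satisfies the constraint.
import numpy as np
rng = np.random.default_rng(4)
q, p = 2, 5
L = rng.standard_normal((q, p))
R = rng.standard_normal(q)
u, s, vt = np.linalg.svd(L.T, full_matrices=1)
lhs0 = u[:, len(s):]                 # basis for the null space of L
lhs1 = u[:, :len(s)]                 # basis for the row space of L
param0 = lhs1 @ (vt @ R / s)         # one solution of L param = R
assert np.allclose(L @ param0, R)
assert np.allclose(L @ lhs0, 0)
b = rng.standard_normal(p - q)
assert np.allclose(L @ (param0 + lhs0 @ b), R)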
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array_like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array_like
2d array of exogenous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array_like
A 1d array of length `nobs` containing the group labels.
time : array_like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array_like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array_like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array_like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
             + ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian     |   x    x                        x
inv Gaussian |   x    x                        x
binomial     |   x    x    x     x      x      x    x           x     x
Poisson      |   x    x                        x
neg binomial |   x    x                        x        x
gamma        |   x    x                        x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array_like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset' and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array_like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array_like
The data for the model.
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to use when
fitting the model.
time : array_like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array_like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array_like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the exposure
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : string or array_like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms; e.g., `data` can be a numpy structured or rec array, a
dictionary, or a pandas DataFrame. `args` and `kwargs` are passed
on to the model instantiation.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
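    # Usage sketch (illustrative comment only): ``df`` below is a
    # hypothetical pandas DataFrame with an outcome "y", covariates
    # "age" and "trt", and a cluster label "subject".
    #
    #     import statsmodels.api as sm
    #     fam = sm.families.Poisson()
    #     cs = sm.cov_struct.Exchangeable()
    #     model = sm.GEE.from_formula("y ~ age + trt", groups="subject",
    #                                 data=df, family=fam, cov_struct=cs)
    #     result = model.fit()
    #     print(result.summary())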
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
        if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = np.dot(qc.T, score) / scale
amat = np.linalg.inv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
The scale parameter for binomial, Poisson, and multinomial
families is fixed at 1, otherwise it is estimated from
the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed.
lin_pred : array_like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array_like
Values of the independent variables at which the derivative
is calculated.
params : array_like
Parameter values at which the derivative is calculated.
offset_exposure : array_like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array_like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array_like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array_like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array_like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array_like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
                return None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array_like
Parameters / coefficients of a marginal regression model.
exog : array_like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array_like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array_like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
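    # Usage sketch (illustrative comment only): ``result`` is a fitted
    # GEEResults instance and ``x_new`` a hypothetical design array with
    # the same columns as the model exog.
    #
    #     yhat = result.model.predict(result.params)          # fitted means
    #     yhat_new = result.model.predict(result.params, exog=x_new)
    #     eta_new = result.model.predict(result.params, exog=x_new,
    #                                    linear=True)          # linear predictor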
def _starting_params(self):
model = GLM(self.endog, self.exog, family=self.family,
offset=self._offset_exposure,
freq_weights=self.weights)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if x is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
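    # Usage sketch (illustrative comment only): ``endog``, ``exog`` and
    # ``groups`` are hypothetical arrays of responses, covariates and
    # cluster labels.
    #
    #     model = GEE(endog, exog, groups, family=families.Poisson(),
    #                 cov_struct=cov_structs.Exchangeable())
    #     result = model.fit(maxiter=100, cov_type="bias_reduced")
    #     print(result.converged)
    #     print(result.summary())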
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
hm *= self.estimate_scale()
sn -= self.num_group * en * params
return np.linalg.solve(hm, sn), hm
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : integer
The maximum number of iterations.
ddof_scale : integer
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : integer
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
Wang L, Zhou J, Qu A. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
            if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
self._update_assoc(mean_params)
ma = self._regularized_covmat(mean_params)
cov = np.linalg.solve(hm, ma)
cov = np.linalg.solve(hm, cov.T)
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, mean_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
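    # Usage sketch (illustrative comment only, canonical link assumed):
    # ``endog``, ``exog`` and ``groups`` are hypothetical arrays.
    #
    #     model = GEE(endog, exog, groups, family=families.Binomial())
    #     result = model.fit_regularized(pen_wt=0.05)
    #     nonzero = np.flatnonzero(result.params)   # selected coefficients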
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array_like
A parameter vector estimate for the reduced model.
bcov : array_like
The covariance matrix of mean_params.
Returns
-------
mean_params : array_like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array_like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : array_like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : array_like
An estimate of the covariance matrix for the
model parameters. Conventionally this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the mean and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare mean structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerically evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for all families and links. Many other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases where the link function is canonical. These
analytical expressions may omit additive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differ from the values reported by other packages. However only
the differences between two QIC values calculated for different models
using the same data are meaningful. Our QIC should produce the same
QIC differences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for all models being compared.
References
----------
.. [*] W. Pan (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
means = []
omega = 0.0
# omega^-1 is the model-based covariance assuming independence
for i in range(self.num_group):
expval, lpr = self.cached_means[i]
means.append(expval)
dmat = self.mean_deriv(self.exog_li[i], lpr)
omega += np.dot(dmat.T, dmat) / scale
means = np.concatenate(means)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
du = means - self.endog
nstep = 10000
qv = np.empty(nstep)
xv = np.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = self.endog + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -np.sum(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))
return ql, qic, qicu
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
            if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
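    # Usage sketch (illustrative comment only): ``result`` is a fitted
    # GEEResults instance; the bias-reduced errors are only available if
    # the model was fit with cov_type="bias_reduced".
    #
    #     se_robust = result.standard_errors()                    # sandwich
    #     se_naive = result.standard_errors(cov_type="naive")     # model-based
    #     se_bc = result.standard_errors(cov_type="bias_reduced")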
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
Returns the residuals, the endogeneous data minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
        A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogeneous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not allow comparisons of
QIC values between models.
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
warnings.warn("QIC values obtained using scale=None are not "
"appropriate for comparing models")
if scale is None:
scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
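    # Usage sketch (illustrative comment only): ``result1``, ``result2``
    # and ``result_full`` are hypothetical fitted GEEResults objects for
    # the same data; use a common scale estimate so the QICs are comparable.
    #
    #     qic1, qicu1 = result1.qic(scale=result_full.scale)
    #     qic2, qicu2 = result2.qic(scale=result_full.scale)
    #     # the mean structure with the smaller QIC is preferred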
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / np.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val * self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(np.dot(self.model.exog,
self.params))
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc': ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc': ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc': ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` =.05 returns a 95% confidence interval.
cols : array_like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
            must be one of 'robust', 'naive', and 'bias_reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
# super doesn't allow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
            Names for the exogenous variables; the default is the
            model's `exog_names`. Must match the number of parameters
            in the model.
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : str
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
            title = self.model.__class__.__name__ + ' ' + \
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
-'mean', The marginal effects at the mean of each regressor.
-'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the key
as the zero-indexed column number and the value of the dictionary.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
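    # Usage sketch (illustrative comment only): ``result`` is a fitted
    # GEEResults instance for a binary (logistic) GEE.
    #
    #     marg = result.get_margeff(at="overall", method="dydx")
    #     # ``marg`` is a GEEMargins object holding the average marginal
    #     # effects of each covariate on the response probability.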
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array_like
If scalar, the number of points equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : integer
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
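    # Usage sketch (illustrative comment only): ``result`` comes from a
    # GEE fit in which a ``time`` variable was supplied, so that the
    # within-cluster distances on the horizontal axis are meaningful.
    #
    #     fig = result.plot_isotropic_dependence(xpoints=15, min_n=30)
    #     fig.savefig("isotropic_dependence.png")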
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array_like
The first dep_params in the sequence
dep_params_last : array_like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array_like
The GEEResults objects resulting from the fits.
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
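    # Usage sketch (illustrative comment only): ``result`` was fit with an
    # Exchangeable working correlation, whose ``dep_params`` is a scalar.
    #
    #     fits = result.sensitivity_params(0.0, 0.5, 6)
    #     for r in fits:
    #         print(r.cov_struct.dep_params, r.params)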
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if type(self.exog_orig) == pd.DataFrame:
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1] + 1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
return OrdinalGEEResultsWrapper(ord_rslt)
fit.__doc__ = _gee_fit_doc
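    # Usage sketch (illustrative comment only): ``endog`` holds ordered
    # categories coded as increasing numbers; ``exog`` and ``groups`` are
    # hypothetical arrays.
    #
    #     model = OrdinalGEE(endog, exog, groups,
    #                        cov_struct=cov_structs.OrdinalIndependence())
    #     result = model.fit()
    #     print(result.summary())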
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
        for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array_like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
        Examples
        --------
        We have a model with covariates 'age' and 'sex', and wish to
        plot the probabilities P(endog=y | exog) for males (sex=0) and
        for females (sex=1), as separate paths on the plot. Since
        'age' is not included below in the map, it is held fixed at
        its mean value.
        >>> ev = [{"sex": 1}, {"sex": 0}]
        >>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i, x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i, vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
def _score_test_submodel(par, sub):
"""
Return transformation matrices for design matrices.
Parameters
----------
par : instance
The parent model
sub : instance
The sub-model
Returns
-------
qm : array_like
Matrix mapping the design matrix of the parent to the design matrix
for the sub-model.
qc : array_like
Matrix mapping the design matrix of the parent to the orthogonal
complement of the columnspace of the submodel in the columnspace
of the parent.
Notes
-----
Returns None, None if the provided submodel is not actually a submodel.
"""
x1 = par.exog
x2 = sub.exog
u, s, vt = np.linalg.svd(x1, 0)
# Get the orthogonal complement of col(x2) in col(x1).
a, _, _ = np.linalg.svd(x2, 0)
a = u - np.dot(a, np.dot(a.T, u))
x2c, sb, _ = np.linalg.svd(a, 0)
x2c = x2c[:, sb > 1e-12]
# x1 * qm = x2
qm = np.dot(vt.T, np.dot(u.T, x2) / s[:, None])
e = np.max(np.abs(x2 - np.dot(x1, qm)))
if e > 1e-8:
return None, None
# x1 * qc = x2c
qc = np.dot(vt.T, np.dot(u.T, x2c) / s[:, None])
return qm, qc
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults) # noqa:E305
class NominalGEE(GEE):
__doc__ = (
" Estimation of nominal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(
endog, exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut + 1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(
endog, exog, groups, time, family, cov_struct, missing,
offset, dep_data, constraint)
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if isinstance(self.exog_orig, pd.DataFrame):
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if isinstance(self.endog_orig, pd.Series):
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lin_pred : array_like
The values of the linear predictor, length must be multiple
of `ncut`.
Returns
-------
The derivative of the expected endog with respect to the
parameters.
"""
expval = np.exp(lin_pred)
# Reshape so that each row contains all the indicators
# corresponding to one multinomial observation.
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
# The normalizing constant for the multinomial probabilities.
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
# The multinomial probabilities
mprob = expval / denom
# First term of the derivative: denom * expval' / denom^2 =
# expval' / denom.
dmat = mprob[:, None] * exog
# Second term of the derivative: -expval * denom' / denom^2
ddenom = expval[:, None] * exog
dmat -= mprob[:, None] * ddenom / denom[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog for the
multinomial model, used in analyzing marginal effects.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
        params : array_like
            Parameter values at which the derivative is computed.
Returns
-------
The value of the derivative of the expected endog with respect
to exog.
Notes
-----
        offset_exposure must be None for the multinomial family.
"""
if offset_exposure is not None:
warnings.warn("Offset/exposure ignored for the multinomial family",
ValueWarning)
lpr = np.dot(exog, params)
expval = np.exp(lpr)
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
bmat0 = np.outer(np.ones(exog.shape[0]), params)
# Masking matrix
qmat = []
for j in range(self.ncut):
ee = np.zeros(self.ncut, dtype=np.float64)
ee[j] = 1
qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))
qmat = np.array(qmat)
qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)
bmat = bmat0 * qmat
dmat = expval[:, None] * bmat / denom[:, None]
expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))
expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))
dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None] ** 2
return dmat
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
if rslt is None:
warnings.warn("GEE updates did not converge",
ConvergenceWarning)
return None
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to a NominalGEEResults
nom_rslt = NominalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(nom_rslt, k, getattr(rslt, k))
return NominalGEEResultsWrapper(nom_rslt)
fit.__doc__ = _gee_fit_doc
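# --- Illustrative usage sketch (added for exposition; not part of the original
# --- statsmodels source). The data below are made up; real applications would
# --- pass their own endog, exog and group labels.
def _example_nominal_gee():
    import numpy as np
    rng = np.random.default_rng(0)
    n = 200
    endog = rng.integers(0, 3, size=n)                  # nominal response, 3 levels
    exog = np.column_stack([np.ones(n), rng.normal(size=n)])
    groups = np.repeat(np.arange(n // 4), 4)            # clusters of size 4
    result = NominalGEE(endog, exog, groups).fit()
    return result.summary()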
class NominalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for a nominal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in a nominal model,
for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array_like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Example:
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
>>> rslt.distribution_plot(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params,
(ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model"
% k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
ax.plot(self.model.endog_values, pr, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
class NominalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults) # noqa:E305
class _MultinomialLogit(Link):
"""
The multinomial logit transform, only for use with GEE.
Notes
-----
The data are assumed coded as binary indicators, where each
observed multinomial value y is coded as I(y == S[0]),..., I(y ==
S[-1]), where S is the set of possible response labels, excluding
the largest one. Therefore functions in this class should only
be called using a vector argument whose length is a multiple of |S|
= ncut, which is an argument to be provided when initializing the
class.
call and derivative use a private method _clean to trim p by 1e-10
so that p is in (0, 1)
"""
def __init__(self, ncut):
self.ncut = ncut
def inverse(self, lpr):
"""
Inverse of the multinomial logit transform, which gives the
expected values of the data as a function of the linear
predictors.
Parameters
----------
lpr : array_like (length must be divisible by `ncut`)
The linear predictors
Returns
-------
prob : array
Probabilities, or expected values
"""
expval = np.exp(lpr)
denom = 1 + np.reshape(expval, (len(expval) // self.ncut,
self.ncut)).sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
prob = expval / denom
return prob
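# --- Illustrative sketch (added for exposition; not part of the original source).
# --- With ncut = 2 (three response levels) each observation contributes two
# --- linear predictors; `inverse` maps them to the first two category
# --- probabilities, and the baseline probability is one minus their sum.
def _example_multinomial_logit_inverse():
    import numpy as np
    link = _MultinomialLogit(ncut=2)
    lpr = np.array([0.5, -0.2])       # linear predictors for one observation
    prob = link.inverse(lpr)          # exp(lpr) / (1 + exp(0.5) + exp(-0.2))
    baseline = 1 - prob.sum()         # probability of the reference category
    return prob, baseline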
class _Multinomial(families.Family):
"""
Pseudo-link function for fitting nominal multinomial models with
GEE. Not for use outside the GEE class.
"""
links = [_MultinomialLogit, ]
variance = varfuncs.binary
safe_links = [_MultinomialLogit, ]
def __init__(self, nlevels):
"""
Parameters
----------
nlevels : integer
The number of distinct categories for the multinomial
distribution.
"""
self.initialize(nlevels)
def initialize(self, nlevels):
self.ncut = nlevels - 1
self.link = _MultinomialLogit(self.ncut)
class GEEMargins(object):
"""
Estimated marginal effects for a regression model fit with GEE.
Parameters
----------
results : GEEResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = {}
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = {}
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrames
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0)!= 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i, name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return stats.norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = stats.norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]), ]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
const_idx = model.data.const_idx
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
# NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:, eq], margeff_se[:, eq],
tvalues[:, eq], pvalues[:, eq], conf_int[:, :, eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha,
use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method],'std err', 'z',
'P>|z|',
'[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
tble.insert_header_row(0, header)
# from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method],'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
self._reset() # always reset the cache when this is called
# TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx = exog.var(0)!= 0
const_idx = model.data.const_idx
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
effects = _effects_at(effects, at)
if at == 'all':
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(
model, params, exog, results.cov_params(), at,
model._derivative_exog, dummy_idx, count_idx,
method, 1)
# don't care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx] |
|
statsmodels__statsmodels | gmm.rst | Description | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/gmm.rst | [
"statsmodels__statsmodels/statsmodels/sandbox/regression/gmm.py"
] | Generalized Method of Moments gmm
statsmodels.gmm contains model classes and functions that are based on
estimation with Generalized Method of Moments. Currently the general
non-linear case is implemented. An example class for the standard linear
instrumental variable model is included; it was introduced as a test
case and works correctly, but it does not take the linear structure into
account. For the linear case we intend to introduce a specific
implementation which will be faster and numerically more accurate.
Currently, GMM takes arbitrary non-linear moment conditions and
calculates the estimates either for a given weighting matrix or
iteratively, by alternating between estimating the optimal weighting
matrix and estimating the parameters. Models with different moment
conditions are implemented by subclassing GMM; in the minimal
implementation only the moment conditions, `momcond`, have to be defined
(a minimal subclassing sketch follows the GMM class in the module source
below). | '''Generalized Method of Moments, GMM, and Two-Stage Least Squares for
instrumental variables IV2SLS
Issues
------
* number of parameters, nparams, and starting values for parameters
Where to put them? start was initially taken from global scope (bug)
* When optimal weighting matrix cannot be calculated numerically
In DistQuantilesGMM, we only have one row of moment conditions, not a
moment condition for each observation, calculation for cov of moments
breaks down. iter=1 works (weights is identity matrix)
-> need method to do one iteration with an identity matrix or an
analytical weighting matrix given as parameter.
-> add result statistics for this case, e.g. cov_params, I have it in the
standalone function (and in calc_covparams which is a copy of it),
but not tested yet.
DONE `fitonce` in DistQuantilesGMM, params are the same as in direct call to fitgmm
move it to GMM class (once it's clearer for which cases I need this.)
* GMM doesn't know anything about the underlying model, e.g. y = X beta + u or panel
data model. It would be good if we can reuse methods from regressions, e.g.
predict, fitted values, calculating the error term, and some result statistics.
What's the best way to do this, multiple inheritance, outsourcing the functions,
mixins or delegation (a model creates a GMM instance just for estimation).
Unclear
-------
* dof in Hausman
- based on rank
- differs between IV2SLS method and function used with GMM or (IV2SLS)
- with GMM, covariance matrix difference has negative eigenvalues in iv example,???
* jtest/jval
- I'm not sure about the normalization (multiply or divide by nobs) in jtest.
need a test case. Scaling of jval is irrelevant for estimation.
jval in jtest looks too large in example, but I have no idea about the size
* bse for fitonce look too large (no time for checking now)
formula for calc_cov_params for the case without optimal weighting matrix
is wrong. I don't have an estimate for omega in that case. And I'm confusing
between weights and omega, which are *not* the same in this case.
Author: josef-pktd
License: BSD (3-clause)
'''
from statsmodels.compat.python import lrange
import numpy as np
from scipy import optimize, stats
from statsmodels.tools.numdiff import approx_fprime
from statsmodels.base.model import (Model,
LikelihoodModel, LikelihoodModelResults)
from statsmodels.regression.linear_model import (OLS, RegressionResults,
RegressionResultsWrapper)
import statsmodels.stats.sandwich_covariance as smcov
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import _ensure_2d
DEBUG = 0
def maxabs(x):
'''just a shortcut to np.abs(x).max()
'''
return np.abs(x).max()
class IV2SLS(LikelihoodModel):
"""
Instrumental variables estimation using Two-Stage Least-Squares (2SLS)
Parameters
----------
endog: array
Endogenous variable, 1-dimensional or 2-dimensional array nobs by 1
exog : array
Explanatory variables, 1-dimensional or 2-dimensional array nobs by k
instrument : array
Instruments for explanatory variables. Must contain both exog
variables that are not being instrumented and instruments
Notes
-----
All variables in exog are instrumented in the calculations. If variables
in exog are not supposed to be instrumented, then these variables
must also be included in the instrument array.
Degrees of freedom in the calculation of the standard errors uses
`df_resid = (nobs - k_vars)`.
(This corresponds to the `small` option in Stata's ivreg2.)
"""
def __init__(self, endog, exog, instrument=None):
self.instrument, self.instrument_names = _ensure_2d(instrument, True)
super(IV2SLS, self).__init__(endog, exog)
# where is this supposed to be handled
# Note: Greene p.77/78 dof correction is not necessary (because only
# asy results), but most packages do it anyway
self.df_resid = self.exog.shape[0] - self.exog.shape[1]
#self.df_model = float(self.rank - self.k_constant)
self.df_model = float(self.exog.shape[1] - self.k_constant)
def initialize(self):
self.wendog = self.endog
self.wexog = self.exog
def whiten(self, X):
"""Not implemented"""
pass
def fit(self):
'''estimate model using 2SLS IV regression
Returns
-------
results : instance of RegressionResults
regression result
Notes
-----
This returns a generic RegressionResults instance as defined for the
linear models.
Parameter estimates and covariance are correct, but other results
haven't been tested yet, to see whether they apply without changes.
'''
#Greene 5th edt., p.78 section 5.4
#move this maybe
y,x,z = self.endog, self.exog, self.instrument
# TODO: this uses "textbook" calculation, improve linalg
ztz = np.dot(z.T, z)
ztx = np.dot(z.T, x)
self.xhatparams = xhatparams = np.linalg.solve(ztz, ztx)
#print 'x.T.shape, xhatparams.shape', x.shape, xhatparams.shape
F = xhat = np.dot(z, xhatparams)
FtF = np.dot(F.T, F)
self.xhatprod = FtF #store for Hausman specification test
Ftx = np.dot(F.T, x)
Fty = np.dot(F.T, y)
params = np.linalg.solve(FtF, Fty)
Ftxinv = np.linalg.inv(Ftx)
self.normalized_cov_params = np.dot(Ftxinv.T, np.dot(FtF, Ftxinv))
lfit = IVRegressionResults(self, params,
normalized_cov_params=self.normalized_cov_params)
lfit.exog_hat_params = xhatparams
lfit.exog_hat = xhat # TODO: do we want to store this, might be large
self._results_ols2nd = OLS(y, xhat).fit()
return RegressionResultsWrapper(lfit)
# copied from GLS, because I subclass currently LikelihoodModel and not GLS
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
exog : array_like
Design / exogenous data
params : array_like, optional after fit has been called
Parameters of a linear model
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
if exog is None:
exog = self.exog
return np.dot(exog, params)
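# --- Illustrative usage sketch (added for exposition; not part of the original
# --- source). Simulated data with one endogenous regressor and one excluded
# --- instrument; all names below are made up.
def _example_iv2sls():
    import numpy as np
    rng = np.random.default_rng(0)
    n = 500
    instr = rng.normal(size=n)                          # excluded instrument
    u = rng.normal(size=n)                              # structural error
    x_endog = 0.8 * instr + 0.5 * u + rng.normal(size=n)
    y = 1.0 + 2.0 * x_endog + u
    exog = np.column_stack([np.ones(n), x_endog])
    instrument = np.column_stack([np.ones(n), instr])   # exogenous columns + instrument
    results = IV2SLS(y, exog, instrument).fit()
    return results.params                               # roughly [1.0, 2.0]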
class IVRegressionResults(RegressionResults):
"""
Results class for an IV2SLS model.
Most of the methods and attributes are inherited from RegressionResults.
The special method added for IV2SLS is:
- spec_hausman
See Also
--------
RegressionResults
"""
@cache_readonly
def fvalue(self):
const_idx = self.model.data.const_idx
# if constant is implicit or missing, return nan see #2444, #3544
if const_idx is None:
return np.nan
else:
k_vars = len(self.params)
restriction = np.eye(k_vars)
idx_noconstant = lrange(k_vars)
del idx_noconstant[const_idx]
fval = self.f_test(restriction[idx_noconstant]).fvalue # without constant
return fval
def spec_hausman(self, dof=None):
'''Hausman's specification test
See Also
--------
spec_hausman : generic function for Hausman's specification test
'''
#use normalized cov_params for OLS
endog, exog = self.model.endog, self.model.exog
resols = OLS(endog, exog).fit()
normalized_cov_params_ols = resols.model.normalized_cov_params
# Stata `ivendog` doesn't use df correction for se
#se2 = resols.mse_resid #* resols.df_resid * 1. / len(endog)
se2 = resols.ssr / len(endog)
params_diff = self.params - resols.params
cov_diff = np.linalg.pinv(self.model.xhatprod) - normalized_cov_params_ols
#TODO: the following is very inefficient, solves problem (svd) twice
#use linalg.lstsq or svd directly
#cov_diff will very often be in-definite (singular)
if not dof:
dof = np.linalg.matrix_rank(cov_diff)
cov_diffpinv = np.linalg.pinv(cov_diff)
H = np.dot(params_diff, np.dot(cov_diffpinv, params_diff))/se2
pval = stats.chi2.sf(H, dof)
return H, pval, dof
# copied from regression results with small changes, no llf
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` where ## indexes the regressors
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
#TODO: import where we need it (for now), add as cached attributes
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
#TODO: reuse condno from somewhere else?
#condno = np.linalg.cond(np.dot(self.wexog.T, self.wexog))
wexog = self.model.wexog
eigvals = np.linalg.linalg.eigvalsh(np.dot(wexog.T, wexog))
eigvals = np.sort(eigvals) #in increasing order
condno = np.sqrt(eigvals[-1]/eigvals[0])
# TODO: check what is valid.
# box-pierce, breusch-pagan, durbin's h are not with endogenous on rhs
# use Cumby Huizinga 1992 instead
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[0])
#TODO not used yet
#diagn_left_header = ['Models stats']
#diagn_right_header = ['Residual stats']
#TODO: requiring list/iterable is a bit annoying
#need more control over formatting
#TODO: default don't work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Two Stage']),
('', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
('Df Model:', None), #[self.df_model])
]
top_right = [('R-squared:', ["%#8.3f" % self.rsquared]),
('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue] ),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
#('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
#('AIC:', ["%#8.4g" % self.aic]),
#('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
#create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=True)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
return smry
############# classes for Generalized Method of Moments GMM
_gmm_options = '''\
Options for GMM
---------------
Type of GMM
~~~~~~~~~~~
- one-step
- iterated
- CUE : not tested yet
weight matrix
~~~~~~~~~~~~~
- `weights_method` : string, defines method for robust
Options here are similar to :mod:`statsmodels.stats.robust_covariance`
default is heteroscedasticity consistent, HC0
currently available methods are
- `cov` : HC0, optionally with degrees of freedom correction
- `hac` :
- `iid` : untested, only for Z*u case, IV cases with u as error indep of Z
- `ac` : not available yet
- `cluster` : not connected yet
- others from robust_covariance
other arguments:
- `wargs` : tuple or dict, required arguments for weights_method
- `centered` : bool,
indicates whether moments are centered for the calculation of the weights
and covariance matrix, applies to all weight_methods
- `ddof` : int
degrees of freedom correction, applies currently only to `cov`
- maxlag : int
number of lags to include in HAC calculation, applies only to `hac`
- others not yet, e.g. groups for cluster robust
covariance matrix
~~~~~~~~~~~~~~~~~
The same options as for weight matrix also apply to the calculation of the
estimate of the covariance matrix of the parameter estimates.
The additional option is
- `has_optimal_weights`: If true, then the calculation of the covariance
matrix assumes that we have optimal GMM with :math:`W = S^{-1}`.
Default is True.
TODO: do we want to have a different default after `onestep`?
'''
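# --- Illustrative sketch of the options documented above (added for exposition;
# --- not part of the original source). `mod` stands for any GMM subclass
# --- instance, for example the LinearIVGMM defined later in this module.
def _example_gmm_fit_options(mod):
    # iterated (two-step) GMM with a HAC weight matrix using 4 lags
    res_hac = mod.fit(maxiter=2, weights_method='hac', wargs={'maxlag': 4})
    # one-step GMM; the covariance should then not assume optimal weights
    res_onestep = mod.fit(maxiter=0, has_optimal_weights=False)
    return res_hac, res_onestep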
class GMM(Model):
'''
Class for estimation by Generalized Method of Moments
needs to be subclassed, where the subclass defined the moment conditions
`momcond`
Parameters
----------
endog : array
endogenous variable, see notes
exog : array
array of exogenous variables, see notes
instrument : array
array of instruments, see notes
nmoms : None or int
number of moment conditions, if None then it is set equal to the
number of columns of instruments. Mainly needed to determine the shape
or size of start parameters and starting weighting matrix.
kwds : anything
this is mainly if additional variables need to be stored for the
calculations of the moment conditions
Attributes
----------
results : instance of GMMResults
currently just a storage class for params and cov_params without its
own methods
bse : property
return bse
Notes
-----
The GMM class only uses the moment conditions and does not use any data
directly. endog, exog, instrument and kwds in the creation of the class
instance are only used to store them for access in the moment conditions.
Which of these are required and how they are used depends on the moment
conditions of the subclass.
Warning:
Options for various methods have not been fully implemented and
are still missing in several methods.
TODO:
currently onestep (maxiter=0) still produces an updated estimate of bse
and cov_params.
'''
results_class = 'GMMResults'
def __init__(self, endog, exog, instrument, k_moms=None, k_params=None,
missing='none', **kwds):
'''
maybe drop and use mixin instead
TODO: GMM doesn't really care about the data, just the moment conditions
'''
instrument = self._check_inputs(instrument, endog) # attaches if needed
super(GMM, self).__init__(endog, exog, missing=missing,
instrument=instrument)
# self.endog = endog
# self.exog = exog
# self.instrument = instrument
self.nobs = endog.shape[0]
if k_moms is not None:
self.nmoms = k_moms
elif instrument is not None:
self.nmoms = instrument.shape[1]
else:
self.nmoms = np.nan
if k_params is not None:
self.k_params = k_params
elif instrument is not None:
self.k_params = exog.shape[1]
else:
self.k_params = np.nan
self.__dict__.update(kwds)
self.epsilon_iter = 1e-6
def _check_inputs(self, instrument, endog):
if instrument is not None:
offset = np.asarray(instrument)
if offset.shape[0]!= endog.shape[0]:
raise ValueError("instrument is not the same length as endog")
return instrument
def _fix_param_names(self, params, param_names=None):
# TODO: this is a temporary fix, need
xnames = self.data.xnames
if param_names is not None:
if len(params) == len(param_names):
self.data.xnames = param_names
else:
raise ValueError('param_names has the wrong length')
else:
if len(params) < len(xnames):
# cut in front for poisson multiplicative
self.data.xnames = xnames[-len(params):]
elif len(params) > len(xnames):
# use generic names
self.data.xnames = ['p%2d' % i for i in range(len(params))]
def set_param_names(self, param_names, k_params=None):
"""set the parameter names in the model
Parameters
----------
param_names : list of strings
param_names should have the same length as the number of params
k_params : None or int
If k_params is None, then the k_params attribute is used, unless
it is None.
If k_params is not None, then it will also set the k_params
attribute.
"""
if k_params is not None:
self.k_params = k_params
else:
k_params = self.k_params
if k_params == len(param_names):
self.data.xnames = param_names
else:
raise ValueError('param_names has the wrong length')
def fit(self, start_params=None, maxiter=10, inv_weights=None,
weights_method='cov', wargs=(),
has_optimal_weights=True,
optim_method='bfgs', optim_args=None):
'''
Estimate parameters using GMM and return GMMResults
TODO: weight and covariance arguments still need to be made consistent
with similar options in other models,
see RegressionResult.get_robustcov_results
Parameters
----------
start_params : array (optional)
starting value for parameters in the minimization. If None then
fitstart method is called for the starting values.
maxiter : int or 'cue'
Number of iterations in iterated GMM. The onestep estimate can be
obtained with maxiter=0 or 1. If maxiter is large, then the
iteration will stop either at maxiter or on convergence of the
parameters (TODO: no options for convergence criteria yet.)
If `maxiter == 'cue'`, then the continuously updated GMM is
calculated which updates the weight matrix during the minimization
of the GMM objective function. The CUE estimation uses the onestep
parameters as starting values.
inv_weights : None or ndarray
inverse of the starting weighting matrix. If inv_weights are not
given then the method `start_weights` is used which depends on
the subclass, for IV subclasses `inv_weights = z'z` where `z` are
the instruments, otherwise an identity matrix is used.
weights_method : string, defines method for robust
Options here are similar to :mod:`statsmodels.stats.robust_covariance`
default is heteroscedasticity consistent, HC0
currently available methods are
- `cov` : HC0, optionally with degrees of freedom correction
- `hac` :
- `iid` : untested, only for Z*u case, IV cases with u as error indep of Z
- `ac` : not available yet
- `cluster` : not connected yet
- others from robust_covariance
`wargs` : tuple or dict,
required and optional arguments for weights_method
- `centered` : bool,
indicates whether moments are centered for the calculation of the weights
and covariance matrix, applies to all weight_methods
- `ddof` : int
degrees of freedom correction, applies currently only to `cov`
- `maxlag` : int
number of lags to include in HAC calculation, applies only to `hac`
- others not yet, e.g. groups for cluster robust
has_optimal_weights: If true, then the calculation of the covariance
matrix assumes that we have optimal GMM with :math:`W = S^{-1}`.
Default is True.
TODO: do we want to have a different default after `onestep`?
optim_method : string, default is 'bfgs'
numerical optimization method. Currently not all optimizers that
are available in LikelihoodModels are connected.
optim_args : dict
keyword arguments for the numerical optimizer.
Returns
-------
results : instance of GMMResults
this is also attached as attribute results
Notes
-----
Warning: One-step estimation, `maxiter` either 0 or 1, still has
problems (at least compared to Stata's gmm).
By default it uses a heteroscedasticity robust covariance matrix, but
uses the assumption that the weight matrix is optimal.
See options for cov_params in the results instance.
The same options as for weight matrix also apply to the calculation of
the estimate of the covariance matrix of the parameter estimates.
'''
# TODO: add check for correct wargs keys
# currently a misspelled key is not detected,
# because I'm still adding options
# TODO: check repeated calls to fit with different options
# arguments are dictionaries, i.e. mutable
# unit test if anything is stale or spilled over.
#bug: where does start come from???
start = start_params # alias for renaming
if start is None:
start = self.fitstart() #TODO: temporary hack
if inv_weights is None:
    pass  # no default needed here; start_weights / fititer handle None below
if optim_args is None:
optim_args = {}
if 'disp' not in optim_args:
optim_args['disp'] = 1
if maxiter == 0 or maxiter == 'cue':
if inv_weights is not None:
weights = np.linalg.pinv(inv_weights)
else:
# let start_weights handle the inv=False for maxiter=0
weights = self.start_weights(inv=False)
params = self.fitgmm(start, weights=weights,
optim_method=optim_method, optim_args=optim_args)
weights_ = weights # temporary alias used in jval
else:
params, weights = self.fititer(start,
maxiter=maxiter,
start_invweights=inv_weights,
weights_method=weights_method,
wargs=wargs,
optim_method=optim_method,
optim_args=optim_args)
# TODO weights returned by fititer is inv_weights - not true anymore
# weights_ currently not necessary and not used anymore
weights_ = np.linalg.pinv(weights)
if maxiter == 'cue':
#we have params from maxiter= 0 as starting value
# TODO: need to give weights options to gmmobjective_cu
params = self.fitgmm_cu(params,
optim_method=optim_method,
optim_args=optim_args)
# weights is stored as attribute
weights = self._weights_cu
#TODO: use Bunch instead?
options_other = {'weights_method':weights_method,
'has_optimal_weights':has_optimal_weights,
'optim_method':optim_method}
# check that we have the right number of xnames
self._fix_param_names(params, param_names=None)
results = results_class_dict[self.results_class](
model = self,
params = params,
weights = weights,
wargs = wargs,
options_other = options_other,
optim_args = optim_args)
self.results = results # FIXME: remove, still keeping it temporarily
return results
def fitgmm(self, start, weights=None, optim_method='bfgs', optim_args=None):
'''estimate parameters using GMM
Parameters
----------
start : array_like
starting values for minimization
weights : array
weighting matrix for moment conditions. If weights is None, then
the identity matrix is used
Returns
-------
paramest : array
estimated parameters
Notes
-----
todo: add fixed parameter option, not here???
uses scipy.optimize.fmin
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
# TODO: should start_weights only be in `fit`
if weights is None:
weights = self.start_weights(inv=False)
if optim_args is None:
optim_args = {}
if optim_method == 'nm':
optimizer = optimize.fmin
elif optim_method == 'bfgs':
optimizer = optimize.fmin_bfgs
# TODO: add score
optim_args['fprime'] = self.score #lambda params: self.score(params, weights)
elif optim_method == 'ncg':
optimizer = optimize.fmin_ncg
optim_args['fprime'] = self.score
elif optim_method == 'cg':
optimizer = optimize.fmin_cg
optim_args['fprime'] = self.score
elif optim_method == 'fmin_l_bfgs_b':
optimizer = optimize.fmin_l_bfgs_b
optim_args['fprime'] = self.score
elif optim_method == 'powell':
optimizer = optimize.fmin_powell
elif optim_method =='slsqp':
optimizer = optimize.fmin_slsqp
else:
raise ValueError('optimizer method not available')
if DEBUG:
print(np.linalg.det(weights))
#TODO: add other optimization options and results
return optimizer(self.gmmobjective, start, args=(weights,),
**optim_args)
def fitgmm_cu(self, start, optim_method='bfgs', optim_args=None):
'''estimate parameters using continuously updating GMM
Parameters
----------
start : array_like
starting values for minimization
Returns
-------
paramest : array
estimated parameters
Notes
-----
todo: add fixed parameter option, not here???
uses scipy.optimize.fmin
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
if optim_args is None:
optim_args = {}
if optim_method == 'nm':
optimizer = optimize.fmin
elif optim_method == 'bfgs':
optimizer = optimize.fmin_bfgs
optim_args['fprime'] = self.score_cu
elif optim_method == 'ncg':
optimizer = optimize.fmin_ncg
else:
raise ValueError('optimizer method not available')
#TODO: add other optimization options and results
return optimizer(self.gmmobjective_cu, start, args=(), **optim_args)
def start_weights(self, inv=True):
"""Create identity matrix for starting weights"""
return np.eye(self.nmoms)
def gmmobjective(self, params, weights):
'''
objective function for GMM minimization
Parameters
----------
params : array
parameter values at which objective is evaluated
weights : array
weighting matrix
Returns
-------
jval : float
value of objective function
'''
moms = self.momcond_mean(params)
return np.dot(np.dot(moms, weights), moms)
#moms = self.momcond(params)
#return np.dot(np.dot(moms.mean(0),weights), moms.mean(0))
def gmmobjective_cu(self, params, weights_method='cov',
wargs=()):
'''
objective function for continuously updating GMM minimization
Parameters
----------
params : array
parameter values at which objective is evaluated
Returns
-------
jval : float
value of objective function
'''
moms = self.momcond(params)
inv_weights = self.calc_weightmatrix(moms, weights_method=weights_method,
wargs=wargs)
weights = np.linalg.pinv(inv_weights)
self._weights_cu = weights # store if we need it later
return np.dot(np.dot(moms.mean(0), weights), moms.mean(0))
def fititer(self, start, maxiter=2, start_invweights=None,
weights_method='cov', wargs=(), optim_method='bfgs',
optim_args=None):
'''iterative estimation with updating of optimal weighting matrix
stopping criteria are maxiter or change in parameter estimate less
than self.epsilon_iter, with default 1e-6.
Parameters
----------
start : array
starting value for parameters
maxiter : int
maximum number of iterations
start_invweights : array (nmoms, nmoms)
initial inverse weighting matrix; if None, then the identity matrix
is used
weights_method : {'cov',...}
method to use to estimate the optimal weighting matrix,
see calc_weightmatrix for details
Returns
-------
params : array
estimated parameters
weights : array
optimal weighting matrix calculated with final parameter
estimates
Notes
-----
'''
self.history = []
momcond = self.momcond
if start_invweights is None:
w = self.start_weights(inv=True)
else:
w = start_invweights
#call fitgmm function
#args = (self.endog, self.exog, self.instrument)
#args is not used in the method version
winv_new = w
for it in range(maxiter):
winv = winv_new
w = np.linalg.pinv(winv)
#this is still calling function not method
## resgmm = fitgmm(momcond, (), start, weights=winv, fixed=None,
## weightsoptimal=False)
resgmm = self.fitgmm(start, weights=w, optim_method=optim_method,
optim_args=optim_args)
moms = momcond(resgmm)
# the following is S = cov_moments
winv_new = self.calc_weightmatrix(moms,
weights_method=weights_method,
wargs=wargs, params=resgmm)
if it > 2 and maxabs(resgmm - start) < self.epsilon_iter:
#check rule for early stopping
# TODO: set has_optimal_weights = True
break
start = resgmm
return resgmm, w
def calc_weightmatrix(self, moms, weights_method='cov', wargs=(),
params=None):
'''
calculate omega or the weighting matrix
Parameters
----------
moms : array
moment conditions (nobs x nmoms) for all observations evaluated at
a parameter value
weights_method : string 'cov'
If method='cov' is cov then the matrix is calculated as simple
covariance of the moment conditions.
see fit method for available options for the weight and covariance
matrix
wargs : tuple or dict
parameters that are required by some kernel methods to
estimate the long-run covariance. Not used yet.
Returns
-------
w : array (nmoms, nmoms)
estimate for the weighting matrix or covariance of the moment
condition
Notes
-----
currently a constant cutoff window is used
TODO: implement long-run cov estimators, kernel-based
Newey-West
Andrews
Andrews-Moy????
References
----------
Greene
Hansen, Bruce
'''
nobs, k_moms = moms.shape
# TODO: wargs are tuple or dict?
if DEBUG:
print(' momcov wargs', wargs)
centered = not ('centered' in wargs and not wargs['centered'])
if not centered:
# caller doesn't want centered moment conditions
moms_ = moms
else:
moms_ = moms - moms.mean()
# TODO: store this outside to avoid doing this inside optimization loop
# TODO: subclasses need to be able to add weights_methods, and remove
# IVGMM can have homoscedastic (OLS),
# some options won't make sense in some cases
# possible add all here and allow subclasses to define a list
# TODO: should other weights_methods also have `ddof`
if weights_method == 'cov':
w = np.dot(moms_.T, moms_)
if 'ddof' in wargs:
# caller requests degrees of freedom correction
if wargs['ddof'] == 'k_params':
w /= (nobs - self.k_params)
else:
if DEBUG:
print(' momcov ddof', wargs['ddof'])
w /= (nobs - wargs['ddof'])
else:
# default: divide by nobs
w /= nobs
elif weights_method == 'flatkernel':
#uniform cut-off window
# This was a trial version, can use HAC with flatkernel
if 'maxlag' not in wargs:
raise ValueError('flatkernel requires maxlag')
maxlag = wargs['maxlag']
h = np.ones(maxlag + 1)
w = np.dot(moms_.T, moms_)/nobs
for i in range(1,maxlag+1):
w += (h[i] * np.dot(moms_[i:].T, moms_[:-i]) / (nobs-i))
elif weights_method == 'hac':
maxlag = wargs['maxlag']
if 'kernel' in wargs:
weights_func = wargs['kernel']
else:
weights_func = smcov.weights_bartlett
wargs['kernel'] = weights_func
w = smcov.S_hac_simple(moms_, nlags=maxlag,
weights_func=weights_func)
w /= nobs #(nobs - self.k_params)
elif weights_method == 'iid':
# only when we have instruments and residual mom = Z * u
# TODO: problem we don't have params in argument
# I cannot keep everything in here w/o params as argument
u = self.get_error(params)
if centered:
# Note: I'm not centering instruments,
# shouldn't we always center u? Ok, with centered as default
u -= u.mean(0) #demean inplace, we don't need original u
instrument = self.instrument
w = np.dot(instrument.T, instrument).dot(np.dot(u.T, u)) / nobs
if 'ddof' in wargs:
# caller requests degrees of freedom correction
if wargs['ddof'] == 'k_params':
w /= (nobs - self.k_params)
else:
# assume ddof is a number
if DEBUG:
print(' momcov ddof', wargs['ddof'])
w /= (nobs - wargs['ddof'])
else:
# default: divide by nobs
w /= nobs
else:
raise ValueError('weight method not available')
return w
def momcond_mean(self, params):
'''
mean of moment conditions,
'''
momcond = self.momcond(params)
self.nobs_moms, self.k_moms = momcond.shape
return momcond.mean(0)
def gradient_momcond(self, params, epsilon=1e-4, centered=True):
'''gradient of moment conditions
Parameters
----------
params : ndarray
parameter at which the moment conditions are evaluated
epsilon : float
stepsize for finite difference calculation
centered : bool
This refers to the finite difference calculation. If `centered`
is true, then the centered finite difference calculation is
used. Otherwise the one-sided forward differences are used.
TODO: looks like not used yet
missing argument `weights`
'''
momcond = self.momcond_mean
# TODO: approx_fprime has centered keyword
if centered:
gradmoms = (approx_fprime(params, momcond, epsilon=epsilon) +
approx_fprime(params, momcond, epsilon=-epsilon))/2
else:
gradmoms = approx_fprime(params, momcond, epsilon=epsilon)
return gradmoms
def score(self, params, weights, epsilon=None, centered=True):
"""Score"""
deriv = approx_fprime(params, self.gmmobjective, args=(weights,),
centered=centered, epsilon=epsilon)
return deriv
def score_cu(self, params, epsilon=None, centered=True):
"""Score cu"""
deriv = approx_fprime(params, self.gmmobjective_cu, args=(),
centered=centered, epsilon=epsilon)
return deriv
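# --- Illustrative sketch (added for exposition; not part of the original source):
# --- in the minimal subclass only `momcond` has to be defined; starting values
# --- are then supplied explicitly through `start_params` in `fit`.
class _ExampleIVMomentsGMM(GMM):
    """Hand-written linear IV moment conditions E[z * (y - x beta)] = 0."""

    def momcond(self, params):
        # one row of moment conditions per observation: z_i * u_i
        u = self.endog - np.dot(self.exog, params)
        return self.instrument * u[:, None]

# hypothetical usage, with arrays y, x, z of conforming shapes:
#   res = _ExampleIVMomentsGMM(y, x, z).fit(start_params=np.zeros(x.shape[1]),
#                                           maxiter=2)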
# TODO: wrong superclass, I want tvalues,... right now
class GMMResults(LikelihoodModelResults):
'''just a storage class right now'''
use_t = False
def __init__(self, *args, **kwds):
self.__dict__.update(kwds)
self.nobs = self.model.nobs
self.df_resid = np.inf
self.cov_params_default = self._cov_params()
@cache_readonly
def q(self):
"""Objective function at params"""
return self.model.gmmobjective(self.params, self.weights)
@cache_readonly
def jval(self):
"""nobs_moms attached by momcond_mean"""
return self.q * self.model.nobs_moms
def _cov_params(self, **kwds):
#TODO add options???)
# this should use by default whatever options have been specified in
# fit
# TODO: don't do this when we want to change options
# if hasattr(self, '_cov_params'):
# #replace with decorator later
# return self._cov_params
# set defaults based on fit arguments
if 'wargs' not in kwds:
# Note: we don't check the keys in wargs, use either all or nothing
kwds['wargs'] = self.wargs
if 'weights_method' not in kwds:
kwds['weights_method'] = self.options_other['weights_method']
if 'has_optimal_weights' not in kwds:
kwds['has_optimal_weights'] = self.options_other['has_optimal_weights']
gradmoms = self.model.gradient_momcond(self.params)
moms = self.model.momcond(self.params)
covparams = self.calc_cov_params(moms, gradmoms, **kwds)
return covparams
def calc_cov_params(self, moms, gradmoms, weights=None, use_weights=False,
has_optimal_weights=True,
weights_method='cov', wargs=()):
'''calculate covariance of parameter estimates
not all options tried out yet
If weights matrix is given, then the formula use to calculate cov_params
depends on whether has_optimal_weights is true.
If no weights are given, then the weight matrix is calculated with
the given method, and has_optimal_weights is assumed to be true.
(API Note: The latter assumption could be changed if we allow for
has_optimal_weights=None.)
'''
nobs = moms.shape[0]
if weights is None:
#omegahat = self.model.calc_weightmatrix(moms, method=method, wargs=wargs)
#has_optimal_weights = True
#add other options, Barzen,... longrun var estimators
# TODO: this might still be inv_weights after fititer
weights = self.weights
else:
pass
#omegahat = weights #2 different names used,
#TODO: this is wrong, I need an estimate for omega
if use_weights:
omegahat = weights
else:
omegahat = self.model.calc_weightmatrix(
moms,
weights_method=weights_method,
wargs=wargs,
params=self.params)
if has_optimal_weights: #has_optimal_weights:
# TOD0 make has_optimal_weights depend on convergence or iter >2
cov = np.linalg.inv(np.dot(gradmoms.T,
np.dot(np.linalg.inv(omegahat), gradmoms)))
else:
gw = np.dot(gradmoms.T, weights)
gwginv = np.linalg.inv(np.dot(gw, gradmoms))
cov = np.dot(np.dot(gwginv, np.dot(np.dot(gw, omegahat), gw.T)), gwginv)
#cov /= nobs
return cov/nobs
@property
def bse_(self):
'''standard error of the parameter estimates
'''
return self.get_bse()
def get_bse(self, **kwds):
'''standard error of the parameter estimates with options
Parameters
----------
kwds : optional keywords
options for calculating cov_params
Returns
-------
bse : ndarray
estimated standard error of parameter estimates
'''
return np.sqrt(np.diag(self.cov_params(**kwds)))
def jtest(self):
'''overidentification test
I guess this is missing a division by nobs,
what's the normalization in jval?
'''
jstat = self.jval
nparams = self.params.size #self.nparams
df = self.model.nmoms - nparams
return jstat, stats.chi2.sf(jstat, df), df
def compare_j(self, other):
'''overidentification test for comparing two nested gmm estimates
This assumes that some moment restrictions have been dropped in one
of the GMM estimates relative to the other.
Not tested yet
We are comparing two separately estimated models, that use different
weighting matrices. It is not guaranteed that the resulting
difference is positive.
TODO: Check in which cases Stata programs use the same weights
'''
jstat1 = self.jval
k_moms1 = self.model.nmoms
jstat2 = other.jval
k_moms2 = other.model.nmoms
jdiff = jstat1 - jstat2
df = k_moms1 - k_moms2
if df < 0:
# possible nested in other way, TODO allow this or not
# flip sign instead of absolute
df = - df
jdiff = - jdiff
return jdiff, stats.chi2.sf(jdiff, df), df
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` where ## indexes the regressors
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary
results
"""
#TODO: add a summary text for options that have been used
jvalue, jpvalue, jdf = self.jtest()
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['GMM']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
#('Df Residuals:', None), #[self.df_resid]), #TODO: spelling
#('Df Model:', None), #[self.df_model])
]
top_right = [#('R-squared:', ["%#8.3f" % self.rsquared]),
#('Adj. R-squared:', ["%#8.3f" % self.rsquared_adj]),
('Hansen J:', ["%#8.4g" % jvalue] ),
('Prob (Hansen J):', ["%#6.3g" % jpvalue]),
#('F-statistic:', ["%#8.4g" % self.fvalue] ),
#('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
#('Log-Likelihood:', None), #["%#6.4g" % self.llf]),
#('AIC:', ["%#8.4g" % self.aic]),
#('BIC:', ["%#8.4g" % self.bic])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
return smry
class IVGMM(GMM):
'''
Basic class for instrumental variables estimation using GMM
A linear function for the conditional mean is defined as default but the
methods should be overwritten by subclasses, currently `LinearIVGMM` and
`NonlinearIVGMM` are implemented as subclasses.
See Also
--------
LinearIVGMM
NonlinearIVGMM
'''
results_class = 'IVGMMResults'
def fitstart(self):
"""Create array of zeros"""
return np.zeros(self.exog.shape[1])
def start_weights(self, inv=True):
"""Starting weights"""
zz = np.dot(self.instrument.T, self.instrument)
nobs = self.instrument.shape[0]
if inv:
return zz / nobs
else:
return np.linalg.pinv(zz / nobs)
def get_error(self, params):
"""Get error at params"""
return self.endog - self.predict(params)
def predict(self, params, exog=None):
"""Get prediction at params"""
if exog is None:
exog = self.exog
return np.dot(exog, params)
def momcond(self, params):
"""Error times instrument"""
instrument = self.instrument
return instrument * self.get_error(params)[:, None]
class LinearIVGMM(IVGMM):
"""class for linear instrumental variables models estimated with GMM
Uses closed form expression instead of nonlinear optimizers for each step
of the iterative GMM.
The model is assumed to have the following moment condition
E( z * (y - x beta)) = 0
Where `y` is the dependent endogenous variable, `x` are the explanatory
variables and `z` are the instruments. Variables in `x` that are exogenous
need also be included in `z`.
Notation Warning: our name `exog` stands for the explanatory variables,
and includes both exogenous and explanatory variables that are endogenous,
i.e. included endogenous variables
Parameters
----------
endog : array_like
dependent endogenous variable
exog : array_like
explanatory, right hand side variables, including explanatory variables
that are endogenous
instrument : array_like
Instrumental variables, variables that are exogenous to the error
in the linear model containing both included and excluded exogenous
variables
"""
def fitgmm(self, start, weights=None, optim_method=None, **kwds):
'''estimate parameters using GMM for linear model
Uses closed form expression instead of nonlinear optimizers
Parameters
----------
start : not used
starting values for minimization, not used, only for consistency
of method signature
weights : array
weighting matrix for moment conditions. If weights is None, then
the identity matrix is used
optim_method : not used,
optimization method, not used, only for consistency of method
signature
**kwds : keyword arguments
not used, will be silently ignored (for compatibility with generic)
Returns
-------
paramest : array
estimated parameters
'''
## if not fixed is None: #fixed not defined in this version
## raise NotImplementedError
# TODO: should start_weights only be in `fit`
if weights is None:
weights = self.start_weights(inv=False)
y, x, z = self.endog, self.exog, self.instrument
zTx = np.dot(z.T, x)
zTy = np.dot(z.T, y)
# normal equation, solved with pinv
part0 = zTx.T.dot(weights)
part1 = part0.dot(zTx)
part2 = part0.dot(zTy)
params = np.linalg.pinv(part1).dot(part2)
return params
def predict(self, params, exog=None):
if exog is None:
exog = self.exog
return np.dot(exog, params)
def gradient_momcond(self, params, **kwds):
# **kwds for compatibility not used
x, z = self.exog, self.instrument
gradmoms = -np.dot(z.T, x) / self.nobs
return gradmoms
def score(self, params, weights, **kwds):
# **kwds for compatibility, not used
# Note: I could use general formula with gradient_momcond instead
x, z = self.exog, self.instrument
nobs = z.shape[0]
u = self.get_error(params)
score = -2 * np.dot(x.T, z).dot(weights.dot(np.dot(z.T, u)))
score /= nobs * nobs
return score
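# --- Illustrative usage sketch (added for exposition; not part of the original
# --- source). An over-identified linear IV model estimated by two-step GMM;
# --- the data are simulated and all names below are made up.
def _example_linear_ivgmm():
    import numpy as np
    rng = np.random.default_rng(0)
    n = 500
    instr = rng.normal(size=(n, 2))                       # two excluded instruments
    u = rng.normal(size=n)
    x_endog = instr @ np.array([0.7, 0.3]) + 0.5 * u + rng.normal(size=n)
    y = 1.0 + 2.0 * x_endog + u
    exog = np.column_stack([np.ones(n), x_endog])
    instrument = np.column_stack([np.ones(n), instr])     # 3 moments for 2 parameters
    res = LinearIVGMM(y, exog, instrument).fit(maxiter=2)
    return res.params, res.jtest()                        # overidentification test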
class NonlinearIVGMM(IVGMM):
"""
Class for non-linear instrumental variables estimation using GMM
The model is assumed to have the following moment condition
E[ z * (y - f(X, beta)) ] = 0
Where `y` is the dependent endogenous variable, `x` are the explanatory
variables and `z` are the instruments. Variables in `x` that are exogenous
need also be included in z. `f` is a nonlinear function.
Notation Warning: our name `exog` stands for the explanatory variables,
and includes both exogenous and explanatory variables that are endogenous,
i.e. included endogenous variables
Parameters
----------
endog : array_like
dependent endogenous variable
exog : array_like
explanatory, right hand side variables, including explanatory variables
that are endogenous.
instrument : array_like
Instrumental variables, variables that are exogenous to the error
in the linear model containing both included and excluded exogenous
variables
func : callable
function for the mean or conditional expectation of the endogenous
variable. The function will be called with parameters and the array of
explanatory, right hand side variables, `func(params, exog)`
Notes
-----
This class uses numerical differences to obtain the derivative of the
objective function. If the jacobian of the conditional mean function, `func`
is available, then it can be used by subclassing this class and defining
a method `jac_func`.
TODO: check required signature of jac_error and jac_func
"""
# This should be reversed:
# NonlinearIVGMM is IVGMM and need LinearIVGMM as special case (fit, predict)
def fitstart(self):
#might not make sense for more general functions
return np.zeros(self.exog.shape[1])
def __init__(self, endog, exog, instrument, func, **kwds):
self.func = func
super(NonlinearIVGMM, self).__init__(endog, exog, instrument, **kwds)
def predict(self, params, exog=None):
if exog is None:
exog = self.exog
return self.func(params, exog)
#---------- the following a semi-general versions,
# TODO: move to higher class after testing
def jac_func(self, params, weights, args=None, centered=True, epsilon=None):
# TODO: Why are there weights in the signature - copy-paste error?
deriv = approx_fprime(params, self.func, args=(self.exog,),
centered=centered, epsilon=epsilon)
return deriv
def jac_error(self, params, weights, args=None, centered=True,
epsilon=None):
jac_func = self.jac_func(params, weights, args=None, centered=True,
epsilon=None)
return -jac_func
def score(self, params, weights, **kwds):
# **kwds for compatibility not used
# Note: I could use general formula with gradient_momcond instead
z = self.instrument
nobs = z.shape[0]
jac_u = self.jac_error(params, weights, args=None, epsilon=None,
centered=True)
x = -jac_u # alias, plays the same role as X in linear model
u = self.get_error(params)
score = -2 * np.dot(np.dot(x.T, z), weights).dot(np.dot(z.T, u))
score /= nobs * nobs
return score
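# --- Illustrative usage sketch (added for exposition; not part of the original
# --- source): an exponential conditional mean, func(params, exog) = exp(exog @ params).
def _example_nonlinear_ivgmm():
    import numpy as np
    rng = np.random.default_rng(0)
    n = 500
    z = np.column_stack([np.ones(n), rng.normal(size=(n, 2))])   # instruments
    x = np.column_stack([np.ones(n), z[:, 1] + 0.1 * rng.normal(size=n)])
    y = np.exp(x @ np.array([0.5, 0.3])) + 0.1 * rng.normal(size=n)

    def expmean(params, exog):
        return np.exp(np.dot(exog, params))

    mod = NonlinearIVGMM(y, x, z, expmean)
    res = mod.fit(start_params=np.array([0.1, 0.1]), maxiter=2,
                  optim_args={'disp': 0})
    return res.params                                     # roughly [0.5, 0.3]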
class IVGMMResults(GMMResults):
"""Results class of IVGMM"""
# this assumes that we have an additive error model `(y - f(x, params))`
@cache_readonly
def fittedvalues(self):
"""Fitted values"""
return self.model.predict(self.params)
@cache_readonly
def resid(self):
"""Residuals"""
return self.model.endog - self.fittedvalues
@cache_readonly
def ssr(self):
"""Sum of square errors"""
return (self.resid * self.resid).sum(0)
def spec_hausman(params_e, params_i, cov_params_e, cov_params_i, dof=None):
'''Hausman's specification test
Parameters
----------
params_e : array
efficient and consistent under Null hypothesis,
inconsistent under alternative hypothesis
params_i: array
consistent under Null hypothesis,
consistent under alternative hypothesis
cov_params_e : array, 2d
covariance matrix of parameter estimates for params_e
cov_params_i : array, 2d
covariance matrix of parameter estimates for params_i
Example: with instrumental variables, the OLS estimator is `e` and the IV estimator is `i`
Notes
-----
Todos,Issues
- check dof calculations and verify for linear case
- check one-sided hypothesis
References
----------
Greene section 5.5 p.82/83
'''
params_diff = (params_i - params_e)
cov_diff = cov_params_i - cov_params_e
#TODO: the following is very inefficient, solves problem (svd) twice
#use linalg.lstsq or svd directly
#cov_diff will very often be in-definite (singular)
if not dof:
dof = np.linalg.matrix_rank(cov_diff)
cov_diffpinv = np.linalg.pinv(cov_diff)
H = np.dot(params_diff, np.dot(cov_diffpinv, params_diff))
pval = stats.chi2.sf(H, dof)
evals = np.linalg.eigvalsh(cov_diff)
return H, pval, dof, evals
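# --- Illustrative sketch (added for exposition; not part of the original source):
# --- comparing OLS (efficient under exogeneity) with an IV estimate; y, x, z are
# --- hypothetical data arrays with x possibly endogenous and z the instruments.
def _example_spec_hausman(y, x, z):
    res_ols = OLS(y, x).fit()
    res_iv = IV2SLS(y, x, z).fit()
    return spec_hausman(res_ols.params, res_iv.params,
                        res_ols.cov_params(), res_iv.cov_params())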
###########
class DistQuantilesGMM(GMM):
'''
Estimate distribution parameters by GMM based on matching quantiles
Currently mainly to try out different requirements for GMM when we cannot
calculate the optimal weighting matrix.
'''
def __init__(self, endog, exog, instrument, **kwds):
#TODO: something wrong with super
super(DistQuantilesGMM, self).__init__(endog, exog, instrument)
#self.func = func
self.epsilon_iter = 1e-5
self.distfn = kwds['distfn']
#done by super doesn't work yet
#TypeError: super does not take keyword arguments
self.endog = endog
#make this optional for fit
if 'pquant' not in kwds:
self.pquant = pquant = np.array([0.01, 0.05, 0.1, 0.4, 0.6, 0.9, 0.95, 0.99])
else:
self.pquant = pquant = kwds['pquant']
#TODO: vectorize this: use edf
self.xquant = np.array([stats.scoreatpercentile(endog, p) for p
in pquant*100])
self.nmoms = len(self.pquant)
#TODO: copied from GMM, make super work
self.endog = endog
self.exog = exog
self.instrument = instrument
self.results = GMMResults(model=self)
#self.__dict__.update(kwds)
self.epsilon_iter = 1e-6
def fitstart(self):
#todo: replace with or add call to distfn._fitstart
# added but not used during testing, avoid Travis
distfn = self.distfn
if hasattr(distfn, '_fitstart'):
start = distfn._fitstart(self.endog)
else:
start = [1]*distfn.numargs + [0.,1.]
return np.asarray(start)
def momcond(self, params): #drop distfn as argument
#, mom2, quantile=None, shape=None
'''moment conditions for estimating distribution parameters by matching
quantiles; this defines as many moment conditions as there are quantiles.
Returns
-------
difference : array
difference between theoretical and empirical quantiles
Notes
-----
This can be used for method of moments or for generalized method of
moments.
'''
#this check looks redundant/unused now
if len(params) == 2:
loc, scale = params
elif len(params) == 3:
shape, loc, scale = params
else:
#raise NotImplementedError
pass #see whether this might work, seems to work for beta with 2 shape args
#mom2diff = np.array(distfn.stats(*params)) - mom2
#if not quantile is None:
pq, xq = self.pquant, self.xquant
#ppfdiff = distfn.ppf(pq, alpha)
cdfdiff = self.distfn.cdf(xq, *params) - pq
#return np.concatenate([mom2diff, cdfdiff[:1]])
return np.atleast_2d(cdfdiff)
def fitonce(self, start=None, weights=None, has_optimal_weights=False):
'''fit without estimating an optimal weighting matrix and return results
This is a convenience function that calls fitgmm and cov_params with
a given weight matrix or the identity weight matrix.
This is useful if the optimal weight matrix is known (or is analytically
given) or if an optimal weight matrix cannot be calculated.
(Developer Notes: this function could go into GMM, but is needed in this
class, at least at the moment.)
Parameters
----------
start : ndarray or None
Starting values for the parameters, passed to `fitgmm`.
weights : ndarray or None
Weighting matrix for the moment conditions. If None, the identity
matrix is used.
has_optimal_weights : bool
Whether `weights` is the optimal weighting matrix, passed through
to `cov_params`.
Returns
-------
results : GMMResult instance
result instance with params and _cov_params attached
See Also
--------
fitgmm
cov_params
'''
if weights is None:
weights = np.eye(self.nmoms)
params = self.fitgmm(start=start)
# TODO: rewrite this old hack, should use fitgmm or fit maxiter=0
self.results.params = params #required before call to self.cov_params
self.results.wargs = {} #required before call to self.cov_params
self.results.options_other = {'weights_method':'cov'}
# TODO: which weights_method? There shouldn't be any needed?
_cov_params = self.results.cov_params(weights=weights,
has_optimal_weights=has_optimal_weights)
self.results.weights = weights
self.results.jval = self.gmmobjective(params, weights)
self.results.options_other.update({'has_optimal_weights':has_optimal_weights})
return self.results
results_class_dict = {'GMMResults': GMMResults,
'IVGMMResults': IVGMMResults,
'DistQuantilesGMM': GMMResults} #TODO: should be a default |
|
statsmodels__statsmodels | imputation.rst | Description | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/imputation.rst | [
"statsmodels__statsmodels/statsmodels/imputation/mice.py"
] | Multiple Imputation with Chained Equations
The MICE module allows most Statsmodels models to be fit to a dataset
with missing values on the independent and/or dependent variables, and
provides rigorous standard errors for the fitted parameters. The basic
idea is to treat each variable with missing values as the dependent
variable in a regression, with some or all of the remaining variables as
its predictors. The MICE procedure cycles through these models, fitting
each in turn, then uses a procedure called "predictive mean matching"
(PMM) to generate random draws from the predictive distributions
determined by the fitted models. These random draws become the imputed
values for one imputed data set.
By default, each variable with missing values is modeled using a
linear regression with main effects for all other variables in the data
set. Note that even when the imputation model is linear, the PMM
procedure preserves the domain of each variable. Thus, for example, if
all observed values for a given variable are positive, all imputed
values for the variable will always be positive. The user also has the
option to specify which model is used to produce imputed values for each
variable. | """
Overview
--------
This module implements the Multiple Imputation through Chained
Equations (MICE) approach to handling missing data in statistical data
analyses. The approach has the following steps:
0. Impute each missing value with the mean of the observed values of
the same variable.
1. For each variable in the data set with missing values (termed the
'focus variable'), do the following:
1a. Fit an 'imputation model', which is a regression model for the
focus variable, regressed on the observed and (current) imputed values
of some or all of the other variables.
1b. Impute the missing values for the focus variable. Currently this
imputation must use the 'predictive mean matching' (pmm) procedure.
2. Once all variables have been imputed, fit the 'analysis model' to
the data set.
3. Repeat steps 1-2 multiple times and combine the results using a
'combining rule' to produce point estimates of all parameters in the
analysis model and standard errors for them.
The imputations for each variable are based on an imputation model
that is specified via a model class and a formula for the regression
relationship. The default model is OLS, with a formula specifying
main effects for all other variables.
The MICE procedure can be used in one of two ways:
* If the goal is only to produce imputed data sets, the MICEData class
can be used to wrap a data frame, providing facilities for doing the
imputation. Summary plots are available for assessing the performance
of the imputation.
* If the imputed data sets are to be used to fit an additional
'analysis model', a MICE instance can be used. After specifying the
MICE instance and running it, the results are combined using the
`combine` method. Results and various summary plots are then
available.
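A minimal end-to-end sketch (illustrative only; `data` is assumed to be a
pandas DataFrame with missing values and columns y, x1, x2 and x3):
>>> import statsmodels.api as sm
>>> from statsmodels.imputation import mice
>>> imp = mice.MICEData(data)
>>> mi = mice.MICE('y ~ x1 + x2 + x3', sm.OLS, imp)
>>> results = mi.fit(n_burnin=10, n_imputations=10)
>>> print(results.summary())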
Terminology
-----------
The primary goal of the analysis is usually to fit and perform
inference using an 'analysis model'. If an analysis model is not
specified, then imputed datasets are produced for later use.
The MICE procedure involves a family of imputation models. There is
one imputation model for each variable with missing values. An
imputation model may be conditioned on all or a subset of the
remaining variables, using main effects, transformations,
interactions, etc. as desired.
A 'perturbation method' is a method for setting the parameter estimate
in an imputation model. The 'gaussian' perturbation method first fits
the model (usually using maximum likelihood, but it could use any
statsmodels fit procedure), then sets the parameter vector equal to a
draw from the Gaussian approximation to the sampling distribution for
the fit. The 'bootstrap' perturbation method sets the parameter
vector equal to a fitted parameter vector obtained when fitting the
conditional model to a bootstrapped version of the data set.
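For example (illustrative; `data` is a pandas DataFrame), the default
perturbation method can be set globally and overridden per variable:
>>> imp = mice.MICEData(data, perturbation_method='gaussian')
>>> imp.set_imputer('x2', perturbation_method='boot')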
Class structure
---------------
There are two main classes in the module:
* 'MICEData' wraps a Pandas dataframe, incorporating information about
the imputation model for each variable with missing values. It can
be used to produce multiply imputed data sets that are to be further
processed or distributed to other researchers. A number of plotting
procedures are provided to visualize the imputation results and
missing data patterns. The `history_callback` hook allows any features
of interest of the imputed data sets to be saved for further
analysis.
* 'MICE' takes both a 'MICEData' object and an analysis model
specification. It runs the multiple imputation, fits the analysis
models, and combines the results to produce a `MICEResults` object.
The summary method of this results object can be used to see the key
estimands and inferential quantities.
Notes
-----
By default, to conserve memory 'MICEData' saves very little
information from one iteration to the next. The data set passed by
the user is copied on entry, but then is over-written each time new
imputations are produced. If using 'MICE', the fitted
analysis models and results are saved. MICEData includes a
`history_callback` hook that allows arbitrary information from the
intermediate datasets to be saved for future use.
References
----------
JL Schafer: 'Multiple Imputation: A Primer', Stat Methods Med Res,
1999.
TE Raghunathan et al.: 'A Multivariate Technique for Multiply
Imputing Missing Values Using a Sequence of Regression Models', Survey
Methodology, 2001.
SAS Institute: 'Predictive Mean Matching Method for Monotone Missing
Data', SAS 9.2 User's Guide, 2014.
A Gelman et al.: 'Multiple Imputation with Diagnostics (mi) in R:
Opening Windows into the Black Box', Journal of Statistical Software,
2009.
"""
import pandas as pd
import numpy as np
import patsy
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.regression.linear_model import OLS
from collections import defaultdict
_mice_data_example_1 = """
>>> imp = mice.MICEData(data)
>>> imp.set_imputer('x1', formula='x2 + np.square(x2) + x3')
>>> for j in range(20):
... imp.update_all()
... imp.data.to_csv('data%02d.csv' % j)"""
_mice_data_example_2 = """
>>> imp = mice.MICEData(data)
>>> j = 0
>>> for data in imp:
... imp.data.to_csv('data%02d.csv' % j)
... j += 1"""
class PatsyFormula(object):
"""
A simple wrapper for a string to be interpreted as a Patsy formula.
"""
def __init__(self, formula):
self.formula = "0 + " + formula
class MICEData(object):
__doc__ = """\
Wrap a data set to allow missing data handling with MICE.
Parameters
----------
data : Pandas data frame
The data set, which is copied internally.
perturbation_method : string
The default perturbation method
k_pmm : int
The number of nearest neighbors to use during predictive mean
matching. Can also be specified in `fit`.
history_callback : function
A function that is called after each complete imputation
cycle. The return value is appended to `history`. The
MICEData object is passed as the sole argument to
`history_callback`.
Examples
--------
Draw 20 imputations from a data set called `data` and save them in
separate files with filename pattern `dataXX.csv`. The variables
other than `x1` are imputed using linear models fit with OLS, with
mean structures containing main effects of all other variables in
`data`. The variable named `x1` has a conditional mean structure
that includes an additional term for x2^2.
%(_mice_data_example_1)s
Impute using default models, using the MICEData object as an
iterator.
%(_mice_data_example_2)s
Notes
-----
Allowed perturbation methods are 'gaussian' (the model parameters
are set to a draw from the Gaussian approximation to the posterior
distribution), and 'boot' (the model parameters are set to the
estimated values obtained when fitting a bootstrapped version of
the data set).
`history_callback` can be implemented to have side effects such as
saving the current imputed data set to disk.
""" % {'_mice_data_example_1': _mice_data_example_1,
'_mice_data_example_2': _mice_data_example_2}
def __init__(self, data, perturbation_method='gaussian',
k_pmm=20, history_callback=None):
if data.columns.dtype != np.dtype('O'):
msg = "MICEData data column names should be string type"
raise ValueError(msg)
self.regularized = dict()
# Drop observations where all variables are missing. This
# also has the effect of copying the data frame.
self.data = data.dropna(how='all').reset_index(drop=True)
self.history_callback = history_callback
self.history = []
self.predict_kwds = {}
# Assign the same perturbation method for all variables.
# Can be overridden when calling 'set_imputer'.
self.perturbation_method = defaultdict(lambda:
perturbation_method)
# Map from variable name to indices of observed/missing
# values.
self.ix_obs = {}
self.ix_miss = {}
for col in self.data.columns:
ix_obs, ix_miss = self._split_indices(self.data[col])
self.ix_obs[col] = ix_obs
self.ix_miss[col] = ix_miss
# Most recent model instance and results instance for each variable.
self.models = {}
self.results = {}
# Map from variable names to the conditional formula.
self.conditional_formula = {}
# Map from variable names to init/fit args of the conditional
# models.
self.init_kwds = defaultdict(lambda: dict())
self.fit_kwds = defaultdict(lambda: dict())
# Map from variable names to the model class.
self.model_class = {}
# Map from variable names to most recent params update.
self.params = {}
# Set default imputers.
for vname in data.columns:
self.set_imputer(vname)
# The order in which variables are imputed in each cycle.
# Impute variables with the fewest missing values first.
vnames = list(data.columns)
nmiss = [len(self.ix_miss[v]) for v in vnames]
nmiss = np.asarray(nmiss)
ii = np.argsort(nmiss)
ii = ii[sum(nmiss == 0):]
self._cycle_order = [vnames[i] for i in ii]
self._initial_imputation()
self.k_pmm = k_pmm
def next_sample(self):
"""
Returns the next imputed dataset in the imputation process.
Returns
-------
data : array_like
An imputed dataset from the MICE chain.
Notes
-----
`MICEData` does not have a `skip` parameter. Consecutive
values returned by `next_sample` are immediately consecutive
in the imputation chain.
The returned value is a reference to the data attribute of
the class and should be copied before making any changes.
"""
self.update_all(1)
return self.data
def _initial_imputation(self):
"""
Use a PMM-like procedure for initial imputed values.
For each variable, missing values are imputed as the observed
value that is closest to the mean over all observed values.
"""
for col in self.data.columns:
di = self.data[col] - self.data[col].mean()
di = np.abs(di)
ix = di.idxmin()
imp = self.data[col].loc[ix]
self.data[col].fillna(imp, inplace=True)
def _split_indices(self, vec):
null = pd.isnull(vec)
ix_obs = np.flatnonzero(~null)
ix_miss = np.flatnonzero(null)
if len(ix_obs) == 0:
raise ValueError("variable to be imputed has no observed values")
return ix_obs, ix_miss
def set_imputer(self, endog_name, formula=None, model_class=None,
init_kwds=None, fit_kwds=None, predict_kwds=None,
k_pmm=20, perturbation_method=None, regularized=False):
"""
Specify the imputation process for a single variable.
Parameters
----------
endog_name : string
Name of the variable to be imputed.
formula : string
Conditional formula for imputation. Defaults to a formula
with main effects for all other variables in dataset. The
formula should only include an expression for the mean
structure, e.g. use 'x1 + x2' not 'x4 ~ x1 + x2'.
model_class : statsmodels model
Conditional model for imputation. Defaults to OLS. See below
for more information.
init_kwds : dict-like
Keyword arguments passed to the model init method.
fit_kwds : dict-like
Keyword arguments passed to the model fit method.
predict_kwds : dict-like
Keyword arguments passed to the model predict method.
k_pmm : int
Determines number of neighboring observations from which
to randomly sample when using predictive mean matching.
perturbation_method : string
Either 'gaussian' or 'boot'. Determines the method
for perturbing parameters in the imputation model. If
None, uses the default specified at class initialization.
regularized : dict
If regularized[name]=True, `fit_regularized` rather than
`fit` is called when fitting imputation models for this
variable. When regularized[name]=True for any variable,
perturbation_method must be set to boot.
Notes
-----
The model class must meet the following conditions:
* A model must have a 'fit' method that returns an object.
* The object returned from `fit` must have a `params` attribute
that is an array-like object.
* The object returned from `fit` must have a cov_params method
that returns a square array-like object.
* The model must have a `predict` method.
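For example (illustrative only), a count variable could use a Poisson
imputation model with bootstrap perturbation:
>>> imp.set_imputer('x3', model_class=sm.Poisson,
...                 perturbation_method='boot')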
"""
if formula is None:
main_effects = [x for x in self.data.columns
if x != endog_name]
fml = endog_name + " ~ " + " + ".join(main_effects)
self.conditional_formula[endog_name] = fml
else:
fml = endog_name + " ~ " + formula
self.conditional_formula[endog_name] = fml
if model_class is None:
self.model_class[endog_name] = OLS
else:
self.model_class[endog_name] = model_class
if init_kwds is not None:
self.init_kwds[endog_name] = init_kwds
if fit_kwds is not None:
self.fit_kwds[endog_name] = fit_kwds
if predict_kwds is not None:
self.predict_kwds[endog_name] = predict_kwds
if perturbation_method is not None:
self.perturbation_method[endog_name] = perturbation_method
self.k_pmm = k_pmm
self.regularized[endog_name] = regularized
def _store_changes(self, col, vals):
"""
Fill in dataset with imputed values.
Parameters
----------
col : string
Name of variable to be filled in.
vals : array
Array of imputed values to use for filling-in missing values.
"""
ix = self.ix_miss[col]
if len(ix) > 0:
self.data.iloc[ix, self.data.columns.get_loc(col)] = np.atleast_1d(vals)
def update_all(self, n_iter=1):
"""
Perform a specified number of MICE iterations.
Parameters
----------
n_iter : int
The number of updates to perform. Only the result of the
final update will be available.
Notes
-----
The imputed values are stored in the class attribute `self.data`.
"""
for k in range(n_iter):
for vname in self._cycle_order:
self.update(vname)
if self.history_callback is not None:
hv = self.history_callback(self)
self.history.append(hv)
def get_split_data(self, vname):
"""
Return endog and exog for imputation of a given variable.
Parameters
----------
vname : string
The variable for which the split data is returned.
Returns
-------
endog_obs : DataFrame
Observed values of the variable to be imputed.
exog_obs : DataFrame
Current values of the predictors where the variable to be
imputed is observed.
exog_miss : DataFrame
Current values of the predictors where the variable to be
imputed is missing.
init_kwds : dict-like
The init keyword arguments for `vname`, processed through Patsy
as required.
fit_kwds : dict-like
The fit keyword arguments for `vname`, processed through Patsy
as required.
"""
formula = self.conditional_formula[vname]
endog, exog = patsy.dmatrices(formula, self.data,
return_type="dataframe")
# Rows with observed endog
ixo = self.ix_obs[vname]
endog_obs = np.asarray(endog.iloc[ixo])
exog_obs = np.asarray(exog.iloc[ixo, :])
# Rows with missing endog
ixm = self.ix_miss[vname]
exog_miss = np.asarray(exog.iloc[ixm, :])
predict_obs_kwds = {}
if vname in self.predict_kwds:
kwds = self.predict_kwds[vname]
predict_obs_kwds = self._process_kwds(kwds, ixo)
predict_miss_kwds = {}
if vname in self.predict_kwds:
kwds = self.predict_kwds[vname]
predict_miss_kwds = self._process_kwds(kwds, ixm)
return (endog_obs, exog_obs, exog_miss, predict_obs_kwds,
predict_miss_kwds)
def _process_kwds(self, kwds, ix):
kwds = kwds.copy()
for k in kwds:
v = kwds[k]
if isinstance(v, PatsyFormula):
mat = patsy.dmatrix(v.formula, self.data,
return_type="dataframe")
mat = np.asarray(mat)[ix, :]
if mat.shape[1] == 1:
mat = mat[:, 0]
kwds[k] = mat
return kwds
def get_fitting_data(self, vname):
"""
Return the data needed to fit a model for imputation.
The data is used to impute variable `vname`, and therefore
only includes cases for which `vname` is observed.
Values of type `PatsyFormula` in `init_kwds` or `fit_kwds` are
processed through Patsy and subset to align with the model's
endog and exog.
Parameters
----------
vname : string
The variable for which the fitting data is returned.
Returns
-------
endog : DataFrame
Observed values of `vname`.
exog : DataFrame
Regression design matrix for imputing `vname`.
init_kwds : dict-like
The init keyword arguments for `vname`, processed through Patsy
as required.
fit_kwds : dict-like
The fit keyword arguments for `vname`, processed through Patsy
as required.
"""
# Rows with observed endog
ix = self.ix_obs[vname]
formula = self.conditional_formula[vname]
endog, exog = patsy.dmatrices(formula, self.data,
return_type="dataframe")
endog = np.asarray(endog.iloc[ix, 0])
exog = np.asarray(exog.iloc[ix, :])
init_kwds = self._process_kwds(self.init_kwds[vname], ix)
fit_kwds = self._process_kwds(self.fit_kwds[vname], ix)
return endog, exog, init_kwds, fit_kwds
def plot_missing_pattern(self, ax=None, row_order="pattern",
column_order="pattern",
hide_complete_rows=False,
hide_complete_columns=False,
color_row_patterns=True):
"""
Generate an image showing the missing data pattern.
Parameters
----------
ax : matplotlib axes
Axes on which to draw the plot.
row_order : string
The method for ordering the rows. Must be one of 'pattern',
'proportion', or 'raw'.
column_order : string
The method for ordering the columns. Must be one of 'pattern',
'proportion', or 'raw'.
hide_complete_rows : boolean
If True, rows with no missing values are not drawn.
hide_complete_columns : boolean
If True, columns with no missing values are not drawn.
color_row_patterns : boolean
If True, color the unique row patterns, otherwise use grey
and white as colors.
Returns
-------
A figure containing a plot of the missing data pattern.
"""
# Create an indicator matrix for missing values.
miss = np.zeros(self.data.shape)
cols = self.data.columns
for j, col in enumerate(cols):
ix = self.ix_miss[col]
miss[ix, j] = 1
# Order the columns as requested
if column_order == "proportion":
ix = np.argsort(miss.mean(0))
elif column_order == "pattern":
cv = np.cov(miss.T)
u, s, vt = np.linalg.svd(cv, 0)
ix = np.argsort(cv[:, 0])
elif column_order == "raw":
ix = np.arange(len(cols))
else:
raise ValueError(
column_order + " is not an allowed value for `column_order`.")
miss = miss[:, ix]
cols = [cols[i] for i in ix]
# Order the rows as requested
if row_order == "proportion":
ix = np.argsort(miss.mean(1))
elif row_order == "pattern":
x = 2**np.arange(miss.shape[1])
rky = np.dot(miss, x)
ix = np.argsort(rky)
elif row_order == "raw":
ix = np.arange(miss.shape[0])
else:
raise ValueError(
row_order + " is not an allowed value for `row_order`.")
miss = miss[ix, :]
if hide_complete_rows:
ix = np.flatnonzero((miss == 1).any(1))
miss = miss[ix, :]
if hide_complete_columns:
ix = np.flatnonzero((miss == 1).any(0))
miss = miss[:, ix]
cols = [cols[i] for i in ix]
from statsmodels.graphics import utils as gutils
from matplotlib.colors import LinearSegmentedColormap
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
if color_row_patterns:
x = 2**np.arange(miss.shape[1])
rky = np.dot(miss, x)
_, rcol = np.unique(rky, return_inverse=True)
miss *= 1 + rcol[:, None]
ax.imshow(miss, aspect="auto", interpolation="nearest",
cmap='gist_ncar_r')
else:
cmap = LinearSegmentedColormap.from_list("_",
["white", "darkgrey"])
ax.imshow(miss, aspect="auto", interpolation="nearest",
cmap=cmap)
ax.set_ylabel("Cases")
ax.set_xticks(range(len(cols)))
ax.set_xticklabels(cols, rotation=90)
return fig
def plot_bivariate(self, col1_name, col2_name,
lowess_args=None, lowess_min_n=40,
jitter=None, plot_points=True, ax=None):
"""
Plot observed and imputed values for two variables.
Displays a scatterplot of one variable against another. The
points are colored according to whether the values are
observed or imputed.
Parameters
----------
col1_name : string
The variable to be plotted on the horizontal axis.
col2_name : string
The variable to be plotted on the vertical axis.
lowess_args : dictionary
A dictionary of dictionaries, keys are 'ii', 'io', 'oi'
and 'oo', where 'o' denotes 'observed' and 'i' denotes
imputed. See Notes for details.
lowess_min_n : integer
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : matplotlib axes object
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn.
"""
from statsmodels.graphics import utils as gutils
from statsmodels.nonparametric.smoothers_lowess import lowess
if lowess_args is None:
lowess_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ix1i = self.ix_miss[col1_name]
ix1o = self.ix_obs[col1_name]
ix2i = self.ix_miss[col2_name]
ix2o = self.ix_obs[col2_name]
ix_ii = np.intersect1d(ix1i, ix2i)
ix_io = np.intersect1d(ix1i, ix2o)
ix_oi = np.intersect1d(ix1o, ix2i)
ix_oo = np.intersect1d(ix1o, ix2o)
vec1 = np.asarray(self.data[col1_name])
vec2 = np.asarray(self.data[col2_name])
if jitter is not None:
if np.isscalar(jitter):
jitter = (jitter, jitter)
vec1 += jitter[0] * np.random.normal(size=len(vec1))
vec2 += jitter[1] * np.random.normal(size=len(vec2))
# Plot the points
keys = ['oo', 'io', 'oi', 'ii']
lak = {'i': 'imp', 'o': 'obs'}
ixs = {'ii': ix_ii, 'io': ix_io, 'oi': ix_oi, 'oo': ix_oo}
color = {'oo': 'grey', 'ii': 'red', 'io': 'orange',
'oi': 'lime'}
if plot_points:
for ky in keys:
ix = ixs[ky]
lab = lak[ky[0]] + "/" + lak[ky[1]]
ax.plot(vec1[ix], vec2[ix], 'o', color=color[ky],
label=lab, alpha=0.6)
# Plot the lowess fits
for ky in keys:
ix = ixs[ky]
if len(ix) < lowess_min_n:
continue
if ky in lowess_args:
la = lowess_args[ky]
else:
la = {}
ix = ixs[ky]
lfit = lowess(vec2[ix], vec1[ix], **la)
if plot_points:
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4)
else:
lab = lak[ky[0]] + "/" + lak[ky[1]]
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4, label=lab)
ha, la = ax.get_legend_handles_labels()
pad = 0.0001 if plot_points else 0.5
leg = fig.legend(ha, la, 'center right', numpoints=1,
handletextpad=pad)
leg.draw_frame(False)
ax.set_xlabel(col1_name)
ax.set_ylabel(col2_name)
return fig
def plot_fit_obs(self, col_name, lowess_args=None,
lowess_min_n=40, jitter=None,
plot_points=True, ax=None):
"""
Plot fitted versus imputed or observed values as a scatterplot.
Parameters
----------
col_name : string
The variable to be plotted on the horizontal axis.
lowess_args : dict-like
Keyword arguments passed to lowess fit. A dictionary of
dictionaries, keys are 'o' and 'i' denoting 'observed' and
'imputed', respectively.
lowess_min_n : integer
Minimum sample size to plot a lowess fit
jitter : float or tuple
Standard deviation for jittering points in the plot.
Either a single scalar applied to both axes, or a tuple
containing x-axis jitter and y-axis jitter, respectively.
plot_points : bool
If True, the data points are plotted.
ax : matplotlib axes object
Axes on which to plot, created if not provided.
Returns
-------
The matplotlib figure on which the plot is drawn.
"""
from statsmodels.graphics import utils as gutils
from statsmodels.nonparametric.smoothers_lowess import lowess
if lowess_args is None:
lowess_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ixi = self.ix_miss[col_name]
ixo = self.ix_obs[col_name]
vec1 = np.asarray(self.data[col_name])
# Fitted values
formula = self.conditional_formula[col_name]
endog, exog = patsy.dmatrices(formula, self.data,
return_type="dataframe")
results = self.results[col_name]
vec2 = results.predict(exog=exog)
vec2 = self._get_predicted(vec2)
if jitter is not None:
if np.isscalar(jitter):
jitter = (jitter, jitter)
vec1 += jitter[0] * np.random.normal(size=len(vec1))
vec2 += jitter[1] * np.random.normal(size=len(vec2))
# Plot the points
keys = ['o', 'i']
ixs = {'o': ixo, 'i': ixi}
lak = {'o': 'obs', 'i': 'imp'}
color = {'o': 'orange', 'i': 'lime'}
if plot_points:
for ky in keys:
ix = ixs[ky]
ax.plot(vec1[ix], vec2[ix], 'o', color=color[ky],
label=lak[ky], alpha=0.6)
# Plot the lowess fits
for ky in keys:
ix = ixs[ky]
if len(ix) < lowess_min_n:
continue
if ky in lowess_args:
la = lowess_args[ky]
else:
la = {}
ix = ixs[ky]
lfit = lowess(vec2[ix], vec1[ix], **la)
ax.plot(lfit[:, 0], lfit[:, 1], '-', color=color[ky],
alpha=0.6, lw=4, label=lak[ky])
ha, la = ax.get_legend_handles_labels()
leg = fig.legend(ha, la, 'center right', numpoints=1)
leg.draw_frame(False)
ax.set_xlabel(col_name + " observed or imputed")
ax.set_ylabel(col_name + " fitted")
return fig
def plot_imputed_hist(self, col_name, ax=None, imp_hist_args=None,
obs_hist_args=None, all_hist_args=None):
"""
Display imputed values for one variable as a histogram.
Parameters
----------
col_name : string
The name of the variable to be plotted.
ax : matplotlib axes
An axes on which to draw the histograms. If not provided,
one is created.
imp_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for imputed values.
obs_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for observed values.
all_hist_args : dict
Keyword arguments to be passed to pyplot.hist when
creating the histogram for all values.
Returns
-------
The matplotlib figure on which the histograms were drawn
"""
from statsmodels.graphics import utils as gutils
if imp_hist_args is None:
imp_hist_args = {}
if obs_hist_args is None:
obs_hist_args = {}
if all_hist_args is None:
all_hist_args = {}
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
ax.set_position([0.1, 0.1, 0.7, 0.8])
ixm = self.ix_miss[col_name]
ixo = self.ix_obs[col_name]
imp = self.data[col_name].iloc[ixm]
obs = self.data[col_name].iloc[ixo]
for di in imp_hist_args, obs_hist_args, all_hist_args:
if 'histtype' not in di:
di['histtype'] = 'step'
ha, la = [], []
if len(imp) > 0:
h = ax.hist(np.asarray(imp), **imp_hist_args)
ha.append(h[-1][0])
la.append("Imp")
h1 = ax.hist(np.asarray(obs), **obs_hist_args)
h2 = ax.hist(np.asarray(self.data[col_name]), **all_hist_args)
ha.extend([h1[-1][0], h2[-1][0]])
la.extend(["Obs", "All"])
leg = fig.legend(ha, la, 'center right', numpoints=1)
leg.draw_frame(False)
ax.set_xlabel(col_name)
ax.set_ylabel("Frequency")
return fig
# Try to identify any auxiliary arrays (e.g. status vector in
# PHReg) that need to be bootstrapped along with exog and endog.
def _boot_kwds(self, kwds, rix):
for k in kwds:
v = kwds[k]
# This is only relevant for ndarrays
if not isinstance(v, np.ndarray):
continue
# Handle 1d vectors
if (v.ndim == 1) and (v.shape[0] == len(rix)):
kwds[k] = v[rix]
# Handle 2d arrays
if (v.ndim == 2) and (v.shape[0] == len(rix)):
kwds[k] = v[rix, :]
return kwds
def _perturb_bootstrap(self, vname):
"""
Perturbs the model's parameters using a bootstrap.
"""
endog, exog, init_kwds, fit_kwds = self.get_fitting_data(vname)
m = len(endog)
rix = np.random.randint(0, m, m)
endog = endog[rix]
exog = exog[rix, :]
init_kwds = self._boot_kwds(init_kwds, rix)
fit_kwds = self._boot_kwds(fit_kwds, rix)
klass = self.model_class[vname]
self.models[vname] = klass(endog, exog, **init_kwds)
if vname in self.regularized and self.regularized[vname]:
self.results[vname] = (
self.models[vname].fit_regularized(**fit_kwds))
else:
self.results[vname] = self.models[vname].fit(**fit_kwds)
self.params[vname] = self.results[vname].params
def _perturb_gaussian(self, vname):
"""
Gaussian perturbation of model parameters.
The normal approximation to the sampling distribution of the
parameter estimates is used to define the mean and covariance
structure of the perturbation distribution.
"""
endog, exog, init_kwds, fit_kwds = self.get_fitting_data(vname)
klass = self.model_class[vname]
self.models[vname] = klass(endog, exog, **init_kwds)
self.results[vname] = self.models[vname].fit(**fit_kwds)
cov = self.results[vname].cov_params()
mu = self.results[vname].params
self.params[vname] = np.random.multivariate_normal(mean=mu, cov=cov)
def perturb_params(self, vname):
if self.perturbation_method[vname] == "gaussian":
self._perturb_gaussian(vname)
elif self.perturbation_method[vname] == "boot":
self._perturb_bootstrap(vname)
else:
raise ValueError("unknown perturbation method")
def impute(self, vname):
# Wrap this in case we later add additional imputation
# methods.
self.impute_pmm(vname)
def update(self, vname):
"""
Impute missing values for a single variable.
This is a two-step process in which first the parameters are
perturbed, then the missing values are re-imputed.
Parameters
----------
vname : string
The name of the variable to be updated.
"""
self.perturb_params(vname)
self.impute(vname)
# work-around for inconsistent predict return values
def _get_predicted(self, obj):
if isinstance(obj, np.ndarray):
return obj
elif isinstance(obj, pd.Series):
return obj.values
elif hasattr(obj, 'predicted_values'):
return obj.predicted_values
else:
raise ValueError(
"cannot obtain predicted values from %s" % obj.__class__)
def impute_pmm(self, vname):
"""
Use predictive mean matching to impute missing values.
Notes
-----
The `perturb_params` method must be called first to define the
model.
"""
k_pmm = self.k_pmm
endog_obs, exog_obs, exog_miss, predict_obs_kwds, predict_miss_kwds = (
self.get_split_data(vname))
# Predict imputed variable for both missing and non-missing
# observations
model = self.models[vname]
pendog_obs = model.predict(self.params[vname], exog_obs,
**predict_obs_kwds)
pendog_miss = model.predict(self.params[vname], exog_miss,
**predict_miss_kwds)
pendog_obs = self._get_predicted(pendog_obs)
pendog_miss = self._get_predicted(pendog_miss)
# Jointly sort the observed and predicted endog values for the
# cases with observed values.
ii = np.argsort(pendog_obs)
endog_obs = endog_obs[ii]
pendog_obs = pendog_obs[ii]
# Find the closest match to the predicted endog values for
# cases with missing endog values.
ix = np.searchsorted(pendog_obs, pendog_miss)
# Get the indices for the closest k_pmm values on
# either side of the closest index.
ixm = ix[:, None] + np.arange(-k_pmm, k_pmm)[None, :]
# Account for boundary effects
msk = np.nonzero((ixm < 0) | (ixm > len(endog_obs) - 1))
ixm = np.clip(ixm, 0, len(endog_obs) - 1)
# Get the distances
dx = pendog_miss[:, None] - pendog_obs[ixm]
dx = np.abs(dx)
dx[msk] = np.inf
# Closest positions in ix, row-wise.
dxi = np.argsort(dx, 1)[:, 0:k_pmm]
# Choose a column for each row.
ir = np.random.randint(0, k_pmm, len(pendog_miss))
# Unwind the indices
jj = np.arange(dxi.shape[0])
ix = dxi[(jj, ir)]
iz = ixm[(jj, ix)]
imputed_miss = np.array(endog_obs[iz]).squeeze()
self._store_changes(vname, imputed_miss)
_mice_example_1 = """
>>> imp = mice.MICEData(data)
>>> fml = 'y ~ x1 + x2 + x3 + x4'
>>> mice = mice.MICE(fml, sm.OLS, imp)
>>> results = mice.fit(10, 10)
>>> print(results.summary())
.. literalinclude:: ../plots/mice_example_1.txt
"""
_mice_example_2 = """
>>> imp = mice.MICEData(data)
>>> fml = 'y ~ x1 + x2 + x3 + x4'
>>> mice = mice.MICE(fml, sm.OLS, imp)
>>> results = []
>>> for k in range(10):
>>> x = mice.next_sample()
>>> results.append(x)
"""
class MICE(object):
__doc__ = """\
Multiple Imputation with Chained Equations.
This class can be used to fit most Statsmodels models to data sets
with missing values using the 'multiple imputation with chained
equations' (MICE) approach.
Parameters
----------
model_formula : string
The model formula to be fit to the imputed data sets. This
formula is for the 'analysis model'.
model_class : statsmodels model
The model to be fit to the imputed data sets. This model
class is for the 'analysis model'.
data : MICEData instance
MICEData object containing the data set for which
missing values will be imputed
n_skip : int
The number of imputed datasets to skip between consecutive
imputed datasets that are used for analysis.
init_kwds : dict-like
Dictionary of keyword arguments passed to the init method
of the analysis model.
fit_kwds : dict-like
Dictionary of keyword arguments passed to the fit method
of the analysis model.
Examples
--------
Run all MICE steps and obtain results:
%(mice_example_1)s
Obtain a sequence of fitted analysis models without combining
to obtain summary::
%(mice_example_2)s
""" % {'mice_example_1': _mice_example_1,
'mice_example_2': _mice_example_2}
def __init__(self, model_formula, model_class, data, n_skip=3,
init_kwds=None, fit_kwds=None):
self.model_formula = model_formula
self.model_class = model_class
self.n_skip = n_skip
self.data = data
self.results_list = []
self.init_kwds = init_kwds if init_kwds is not None else {}
self.fit_kwds = fit_kwds if fit_kwds is not None else {}
def next_sample(self):
"""
Perform one complete MICE iteration.
A single MICE iteration updates all missing values using their
respective imputation models, then fits the analysis model to
the imputed data.
Returns
-------
params : array_like
The model parameters for the analysis model.
Notes
-----
This function fits the analysis model and returns its
parameter estimate. The parameter vector is not stored by the
class and is not used in any subsequent calls to `combine`.
Use `fit` to run all MICE steps together and obtain summary
results.
The complete cycle of missing value imputation followed by
fitting the analysis model is repeated `n_skip + 1` times and
the analysis model parameters from the final fit are returned.
"""
# Impute missing values
self.data.update_all(self.n_skip + 1)
start_params = None
if len(self.results_list) > 0:
start_params = self.results_list[-1].params
# Fit the analysis model.
model = self.model_class.from_formula(self.model_formula,
self.data.data,
**self.init_kwds)
self.fit_kwds.update({"start_params": start_params})
result = model.fit(**self.fit_kwds)
return result
def fit(self, n_burnin=10, n_imputations=10):
"""
Fit a model using MICE.
Parameters
----------
n_burnin : int
The number of burn-in cycles to skip.
n_imputations : int
The number of data sets to impute
"""
# Run without fitting the analysis model
self.data.update_all(n_burnin)
for j in range(n_imputations):
result = self.next_sample()
self.results_list.append(result)
self.endog_names = result.model.endog_names
self.exog_names = result.model.exog_names
return self.combine()
def combine(self):
"""
Pools MICE imputation results.
This method can only be used after the `fit` method has been
called. Returns estimates and standard errors of the analysis
model parameters.
Returns a MICEResults instance.
"""
# Extract a few things from the models that were fit to
# imputed data sets.
params_list = []
cov_within = 0.
scale_list = []
for results in self.results_list:
results_uw = results._results
params_list.append(results_uw.params)
cov_within += results_uw.cov_params()
scale_list.append(results.scale)
params_list = np.asarray(params_list)
scale_list = np.asarray(scale_list)
# The estimated parameters for the MICE analysis
params = params_list.mean(0)
# The average of the within-imputation covariances
cov_within /= len(self.results_list)
# The between-imputation covariance
cov_between = np.cov(params_list.T)
# The estimated covariance matrix for the MICE analysis
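# (Rubin's combining rule: total covariance equals the average
# within-imputation covariance plus (1 + 1/m) times the
# between-imputation covariance, m being the number of imputations.)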
f = 1 + 1 / float(len(self.results_list))
cov_params = cov_within + f * cov_between
# Fraction of missing information
fmi = f * np.diag(cov_between) / np.diag(cov_params)
# Set up a results instance
scale = np.mean(scale_list)
results = MICEResults(self, params, cov_params / scale)
results.scale = scale
results.frac_miss_info = fmi
results.exog_names = self.exog_names
results.endog_names = self.endog_names
results.model_class = self.model_class
return results
class MICEResults(LikelihoodModelResults):
def __init__(self, model, params, normalized_cov_params):
super(MICEResults, self).__init__(model, params,
normalized_cov_params)
def summary(self, title=None, alpha=.05):
"""
Summarize the results of running MICE.
Parameters
----------
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
Significance level for the confidence intervals
Returns
-------
smry : Summary instance
This holds the summary tables and text, which can be
printed or converted to various output formats.
"""
from statsmodels.iolib import summary2
from collections import OrderedDict
smry = summary2.Summary()
float_format = "%8.3f"
info = OrderedDict()
info["Method:"] = "MICE"
info["Model:"] = self.model_class.__name__
info["Dependent variable:"] = self.endog_names
info["Sample size:"] = "%d" % self.model.data.data.shape[0]
info["Scale"] = "%.2f" % self.scale
info["Num. imputations"] = "%d" % len(self.model.results_list)
smry.add_dict(info, align='l', float_format=float_format)
param = summary2.summary_params(self, alpha=alpha)
param["FMI"] = self.frac_miss_info
smry.add_df(param, float_format=float_format)
smry.add_title(title=title, results=self)
return smry |
|
statsmodels__statsmodels | large_data.rst | Tutorial | Working with Large Data Sets | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/large_data.rst | [
"statsmodels__statsmodels/statsmodels/base/distributed_estimation.py"
] | Working with Large Data Sets
Big data is something of a buzzword in the modern world. While
statsmodels works well with small and moderately-sized data sets that
can be loaded in memory--perhaps tens of thousands of observations--use
cases exist with millions of observations or more. Depending on your use
case, statsmodels may or may not be a sufficient tool.
statsmodels and most of the software stack it is written on operate in
memory. As a result, building models on larger data sets can be
challenging or even impractical. With that said, there are two general
strategies for building models on larger data sets with statsmodels.
Divide and Conquer - Distributing Jobs
If your system is capable of loading all the data, but the analysis you
are attempting to perform is slow, you might be able to build models on
horizontal slices of the data and then aggregate the individual models
once fit.
A current limitation of this approach is that it generally does not
support patsy, so constructing your design matrix (known as exog) in
statsmodels is a little challenging.
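As a rough sketch of this approach, the DistributedModel class in
statsmodels.base.distributed_estimation fits a model on each horizontal
slice and then recombines the partition-level estimates. The synthetic
data, the number of partitions, and the penalty weight alpha below are
illustrative assumptions only.
import numpy as np
from statsmodels.base.distributed_estimation import DistributedModel

rng = np.random.default_rng(0)
n_partitions, n_per_part, p = 4, 250, 5
beta = np.array([1.0, 0.0, -2.0, 0.0, 0.5])

def data_generator():
    # each yielded (endog, exog) pair is one horizontal slice of the data
    for _ in range(n_partitions):
        exog = rng.standard_normal((n_per_part, p))
        endog = exog @ beta + rng.standard_normal(n_per_part)
        yield endog, exog

# defaults: OLS on each partition, combined with debiased regularization
mod = DistributedModel(n_partitions)
res = mod.fit(data_generator(), fit_kwds={"alpha": 0.01})
print(res.params)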
Subsetting your data
If your entire data set is too large to store in memory, you might try
storing it in a columnar container like Apache Parquet or bcolz. Using
the patsy formula interface, statsmodels will use the __getitem__
function (i.e. data['Item']) to pull only the specified columns.
import pyarrow as pa
import pyarrow.parquet as pq
import statsmodels.formula.api as smf
class DataSet(dict):
    def __init__(self, path):
        self.parquet = pq.ParquetFile(path)

    def __getitem__(self, key):
        try:
            return self.parquet.read([key]).to_pandas()[key]
        except Exception:
            raise KeyError
LargeData = DataSet('LargeData.parquet')
res = smf.ols('Profit ~ Sugar + Power + Women', data=LargeData).fit()
Additionally, you can add code to this example DataSet object to return
only a subset of the rows until you have built a good model. Then, you
can refit your final model on more data.
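For example, a hypothetical variant of the DataSet class above could
expose only the first nrows rows of each column while prototyping (the
class name and the row limit are illustrative assumptions):
class SubsetDataSet(DataSet):
    def __init__(self, path, nrows=100000):
        super().__init__(path)
        self.nrows = nrows

    def __getitem__(self, key):
        # take the same leading rows for every column so the columns
        # pulled by patsy stay aligned
        return super().__getitem__(key).iloc[:self.nrows]

prototype = smf.ols('Profit ~ Sugar + Power + Women',
                    data=SubsetDataSet('LargeData.parquet')).fit()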
| from statsmodels.base.elastic_net import RegularizedResults
from statsmodels.stats.regularized_covariance import _calc_nodewise_row, \
_calc_nodewise_weight, _calc_approx_inv_cov
from statsmodels.base.model import LikelihoodModelResults
from statsmodels.regression.linear_model import OLS
import numpy as np
"""
Distributed estimation routines. Currently, we support several
methods of distribution
- sequential, has no extra dependencies
- parallel
- with joblib
A variety of backends are supported through joblib
This allows for different types of clusters besides
standard local clusters. Some examples of
backends supported by joblib are
- dask.distributed
- yarn
- ipyparallel
The framework is very general and allows for a variety of
estimation methods. Currently, these include
- debiased regularized estimation
- simple coefficient averaging (naive)
- regularized
- unregularized
Currently, the default is regularized estimation with debiasing
which follows the methods outlined in
Jason D. Lee, Qiang Liu, Yuekai Sun and Jonathan E. Taylor.
"Communication-Efficient Sparse Regression: A One-Shot Approach."
arXiv:1503.04337. 2015. https://arxiv.org/abs/1503.04337.
There are several variables that are taken from the source paper
for which the interpretation may not be directly clear from the
code, these are mostly used to help form the estimate of the
approximate inverse covariance matrix as part of the
debiasing procedure.
wexog
A weighted design matrix used to perform the node-wise
regression procedure.
nodewise_row
nodewise_row is produced as part of the node-wise regression
procedure used to produce the approximate inverse covariance
matrix. One is produced for each variable using the
LASSO.
nodewise_weight
nodewise_weight is produced from the nodewise_row values for
each variable and is used to rescale the nodewise_row values, which
are ultimately used to form approx_inv_cov.
approx_inv_cov
This is the estimate of the approximate inverse covariance
matrix. This is used to debias the coefficient average
along with the average gradient. For the OLS case,
approx_inv_cov is an approximation for
n * (X^T X)^{-1}
formed by node-wise regression.
"""
def _est_regularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the regularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
Returns
-------
An array of the parameters for the regularized fit
"""
if fit_kwds is None:
raise ValueError("_est_regularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit_regularized(**fit_kwds).params
def _est_unregularized_naive(mod, pnum, partitions, fit_kwds=None):
"""estimates the unregularized fitted parameters.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
pnum : scalar
Index of current partition
partitions : scalar
Total number of partitions
fit_kwds : dict-like or None
Keyword arguments to be given to fit
Returns
-------
An array of the parameters for the fit
"""
if fit_kwds is None:
raise ValueError("_est_unregularized_naive currently " +
"requires that fit_kwds not be None.")
return mod.fit(**fit_kwds).params
def _join_naive(params_l, threshold=0):
"""joins the results from each run of _est_<type>_naive
and returns the mean estimate of the coefficients
Parameters
----------
params_l : list
A list of arrays of coefficients.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(params_l[0])
partitions = len(params_l)
params_mn = np.zeros(p)
for params in params_l:
params_mn += params
params_mn /= partitions
params_mn[np.abs(params_mn) < threshold] = 0
return params_mn
def _calc_grad(mod, params, alpha, L1_wt, score_kwds):
"""calculates the log-likelihood gradient for the debiasing
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt : scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is
a ridge fit, if 1 it is a lasso fit.
score_kwds : dict-like or None
Keyword arguments for the score function.
Returns
-------
An array-like object of the same dimension as params
Notes
-----
In general:
gradient l_k(params)
where k corresponds to the index of the partition
For OLS:
X^T(y - X params)
"""
grad = -mod.score(np.asarray(params), **score_kwds)
grad += alpha * (1 - L1_wt)
return grad
def _calc_wdesign_mat(mod, params, hess_kwds):
"""calculates the weighted design matrix necessary to generate
the approximate inverse covariance matrix
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
params : array_like
The estimated coefficients for the current partition.
hess_kwds : dict-like or None
Keyword arguments for the hessian function.
Returns
-------
An array-like object, updated design matrix, same dimension
as mod.exog
"""
rhess = np.sqrt(mod.hessian_factor(np.asarray(params), **hess_kwds))
return rhess[:, None] * mod.exog
def _est_regularized_debiased(mod, mnum, partitions, fit_kwds=None,
score_kwds=None, hess_kwds=None):
"""estimates the regularized fitted parameters, is the default
estimation_method for class DistributedModel.
Parameters
----------
mod : statsmodels model class instance
The model for the current partition.
mnum : scalar
Index of current partition.
partitions : scalar
Total number of partitions.
fit_kwds : dict-like or None
Keyword arguments to be given to fit_regularized
score_kwds : dict-like or None
Keyword arguments for the score function.
hess_kwds : dict-like or None
Keyword arguments for the Hessian function.
Returns
-------
A tuple of parameters for regularized fit
An array-like object of the fitted parameters, params
An array-like object for the gradient
A list of array like objects for nodewise_row
A list of array like objects for nodewise_weight
"""
score_kwds = {} if score_kwds is None else score_kwds
hess_kwds = {} if hess_kwds is None else hess_kwds
if fit_kwds is None:
raise ValueError("_est_regularized_debiased currently " +
"requires that fit_kwds not be None.")
else:
alpha = fit_kwds["alpha"]
if "L1_wt" in fit_kwds:
L1_wt = fit_kwds["L1_wt"]
else:
L1_wt = 1
nobs, p = mod.exog.shape
p_part = int(np.ceil((1. * p) / partitions))
params = mod.fit_regularized(**fit_kwds).params
grad = _calc_grad(mod, params, alpha, L1_wt, score_kwds) / nobs
wexog = _calc_wdesign_mat(mod, params, hess_kwds)
nodewise_row_l = []
nodewise_weight_l = []
for idx in range(mnum * p_part, min((mnum + 1) * p_part, p)):
nodewise_row = _calc_nodewise_row(wexog, idx, alpha)
nodewise_row_l.append(nodewise_row)
nodewise_weight = _calc_nodewise_weight(wexog, nodewise_row, idx,
alpha)
nodewise_weight_l.append(nodewise_weight)
return params, grad, nodewise_row_l, nodewise_weight_l
def _join_debiased(results_l, threshold=0):
"""joins the results from each run of _est_regularized_debiased
and returns the debiased estimate of the coefficients
Parameters
----------
results_l : list
A list of tuples each one containing the params, grad,
nodewise_row and nodewise_weight values for each partition.
threshold : scalar
The threshold at which the coefficients will be cut.
"""
p = len(results_l[0][0])
partitions = len(results_l)
params_mn = np.zeros(p)
grad_mn = np.zeros(p)
nodewise_row_l = []
nodewise_weight_l = []
for r in results_l:
params_mn += r[0]
grad_mn += r[1]
nodewise_row_l.extend(r[2])
nodewise_weight_l.extend(r[3])
nodewise_row_l = np.array(nodewise_row_l)
nodewise_weight_l = np.array(nodewise_weight_l)
params_mn /= partitions
grad_mn *= -1. / partitions
approx_inv_cov = _calc_approx_inv_cov(nodewise_row_l, nodewise_weight_l)
debiased_params = params_mn + approx_inv_cov.dot(grad_mn)
debiased_params[np.abs(debiased_params) < threshold] = 0
return debiased_params
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib can't handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results
class DistributedModel(object):
__doc__ = """
Distributed model class
Parameters
----------
partitions : scalar
The number of partitions that the data will be split into.
model_class : statsmodels model class
The model class which will be used for estimation. If None
this defaults to OLS.
init_kwds : dict-like or None
Keywords needed for initializing the model, in addition to
endog and exog.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current use case
is for WLS and GLS
estimation_method : function or None
The method that performs the estimation for each partition.
If None this defaults to _est_regularized_debiased.
estimation_kwds : dict-like or None
Keywords to be passed to estimation_method.
join_method : function or None
The method used to recombine the results from each partition.
If None this defaults to _join_debiased.
join_kwds : dict-like or None
Keywords to be passed to join_method.
results_class : results class or None
The class of results that should be returned. If None this
defaults to RegularizedResults.
results_kwds : dict-like or None
Keywords to be passed to results class.
Attributes
----------
partitions : scalar
See Parameters.
model_class : statsmodels model class
See Parameters.
init_kwds : dict-like
See Parameters.
init_kwds_generator : generator or None
See Parameters.
estimation_method : function
See Parameters.
estimation_kwds : dict-like
See Parameters.
join_method : function
See Parameters.
join_kwds : dict-like
See Parameters.
results_class : results class
See Parameters.
results_kwds : dict-like
See Parameters.
Examples
--------
Notes
-----
"""
def __init__(self, partitions, model_class=None,
init_kwds=None, estimation_method=None,
estimation_kwds=None, join_method=None, join_kwds=None,
results_class=None, results_kwds=None):
self.partitions = partitions
if model_class is None:
self.model_class = OLS
else:
self.model_class = model_class
if init_kwds is None:
self.init_kwds = {}
else:
self.init_kwds = init_kwds
if estimation_method is None:
self.estimation_method = _est_regularized_debiased
else:
self.estimation_method = estimation_method
if estimation_kwds is None:
self.estimation_kwds = {}
else:
self.estimation_kwds = estimation_kwds
if join_method is None:
self.join_method = _join_debiased
else:
self.join_method = join_method
if join_kwds is None:
self.join_kwds = {}
else:
self.join_kwds = join_kwds
if results_class is None:
self.results_class = RegularizedResults
else:
self.results_class = results_class
if results_kwds is None:
self.results_kwds = {}
else:
self.results_kwds = results_kwds
def fit(self, data_generator, fit_kwds=None, parallel_method="sequential",
parallel_backend=None, init_kwds_generator=None):
"""Performs the distributed estimation using the corresponding
DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like or None
Keywords needed for the model fitting.
parallel_method : str
type of distributed estimation to be used, currently
"sequential", "joblib" and "dask" are supported.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current use case
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
if fit_kwds is None:
fit_kwds = {}
if parallel_method == "sequential":
results_l = self.fit_sequential(data_generator, fit_kwds,
init_kwds_generator)
elif parallel_method == "joblib":
results_l = self.fit_joblib(data_generator, fit_kwds,
parallel_backend,
init_kwds_generator)
else:
raise ValueError("parallel_method: %s is currently not supported"
% parallel_method)
params = self.join_method(results_l, **self.join_kwds)
# NOTE that currently, the dummy result model that is initialized
# here does not use any init_kwds from the init_kwds_generator even
# if it is provided. It is possible to imagine an edge case where
# this might be a problem but given that the results model instance
# does not correspond to any data partition this seems reasonable.
res_mod = self.model_class([0], [0], **self.init_kwds)
return self.results_class(res_mod, params, **self.results_kwds)
def fit_sequential(self, data_generator, fit_kwds,
init_kwds_generator=None):
"""Sequentially performs the distributed estimation using
the corresponding DistributedModel
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
results_l = []
if init_kwds_generator is None:
for pnum, (endog, exog) in enumerate(data_generator):
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds)
results_l.append(results)
else:
tup_gen = enumerate(zip(data_generator,
init_kwds_generator))
for pnum, ((endog, exog), init_kwds_e) in tup_gen:
results = _helper_fit_partition(self, pnum, endog, exog,
fit_kwds, init_kwds_e)
results_l.append(results)
return results_l
def fit_joblib(self, data_generator, fit_kwds, parallel_backend,
init_kwds_generator=None):
"""Performs the distributed estimation in parallel using joblib
Parameters
----------
data_generator : generator
A generator that produces a sequence of tuples where the first
element in the tuple corresponds to an endog array and the
second element corresponds to an exog array.
fit_kwds : dict-like
Keywords needed for the model fitting.
parallel_backend : None or joblib parallel_backend object
used to allow support for more complicated backends,
ex: dask.distributed
init_kwds_generator : generator or None
Additional keyword generator that produces model init_kwds
that may vary based on data partition. The current usecase
is for WLS and GLS
Returns
-------
join_method result. For the default, _join_debiased, it returns a
p length array.
"""
from statsmodels.tools.parallel import parallel_func
par, f, n_jobs = parallel_func(_helper_fit_partition, self.partitions)
if parallel_backend is None and init_kwds_generator is None:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is not None and init_kwds_generator is None:
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds)
for pnum, (endog, exog)
in enumerate(data_generator))
elif parallel_backend is None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
elif parallel_backend is not None and init_kwds_generator is not None:
tup_gen = enumerate(zip(data_generator, init_kwds_generator))
with parallel_backend:
results_l = par(f(self, pnum, endog, exog, fit_kwds, init_kwds)
for pnum, ((endog, exog), init_kwds)
in tup_gen)
return results_l
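# A hedged usage sketch (not part of the original module): `endog_parts` and
# `exog_parts` are hypothetical lists holding one (endog, exog) partition each;
# the default debiased regularized estimation expects an `alpha` penalty
# weight in fit_kwds.
#
#     data_gen = zip(endog_parts, exog_parts)
#     mod = DistributedModel(partitions=len(endog_parts))
#     res = mod.fit(data_gen, fit_kwds={"alpha": 0.01})
#     print(res.params)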
class DistributedResults(LikelihoodModelResults):
"""
Class to contain model results
Parameters
----------
model : class instance
class instance for model used for distributed data,
this particular instance uses fake data and is really
only to allow use of methods like predict.
params : array
parameter estimates from the fit model.
"""
def __init__(self, model, params):
super(DistributedResults, self).__init__(model, params)
def predict(self, exog, *args, **kwargs):
"""Calls self.model.predict for the provided exog. See
Results.predict.
Parameters
----------
exog : array_like NOT optional
The values for which we want to predict, unlike standard
predict this is NOT optional since the data in self.model
is fake.
args, kwargs :
Some models can take additional arguments or keywords, see the
predict method of the model for the details.
Returns
-------
prediction : ndarray, pandas.Series or pandas.DataFrame
See self.model.predict
"""
return self.model.predict(self.params, exog, *args, **kwargs) |
|
statsmodels__statsmodels | miscmodels.rst | Description | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/miscmodels.rst | [
"statsmodels__statsmodels/statsmodels/miscmodels/tmodel.py",
"statsmodels__statsmodels/statsmodels/miscmodels/count.py"
] | statsmodels__statsmodels/statsmodels/miscmodels | Other Models miscmodels
statsmodels.miscmodels contains model classes that do not yet fit
into any other category, or that are basic implementations which are not
yet polished and will most likely still change. Some of these models were
written as examples for the generic maximum likelihood framework, and
there will be others that might be based on the general method of moments.
The models in this category have been checked for basic cases, but might
be more exposed to numerical problems than the complete implementations.
For example, count.Poisson has been added using only the generic maximum
likelihood framework; its standard errors are based on the numerical
evaluation of the Hessian, while discretemod.Poisson uses analytical
gradients and Hessians and will be more precise, especially in cases where
there is strong multicollinearity. On the other hand, by subclassing
GenericLikelihoodModel it is easy to add new models; another example
can be seen in the zero-inflated Poisson model in miscmodels.count. | """Linear Model with Student-t distributed errors
Because the t distribution has fatter tails than the normal distribution, it
can be used to model observations with heavier tails and observations that have
some outliers. For the latter case, the t-distribution provides more robust
estimators for mean or mean parameters (what about var?).
References
----------
Kenneth L. Lange, Roderick J. A. Little, Jeremy M. G. Taylor (1989)
Robust Statistical Modeling Using the t Distribution
Journal of the American Statistical Association
Vol. 84, No. 408 (Dec., 1989), pp. 881-896
Published by: American Statistical Association
Stable URL: http://www.jstor.org/stable/2290063
not read yet
Created on 2010-09-24
Author: josef-pktd
License: BSD
TODO
----
* add starting values based on OLS
* bugs: store_params doesn't seem to be defined, I think this was a module
global for debugging - commented out
* parameter restriction: check whether version with some fixed parameters works
"""
#mostly copied from the examples directory written for trying out generic mle.
import numpy as np
from scipy import special, stats
from statsmodels.base.model import GenericLikelihoodModel
from statsmodels.tsa.arma_mle import Arma
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
class TLinearModel(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Linear Model with t-distributed errors
This is an example for generic MLE.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def initialize(self):
# TODO: here or in __init__
self.k_vars = self.exog.shape[1]
if not hasattr(self, 'fix_df'):
self.fix_df = False
if self.fix_df is False:
# df will be estimated, no parameter restrictions
self.fixed_params = None
self.fixed_paramsmask = None
self.k_params = self.exog.shape[1] + 2
extra_params_names = ['df','scale']
else:
# df fixed
self.k_params = self.exog.shape[1] + 1
fixdf = np.nan * np.zeros(self.exog.shape[1] + 2)
fixdf[-2] = self.fix_df
self.fixed_params = fixdf
self.fixed_paramsmask = np.isnan(fixdf)
extra_params_names = ['scale']
self._set_extra_params_names(extra_params_names)
self._set_start_params()
super(TLinearModel, self).initialize()
def _set_start_params(self, start_params=None, use_kurtosis=False):
if start_params is not None:
self.start_params = start_params
else:
from statsmodels.regression.linear_model import OLS
res_ols = OLS(self.endog, self.exog).fit()
start_params = 0.1*np.ones(self.k_params)
start_params[:self.k_vars] = res_ols.params
if self.fix_df is False:
if use_kurtosis:
kurt = stats.kurtosis(res_ols.resid)
df = 6./kurt + 4
else:
df = 5
start_params[-2] = df
#TODO adjust scale for df
start_params[-1] = np.sqrt(res_ols.scale)
self.start_params = start_params
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
def nloglikeobs(self, params):
"""
Loglikelihood of linear model with t distributed errors.
Parameters
----------
params : array
The parameters of the model. The last 2 parameters are degrees of
freedom and scale.
Returns
-------
nloglike : array
The negative log likelihood of the model evaluated at `params` for each
observation defined by self.endog and self.exog.
Notes
-----
.. math:: \\ln L_{i}=\\ln\\Gamma\\left(\\tfrac{\\nu+1}{2}\\right)-\\ln\\Gamma\\left(\\tfrac{\\nu}{2}\\right)-\\tfrac{1}{2}\\ln(\\nu\\pi)-\\tfrac{\\nu+1}{2}\\ln\\left(1+z_{i}^{2}/\\nu\\right)-\\ln\\sigma,\\qquad z_{i}=(y_{i}-x_{i}^{\\prime}\\beta)/\\sigma
(nloglikeobs returns the negative of this expression for each observation.)
The t distribution is the standard t distribution and not a standardized
t distribution, which means that the scale parameter is not equal to the
standard deviation.
self.fixed_params and self.expandparams can be used to fix some
parameters. (I doubt this has been tested in this model.)
"""
#print len(params),
#store_params.append(params)
if self.fixed_params is not None:
#print 'using fixed'
params = self.expandparams(params)
beta = params[:-2]
df = params[-2]
scale = np.abs(params[-1]) #TODO check behavior around zero
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
def predict(self, params, exog=None):
if exog is None:
exog = self.exog
return np.dot(exog, params[:self.exog.shape[1]])
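# A hedged usage sketch (illustrative only): `y` and `X` are hypothetical
# arrays, with `X` already containing a constant column. The fit relies on the
# generic MLE machinery, so a derivative-free optimizer such as 'nm' is often
# a safe first choice.
#
#     mod = TLinearModel(y, X)
#     res = mod.fit(method="nm", maxiter=5000)
#     print(res.params)   # regression slopes, then df and scale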
class TArma(Arma):
'''Univariate Arma Model with t-distributed errors
This inherit all methods except loglike from tsa.arma_mle.Arma
This uses the standard t-distribution, the implied variance of
the error is not equal to scale, but ::
error_variance = df/(df-2)*scale**2
Notes
-----
This might be replaced by a standardized t-distribution with scale**2
equal to variance
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
#add for Jacobian calculation bsejac in GenericMLE, copied from loglike
def nloglikeobs(self, params):
"""
Loglikelihood for arma model for each observation, t-distribute
Notes
-----
The ancillary parameter is assumed to be the last element of
the params vector
"""
errorsest = self.geterrors(params[:-2])
#sigma2 = np.maximum(params[-1]**2, 1e-6) #do I need this
#axis = 0
#nobs = len(errorsest)
df = params[-2]
scale = np.abs(params[-1])
llike = - stats.t._logpdf(errorsest/scale, df) + np_log(scale)
return llike
#TODO rename fit_mle -> fit, fit -> fit_ls
def fit_mle(self, order, start_params=None, method='nm', maxiter=5000,
tol=1e-08, **kwds):
nar, nma = order
if start_params is not None:
if len(start_params)!= nar + nma + 2:
raise ValueError('start_param need sum(order) + 2 elements')
else:
start_params = np.concatenate((0.05*np.ones(nar + nma), [5, 1]))
res = super(TArma, self).fit_mle(order=order,
start_params=start_params,
method=method, maxiter=maxiter,
tol=tol, **kwds)
return res
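# A hedged usage sketch (illustrative only): `y` is a hypothetical univariate
# time series; order=(1, 1) requests an ARMA(1, 1) model with t-distributed
# errors.
#
#     mod = TArma(y)
#     res = mod.fit_mle(order=(1, 1))
#     print(res.params)   # AR and MA coefficients, then df and scale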
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 08:34:59 2010
Author: josef-pktd
changes:
added offset and zero-inflated version of Poisson
- kind of ok, need better test cases,
- a nan in ZIP bse, need to check hessian calculations
- found error in ZIP loglike
- all tests pass with
Issues
------
* If true model is not zero-inflated then numerical Hessian for ZIP has zeros
for the inflation probability and is not invertible.
-> hessian inverts and bse look ok if row and column are dropped, pinv also works
* GenericMLE: still get somewhere (where?)
"CacheWriteWarning: The attribute 'bse' cannot be overwritten"
* bfgs is too fragile, doesn't come back
* `nm` is slow but seems to work
* need good start_params and their use in genericmle needs to be checked for
consistency, set as attribute or method (called as attribute)
* numerical hessian needs better scaling
* check taking parts out of the loop, e.g. factorial(endog) could be precalculated
"""
import numpy as np
from scipy import stats
from scipy.special import factorial
from statsmodels.base.model import GenericLikelihoodModel
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
class NonlinearDeltaCov(object):
'''Asymptotic covariance by Deltamethod
the function is designed for 2d array, with rows equal to
the number of equations and columns equal to the number
of parameters. 1d params work by chance?
fun: R^{m*k) -> R^{m} where m is number of equations and k is
the number of parameters.
equations follow Greene
'''
def __init__(self, fun, params, cov_params):
self.fun = fun
self.params = params
self.cov_params = cov_params
def grad(self, params=None, **kwds):
if params is None:
params = self.params
kwds.setdefault('epsilon', 1e-4)
from statsmodels.tools.numdiff import approx_fprime
return approx_fprime(params, self.fun, **kwds)
def cov(self):
g = self.grad()
covar = np.dot(np.dot(g, self.cov_params), g.T)
return covar
def expected(self):
# rename: misnomer, this is the MLE of the fun
return self.fun(self.params)
def wald(self, value):
m = self.expected()
v = self.cov()
df = np.size(m)
diff = m - value
lmstat = np.dot(np.dot(diff.T, np.linalg.inv(v)), diff)
return lmstat, stats.chi2.sf(lmstat, df)
class PoissonGMLE(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same
statistical model as discretemod.Poisson.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log likelihood of the model evaluated at `params`
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
XB = np.dot(self.exog, params)
endog = self.endog
return np.exp(XB) - endog*XB + np.log(factorial(endog))
def predict_distribution(self, exog):
'''return frozen scipy.stats distribution with mu at estimated prediction
'''
if not hasattr(self, "result"):
# TODO: why would this be ValueError instead of AttributeError?
# TODO: Why even make this a Model attribute in the first place?
# It belongs on the Results class
raise ValueError
else:
result = self.result
params = result.params
mu = np.exp(np.dot(exog, params))
return stats.poisson(mu, loc=0)
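# A hedged usage sketch (illustrative only): `y` holds counts and `X` is a
# hypothetical design matrix that includes a constant. Standard errors come
# from the numerically evaluated Hessian of the generic likelihood machinery.
#
#     mod = PoissonGMLE(y, X)
#     res = mod.fit(start_params=0.001 * np.ones(X.shape[1]), method="nm")
#     print(res.params, res.bse)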
class PoissonOffsetGMLE(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same
statistical model as discretemod.Poisson but adds offset
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def __init__(self, endog, exog=None, offset=None, missing='none', **kwds):
# let them be none in case user wants to use inheritance
if offset is not None:
if offset.ndim == 1:
offset = offset[:,None] #need column
self.offset = offset.ravel()
else:
self.offset = 0.
super(PoissonOffsetGMLE, self).__init__(endog, exog, missing=missing,
**kwds)
#this was added temporarily for bug-hunting, but shouldn't be needed
# def loglike(self, params):
# return -self.nloglikeobs(params).sum(0)
# original copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log likelihood of the model evaluated at `params`
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
XB = self.offset + np.dot(self.exog, params)
endog = self.endog
nloglik = np.exp(XB) - endog*XB + np.log(factorial(endog))
return nloglik
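# A hedged usage sketch (illustrative only): `y`, `X` and `exposure` are
# hypothetical arrays; the offset enters the linear predictor directly, so a
# log-exposure is the usual choice.
#
#     mod = PoissonOffsetGMLE(y, X, offset=np.log(exposure))
#     res = mod.fit(start_params=0.1 * np.ones(X.shape[1]), method="nm")
#     print(res.params)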
class PoissonZiGMLE(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Poisson Model
This is an example for generic MLE which has the same statistical model
as discretemod.Poisson but adds offset and zero-inflation.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
There are numerical problems if there is no zero-inflation.
'''
def __init__(self, endog, exog=None, offset=None, missing='none', **kwds):
# let them be none in case user wants to use inheritance
super(PoissonZiGMLE, self).__init__(endog, exog, missing=missing,
**kwds)
if offset is not None:
if offset.ndim == 1:
offset = offset[:,None] #need column
self.offset = offset.ravel() #which way?
else:
self.offset = 0.
#TODO: it's not standard pattern to use default exog
if exog is None:
self.exog = np.ones((self.nobs,1))
self.nparams = self.exog.shape[1]
#what's the shape in regression for exog if only constant
self.start_params = np.hstack((np.ones(self.nparams), 0))
self.cloneattr = ['start_params']
#needed for t_test and summary
self.exog_names.append('zi')
# original copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
Loglikelihood of Poisson model
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
The negative log likelihood of the model evaluated at `params`
Notes
--------
.. math:: \\ln L=\\sum_{i=1}^{n}\\left[-\\lambda_{i}+y_{i}x_{i}^{\\prime}\\beta-\\ln y_{i}!\\right]
"""
beta = params[:-1]
gamm = 1 / (1 + np.exp(params[-1])) #check this
# replace with np.dot(self.exogZ, gamma)
#print(np.shape(self.offset), self.exog.shape, beta.shape
XB = self.offset + np.dot(self.exog, beta)
endog = self.endog
nloglik = -np.log(1-gamm) + np.exp(XB) - endog*XB + np.log(factorial(endog))
nloglik[endog==0] = - np.log(gamm + np.exp(-nloglik[endog==0]))
return nloglik |
statsmodels__statsmodels | mixed_glm.rst | Description | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/mixed_glm.rst | [
"statsmodels__statsmodels/statsmodels/genmod/bayes_mixed_glm.py"
] | Generalized Linear Mixed Effects Models
Generalized Linear Mixed Effects (GLIMMIX) models are generalized linear
models with random effects in the linear predictors. Statsmodels
currently supports estimation of binomial and Poisson GLIMMIX models
using two Bayesian methods: the Laplace approximation to the posterior,
and a variational Bayes approximation to the posterior. Both methods
provide point estimates (posterior means) and assessments of uncertainty
(posterior standard deviations).
The current implementation only supports independent random effects.
Technical Documentation
Unlike Statsmodels mixed linear models, the GLIMMIX implementation is
not group-based. Groups are created by interacting all random effects
with a categorical variable. Note that this creates large, sparse random
effects design matrices exog_vc. Internally, exog_vc is converted to a
scipy sparse matrix. When passing the arguments directly to the class
initializer, a sparse matrix may be passed. When using formulas, a dense
matrix is created then converted to sparse. For very large problems, it
may not be feasible to use formulas due to the size of this dense
intermediate matrix. | r"""
Bayesian inference for generalized linear mixed models.
Currently only families without additional scale or shape parameters
are supported (binomial and Poisson).
Two estimation approaches are supported: Laplace approximation
('maximum a posteriori'), and variational Bayes (mean field
approximation to the posterior distribution).
All realizations of random effects are modeled to be mutually
independent in this implementation.
The `exog_vc` matrix is the design matrix for the random effects.
Every column of `exog_vc` corresponds to an independent realization of
a random effect. These random effects have mean zero and an unknown
standard deviation. The standard deviation parameters are constrained
to be equal within subsets of the columns. When not using formulas,
these subsets are specified through the parameter `ident`. `ident`
must have the same length as the number of columns of `exog_vc`, and
two columns whose `ident` values are equal have the same standard
deviation. When formulas are used, the columns of `exog_vc` derived
from a common formula are constrained to have the same standard
deviation.
In many applications, `exog_vc` will be sparse. A sparse matrix may
be passed when constructing a model class. If a dense matrix is
passed, it will be converted internally to a sparse matrix. There
currently is no way to avoid creating a temporary dense version of
`exog_vc` when using formulas.
Model and parameterization
--------------------------
The joint density of data and parameters factors as:
.. math::
p(y | vc, fep) p(vc | vcp) p(vcp) p(fe)
The terms :math:`p(vcp)` and :math:`p(fe)` are prior distributions
that are taken to be Gaussian (the :math:`vcp` parameters are log
standard deviations so the standard deviations have log-normal
distributions). The random effects distribution :math:`p(vc | vcp)`
is independent Gaussian (random effect realizations are independent
within and between values of the `ident` array). The model
:math:`p(y | vc, fep)` depends on the specific GLM being fit.
"""
import numpy as np
from scipy.optimize import minimize
from scipy import sparse
import statsmodels.base.model as base
from statsmodels.iolib import summary2
from statsmodels.genmod import families
import pandas as pd
import warnings
import patsy
# Gauss-Legendre weights
glw = [
[0.2955242247147529, -0.1488743389816312],
[0.2955242247147529, 0.1488743389816312],
[0.2692667193099963, -0.4333953941292472],
[0.2692667193099963, 0.4333953941292472],
[0.2190863625159820, -0.6794095682990244],
[0.2190863625159820, 0.6794095682990244],
[0.1494513491505806, -0.8650633666889845],
[0.1494513491505806, 0.8650633666889845],
[0.0666713443086881, -0.9739065285171717],
[0.0666713443086881, 0.9739065285171717],
]
_init_doc = r"""
Fit a generalized linear mixed model using Bayesian methods.
The class implements the Laplace approximation to the posterior
distribution (`fit_map`) and a variational Bayes approximation to
the posterior (`fit_vb`). See the two fit method docstrings for
more information about the fitting approaches.
Parameters
----------
endog : array_like
Vector of response values.
exog : array_like
Array of covariates for the fixed effects part of the mean
structure.
exog_vc : array_like
Array of covariates for the random part of the model. A
scipy.sparse array may be provided, or else the passed
array will be converted to sparse internally.
ident : array_like
Array of integer labels showing which random terms (columns
of `exog_vc`) have a common variance.
vcp_p : float
Prior standard deviation for variance component parameters
(the prior standard deviation of log(s) is vcp_p, where s is
the standard deviation of a random effect).
fe_p : float
Prior standard deviation for fixed effects parameters.
family : statsmodels.genmod.families instance
The GLM family.
fep_names : list of strings
The names of the fixed effects parameters (corresponding to
columns of exog). If None, default names are constructed.
vcp_names : list of strings
The names of the variance component parameters (corresponding
to distinct labels in ident). If None, default names are
constructed.
vc_names : list of strings
The names of the random effect realizations.
Returns
-------
MixedGLMResults object
Notes
-----
There are three types of values in the posterior distribution:
fixed effects parameters (fep), corresponding to the columns of
`exog`, random effects realizations (vc), corresponding to the
columns of `exog_vc`, and the standard deviations of the random
effects realizations (vcp), corresponding to the unique integer
labels in `ident`.
All random effects are modeled as being independent Gaussian
values (given the variance structure parameters). Every column of
`exog_vc` has a distinct realized random effect that is used to
form the linear predictors. The elements of `ident` determine the
distinct variance structure parameters. Two random effect
realizations that have the same value in `ident` have the same
variance. When fitting with a formula, `ident` is constructed
internally (each element of `vc_formulas` yields a distinct label
in `ident`).
The random effect standard deviation parameters (`vcp`) have
log-normal prior distributions with mean 0 and standard deviation
`vcp_p`.
Note that for some families, e.g. Binomial, the posterior mode may
be difficult to find numerically if `vcp_p` is set to too large of
a value. Setting `vcp_p` to 0.5 seems to work well.
The prior for the fixed effects parameters is Gaussian with mean 0
and standard deviation `fe_p`.
Examples
--------{example}
References
----------
Introduction to generalized linear mixed models:
https://stats.idre.ucla.edu/other/mult-pkg/introduction-to-generalized-linear-mixed-models
SAS documentation:
https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_intromix_a0000000215.htm
An assessment of estimation methods for generalized linear mixed
models with binary outcomes
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3866838/
"""
# The code in the example should be identical to what appears in
# the test_doc_examples unit test
_logit_example = """
A binomial (logistic) random effects model with random intercepts
for villages and random slopes for each year within each village:
>>> random = {"a": '0 + C(Village)', "b": '0 + C(Village)*year_cen'}
>>> model = BinomialBayesMixedGLM.from_formula(
'y ~ year_cen', random, data)
>>> result = model.fit_vb()
"""
# The code in the example should be identical to what appears in
# the test_doc_examples unit test
_poisson_example = """
A Poisson random effects model with random intercepts for villages
and random slopes for each year within each village:
>>> random = {"a": '0 + C(Village)', "b": '0 + C(Village)*year_cen'}
>>> model = PoissonBayesMixedGLM.from_formula(
'y ~ year_cen', random, data)
>>> result = model.fit_vb()
"""
class _BayesMixedGLM(base.Model):
def __init__(self,
endog,
exog,
exog_vc=None,
ident=None,
family=None,
vcp_p=1,
fe_p=2,
fep_names=None,
vcp_names=None,
vc_names=None,
**kwargs):
if exog.ndim == 1:
if isinstance(exog, np.ndarray):
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
if exog.ndim!= 2:
msg = "'exog' must have one or two columns"
raise ValueError(msg)
if exog_vc.ndim == 1:
if isinstance(exog_vc, np.ndarray):
exog_vc = exog_vc[:, None]
else:
exog_vc = pd.DataFrame(exog_vc)
if exog_vc.ndim!= 2:
msg = "'exog_vc' must have one or two columns"
raise ValueError(msg)
ident = np.asarray(ident)
if ident.ndim!= 1:
msg = "ident must be a one-dimensional array"
raise ValueError(msg)
if len(ident)!= exog_vc.shape[1]:
msg = "len(ident) should match the number of columns of exog_vc"
raise ValueError(msg)
if not np.issubdtype(ident.dtype, np.integer):
msg = "ident must have an integer dtype"
raise ValueError(msg)
# Get the fixed effects parameter names
if fep_names is None:
if hasattr(exog, "columns"):
fep_names = exog.columns.tolist()
else:
fep_names = ["FE_%d" % (k + 1) for k in range(exog.shape[1])]
# Get the variance parameter names
if vcp_names is None:
vcp_names = ["VC_%d" % (k + 1) for k in range(int(max(ident)) + 1)]
else:
if len(vcp_names)!= len(set(ident)):
msg = "The lengths of vcp_names and ident should be the same"
raise ValueError(msg)
if not sparse.issparse(exog_vc):
exog_vc = sparse.csr_matrix(exog_vc)
ident = ident.astype(np.int)
vcp_p = float(vcp_p)
fe_p = float(fe_p)
# Number of fixed effects parameters
if exog is None:
k_fep = 0
else:
k_fep = exog.shape[1]
# Number of variance component structure parameters and
# variance component realizations.
if exog_vc is None:
k_vc = 0
k_vcp = 0
else:
k_vc = exog_vc.shape[1]
k_vcp = max(ident) + 1
# power might be better but not available in older scipy
exog_vc2 = exog_vc.multiply(exog_vc)
super(_BayesMixedGLM, self).__init__(endog, exog, **kwargs)
self.exog_vc = exog_vc
self.exog_vc2 = exog_vc2
self.ident = ident
self.family = family
self.k_fep = k_fep
self.k_vc = k_vc
self.k_vcp = k_vcp
self.fep_names = fep_names
self.vcp_names = vcp_names
self.vc_names = vc_names
self.fe_p = fe_p
self.vcp_p = vcp_p
self.names = fep_names + vcp_names
if vc_names is not None:
self.names += vc_names
def _unpack(self, vec):
ii = 0
# Fixed effects parameters
fep = vec[:ii + self.k_fep]
ii += self.k_fep
# Variance component structure parameters (standard
# deviations). These are on the log scale. The standard
# deviation for random effect j is exp(vcp[ident[j]]).
vcp = vec[ii:ii + self.k_vcp]
ii += self.k_vcp
# Random effect realizations
vc = vec[ii:]
return fep, vcp, vc
def logposterior(self, params):
"""
The overall log-density: log p(y, fe, vc, vcp).
This differs by an additive constant from the log posterior
log p(fe, vc, vcp | y).
"""
fep, vcp, vc = self._unpack(params)
# Contributions from p(y | x, vc)
lp = 0
if self.k_fep > 0:
lp += np.dot(self.exog, fep)
if self.k_vc > 0:
lp += self.exog_vc.dot(vc)
mu = self.family.link.inverse(lp)
ll = self.family.loglike(self.endog, mu)
if self.k_vc > 0:
# Contributions from p(vc | vcp)
vcp0 = vcp[self.ident]
s = np.exp(vcp0)
ll -= 0.5 * np.sum(vc**2 / s**2) + np.sum(vcp0)
# Contributions from p(vc)
ll -= 0.5 * np.sum(vcp**2 / self.vcp_p**2)
# Contributions from p(fep)
if self.k_fep > 0:
ll -= 0.5 * np.sum(fep**2 / self.fe_p**2)
return ll
def logposterior_grad(self, params):
"""
The gradient of the log posterior.
"""
fep, vcp, vc = self._unpack(params)
lp = 0
if self.k_fep > 0:
lp += np.dot(self.exog, fep)
if self.k_vc > 0:
lp += self.exog_vc.dot(vc)
mu = self.family.link.inverse(lp)
score_factor = (self.endog - mu) / self.family.link.deriv(mu)
score_factor /= self.family.variance(mu)
te = [None, None, None]
# Contributions from p(y | x, z, vc)
if self.k_fep > 0:
te[0] = np.dot(score_factor, self.exog)
if self.k_vc > 0:
te[2] = self.exog_vc.transpose().dot(score_factor)
if self.k_vc > 0:
# Contributions from p(vc | vcp)
# vcp0 = vcp[self.ident]
# s = np.exp(vcp0)
# ll -= 0.5 * np.sum(vc**2 / s**2) + np.sum(vcp0)
vcp0 = vcp[self.ident]
s = np.exp(vcp0)
u = vc**2 / s**2 - 1
te[1] = np.bincount(self.ident, weights=u)
te[2] -= vc / s**2
# Contributions from p(vcp)
# ll -= 0.5 * np.sum(vcp**2 / self.vcp_p**2)
te[1] -= vcp / self.vcp_p**2
# Contributions from p(fep)
if self.k_fep > 0:
te[0] -= fep / self.fe_p**2
te = [x for x in te if x is not None]
return np.concatenate(te)
def _get_start(self):
start_fep = np.zeros(self.k_fep)
start_vcp = np.ones(self.k_vcp)
start_vc = np.random.normal(size=self.k_vc)
start = np.concatenate((start_fep, start_vcp, start_vc))
return start
@classmethod
def from_formula(cls,
formula,
vc_formulas,
data,
family=None,
vcp_p=1,
fe_p=2):
"""
Fit a BayesMixedGLM using a formula.
Parameters
----------
formula : string
Formula for the endog and fixed effects terms (use ~ to
separate dependent and independent expressions).
vc_formulas : dictionary
vc_formulas[name] is a one-sided formula that creates one
collection of random effects with a common variance
prameter. If using categorical (factor) variables to
produce variance components, note that generally `0 +...`
should be used so that an intercept is not included.
data : data frame
The data to which the formulas are applied.
family : genmod.families instance
A GLM family.
vcp_p : float
The prior standard deviation for the logarithms of the standard
deviations of the random effects.
fe_p : float
The prior standard deviation for the fixed effects parameters.
"""
ident = []
exog_vc = []
vcp_names = []
j = 0
for na, fml in vc_formulas.items():
mat = patsy.dmatrix(fml, data, return_type='dataframe')
exog_vc.append(mat)
vcp_names.append(na)
ident.append(j * np.ones(mat.shape[1], dtype=np.integer))
j += 1
exog_vc = pd.concat(exog_vc, axis=1)
vc_names = exog_vc.columns.tolist()
ident = np.concatenate(ident)
model = super(_BayesMixedGLM, cls).from_formula(
formula,
data=data,
family=family,
subset=None,
exog_vc=exog_vc,
ident=ident,
vc_names=vc_names,
vcp_names=vcp_names,
fe_p=fe_p,
vcp_p=vcp_p)
return model
def fit(self, method="BFGS", minim_opts=None):
"""
fit is equivalent to fit_map.
See fit_map for parameter information.
Use `fit_vb` to fit the model using variational Bayes.
"""
return self.fit_map(method, minim_opts)
def fit_map(self, method="BFGS", minim_opts=None, scale_fe=False):
"""
Construct the Laplace approximation to the posterior
distribution.
Parameters
----------
method : string
Optimization method for finding the posterior mode.
minim_opts : dict-like
Options passed to scipy.minimize.
scale_fe : bool
If True, the columns of the fixed effects design matrix
are centered and scaled to unit variance before fitting
the model. The results are back-transformed so that the
results are presented on the original scale.
Returns
-------
BayesMixedGLMResults instance.
"""
if scale_fe:
mn = self.exog.mean(0)
sc = self.exog.std(0)
self._exog_save = self.exog
self.exog = self.exog.copy()
ixs = np.flatnonzero(sc > 1e-8)
self.exog[:, ixs] -= mn[ixs]
self.exog[:, ixs] /= sc[ixs]
def fun(params):
return -self.logposterior(params)
def grad(params):
return -self.logposterior_grad(params)
start = self._get_start()
r = minimize(fun, start, method=method, jac=grad, options=minim_opts)
if not r.success:
msg = ("Laplace fitting did not converge, |gradient|=%.6f" %
np.sqrt(np.sum(r.jac**2)))
warnings.warn(msg)
from statsmodels.tools.numdiff import approx_fprime
hess = approx_fprime(r.x, grad)
cov = np.linalg.inv(hess)
params = r.x
if scale_fe:
self.exog = self._exog_save
del self._exog_save
params[ixs] /= sc[ixs]
cov[np.ix_(ixs, ixs)] /= np.outer(sc[ixs], sc[ixs])  # np.ix_ keeps the scaling in place; chained fancy indexing would only modify a copy
return BayesMixedGLMResults(self, params, cov, optim_retvals=r)
def predict(self, params, exog=None, linear=False):
"""
Return the fitted mean structure.
Parameters
----------
params : array_like
The parameter vector, may be the full parameter vector, or may
be truncated to include only the mean parameters.
exog : array_like
The design matrix for the mean structure. If omitted, use the
model's design matrix.
linear : bool
If True, return the linear predictor without passing through the
link function.
Returns
-------
A 1-dimensional array of predicted values
"""
if exog is None:
exog = self.exog
q = exog.shape[1]
pr = np.dot(exog, params[0:q])
if not linear:
pr = self.family.link.inverse(pr)
return pr
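# A hedged usage sketch for the array-based interface (illustrative only):
# `y`, `x_fe`, `x_vc` and `ident` are hypothetical arrays; `x_vc` may be a
# scipy.sparse matrix and `ident` holds one integer label per column of
# `x_vc`, grouping columns that share a variance parameter. The public
# subclasses defined below are the intended entry points.
#
#     model = BinomialBayesMixedGLM(y, x_fe, exog_vc=x_vc, ident=ident)
#     result = model.fit_map()    # Laplace ('maximum a posteriori') fit
#     print(result.summary())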
class _VariationalBayesMixedGLM(object):
"""
A mixin providing generic (not family-specific) methods for
variational Bayes mean field fitting.
"""
# Integration range (from -rng to +rng). The integrals are with
# respect to a standard Gaussian distribution so (-5, 5) will be
# sufficient in many cases.
rng = 5
verbose = False
# Returns the mean and variance of the linear predictor under the
# given distribution parameters.
def _lp_stats(self, fep_mean, fep_sd, vc_mean, vc_sd):
tm = np.dot(self.exog, fep_mean)
tv = np.dot(self.exog**2, fep_sd**2)
tm += self.exog_vc.dot(vc_mean)
tv += self.exog_vc2.dot(vc_sd**2)
return tm, tv
def vb_elbo_base(self, h, tm, fep_mean, vcp_mean, vc_mean, fep_sd, vcp_sd,
vc_sd):
"""
Returns the evidence lower bound (ELBO) for the model.
This function calculates the family-specific ELBO function
based on information provided from a subclass.
Parameters
----------
h : function mapping 1d vector to 1d vector
The contribution of the model to the ELBO function can be
expressed as y_i*lp_i + Eh_i(z), where y_i and lp_i are
the response and linear predictor for observation i, and z
is a standard normal rangom variable. This formulation
can be achieved for any GLM with a canonical link
function.
"""
# p(y | vc) contributions
iv = 0
for w in glw:
z = self.rng * w[1]
iv += w[0] * h(z) * np.exp(-z**2 / 2)
iv /= np.sqrt(2 * np.pi)
iv *= self.rng
iv += self.endog * tm
iv = iv.sum()
# p(vc | vcp) * p(vcp) * p(fep) contributions
iv += self._elbo_common(fep_mean, fep_sd, vcp_mean, vcp_sd, vc_mean,
vc_sd)
r = (iv + np.sum(np.log(fep_sd)) + np.sum(np.log(vcp_sd)) + np.sum(
np.log(vc_sd)))
return r
def vb_elbo_grad_base(self, h, tm, tv, fep_mean, vcp_mean, vc_mean, fep_sd,
vcp_sd, vc_sd):
"""
Return the gradient of the ELBO function.
See vb_elbo_base for parameters.
"""
fep_mean_grad = 0.
fep_sd_grad = 0.
vcp_mean_grad = 0.
vcp_sd_grad = 0.
vc_mean_grad = 0.
vc_sd_grad = 0.
# p(y | vc) contributions
for w in glw:
z = self.rng * w[1]
u = h(z) * np.exp(-z**2 / 2) / np.sqrt(2 * np.pi)
r = u / np.sqrt(tv)
fep_mean_grad += w[0] * np.dot(u, self.exog)
vc_mean_grad += w[0] * self.exog_vc.transpose().dot(u)
fep_sd_grad += w[0] * z * np.dot(r, self.exog**2 * fep_sd)
v = self.exog_vc2.multiply(vc_sd).transpose().dot(r)
v = np.squeeze(np.asarray(v))
vc_sd_grad += w[0] * z * v
fep_mean_grad *= self.rng
vc_mean_grad *= self.rng
fep_sd_grad *= self.rng
vc_sd_grad *= self.rng
fep_mean_grad += np.dot(self.endog, self.exog)
vc_mean_grad += self.exog_vc.transpose().dot(self.endog)
(fep_mean_grad_i, fep_sd_grad_i, vcp_mean_grad_i, vcp_sd_grad_i,
vc_mean_grad_i, vc_sd_grad_i) = self._elbo_grad_common(
fep_mean, fep_sd, vcp_mean, vcp_sd, vc_mean, vc_sd)
fep_mean_grad += fep_mean_grad_i
fep_sd_grad += fep_sd_grad_i
vcp_mean_grad += vcp_mean_grad_i
vcp_sd_grad += vcp_sd_grad_i
vc_mean_grad += vc_mean_grad_i
vc_sd_grad += vc_sd_grad_i
fep_sd_grad += 1 / fep_sd
vcp_sd_grad += 1 / vcp_sd
vc_sd_grad += 1 / vc_sd
mean_grad = np.concatenate((fep_mean_grad, vcp_mean_grad,
vc_mean_grad))
sd_grad = np.concatenate((fep_sd_grad, vcp_sd_grad, vc_sd_grad))
if self.verbose:
print(
"|G|=%f" % np.sqrt(np.sum(mean_grad**2) + np.sum(sd_grad**2)))
return mean_grad, sd_grad
def fit_vb(self,
mean=None,
sd=None,
fit_method="BFGS",
minim_opts=None,
scale_fe=False,
verbose=False):
"""
Fit a model using the variational Bayes mean field approximation.
Parameters
----------
mean : array_like
Starting value for VB mean vector
sd : array_like
Starting value for VB standard deviation vector
fit_method : string
Algorithm for scipy.minimize
minim_opts : dict-like
Options passed to scipy.minimize
scale_fe : bool
If true, the columns of the fixed effects design matrix
are centered and scaled to unit variance before fitting
the model. The results are back-transformed so that the
results are presented on the original scale.
verbose : bool
If True, print the gradient norm to the screen each time
it is calculated.
Notes
-----
The goal is to find a factored Gaussian approximation
q1*q2*... to the posterior distribution, approximately
minimizing the KL divergence from the factored approximation
to the actual posterior. The KL divergence, or ELBO function
has the form
E* log p(y, fe, vcp, vc) - E* log q
where E* is expectation with respect to the product of qj.
References
----------
Blei, Kucukelbir, McAuliffe (2017). Variational Inference: A
review for Statisticians
https://arxiv.org/pdf/1601.00670.pdf
"""
self.verbose = verbose
if scale_fe:
mn = self.exog.mean(0)
sc = self.exog.std(0)
self._exog_save = self.exog
self.exog = self.exog.copy()
ixs = np.flatnonzero(sc > 1e-8)
self.exog[:, ixs] -= mn[ixs]
self.exog[:, ixs] /= sc[ixs]
n = self.k_fep + self.k_vcp + self.k_vc
ml = self.k_fep + self.k_vcp + self.k_vc
if mean is None:
m = np.zeros(n)
else:
if len(mean)!= ml:
raise ValueError(
"mean has incorrect length, %d!= %d" % (len(mean), ml))
m = mean.copy()
if sd is None:
s = -0.5 + 0.1 * np.random.normal(size=n)
else:
if len(sd)!= ml:
raise ValueError(
"sd has incorrect length, %d!= %d" % (len(sd), ml))
# s is parameterized on the log-scale internally when
# optimizing the ELBO function (this is transparent to the
# caller)
s = np.log(sd)
# Don't allow the variance parameter starting mean values to
# be too small.
i1, i2 = self.k_fep, self.k_fep + self.k_vcp
m[i1:i2] = np.where(m[i1:i2] < -1, -1, m[i1:i2])
# Don't allow the posterior standard deviation starting values
# to be too small.
s = np.where(s < -1, -1, s)
def elbo(x):
n = len(x) // 2
return -self.vb_elbo(x[:n], np.exp(x[n:]))
def elbo_grad(x):
n = len(x) // 2
gm, gs = self.vb_elbo_grad(x[:n], np.exp(x[n:]))
gs *= np.exp(x[n:])
return -np.concatenate((gm, gs))
start = np.concatenate((m, s))
mm = minimize(
elbo, start, jac=elbo_grad, method=fit_method, options=minim_opts)
if not mm.success:
warnings.warn("VB fitting did not converge")
n = len(mm.x) // 2
params = mm.x[0:n]
va = np.exp(2 * mm.x[n:])
if scale_fe:
self.exog = self._exog_save
del self._exog_save
params[ixs] /= sc[ixs]
va[ixs] /= sc[ixs]**2
return BayesMixedGLMResults(self, params, va, mm)
# Handle terms in the ELBO that are common to all models.
def _elbo_common(self, fep_mean, fep_sd, vcp_mean, vcp_sd, vc_mean, vc_sd):
iv = 0
# p(vc | vcp) contributions
m = vcp_mean[self.ident]
s = vcp_sd[self.ident]
iv -= np.sum((vc_mean**2 + vc_sd**2) * np.exp(2 * (s**2 - m))) / 2
iv -= np.sum(m)
# p(vcp) contributions
iv -= 0.5 * (vcp_mean**2 + vcp_sd**2).sum() / self.vcp_p**2
# p(b) contributions
iv -= 0.5 * (fep_mean**2 + fep_sd**2).sum() / self.fe_p**2
return iv
def _elbo_grad_common(self, fep_mean, fep_sd, vcp_mean, vcp_sd, vc_mean,
vc_sd):
# p(vc | vcp) contributions
m = vcp_mean[self.ident]
s = vcp_sd[self.ident]
u = vc_mean**2 + vc_sd**2
ve = np.exp(2 * (s**2 - m))
dm = u * ve - 1
ds = -2 * u * ve * s
vcp_mean_grad = np.bincount(self.ident, weights=dm)
vcp_sd_grad = np.bincount(self.ident, weights=ds)
vc_mean_grad = -vc_mean.copy() * ve
vc_sd_grad = -vc_sd.copy() * ve
# p(vcp) contributions
vcp_mean_grad -= vcp_mean / self.vcp_p**2
vcp_sd_grad -= vcp_sd / self.vcp_p**2
# p(b) contributions
fep_mean_grad = -fep_mean.copy() / self.fe_p**2
fep_sd_grad = -fep_sd.copy() / self.fe_p**2
return (fep_mean_grad, fep_sd_grad, vcp_mean_grad, vcp_sd_grad,
vc_mean_grad, vc_sd_grad)
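# A hedged usage sketch (illustrative only): `model` stands for a hypothetical
# instance of one of the public subclasses defined below (BinomialBayesMixedGLM
# or PoissonBayesMixedGLM), which mix in the variational Bayes machinery above.
#
#     result = model.fit_vb(scale_fe=True)   # mean field approximation
#     print(result.summary())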
class BayesMixedGLMResults(object):
"""
Class to hold results from a Bayesian estimation of a Mixed GLM model.
Attributes
----------
fe_mean : array_like
Posterior mean of the fixed effects coefficients.
fe_sd : array_like
Posterior standard deviation of the fixed effects coefficients
vcp_mean : array_like
Posterior mean of the logged variance component standard
deviations.
vcp_sd : array_like
Posterior standard deviation of the logged variance component
standard deviations.
vc_mean : array_like
Posterior mean of the random coefficients
vc_sd : array_like
Posterior standard deviation of the random coefficients
"""
def __init__(self, model, params, cov_params, optim_retvals=None):
self.model = model
self.params = params
self._cov_params = cov_params
self.optim_retvals = optim_retvals
self.fe_mean, self.vcp_mean, self.vc_mean = (model._unpack(params))
if cov_params.ndim == 2:
cp = np.diag(cov_params)
else:
cp = cov_params
self.fe_sd, self.vcp_sd, self.vc_sd = model._unpack(cp)
self.fe_sd = np.sqrt(self.fe_sd)
self.vcp_sd = np.sqrt(self.vcp_sd)
self.vc_sd = np.sqrt(self.vc_sd)
def cov_params(self):
if hasattr(self.model.data, "frame"):
# Return the covariance matrix as a dataframe or series
na = (self.model.fep_names + self.model.vcp_names +
self.model.vc_names)
if self._cov_params.ndim == 2:
return pd.DataFrame(self._cov_params, index=na, columns=na)
else:
return pd.Series(self._cov_params, index=na)
# Return the covariance matrix as a ndarray
return self._cov_params
def summary(self):
df = pd.DataFrame()
m = self.model.k_fep + self.model.k_vcp
df["Type"] = (["M" for k in range(self.model.k_fep)] +
["V" for k in range(self.model.k_vcp)])
df["Post. Mean"] = self.params[0:m]
if self._cov_params.ndim == 2:
v = np.diag(self._cov_params)[0:m]
df["Post. SD"] = np.sqrt(v)
else:
df["Post. SD"] = np.sqrt(self._cov_params[0:m])
# Convert variance parameters to natural scale
df["SD"] = np.exp(df["Post. Mean"])
df["SD (LB)"] = np.exp(df["Post. Mean"] - 2 * df["Post. SD"])
df["SD (UB)"] = np.exp(df["Post. Mean"] + 2 * df["Post. SD"])
df["SD"] = ["%.3f" % x for x in df.SD]
df["SD (LB)"] = ["%.3f" % x for x in df["SD (LB)"]]
df["SD (UB)"] = ["%.3f" % x for x in df["SD (UB)"]]
df.loc[df.index < self.model.k_fep, "SD"] = ""
df.loc[df.index < self.model.k_fep, "SD (LB)"] = ""
df.loc[df.index < self.model.k_fep, "SD (UB)"] = ""
df.index = self.model.fep_names + self.model.vcp_names
summ = summary2.Summary()
summ.add_title(self.model.family.__class__.__name__ +
" Mixed GLM Results")
summ.add_df(df)
summ.add_text("Parameter types are mean structure (M) and "
"variance structure (V)")
summ.add_text("Variance parameters are modeled as log "
"standard deviations")
return summ
def random_effects(self, term=None):
"""
Posterior mean and standard deviation of random effects.
Parameters
----------
term : int or None
If None, results for all random effects are returned. If
an integer, returns results for a given set of random
effects. The value of `term` refers to an element of the
`ident` vector, or to a position in the `vc_formulas`
list.
Returns
-------
Data frame of posterior means and posterior standard
deviations of random effects.
"""
z = self.vc_mean
s = self.vc_sd
na = self.model.vc_names
if term is not None:
termix = self.model.vcp_names.index(term)
ii = np.flatnonzero(self.model.ident == termix)
z = z[ii]
s = s[ii]
na = [na[i] for i in ii]
x = pd.DataFrame({"Mean": z, "SD": s})
if na is not None:
x.index = na
return x
def predict(self, exog=None, linear=False):
"""
Return predicted values for the mean structure.
Parameters
----------
exog : array_like
The design matrix for the mean structure. If None,
use the model's design matrix.
linear : bool
If True, returns the linear predictor, otherwise
transform the linear predictor using the link function.
Returns
-------
A one-dimensional array of fitted values.
"""
return self.model.predict(self.params, exog, linear)
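# A hedged usage sketch (illustrative only): `result` is a hypothetical
# BayesMixedGLMResults instance returned by fit_map or fit_vb.
#
#     print(result.summary())
#     re = result.random_effects()   # posterior means and SDs of random effects
#     fitted = result.predict()      # fitted means on the response scale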
class BinomialBayesMixedGLM(_VariationalBayesMixedGLM, _BayesMixedGLM):
__doc__ = _init_doc.format(example=_logit_example)
def __init__(self,
endog,
exog,
exog_vc,
ident,
vcp_p=1,
fe_p=2,
fep_names=None,
vcp_names=None,
vc_names=None):
super(BinomialBayesMixedGLM, self).__init__(
endog,
exog,
exog_vc=exog_vc,
ident=ident,
vcp_p=vcp_p,
fe_p=fe_p,
family=families.Binomial(),
fep_names=fep_names,
vcp_names=vcp_names,
vc_names=vc_names)
if not np.all(np.unique(endog) == np.r_[0, 1]):
msg = "endog values must be 0 and 1, and not all identical"
raise ValueError(msg)
@classmethod
def from_formula(cls, formula, vc_formulas, data, vcp_p=1, fe_p=2):
fam = families.Binomial()
x = _BayesMixedGLM.from_formula(
formula, vc_formulas, data, family=fam, vcp_p=vcp_p, fe_p=fe_p)
# Copy over to the intended class structure
mod = BinomialBayesMixedGLM(
x.endog,
x.exog,
exog_vc=x.exog_vc,
ident=x.ident,
vcp_p=x.vcp_p,
fe_p=x.fe_p,
fep_names=x.fep_names,
vcp_names=x.vcp_names,
vc_names=x.vc_names)
mod.data = x.data
return mod
def vb_elbo(self, vb_mean, vb_sd):
"""
Returns the evidence lower bound (ELBO) for the model.
"""
fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)
def h(z):
return -np.log(1 + np.exp(tm + np.sqrt(tv) * z))
return self.vb_elbo_base(h, tm, fep_mean, vcp_mean, vc_mean, fep_sd,
vcp_sd, vc_sd)
def vb_elbo_grad(self, vb_mean, vb_sd):
"""
Returns the gradient of the model's evidence lower bound (ELBO).
"""
fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)
def h(z):
u = tm + np.sqrt(tv) * z
x = np.zeros_like(u)
ii = np.flatnonzero(u > 0)
uu = u[ii]
x[ii] = 1 / (1 + np.exp(-uu))
ii = np.flatnonzero(u <= 0)
uu = u[ii]
x[ii] = np.exp(uu) / (1 + np.exp(uu))
return -x
return self.vb_elbo_grad_base(h, tm, tv, fep_mean, vcp_mean, vc_mean,
fep_sd, vcp_sd, vc_sd)
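# A hedged usage sketch mirroring the docstring example (`data` is a
# hypothetical data frame with columns 'y', 'year_cen' and 'Village'):
#
#     random = {"a": "0 + C(Village)", "b": "0 + C(Village)*year_cen"}
#     model = BinomialBayesMixedGLM.from_formula("y ~ year_cen", random, data)
#     result = model.fit_vb()
#     print(result.summary())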
class PoissonBayesMixedGLM(_VariationalBayesMixedGLM, _BayesMixedGLM):
__doc__ = _init_doc.format(example=_poisson_example)
def __init__(self,
endog,
exog,
exog_vc,
ident,
vcp_p=1,
fe_p=2,
fep_names=None,
vcp_names=None,
vc_names=None):
super(PoissonBayesMixedGLM, self).__init__(
endog=endog,
exog=exog,
exog_vc=exog_vc,
ident=ident,
vcp_p=vcp_p,
fe_p=fe_p,
family=families.Poisson(),
fep_names=fep_names,
vcp_names=vcp_names,
vc_names=vc_names)
@classmethod
def from_formula(cls,
formula,
vc_formulas,
data,
vcp_p=1,
fe_p=2,
vcp_names=None,
vc_names=None):
fam = families.Poisson()
x = _BayesMixedGLM.from_formula(
formula,
vc_formulas,
data,
family=fam,
vcp_p=vcp_p,
fe_p=fe_p)
# Copy over to the intended class structure
mod = PoissonBayesMixedGLM(
endog=x.endog,
exog=x.exog,
exog_vc=x.exog_vc,
ident=x.ident,
vcp_p=x.vcp_p,
fe_p=x.fe_p,
fep_names=x.fep_names,
vcp_names=x.vcp_names,
vc_names=x.vc_names)
mod.data = x.data
return mod
def vb_elbo(self, vb_mean, vb_sd):
"""
Returns the evidence lower bound (ELBO) for the model.
"""
fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)
def h(z):
return -np.exp(tm + np.sqrt(tv) * z)
return self.vb_elbo_base(h, tm, fep_mean, vcp_mean, vc_mean, fep_sd,
vcp_sd, vc_sd)
def vb_elbo_grad(self, vb_mean, vb_sd):
"""
Returns the gradient of the model's evidence lower bound (ELBO).
"""
fep_mean, vcp_mean, vc_mean = self._unpack(vb_mean)
fep_sd, vcp_sd, vc_sd = self._unpack(vb_sd)
tm, tv = self._lp_stats(fep_mean, fep_sd, vc_mean, vc_sd)
def h(z):
y = -np.exp(tm + np.sqrt(tv) * z)
return y
return self.vb_elbo_grad_base(h, tm, tv, fep_mean, vcp_mean, vc_mean,
fep_sd, vcp_sd, vc_sd) |
|
statsmodels__statsmodels | mixed_linear.rst | Description | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/mixed_linear.rst | [
"statsmodels__statsmodels/statsmodels/regression/mixed_linear_model.py"
] | Linear Mixed Effects Models
Linear Mixed Effects models are used for regression analyses involving
dependent data. Such data arise when working with longitudinal and other
study designs in which multiple observations are made on each subject.
Some specific linear mixed effects models are
- Random intercepts models, where all responses in a group are
additively shifted by a value that is specific to the group.
- Random slopes models, where the responses in a group follow a
(conditional) mean trajectory that is linear in the observed
covariates, with the slopes (and possibly intercepts) varying by
group.
- Variance components models, where the levels of one or more
categorical covariates are associated with draws from distributions.
These random terms additively determine the conditional mean of each
observation based on its covariate values.
The Statsmodels implementation of LME is primarily group-based, meaning
that random effects must be independently-realized for responses in
different groups. There are two types of random effects in our
implementation of mixed models: (i) random coefficients (possibly
vectors) that have an unknown covariance matrix, and (ii) random
coefficients that are independent draws from a common univariate
distribution. For both (i) and (ii), the random effects influence the
conditional mean of a group through their matrix/vector product with a
group-specific design matrix.
A simple example of random coefficients, as in (i) above, is:
$Y_{ij} = \beta_0 + \beta_1 X_{ij} + \gamma_{0i} + \gamma_{1i} X_{ij} + \epsilon_{ij}$
Here, $Y_{ij}$ is the $j^{\rm th}$ measured response for subject $i$, and
$X_{ij}$ is a covariate for this response. The "fixed effects parameters"
$\beta_0$ and $\beta_1$ are shared by all subjects, and the errors
$\epsilon_{ij}$ are independent of everything else, and identically
distributed (with mean zero). The "random effects parameters" $\gamma_{0i}$
and $\gamma_{1i}$ follow a bivariate distribution with mean zero, described
by three parameters: ${\rm var}(\gamma_{0i})$, ${\rm var}(\gamma_{1i})$, and
${\rm cov}(\gamma_{0i}, \gamma_{1i})$. There is also a parameter for
${\rm var}(\epsilon_{ij})$.
A simple example of variance components, as in (ii) above, is:
$Y_{ijk} = \beta_0 + \eta_{1i} + \eta_{2j} + \epsilon_{ijk}$
Here, $Y_{ijk}$ is the $k^{\rm th}$ measured response under conditions
$i, j$. The only "mean structure parameter" is $\beta_0$. The $\eta_{1i}$
are independent and identically distributed with zero mean and variance
$\tau_1^2$, and the $\eta_{2j}$ are independent and identically distributed
with zero mean and variance $\tau_2^2$.
Statsmodels MixedLM handles most non-crossed random effects models, and
some crossed models. To include crossed random effects in a model, it is
necessary to treat the entire dataset as a single group. The variance
components arguments to the model can then be used to define models with
various combinations of crossed and non-crossed random effects.
The Statsmodels LME framework currently supports post-estimation
inference via Wald tests and confidence intervals on the coefficients,
profile likelihood analysis, likelihood ratio testing, and AIC.
Examples
import statsmodels.api as sm
import statsmodels.formula.api as smf
data = sm.datasets.get_rdataset("dietox", "geepack").data
md = smf.mixedlm("Weight ~ Time", data, groups=data["Pig"])
mdf = md.fit()
print(mdf.summary())
Detailed examples can be found here
- Mixed LM
There are some notebook examples on the Wiki: Wiki notebooks for MixedLM
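A further illustration (a sketch assuming the same dietox data frame as above; it is not part of the original example set): a random slope for Time can be added to the random intercept through the re_formula argument, and independent variance components can be specified through the vc_formula argument.
import statsmodels.api as sm
import statsmodels.formula.api as smf
data = sm.datasets.get_rdataset("dietox", "geepack").data
md = smf.mixedlm("Weight ~ Time", data, groups=data["Pig"], re_formula="~Time")
mdf = md.fit()
print(mdf.summary())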
Technical Documentation
The data are partitioned into disjoint groups. The probability model for
group $i$ is:
$Y = X\beta + Z\gamma + Q_1\eta_1 + \cdots + Q_k\eta_k + \epsilon$
where
- $n_i$ is the number of observations in group $i$
- $Y$ is a $n_i$-dimensional response vector
- $X$ is a $n_i \times k_{fe}$-dimensional matrix of fixed effects
  coefficients
- $\beta$ is a $k_{fe}$-dimensional vector of fixed effects slopes
- $Z$ is a $n_i \times k_{re}$-dimensional matrix of random effects
  coefficients
- $\gamma$ is a $k_{re}$-dimensional random vector with mean 0 and
  covariance matrix $\Psi$; note that each group gets its own independent
  realization of $\gamma$.
- $Q_j$ is a $n_i \times q_j$-dimensional design matrix for the
  $j^{\rm th}$ variance component.
- $\eta_j$ is a $q_j$-dimensional random vector containing independent
  and identically distributed values with variance $\tau_j^2$.
- $\epsilon$ is a $n_i$-dimensional vector of i.i.d. normal errors with
  mean 0 and variance $\sigma^2$; the $\epsilon$ values are independent
  both within and between groups
$Y$, $X$, $\{Q_j\}$ and $Z$ must be entirely observed. $\beta$, $\Psi$, and
$\sigma^2$ are estimated using ML or REML estimation, while $\gamma$,
$\{\eta_j\}$ and $\epsilon$ are random and so define the probability model.
The marginal mean structure is $E[Y|X, Z] = X\beta$. If only the marginal
mean structure is of interest, GEE is a good alternative to mixed
models.
Notation:
- $cov_{re}$ is the random effects covariance matrix (referred to above
  as $\Psi$) and $scale$ is the (scalar) error variance. There is also a
  single estimated variance parameter $\tau_j^2$ for each variance
  component. For a single group, the marginal covariance matrix of
  endog given exog is $scale \cdot I + Z \, cov_{re} \, Z^\prime$, where $Z$ is the
  design matrix for the random effects in one group. | """
Linear mixed effects models are regression models for dependent data.
They can be used to estimate regression relationships involving both
means and variances.
These models are also known as multilevel linear models, and
hierarchical linear models.
The MixedLM class fits linear mixed effects models to data, and
provides support for some common post-estimation tasks. This is a
group-based implementation that is most efficient for models in which
the data can be partitioned into independent groups. Some models with
crossed effects can be handled by specifying a model with a single
group.
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector (called endog in MixedLM)
* X is a n_i x k_fe dimensional design matrix for the fixed effects
(called exog in MixedLM)
* beta is a k_fe-dimensional vector of fixed effects parameters
(called fe_params in MixedLM)
* Z is a design matrix for the random effects with n_i rows (called
exog_re in MixedLM). The number of columns in Z can vary by group
as discussed below.
* gamma is a random vector with mean 0. The covariance matrix for the
first `k_re` elements of `gamma` (called cov_re in MixedLM) is
common to all groups. The remaining elements of `gamma` are
variance components as discussed in more detail below. Each group
receives its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random so define the probability model.
The marginal mean structure is E[Y | X, Z] = X*beta. If only the mean
structure is of interest, GEE is an alternative to using linear mixed
models.
Two types of random effects are supported. Standard random effects
are correlated with each other in arbitrary ways. Every group has the
same number (`k_re`) of standard random effects, with the same joint
distribution (but with independent realizations across the groups).
Variance components are uncorrelated with each other, and with the
standard random effects. Each variance component has mean zero, and
all realizations of a given variance component have the same variance
parameter. The number of realized variance components per variance
parameter can differ across the groups.
The primary reference for the implementation details is:
MJ Lindstrom, DM Bates (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates 1988, adapted to support variance components.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
* `vcomp` is a vector of variance parameters. The length of `vcomp`
is determined by the number of keys in either the `exog_vc` argument
to ``MixedLM``, or the `vc_formula` argument when using formulas to
fit a model.
Notes:
1. Three different parameterizations are used in different places.
The regression slopes (usually called `fe_params`) are identical in
all three parameterizations, but the variance parameters differ. The
parameterizations are:
* The "user parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "user" cov_re is
equal to the "profile" cov_re1 times the scale.
* The "square root parameterization" in which we work with the Cholesky
factor of cov_re1 instead of cov_re directly. This is hidden from the
user.
All three parameterizations can be packed into a vector by
(optionally) concatenating `fe_params` together with the lower
triangle or Cholesky square root of the dependence structure, followed
by the variance parameters for the variance components. They are
stored as square roots if (and only if) the random effects covariance
matrix is stored as its Cholesky factor. Note that when unpacking, it
is important to either square or reflect the dependence structure
depending on which parameterization is being used.
Two score methods are implemented. One takes the score with respect
to the elements of the random effects covariance matrix (used for
inference once the MLE is reached), and the other takes the score with
respect to the parameters of the Choleky square root of the random
effects covariance matrix (used for optimization).
The numerical optimization uses GLS to avoid explicitly optimizing
over the fixed effects parameters. The likelihood that is optimized
is profiled over both the scale parameter (a scalar) and the fixed
effects parameters (if any). As a result of this profiling, it is
difficult and unnecessary to calculate the Hessian of the profiled log
likelihood function, so that calculation is not implemented here.
Therefore, optimization methods requiring the Hessian matrix such as
the Newton-Raphson algorithm cannot be used for model fitting.
"""
import numpy as np
import statsmodels.base.model as base
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools import data as data_tools
from scipy.stats.distributions import norm
from scipy import sparse
import pandas as pd
import patsy
from collections import OrderedDict
from statsmodels.compat.python import string_types
from statsmodels.compat import range
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.base._penalties import Penalty
def _dot(x, y):
"""
Returns the dot product of the arrays, works for sparse and dense.
"""
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
return np.dot(x, y)
elif sparse.issparse(x):
return x.dot(y)
elif sparse.issparse(y):
return y.T.dot(x.T).T
# From numpy, adapted to work with sparse and dense arrays.
def _multi_dot_three(A, B, C):
"""
Find best ordering for three arrays and do the multiplication.
Doing it manually instead of using dynamic programming is
approximately 15 times faster.
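Examples
--------
A quick check that the result matches np.dot applied twice
(illustrative shapes only):
>>> import numpy as np
>>> A, B, C = np.ones((4, 2)), np.ones((2, 3)), np.ones((3, 1))
>>> np.allclose(_multi_dot_three(A, B, C), np.dot(np.dot(A, B), C))
True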
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
# cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return _dot(_dot(A, B), C)
else:
return _dot(A, _dot(B, C))
def _dotsum(x, y):
"""
Returns sum(x * y), where '*' is the pointwise product, computed
efficiently for dense and sparse matrices.
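Examples
--------
A small dense check (illustrative values only):
>>> import numpy as np
>>> float(_dotsum(np.arange(4.).reshape(2, 2), np.ones((2, 2))))
6.0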
"""
if sparse.issparse(x):
return x.multiply(y).sum()
else:
# This way usually avoids allocating a temporary.
return np.dot(x.ravel(), y.ravel())
class VCSpec(object):
"""
Define the variance component structure of a multilevel model.
An instance of the class contains three attributes:
- names : names[k] is the name of variance component k.
- mats : mats[k][i] is the design matrix for group index
i in variance component k.
- colnames : colnames[k][i] is the list of column names for
mats[k][i].
The groups in colnames and mats must be in sorted order.
"""
def __init__(self, names, colnames, mats):
self.names = names
self.colnames = colnames
self.mats = mats
def _get_exog_re_names(self, exog_re):
"""
Passes through if given a list of names. Otherwise, gets pandas names
or creates some generic variable names as needed.
"""
if self.k_re == 0:
return []
if isinstance(exog_re, pd.DataFrame):
return exog_re.columns.tolist()
elif isinstance(exog_re, pd.Series) and exog_re.name is not None:
return [exog_re.name]
elif isinstance(exog_re, list):
return exog_re
# Default names
defnames = ["x_re{0:1d}".format(k + 1) for k in range(exog_re.shape[1])]
return defnames
class MixedLMParams(object):
"""
This class represents a parameter state for a mixed linear model.
Parameters
----------
k_fe : integer
The number of covariates with fixed effects.
k_re : integer
The number of covariates with random coefficients (excluding
variance components).
k_vc : integer
The number of variance components parameters.
Notes
-----
This object represents the parameter state for the model in which
the scale parameter has been profiled out.
"""
def __init__(self, k_fe, k_re, k_vc):
self.k_fe = k_fe
self.k_re = k_re
self.k_re2 = k_re * (k_re + 1) // 2
self.k_vc = k_vc
self.k_tot = self.k_fe + self.k_re2 + self.k_vc
self._ix = np.tril_indices(self.k_re)
def from_packed(params, k_fe, k_re, use_sqrt, has_fe):
"""
Create a MixedLMParams object from packed parameter vector.
Parameters
----------
params : array_like
The model parameters packed into a single vector.
k_fe : integer
The number of covariates with fixed effects
k_re : integer
The number of covariates with random effects (excluding
variance components).
use_sqrt : boolean
If True, the random effects covariance matrix is provided
as its Cholesky factor, otherwise the lower triangle of
the covariance matrix is stored.
has_fe : boolean
If True, `params` contains fixed effects parameters.
Otherwise, the fixed effects parameters are set to zero.
Returns
-------
A MixedLMParams object.
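Examples
--------
Unpack one fixed effect and a 1 x 1 random effects covariance from
a packed vector (illustrative values only):
>>> import numpy as np
>>> p = MixedLMParams.from_packed(np.r_[1., 2.], 1, 1, False, True)
>>> float(p.fe_params[0]), float(p.cov_re[0, 0]), p.vcomp.size
(1.0, 2.0, 0)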
"""
k_re2 = int(k_re * (k_re + 1) / 2)
# The number of covariance parameters.
if has_fe:
k_vc = len(params) - k_fe - k_re2
else:
k_vc = len(params) - k_re2
pa = MixedLMParams(k_fe, k_re, k_vc)
cov_re = np.zeros((k_re, k_re))
ix = pa._ix
if has_fe:
pa.fe_params = params[0:k_fe]
cov_re[ix] = params[k_fe:k_fe+k_re2]
else:
pa.fe_params = np.zeros(k_fe)
cov_re[ix] = params[0:k_re2]
if use_sqrt:
cov_re = np.dot(cov_re, cov_re.T)
else:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
pa.cov_re = cov_re
if k_vc > 0:
if use_sqrt:
pa.vcomp = params[-k_vc:]**2
else:
pa.vcomp = params[-k_vc:]
else:
pa.vcomp = np.array([])
return pa
from_packed = staticmethod(from_packed)
def from_components(fe_params=None, cov_re=None, cov_re_sqrt=None,
vcomp=None):
"""
Create a MixedLMParams object from each parameter component.
Parameters
----------
fe_params : array_like
The fixed effects parameter (a 1-dimensional array). If
None, there are no fixed effects.
cov_re : array_like
The random effects covariance matrix (a square, symmetric
2-dimensional array).
cov_re_sqrt : array_like
The Cholesky (lower triangular) square root of the random
effects covariance matrix.
vcomp : array_like
The variance component parameters. If None, there are no
variance components.
Returns
-------
A MixedLMParams object.
"""
if vcomp is None:
vcomp = np.empty(0)
if fe_params is None:
fe_params = np.empty(0)
if cov_re is None and cov_re_sqrt is None:
cov_re = np.empty((0, 0))
k_fe = len(fe_params)
k_vc = len(vcomp)
k_re = cov_re.shape[0] if cov_re is not None else cov_re_sqrt.shape[0]
pa = MixedLMParams(k_fe, k_re, k_vc)
pa.fe_params = fe_params
if cov_re_sqrt is not None:
pa.cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T)
elif cov_re is not None:
pa.cov_re = cov_re
pa.vcomp = vcomp
return pa
from_components = staticmethod(from_components)
def copy(self):
"""
Returns a copy of the object.
"""
obj = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
obj.fe_params = self.fe_params.copy()
obj.cov_re = self.cov_re.copy()
obj.vcomp = self.vcomp.copy()
return obj
def get_packed(self, use_sqrt, has_fe=False):
"""
Return the model parameters packed into a single vector.
Parameters
----------
use_sqrt : bool
If True, the Cholesky square root of `cov_re` is
included in the packed result. Otherwise the
lower triangle of `cov_re` is included.
has_fe : bool
If True, the fixed effects parameters are included
in the packed result, otherwise they are omitted.
"""
if self.k_re > 0:
if use_sqrt:
L = np.linalg.cholesky(self.cov_re)
cpa = L[self._ix]
else:
cpa = self.cov_re[self._ix]
else:
cpa = np.zeros(0)
if use_sqrt:
vcomp = np.sqrt(self.vcomp)
else:
vcomp = self.vcomp
if has_fe:
pa = np.concatenate((self.fe_params, cpa, vcomp))
else:
pa = np.concatenate((cpa, vcomp))
return pa
def _smw_solver(s, A, AtA, Qi, di):
r"""
Returns a solver for the linear system:
.. math::
(sI + ABA^\prime) y = x
The returned function f satisfies f(x) = y as defined above.
B and its inverse matrix are block diagonal. The upper left block
of :math:`B^{-1}` is Qi and its lower right block is diag(di).
Parameters
----------
s : scalar
See above for usage
A : ndarray
p x q matrix, in general q << p, may be sparse.
AtA : square ndarray
:math:`A^\prime A`, a q x q matrix.
Qi : square symmetric ndarray
The matrix `B` is q x q, where q = r + d. `B` consists of a r
x r diagonal block whose inverse is `Qi`, and a d x d diagonal
block, whose inverse is diag(di).
di : 1d array_like
See documentation for Qi.
Returns
-------
A function for solving a linear system, as documented above.
Notes
-----
Uses Sherman-Morrison-Woodbury identity:
https://en.wikipedia.org/wiki/Woodbury_matrix_identity
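Examples
--------
A minimal check with s = 1 and A = B = I_2 (so Qi is the 1 x 1 upper
block of B^-1 and di its remaining diagonal); then sI + ABA' = 2I and
the solver halves its input (illustrative values only):
>>> import numpy as np
>>> A = np.eye(2)
>>> f = _smw_solver(1., A, np.dot(A.T, A), np.eye(1), np.ones(1))
>>> np.allclose(f(np.array([2., 4.])), [1., 2.])
True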
"""
# Use SMW identity
qmat = AtA / s
if sparse.issparse(qmat):
qmat = qmat.todense()
m = Qi.shape[0]
qmat[0:m, 0:m] += Qi
d = qmat.shape[0]
qmat.flat[m*(d+1)::d+1] += di
if sparse.issparse(A):
qmati = sparse.linalg.spsolve(sparse.csc_matrix(qmat), A.T)
else:
qmati = np.linalg.solve(qmat, A.T)
if sparse.issparse(A):
def solver(rhs):
ql = qmati.dot(rhs)
ql = A.dot(ql)
return rhs / s - ql / s**2
else:
def solver(rhs):
ql = np.dot(qmati, rhs)
ql = np.dot(A, ql)
return rhs / s - ql / s**2
return solver
def _smw_logdet(s, A, AtA, Qi, di, B_logdet):
r"""
Returns the log determinant of
.. math::
sI + ABA^\prime
Uses the matrix determinant lemma to accelerate the calculation.
B is assumed to be positive definite, and s > 0, therefore the
determinant is positive.
Parameters
----------
s : positive scalar
See above for usage
A : ndarray
p x q matrix, in general q << p.
AtA : square ndarray
:math:`A^\prime A`, a q x q matrix.
Qi : square symmetric ndarray
The matrix `B` is q x q, where q = r + d. `B` consists of a r
x r diagonal block whose inverse is `Qi`, and a d x d diagonal
block, whose inverse is diag(di).
di : 1d array_like
See documentation for Qi.
B_logdet : real
The log determinant of B
Returns
-------
The log determinant of s*I + A*B*A'.
Notes
-----
Uses the matrix determinant lemma:
https://en.wikipedia.org/wiki/Matrix_determinant_lemma
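Examples
--------
With the same setup as in `_smw_solver` (s = 1, A = B = I_2,
B_logdet = 0), the matrix sI + ABA' = 2I has log determinant
2*log(2) (illustrative values only):
>>> import numpy as np
>>> A = np.eye(2)
>>> ld = _smw_logdet(1., A, np.dot(A.T, A), np.eye(1), np.ones(1), 0.)
>>> np.allclose(ld, 2 * np.log(2))
True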
"""
p = A.shape[0]
ld = p * np.log(s)
qmat = AtA / s
m = Qi.shape[0]
qmat[0:m, 0:m] += Qi
d = qmat.shape[0]
qmat.flat[m*(d+1)::d+1] += di
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
def _convert_vc(exog_vc):
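"""
Convert the deprecated dict-of-dicts variance component format
(``exog_vc[name][group]`` is a design matrix) into a ``VCSpec``
instance, with component names and group matrices placed in sorted
order.
"""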
vc_names = []
vc_colnames = []
vc_mats = []
# Get the groups in sorted order
groups = set([])
for k, v in exog_vc.items():
groups |= set(v.keys())
groups = list(groups)
groups.sort()
for k, v in exog_vc.items():
vc_names.append(k)
colnames, mats = [], []
for g in groups:
try:
colnames.append(v[g].columns)
except AttributeError:
colnames.append([str(j) for j in range(v[g].shape[1])])
mats.append(v[g])
vc_colnames.append(colnames)
vc_mats.append(mats)
ii = np.argsort(vc_names)
vc_names = [vc_names[i] for i in ii]
vc_colnames = [vc_colnames[i] for i in ii]
vc_mats = [vc_mats[i] for i in ii]
return VCSpec(vc_names, vc_colnames, vc_mats)
class MixedLM(base.LikelihoodModel):
"""
An object specifying a linear mixed effects model. Use the `fit`
method to fit the model and obtain a results object.
Parameters
----------
endog : 1d array_like
The dependent variable
exog : 2d array_like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array_like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array_like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each group.
exog_vc : VCSpec instance or dict-like (deprecated)
A VCSpec instance defines the structure of the variance
components in the model. Alternatively, see notes below
for a dictionary-based format. The dictionary format is
deprecated and may be removed at some point in the future.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : string
The approach to missing data handling
Notes
-----
If `exog_vc` is not a `VCSpec` instance, then it must be a
dictionary of dictionaries. Specifically, `exog_vc[a][g]` is a
matrix whose columns are linearly combined using independent
random coefficients. This random term then contributes to the
variance structure of the data for group `g`. The random
coefficients all have mean zero, and have the same variance. The
matrix must be `m x k`, where `m` is the number of observations in
group `g`. The number of columns may differ among the top-level
groups.
The covariates in `exog`, `exog_re` and `exog_vc` may (but need
not) partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
Examples
--------
A basic mixed model with fixed effects for the columns of
``exog`` and a random intercept for each distinct value of
``group``:
>>> model = sm.MixedLM(endog, exog, groups)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
correlated random coefficients for the columns of ``exog_re``:
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit()
A mixed model with fixed effects for the columns of ``exog`` and
independent random coefficients for the columns of ``exog_re``:
>>> free = MixedLMParams.from_components(
fe_params=np.ones(exog.shape[1]),
cov_re=np.eye(exog_re.shape[1]))
>>> model = sm.MixedLM(endog, exog, groups, exog_re=exog_re)
>>> result = model.fit(free=free)
A different way to specify independent random coefficients for the
columns of ``exog_re``. In this example ``groups`` must be a
Pandas Series with compatible indexing with ``exog_re``, and
``exog_re`` has two columns.
>>> g = pd.groupby(groups, by=groups).groups
>>> vc = {}
>>> vc['1'] = {k : exog_re.loc[g[k], 0] for k in g}
>>> vc['2'] = {k : exog_re.loc[g[k], 1] for k in g}
>>> model = sm.MixedLM(endog, exog, groups, exog_vc=vc)
>>> result = model.fit()
"""
def __init__(self, endog, exog, groups, exog_re=None,
exog_vc=None, use_sqrt=True, missing='none',
**kwargs):
_allowed_kwargs = ["missing_idx", "design_info", "formula"]
for x in kwargs.keys():
if x not in _allowed_kwargs:
raise ValueError(
"argument %s not permitted for MixedLM initialization" % x)
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
if isinstance(exog_vc, dict):
warnings.warn("Using deprecated variance components format")
# Convert from old to new representation
exog_vc = _convert_vc(exog_vc)
if exog_vc is not None:
self.k_vc = len(exog_vc.names)
self.exog_vc = exog_vc
else:
self.k_vc = 0
self.exog_vc = VCSpec([], [], [])
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
# yes, it should be done up the hierarchy
if (exog is not None and
data_tools._is_using_ndarray_type(exog, None) and
exog.ndim == 1):
exog = exog[:, None]
if (exog_re is not None and
data_tools._is_using_ndarray_type(exog_re, None) and
exog_re.ndim == 1):
exog_re = exog_re[:, None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing,
**kwargs)
self._init_keys.extend(["use_sqrt", "exog_vc"])
# Number of fixed effects parameters
self.k_fe = exog.shape[1]
if exog_re is None and len(self.exog_vc.names) == 0:
# Default random effects structure (random intercepts).
self.k_re = 1
self.k_re2 = 1
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
names = ['Group Var']
self.data.param_names = self.exog_names + names
self.data.exog_re_names = names
self.data.exog_re_names_full = names
elif exog_re is not None:
# Process exog_re the same way that exog is handled
# upstream
# TODO: this is wrong and should be handled upstream wholly
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
if self.exog_re.ndim == 1:
self.exog_re = self.exog_re[:, None]
# Model dimensions
# Number of random effect covariates
self.k_re = self.exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
else:
# All random effects are variance components
self.k_re = 0
self.k_re2 = 0
if not self.data._param_names:
# HACK: could've been set in from_formula already
# needs refactor
(param_names, exog_re_names,
exog_re_names_full) = self._make_param_names(exog_re)
self.data.param_names = param_names
self.data.exog_re_names = exog_re_names
self.data.exog_re_names_full = exog_re_names_full
self.k_params = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i, g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# Precompute this.
if self.exog_re is None:
self.exog_re2_li = None
else:
self.exog_re2_li = [np.dot(x.T, x) for x in self.exog_re_li]
# The total number of observations, summed over all groups
self.nobs = len(self.endog)
self.n_totobs = self.nobs
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
# Precompute this
self._aex_r = []
self._aex_r2 = []
for i in range(self.n_groups):
a = self._augment_exog(i)
self._aex_r.append(a)
# This matrix is not very sparse so convert it to dense.
ma = _dot(a.T, a)
if sparse.issparse(ma):
ma = ma.todense()
self._aex_r2.append(ma)
# Precompute this
self._lin, self._quad = self._reparam()
def _make_param_names(self, exog_re):
"""
Returns the full parameter names list, just the exogenous random
effects variables, and the exogenous random effects variables with
the interaction terms.
"""
exog_names = list(self.exog_names)
exog_re_names = _get_exog_re_names(self, exog_re)
param_names = []
jj = self.k_fe
for i in range(len(exog_re_names)):
for j in range(i + 1):
if i == j:
param_names.append(exog_re_names[i] + " Var")
else:
param_names.append(exog_re_names[j] + " x " +
exog_re_names[i] + " Cov")
jj += 1
vc_names = [x + " Var" for x in self.exog_vc.names]
return exog_names + param_names + vc_names, exog_re_names, param_names
@classmethod
def from_formula(cls, formula, data, re_formula=None, vc_formula=None,
subset=None, use_sparse=False, missing='none', *args,
**kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array_like
The data for the model. See Notes.
re_formula : string
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
vc_formula : dict-like
Formulas describing variance components. `vc_formula[vc]` is
the formula for the component with variance parameter named
`vc`. The formula is processed into a matrix, and the columns
of this matrix are linearly combined with independent random
coefficients having mean zero and a common variance.
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
missing : string
Either 'none' or 'drop'
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms; it can be, e.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame. The extra args and kwargs
are passed on to the model instantiation.
If the variance component is intended to produce random
intercepts for disjoint subsets of a group, specified by
string labels or a categorical data value, always use '0 +' in
the formula so that no overall intercept is included.
If the variance components specify random slopes and you do
not also want a random group-level intercept in the model,
then use '0 +' in the formula to exclude the intercept.
The variance components formulas are processed separately for
each group. If a variable is categorical the results will not
be affected by whether the group labels are distinct or
re-used over the top-level groups.
Examples
--------
Suppose we have data from an educational study with students
nested in classrooms nested in schools. The students take a
test, and we want to relate the test scores to the students'
ages, while accounting for the effects of classrooms and
schools. The school will be the top-level group, and the
classroom is a nested group that is specified as a variance
component. Note that the schools may have different numbers of
classrooms, and the classroom labels may (but need not) be
different across the schools.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age', vc_formula=vc, \
re_formula='1', groups='school', data=data)
Now suppose we also have a previous test score called
'pretest'. If we want the relationship between pretest
scores and the current test to vary by classroom, we can
specify a random slope for the pretest score
>>> vc = {'classroom': '0 + C(classroom)', 'pretest': '0 + pretest'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \
re_formula='1', groups='school', data=data)
The following model is almost equivalent to the previous one,
but here the classroom random intercept and pretest slope may
be correlated.
>>> vc = {'classroom': '0 + C(classroom)'}
>>> MixedLM.from_formula('test_score ~ age + pretest', vc_formula=vc, \
re_formula='1 + pretest', groups='school', \
data=data)
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument " +
"in MixedLM.from_formula")
groups = kwargs["groups"]
# If `groups` is a variable name, retrieve the data for the
# groups variable.
group_name = "Group"
if isinstance(groups, string_types):
group_name = groups
groups = np.asarray(data[groups])
else:
groups = np.asarray(groups)
del kwargs["groups"]
# Bypass all upstream missing data handling to properly handle
# variance components
if missing == 'drop':
data, groups = _handle_missing(data, groups, formula, re_formula,
vc_formula)
missing = 'none'
if re_formula is not None:
if re_formula.strip() == "1":
# Work around Patsy bug, fixed by 0.3.
exog_re = np.ones((data.shape[0], 1))
exog_re_names = [group_name]
else:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)
exog_re_names = exog_re.design_info.column_names
exog_re_names = [x.replace("Intercept", group_name)
for x in exog_re_names]
exog_re = np.asarray(exog_re)
if exog_re.ndim == 1:
exog_re = exog_re[:, None]
else:
exog_re = None
if vc_formula is None:
exog_re_names = [group_name]
else:
exog_re_names = []
if vc_formula is not None:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
vc_mats = []
vc_colnames = []
vc_names = []
gb = data.groupby(groups)
kylist = sorted(gb.groups.keys())
vcf = sorted(vc_formula.keys())
for vc_name in vcf:
md = patsy.ModelDesc.from_formula(vc_formula[vc_name])
vc_names.append(vc_name)
evc_mats, evc_colnames = [], []
for group_ix, group in enumerate(kylist):
ii = gb.groups[group]
mat = patsy.dmatrix(
md,
data.loc[ii, :],
eval_env=eval_env,
return_type='dataframe')
evc_colnames.append(mat.columns.tolist())
if use_sparse:
evc_mats.append(sparse.csr_matrix(mat))
else:
evc_mats.append(np.asarray(mat))
vc_mats.append(evc_mats)
vc_colnames.append(evc_colnames)
exog_vc = VCSpec(vc_names, vc_colnames, vc_mats)
else:
exog_vc = VCSpec([], [], [])
kwargs["subset"] = None
kwargs["exog_re"] = exog_re
kwargs["exog_vc"] = exog_vc
kwargs["groups"] = groups
mod = super(MixedLM, cls).from_formula(
formula, data, *args, **kwargs)
# expand re names to account for pairs of RE
(param_names,
exog_re_names,
exog_re_names_full) = mod._make_param_names(exog_re_names)
mod.data.param_names = param_names
mod.data.exog_re_names = exog_re_names
mod.data.exog_re_names_full = exog_re_names_full
if vc_formula is not None:
mod.data.vcomp_names = mod.exog_vc.names
return mod
def predict(self, params, exog=None):
"""
Return predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a mixed linear model. Can be either a
MixedLMParams instance, or a vector containing the packed
model parameters in which the fixed effects parameters are
at the beginning of the vector, or a vector containing
only the fixed effects parameters.
exog : array_like, optional
Design / exogenous data for the fixed effects. Model exog
is used if None.
Returns
-------
An array of fitted values. Note that these predicted values
only reflect the fixed effects mean structure of the model.
"""
if exog is None:
exog = self.exog
if isinstance(params, MixedLMParams):
params = params.fe_params
else:
params = params[0:self.k_fe]
return np.dot(exog, params)
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array is None:
return None
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters
----------
method : string or Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array_like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
in magnitude are treated as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : integer
The maximum number of iterations.
fit_kwargs : keywords
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance containing the results.
Notes
-----
The covariance structure is not updated as the fixed effects
parameters are varied.
The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References
----------
Friedman, J. H., Hastie, T. and Tibshirani, R. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
"""
if isinstance(method, string_types) and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
# Scale the penalty weights by alpha
method.alpha = alpha
fit_kwargs.update({"fe_pen": method})
return self.fit(**fit_kwargs)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_kwargs)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
vcomp = mdf.vcomp
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
a, b = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
resid = resid_all[self.row_indices[group]]
solver = _smw_solver(scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
x = exog[:, j]
u = solver(x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
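# Soft-threshold update: the minimizer of a*x^2 + b*x + pwt1*|x|
# is zero when |b| <= pwt1, equal to -(b - pwt1) / (2*a) when
# b > pwt1, and equal to -(b + pwt1) / (2*a) when b < -pwt1.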
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled, mdf.vcomp)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess = self.hessian(params_prof)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
params_object = MixedLMParams.from_components(fe_params, cov_re=cov_re)
results = MixedLMResults(self, params_prof, pcov / scale)
results.params_object = params_object
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
return MixedLMResultsWrapper(results)
def get_fe_params(self, cov_re, vcomp):
"""
Use GLS to update the fixed effects parameter estimates.
Parameters
----------
cov_re : array_like
The covariance matrix of the random effects.
vcomp : array_like
The variance component parameters.
Returns
-------
The GLS estimates of the fixed effects parameters.
"""
if self.k_fe == 0:
return np.array([])
if self.k_re == 0:
cov_re_inv = np.empty((0, 0))
else:
cov_re_inv = np.linalg.inv(cov_re)
# Cache these quantities that don't change.
if not hasattr(self, "_endex_li"):
self._endex_li = []
for group_ix, _ in enumerate(self.group_labels):
mat = np.concatenate(
(self.exog_li[group_ix],
self.endog_li[group_ix][:, None]), axis=1)
self._endex_li.append(mat)
xtxy = 0.
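# Accumulate X' V^{-1} [X, y] over the groups; the GLS normal
# equations are then solved below, using the last column of the
# accumulated matrix as the right-hand side.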
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
u = solver(self._endex_li[group_ix])
xtxy += np.dot(exog.T, u)
fe_params = np.linalg.solve(xtxy[:, 0:-1], xtxy[:, -1])
return fe_params
def _reparam(self):
"""
Returns parameters of the map converting parameters from the
form used in optimization to the form returned to the user.
Returns
-------
lin : list-like
Linear terms of the map
quad : list-like
Quadratic terms of the map
Notes
-----
If P are the standard form parameters and R are the
transformed parameters (i.e. with the Cholesky square root
covariance and square root transformed variance components),
then P[i] = lin[i] * R + R' * quad[i] * R
"""
k_fe, k_re, k_re2, k_vc = self.k_fe, self.k_re, self.k_re2, self.k_vc
k_tot = k_fe + k_re2 + k_vc
ix = np.tril_indices(self.k_re)
lin = []
for k in range(k_fe):
e = np.zeros(k_tot)
e[k] = 1
lin.append(e)
for k in range(k_re2):
lin.append(np.zeros(k_tot))
for k in range(k_vc):
lin.append(np.zeros(k_tot))
quad = []
# Quadratic terms for fixed effects.
for k in range(k_tot):
quad.append(np.zeros((k_tot, k_tot)))
# Quadratic terms for random effects covariance.
ii = np.tril_indices(k_re)
ix = [(a, b) for a, b in zip(ii[0], ii[1])]
for i1 in range(k_re2):
for i2 in range(k_re2):
ix1 = ix[i1]
ix2 = ix[i2]
if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
ii = (ix2[0], ix1[0])
k = ix.index(ii)
quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
for k in range(k_tot):
quad[k] = 0.5*(quad[k] + quad[k].T)
# Quadratic terms for variance components.
km = k_fe + k_re2
for k in range(km, km+k_vc):
quad[k][k, k] = 1
return lin, quad
def _expand_vcomp(self, vcomp, group_ix):
"""
Replicate variance parameters to match a group's design.
Parameters
----------
vcomp : array_like
The variance parameters for the variance components.
group_ix : integer
The group index
Returns an expanded version of vcomp, in which each variance
parameter is copied as many times as there are independent
realizations of the variance component in the given group.
"""
if len(vcomp) == 0:
return np.empty(0)
vc_var = []
for j in range(len(self.exog_vc.names)):
d = self.exog_vc.mats[j][group_ix].shape[1]
vc_var.append(vcomp[j] * np.ones(d))
if len(vc_var) > 0:
return np.concatenate(vc_var)
else:
# Cannot reach here?
return np.empty(0)
def _augment_exog(self, group_ix):
"""
Concatenate the columns for variance components to the columns
for other random effects to obtain a single random effects
exog matrix for a given group.
"""
ex_r = self.exog_re_li[group_ix] if self.k_re > 0 else None
if self.k_vc == 0:
return ex_r
ex = [ex_r] if self.k_re > 0 else []
any_sparse = False
for j, _ in enumerate(self.exog_vc.names):
ex.append(self.exog_vc.mats[j][group_ix])
any_sparse |= sparse.issparse(ex[-1])
if any_sparse:
for j, x in enumerate(ex):
if not sparse.issparse(x):
ex[j] = sparse.csr_matrix(x)
ex = sparse.hstack(ex)
ex = sparse.csr_matrix(ex)
else:
ex = np.concatenate(ex, axis=1)
return ex
def loglike(self, params, profile_fe=True):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model.
Parameters
----------
params : MixedLMParams, or array_like.
The parameter value. If array-like, must be a packed
parameter vector containing only the covariance
parameters.
profile_fe : boolean
If True, replace the provided value of `fe_params` with
the GLS estimates.
Returns
-------
The log-likelihood value at `params`.
Notes
-----
The scale parameter `scale` is always profiled out of the
log-likelihood. In addition, if `profile_fe` is true the
fixed effects parameters are also profiled out.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.k_re, self.use_sqrt,
has_fe=False)
cov_re = params.cov_re
vcomp = params.vcomp
# Move to the profile set
if profile_fe:
fe_params = self.get_fe_params(cov_re, vcomp)
else:
fe_params = params.fe_params
if self.k_re > 0:
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
_, cov_re_logdet = np.linalg.slogdet(cov_re)
else:
cov_re_inv = np.zeros((0, 0))
cov_re_logdet = 0
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if (self.cov_pen is not None) and (self.k_re > 0):
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if (self.fe_pen is not None):
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
cov_aug_logdet = cov_re_logdet + np.sum(np.log(vc_var))
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
resid = resid_all[self.row_indices[group]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var,
cov_aug_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = solver(resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = solver(exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_, ld = np.linalg.slogdet(xvx)
likeval -= ld / 2.
likeval -= (self.n_totobs - self.k_fe) * np.log(2 * np.pi) / 2.
likeval += ((self.n_totobs - self.k_fe) *
np.log(self.n_totobs - self.k_fe) / 2.)
likeval -= (self.n_totobs - self.k_fe) / 2.
else:
likeval -= self.n_totobs * np.log(qf) / 2.
likeval -= self.n_totobs * np.log(2 * np.pi) / 2.
likeval += self.n_totobs * np.log(self.n_totobs) / 2.
likeval -= self.n_totobs / 2.
return likeval
def _gen_dV_dPar(self, ex_r, solver, group_ix, max_ix=None):
"""
A generator that yields the element-wise derivative of the
marginal covariance matrix with respect to the random effects
variance and covariance parameters.
ex_r : array_like
The random effects design matrix
solver : function
A function that given x returns V^{-1}x, where V
is the group's marginal covariance matrix.
group_ix : integer
The group index
max_ix : integer or None
If not None, the generator ends when this index
is reached.
"""
axr = solver(ex_r)
# Regular random effects
jj = 0
for j1 in range(self.k_re):
for j2 in range(j1 + 1):
if max_ix is not None and jj > max_ix:
return
# Need 2d
mat_l, mat_r = ex_r[:, j1:j1+1], ex_r[:, j2:j2+1]
vsl, vsr = axr[:, j1:j1+1], axr[:, j2:j2+1]
yield jj, mat_l, mat_r, vsl, vsr, j1 == j2
jj += 1
# Variance components
for j, _ in enumerate(self.exog_vc.names):
if max_ix is not None and jj > max_ix:
return
mat = self.exog_vc.mats[j][group_ix]
axmat = solver(mat)
yield jj, mat, mat, axmat, axmat, True
jj += 1
def score(self, params, profile_fe=True):
"""
Returns the score vector of the profile log-likelihood.
Notes
-----
The score vector that is returned is computed with respect to
the parameterization defined by this model instance's
`use_sqrt` attribute.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(
params, self.k_fe, self.k_re, self.use_sqrt,
has_fe=False)
if profile_fe:
params.fe_params = self.get_fe_params(params.cov_re, params.vcomp)
if self.use_sqrt:
score_fe, score_re, score_vc = self.score_sqrt(
params, calc_fe=not profile_fe)
else:
score_fe, score_re, score_vc = self.score_full(
params, calc_fe=not profile_fe)
if self._freepat is not None:
score_fe *= self._freepat.fe_params
score_re *= self._freepat.cov_re[self._freepat._ix]
score_vc *= self._freepat.vcomp
if profile_fe:
return np.concatenate((score_re, score_vc))
else:
return np.concatenate((score_fe, score_re, score_vc))
def score_full(self, params, calc_fe):
"""
Returns the score with respect to untransformed parameters.
Calculates the score vector for the profiled log-likelihood of
the mixed effects model with respect to the parameterization
in which the random effects covariance matrix is represented
in its full form (not using the Cholesky factor).
Parameters
----------
params : MixedLMParams or array_like
The parameter at which the score function is evaluated.
If array-like, must contain the packed random effects
parameters (cov_re and vcomp) without fe_params.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array_like
The score vector with respect to the fixed effects
parameters.
score_re : array_like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array_like
The score vector with respect to variance components
parameters.
Notes
-----
`score_re` is taken with respect to the parameterization in
which `cov_re` is represented through its lower triangle
(without taking the Cholesky square root).
"""
fe_params = params.fe_params
cov_re = params.cov_re
vcomp = params.vcomp
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
score_fe = np.zeros(self.k_fe)
score_re = np.zeros(self.k_re2)
score_vc = np.zeros(self.k_vc)
# Handle the covariance penalty.
if self.cov_pen is not None:
score_re -= self.cov_pen.deriv(cov_re, cov_re_inv)
# Handle the fixed effects penalty.
if calc_fe and (self.fe_pen is not None):
score_fe -= self.fe_pen.deriv(fe_params)
# resid' V^{-1} resid, summed over the groups (a scalar)
rvir = 0.
# exog' V^{-1} resid, summed over the groups (a k_fe
# dimensional vector)
xtvir = 0.
# exog' V^{_1} exog, summed over the groups (a k_fe x k_fe
# matrix)
xtvix = 0.
# exog' V^{-1} dV/dQ_jj V^{-1} exog, where Q_jj is the jj^th
# covariance parameter.
xtax = [0., ] * (self.k_re2 + self.k_vc)
# Temporary related to the gradient of log |V|
dlv = np.zeros(self.k_re2 + self.k_vc)
# resid' V^{-1} dV/dQ_jj V^{-1} resid (a scalar)
rvavr = np.zeros(self.k_re2 + self.k_vc)
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
if self.reml:
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
# Contributions to the covariance parameter gradient
vir = solver(resid)
for (jj, matl, matr, vsl, vsr, sym) in\
self._gen_dV_dPar(ex_r, solver, group_ix):
dlv[jj] = _dotsum(matr, vsl)
if not sym:
dlv[jj] += _dotsum(matl, vsr)
ul = _dot(vir, matl)
ur = ul.T if sym else _dot(matr.T, vir)
ulr = np.dot(ul, ur)
rvavr[jj] += ulr
if not sym:
rvavr[jj] += ulr.T
if self.reml:
ul = _dot(viexog.T, matl)
ur = ul.T if sym else _dot(matr.T, viexog)
ulr = np.dot(ul, ur)
xtax[jj] += ulr
if not sym:
xtax[jj] += ulr.T
# Contribution of log|V| to the covariance parameter
# gradient.
if self.k_re > 0:
score_re -= 0.5 * dlv[0:self.k_re2]
if self.k_vc > 0:
score_vc -= 0.5 * dlv[self.k_re2:]
rvir += np.dot(resid, vir)
if calc_fe:
xtvir += np.dot(exog.T, vir)
fac = self.n_totobs
if self.reml:
fac -= self.k_fe
if calc_fe and self.k_fe > 0:
score_fe += fac * xtvir / rvir
if self.k_re > 0:
score_re += 0.5 * fac * rvavr[0:self.k_re2] / rvir
if self.k_vc > 0:
score_vc += 0.5 * fac * rvavr[self.k_re2:] / rvir
if self.reml:
xtvixi = np.linalg.inv(xtvix)
for j in range(self.k_re2):
score_re[j] += 0.5 * _dotsum(xtvixi.T, xtax[j])
for j in range(self.k_vc):
score_vc[j] += 0.5 * _dotsum(xtvixi.T, xtax[self.k_re2 + j])
return score_fe, score_re, score_vc
def score_sqrt(self, params, calc_fe=True):
"""
Returns the score with respect to transformed parameters.
Calculates the score vector with respect to the
parameterization in which the random effects covariance matrix
is represented through its Cholesky square root.
Parameters
----------
params : MixedLMParams or array_like
The model parameters. If array-like must contain packed
parameters that are compatible with this model instance.
calc_fe : boolean
If True, calculate the score vector for the fixed effects
parameters. If False, this vector is not calculated, and
a vector of zeros is returned in its place.
Returns
-------
score_fe : array_like
The score vector with respect to the fixed effects
parameters.
score_re : array_like
The score vector with respect to the random effects
parameters (excluding variance components parameters).
score_vc : array_like
The score vector with respect to variance components
parameters.
"""
score_fe, score_re, score_vc = self.score_full(params, calc_fe=calc_fe)
params_vec = params.get_packed(use_sqrt=True, has_fe=True)
score_full = np.concatenate((score_fe, score_re, score_vc))
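# Chain rule: for the map defined in _reparam, dP[i]/dR is
# lin[i] + 2 * quad[i] * R, so the score in the square root
# parameterization is the untransformed score contracted with
# this Jacobian.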
scr = 0.
for i in range(len(params_vec)):
v = self._lin[i] + 2 * np.dot(self._quad[i], params_vec)
scr += score_full[i] * v
score_fe = scr[0:self.k_fe]
score_re = scr[self.k_fe:self.k_fe + self.k_re2]
score_vc = scr[self.k_fe + self.k_re2:]
return score_fe, score_re, score_vc
def hessian(self, params):
"""
Returns the model's Hessian matrix.
Calculates the Hessian matrix for the linear mixed effects
model with respect to the parameterization in which the
covariance matrix is represented directly (without square-root
transformation).
Parameters
----------
params : MixedLMParams or array_like
The model parameters at which the Hessian is calculated.
If array-like, must contain the packed parameters in a
form that is compatible with this model instance.
Returns
-------
hess : 2d ndarray
The Hessian matrix, evaluated at `params`.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe, self.k_re,
use_sqrt=self.use_sqrt,
has_fe=True)
fe_params = params.fe_params
vcomp = params.vcomp
cov_re = params.cov_re
if self.k_re > 0:
cov_re_inv = np.linalg.inv(cov_re)
else:
cov_re_inv = np.empty((0, 0))
# Blocks for the fixed and random effects parameters.
hess_fe = 0.
hess_re = np.zeros((self.k_re2 + self.k_vc, self.k_re2 + self.k_vc))
hess_fere = np.zeros((self.k_re2 + self.k_vc, self.k_fe))
fac = self.n_totobs
if self.reml:
fac -= self.exog.shape[1]
rvir = 0.
xtvix = 0.
xtax = [0., ] * (self.k_re2 + self.k_vc)
m = self.k_re2 + self.k_vc
B = np.zeros(m)
D = np.zeros((m, m))
F = [[0.] * m for k in range(m)]
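# Accumulators filled in the group loop below: B and D collect
# first- and second-order contributions of the residual quadratic
# form with respect to the covariance parameters, while xtax and F
# collect the corresponding terms used for the REML adjustment.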
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
viexog = solver(exog)
xtvix += np.dot(exog.T, viexog)
vir = solver(resid)
rvir += np.dot(resid, vir)
for (jj1, matl1, matr1, vsl1, vsr1, sym1) in\
self._gen_dV_dPar(ex_r, solver, group_ix):
ul = _dot(viexog.T, matl1)
ur = _dot(matr1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if not sym1:
ul = _dot(viexog.T, matr1)
ur = _dot(matl1.T, vir)
hess_fere[jj1, :] += np.dot(ul, ur)
if self.reml:
ul = _dot(viexog.T, matl1)
ur = ul if sym1 else np.dot(viexog.T, matr1)
ulr = _dot(ul, ur.T)
xtax[jj1] += ulr
if not sym1:
xtax[jj1] += ulr.T
ul = _dot(vir, matl1)
ur = ul if sym1 else _dot(vir, matr1)
B[jj1] += np.dot(ul, ur) * (1 if sym1 else 2)
# V^{-1} * dV/d_theta
E = [(vsl1, matr1)]
if not sym1:
E.append((vsr1, matl1))
for (jj2, matl2, matr2, vsl2, vsr2, sym2) in\
self._gen_dV_dPar(ex_r, solver, group_ix, jj1):
re = sum([_multi_dot_three(matr2.T, x[0], x[1].T)
for x in E])
vt = 2 * _dot(_multi_dot_three(vir[None, :], matl2, re),
vir[:, None])
if not sym2:
le = sum([_multi_dot_three(matl2.T, x[0], x[1].T)
for x in E])
vt += 2 * _dot(_multi_dot_three(
vir[None, :], matr2, le), vir[:, None])
D[jj1, jj2] += vt
if jj1 != jj2:
D[jj2, jj1] += vt
rt = _dotsum(vsl2, re.T) / 2
if not sym2:
rt += _dotsum(vsr2, le.T) / 2
hess_re[jj1, jj2] += rt
if jj1 != jj2:
hess_re[jj2, jj1] += rt
if self.reml:
ev = sum([_dot(x[0], _dot(x[1].T, viexog)) for x in E])
u1 = _dot(viexog.T, matl2)
u2 = _dot(matr2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
if not sym2:
u1 = np.dot(viexog.T, matr2)
u2 = np.dot(matl2.T, ev)
um = np.dot(u1, u2)
F[jj1][jj2] += um + um.T
hess_fe -= fac * xtvix / rvir
hess_re = hess_re - 0.5 * fac * (D/rvir - np.outer(B, B) / rvir**2)
hess_fere = -fac * hess_fere / rvir
if self.reml:
QL = [np.linalg.solve(xtvix, x) for x in xtax]
for j1 in range(self.k_re2 + self.k_vc):
for j2 in range(j1 + 1):
a = _dotsum(QL[j1].T, QL[j2])
a -= np.trace(np.linalg.solve(xtvix, F[j1][j2]))
a *= 0.5
hess_re[j1, j2] += a
if j1 > j2:
hess_re[j2, j1] += a
# Put the blocks together to get the Hessian.
m = self.k_fe + self.k_re2 + self.k_vc
hess = np.zeros((m, m))
hess[0:self.k_fe, 0:self.k_fe] = hess_fe
hess[0:self.k_fe, self.k_fe:] = hess_fere.T
hess[self.k_fe:, 0:self.k_fe] = hess_fere
hess[self.k_fe:, self.k_fe:] = hess_re
return hess
def get_scale(self, fe_params, cov_re, vcomp):
"""
Returns the estimated error variance based on given estimates
of the slopes and random effects covariance matrix.
Parameters
----------
fe_params : array_like
The regression slope estimates
cov_re : 2d array_like
Estimate of the random effects covariance matrix
vcomp : array_like
Estimate of the variance components
Returns
-------
scale : float
The estimated error variance.
"""
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
qf = 0.
for group_ix, group in enumerate(self.group_labels):
vc_var = self._expand_vcomp(vcomp, group_ix)
exog = self.exog_li[group_ix]
ex_r, ex2_r = self._aex_r[group_ix], self._aex_r2[group_ix]
solver = _smw_solver(1., ex_r, ex2_r, cov_re_inv, 1 / vc_var)
# The residuals
resid = self.endog_li[group_ix]
if self.k_fe > 0:
expval = np.dot(exog, fe_params)
resid = resid - expval
mat = solver(resid)
qf += np.dot(resid, mat)
if self.reml:
qf /= (self.n_totobs - self.k_fe)
else:
qf /= self.n_totobs
return qf
def fit(self, start_params=None, reml=True, niter_sa=0,
do_cg=True, fe_pen=None, cov_pen=None, free=None,
full_output=False, method=None, **kwargs):
"""
Fit a linear mixed model to the data.
Parameters
----------
start_params : array_like or MixedLMParams
Starting values for the profile log-likelihood. If not a
`MixedLMParams` instance, this should be an array
containing the packed parameters for the profile
log-likelihood, including the fixed effects
parameters.
reml : bool
If true, fit according to the REML likelihood, else
fit the standard likelihood using ML.
niter_sa :
Currently this argument is ignored and has no effect
on the results.
cov_pen : CovariancePenalty object
A penalty for the random effects covariance matrix
do_cg : boolean, defaults to True
If False, the optimization is skipped and a results
object at the given (or default) starting values is
returned.
fe_pen : Penalty object
A penalty on the fixed effects
free : MixedLMParams object
If not `None`, this is a mask that allows parameters to be
held fixed at specified values. A 1 indicates that the
corresponding parameter is estimated, a 0 indicates that
it is fixed at its starting value. Setting the `cov_re`
component to the identity matrix fits a model with
independent random effects. Note that some optimization
methods do not respect this constraint (bfgs and lbfgs both
work).
full_output : bool
If true, attach iteration history to results
method : string
Optimization method. Can be a scipy.optimize method name,
or a list of such names to be tried in sequence.
Returns
-------
A MixedLMResults instance.
"""
_allowed_kwargs = ['gtol', 'maxiter', 'eps', 'maxcor', 'ftol',
'tol', 'disp', 'maxls']
for x in kwargs.keys():
if x not in _allowed_kwargs:
warnings.warn("Argument %s not used by MixedLM.fit" % x)
if method is None:
method = ['bfgs', 'lbfgs', 'cg']
elif isinstance(method, str):
method = [method]
for meth in method:
if meth.lower() in ["newton", "ncg"]:
raise ValueError(
"method %s not available for MixedLM" % meth)
self.reml = reml
self.cov_pen = cov_pen
self.fe_pen = fe_pen
self._freepat = free
if full_output:
hist = []
else:
hist = None
if start_params is None:
params = MixedLMParams(self.k_fe, self.k_re, self.k_vc)
params.fe_params = np.zeros(self.k_fe)
params.cov_re = np.eye(self.k_re)
params.vcomp = np.ones(self.k_vc)
else:
if isinstance(start_params, MixedLMParams):
params = start_params
else:
# It's a packed array
if len(start_params) == self.k_fe + self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(
start_params, self.k_fe, self.k_re, self.use_sqrt,
has_fe=True)
elif len(start_params) == self.k_re2 + self.k_vc:
params = MixedLMParams.from_packed(
start_params, self.k_fe, self.k_re, self.use_sqrt,
has_fe=False)
else:
raise ValueError("invalid start_params")
if do_cg:
kwargs["retall"] = hist is not None
if "disp" not in kwargs:
kwargs["disp"] = False
packed = params.get_packed(use_sqrt=self.use_sqrt, has_fe=False)
if niter_sa > 0:
warnings.warn("niter_sa is currently ignored")
# Try optimizing one or more times
for j in range(len(method)):
rslt = super(MixedLM, self).fit(start_params=packed,
skip_hessian=True,
method=method[j],
**kwargs)
if rslt.mle_retvals['converged']:
break
packed = rslt.params
if j + 1 < len(method):
next_method = method[j + 1]
warnings.warn(
"Retrying MixedLM optimization with %s" % next_method,
ConvergenceWarning)
else:
msg = ("MixedLM optimization failed, " +
"trying a different optimizer may help.")
warnings.warn(msg, ConvergenceWarning)
# The optimization succeeded
params = np.atleast_1d(rslt.params)
if hist is not None:
hist.append(rslt.mle_retvals)
converged = rslt.mle_retvals['converged']
if not converged:
gn = self.score(rslt.params)
gn = np.sqrt(np.sum(gn**2))
msg = "Gradient optimization failed, |grad| = %f" % gn
warnings.warn(msg, ConvergenceWarning)
# Convert to the final parameterization (i.e. undo the square
# root transform of the covariance matrix, and the profiling
# over the error variance).
params = MixedLMParams.from_packed(
params, self.k_fe, self.k_re, use_sqrt=self.use_sqrt, has_fe=False)
cov_re_unscaled = params.cov_re
vcomp_unscaled = params.vcomp
fe_params = self.get_fe_params(cov_re_unscaled, vcomp_unscaled)
params.fe_params = fe_params
scale = self.get_scale(fe_params, cov_re_unscaled, vcomp_unscaled)
cov_re = scale * cov_re_unscaled
vcomp = scale * vcomp_unscaled
f1 = (self.k_re > 0) and (np.min(np.abs(np.diag(cov_re))) < 0.01)
f2 = (self.k_vc > 0) and (np.min(np.abs(vcomp)) < 0.01)
if f1 or f2:
msg = "The MLE may be on the boundary of the parameter space."
warnings.warn(msg, ConvergenceWarning)
# Compute the Hessian at the MLE. Note that this is the
# Hessian with respect to the random effects covariance matrix
# (not its square root). It is used for obtaining standard
# errors, not for optimization.
hess = self.hessian(params)
hess_diag = np.diag(hess)
if free is not None:
pcov = np.zeros_like(hess)
pat = self._freepat.get_packed(use_sqrt=False, has_fe=True)
ii = np.flatnonzero(pat)
hess_diag = hess_diag[ii]
if len(ii) > 0:
hess1 = hess[np.ix_(ii, ii)]
pcov[np.ix_(ii, ii)] = np.linalg.inv(-hess1)
else:
pcov = np.linalg.inv(-hess)
if np.any(hess_diag >= 0):
msg = ("The Hessian matrix at the estimated parameter values " +
"is not positive definite.")
warnings.warn(msg, ConvergenceWarning)
# Prepare a results class instance
params_packed = params.get_packed(use_sqrt=False, has_fe=True)
results = MixedLMResults(self, params_packed, pcov / scale)
results.params_object = params
results.fe_params = fe_params
results.cov_re = cov_re
results.vcomp = vcomp
results.scale = scale
results.cov_re_unscaled = cov_re_unscaled
results.method = "REML" if self.reml else "ML"
results.converged = converged
results.hist = hist
results.reml = self.reml
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
results.k_vc = self.k_vc
results.use_sqrt = self.use_sqrt
results.freepat = self._freepat
return MixedLMResultsWrapper(results)
def get_distribution(self, params, scale, exog):
return _mixedlm_distribution(self, params, scale, exog)
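# Minimal usage sketch (not part of the original statsmodels source; all names
# below are illustrative). It fits a random-intercept model on synthetic data,
# trying 'lbfgs' first and falling back to 'cg' if it does not converge, as
# supported by the `method` list argument of MixedLM.fit above.
def _example_mixedlm_fit():
    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf

    rng = np.random.default_rng(0)
    groups = np.repeat(np.arange(20), 10)          # 20 groups, 10 obs each
    x = rng.normal(size=groups.size)
    u = rng.normal(scale=0.5, size=20)             # group random intercepts
    y = 1.0 + 2.0 * x + u[groups] + rng.normal(size=groups.size)
    df = pd.DataFrame({"y": y, "x": x, "g": groups})

    model = smf.mixedlm("y ~ x", df, groups=df["g"])
    result = model.fit(method=["lbfgs", "cg"], reml=True)
    return result.summary()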
class _mixedlm_distribution(object):
"""
A private class for simulating data from a given mixed linear model.
Parameters
----------
model : MixedLM instance
A mixed linear model
params : array_like
A parameter vector defining a mixed linear model. See
notes for more information.
scale : scalar
The unexplained variance
exog : array_like
An array of fixed effect covariates. If None, model.exog
is used.
Notes
-----
The params array is a vector containing fixed effects parameters,
random effects parameters, and variance component parameters, in
that order. The lower triangle of the random effects covariance
matrix is stored. The random effects and variance components
parameters are divided by the scale parameter.
This class is used in Mediation, and possibly elsewhere.
"""
def __init__(self, model, params, scale, exog):
self.model = model
self.exog = exog if exog is not None else model.exog
po = MixedLMParams.from_packed(
params, model.k_fe, model.k_re, False, True)
self.fe_params = po.fe_params
self.cov_re = scale * po.cov_re
self.vcomp = scale * po.vcomp
self.scale = scale
group_idx = np.zeros(model.nobs, dtype=np.int)
for k, g in enumerate(model.group_labels):
group_idx[model.row_indices[g]] = k
self.group_idx = group_idx
def rvs(self, n):
"""
Return a vector of simulated values from a mixed linear
model.
The parameter n is ignored, but required by the interface
"""
model = self.model
# Fixed effects
y = np.dot(self.exog, self.fe_params)
# Random effects
u = np.random.normal(size=(model.n_groups, model.k_re))
u = np.dot(u, np.linalg.cholesky(self.cov_re).T)
y += (u[self.group_idx, :] * model.exog_re).sum(1)
# Variance components
for j, _ in enumerate(model.exog_vc.names):
ex = model.exog_vc.mats[j]
v = self.vcomp[j]
for i, g in enumerate(model.group_labels):
exg = ex[i]
ii = model.row_indices[g]
u = np.random.normal(size=exg.shape[1])
y[ii] += np.sqrt(v) * np.dot(exg, u)
# Residual variance
y += np.sqrt(self.scale) * np.random.normal(size=len(y))
return y
class MixedLMResults(base.LikelihoodModelResults, base.ResultMixin):
'''
Class to contain results of fitting a linear mixed effects model.
MixedLMResults inherits from statsmodels.LikelihoodModelResults
Parameters
----------
See statsmodels.LikelihoodModelResults
Attributes
----------
model : class instance
Pointer to MixedLM model instance that called fit.
normalized_cov_params : array
The sampling covariance matrix of the estimates
params : array
A packed parameter vector for the profile parameterization.
The first `k_fe` elements are the estimated fixed effects
coefficients. The remaining elements are the estimated
variance parameters. The variance parameters are all divided
by `scale` and are not the variance parameters shown
in the summary.
fe_params : array
The fitted fixed-effects coefficients
cov_re : array
The fitted random-effects covariance matrix
bse_fe : array
The standard errors of the fitted fixed effects coefficients
bse_re : array
The standard errors of the fitted random effects covariance
matrix and variance components. The first `k_re * (k_re + 1) / 2`
parameters are the standard errors for the lower triangle of
`cov_re`, the remaining elements are the standard errors for
the variance components.
See Also
--------
statsmodels.LikelihoodModelResults
'''
def __init__(self, model, params, cov_params):
super(MixedLMResults, self).__init__(model, params,
normalized_cov_params=cov_params)
self.nobs = self.model.nobs
self.df_resid = self.nobs - np.linalg.matrix_rank(self.model.exog)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values for the model.
The fitted values reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
fit = np.dot(self.model.exog, self.fe_params)
re = self.random_effects
for group_ix, group in enumerate(self.model.group_labels):
ix = self.model.row_indices[group]
mat = []
if self.model.exog_re_li is not None:
mat.append(self.model.exog_re_li[group_ix])
for j in range(self.k_vc):
mat.append(self.model.exog_vc.mats[j][group_ix])
mat = np.concatenate(mat, axis=1)
fit[ix] += np.dot(mat, re[group])
return fit
@cache_readonly
def resid(self):
"""
Returns the residuals for the model.
The residuals reflect the mean structure specified by the
fixed effects and the predicted random effects.
"""
return self.model.endog - self.fittedvalues
@cache_readonly
def bse_fe(self):
"""
Returns the standard errors of the fixed effect regression
coefficients.
"""
p = self.model.exog.shape[1]
return np.sqrt(np.diag(self.cov_params())[0:p])
@cache_readonly
def bse_re(self):
"""
Returns the standard errors of the variance parameters.
The first `k_re * (k_re + 1) / 2` elements of the returned array
are the standard errors of the lower triangle of `cov_re`.
The remaining elements are the standard errors of the variance
components.
Note that the sampling distribution of variance parameters is
strongly skewed unless the sample size is large, so these
standard errors may not give meaningful confidence intervals
or p-values if used in the usual way.
"""
p = self.model.exog.shape[1]
return np.sqrt(self.scale * np.diag(self.cov_params())[p:])
def _expand_re_names(self, group_ix):
names = list(self.model.data.exog_re_names)
for j, v in enumerate(self.model.exog_vc.names):
vg = self.model.exog_vc.colnames[j][group_ix]
na = ["%s[%s]" % (v, s) for s in vg]
names.extend(na)
return names
@cache_readonly
def random_effects(self):
"""
The conditional means of random effects given the data.
Returns
-------
random_effects : dict
A dictionary mapping the distinct `group` values to the
conditional means of the random effects for the group
given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
raise ValueError("Cannot predict random effects from " +
"singular covariance structure.")
vcomp = self.vcomp
k_re = self.k_re
ranef_dict = {}
for group_ix, group in enumerate(self.model.group_labels):
endog = self.model.endog_li[group_ix]
exog = self.model.exog_li[group_ix]
ex_r = self.model._aex_r[group_ix]
ex2_r = self.model._aex_r2[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
# Get the residuals relative to fixed effects
resid = endog
if self.k_fe > 0:
expval = np.dot(exog, self.fe_params)
resid = resid - expval
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
vir = solver(resid)
xtvir = _dot(ex_r.T, vir)
xtvir[0:k_re] = np.dot(self.cov_re, xtvir[0:k_re])
xtvir[k_re:] *= vc_var
ranef_dict[group] = pd.Series(
xtvir, index=self._expand_re_names(group_ix))
return ranef_dict
@cache_readonly
def random_effects_cov(self):
"""
Returns the conditional covariance matrix of the random
effects for each group given the data.
Returns
-------
random_effects_cov : dict
A dictionary mapping the distinct values of the `group`
variable to the conditional covariance matrix of the
random effects given the data.
"""
try:
cov_re_inv = np.linalg.inv(self.cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
vcomp = self.vcomp
ranef_dict = {}
for group_ix in range(self.model.n_groups):
ex_r = self.model._aex_r[group_ix]
ex2_r = self.model._aex_r2[group_ix]
label = self.model.group_labels[group_ix]
vc_var = self.model._expand_vcomp(vcomp, group_ix)
solver = _smw_solver(self.scale, ex_r, ex2_r, cov_re_inv,
1 / vc_var)
n = ex_r.shape[0]
m = self.cov_re.shape[0]
mat1 = np.empty((n, m + len(vc_var)))
mat1[:, 0:m] = np.dot(ex_r[:, 0:m], self.cov_re)
mat1[:, m:] = np.dot(ex_r[:, m:], np.diag(vc_var))
mat2 = solver(mat1)
mat2 = np.dot(mat1.T, mat2)
v = -mat2
v[0:m, 0:m] += self.cov_re
ix = np.arange(m, v.shape[0])
v[ix, ix] += vc_var
na = self._expand_re_names(group_ix)
v = pd.DataFrame(v, index=na, columns=na)
ranef_dict[label] = v
return ranef_dict
# Need to override since t-tests are only used for fixed effects
# parameters.
def t_test(self, r_matrix, scale=None, use_t=None):
"""
Compute a t-test for each linear hypothesis of the form Rb = q
Parameters
----------
r_matrix : array_like
If an array is given, a p x k 2d array or length k 1d
array specifying the linear restrictions. It is assumed
that the linear combination is equal to zero.
scale : float, optional
An optional `scale` to use. Default is the scale specified
by the model fit.
use_t : bool, optional
If use_t is None, then the default of the model is used.
If use_t is True, then the p-values are based on the t
distribution.
If use_t is False, then the p-values are based on the normal
distribution.
Returns
-------
res : ContrastResults instance
The results for the test are attributes of this results instance.
The available results have the same elements as the parameter table
in `summary()`.
"""
if scale is not None:
import warnings
warnings.warn('scale has no effect and is deprecated. It will '
'be removed in the next version.',
DeprecationWarning)
if r_matrix.shape[1]!= self.k_fe:
raise ValueError("r_matrix for t-test should have %d columns"
% self.k_fe)
d = self.k_re2 + self.k_vc
z0 = np.zeros((r_matrix.shape[0], d))
r_matrix = np.concatenate((r_matrix, z0), axis=1)
tst_rslt = super(MixedLMResults, self).t_test(r_matrix, use_t=use_t)
return tst_rslt
def summary(self, yname=None, xname_fe=None, xname_re=None,
title=None, alpha=.05):
"""
Summarize the mixed model regression results.
Parameters
----------
yname : str, optional
Default is `y`
xname_fe : list of strings, optional
Fixed effects covariate names
xname_re : list of strings, optional
Random effects covariate names
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
info = OrderedDict()
info["Model:"] = "MixedLM"
if yname is None:
yname = self.model.endog_names
param_names = self.model.data.param_names[:]
k_fe_params = len(self.fe_params)
k_re_params = len(param_names) - len(self.fe_params)
if xname_fe is not None:
if len(xname_fe)!= k_fe_params:
msg = "xname_fe should be a list of length %d" % k_fe_params
raise ValueError(msg)
param_names[:k_fe_params] = xname_fe
if xname_re is not None:
if len(xname_re)!= k_re_params:
msg = "xname_re should be a list of length %d" % k_re_params
raise ValueError(msg)
param_names[k_fe_params:] = xname_re
info["No. Observations:"] = str(self.model.n_totobs)
info["No. Groups:"] = str(self.model.n_groups)
gs = np.array([len(x) for x in self.model.endog_li])
info["Min. group size:"] = "%.0f" % min(gs)
info["Max. group size:"] = "%.0f" % max(gs)
info["Mean group size:"] = "%.1f" % np.mean(gs)
info["Dependent Variable:"] = yname
info["Method:"] = self.method
info["Scale:"] = self.scale
info["Likelihood:"] = self.llf
info["Converged:"] = "Yes" if self.converged else "No"
smry.add_dict(info)
smry.add_title("Mixed Linear Model Regression Results")
float_fmt = "%.3f"
sdf = np.nan * np.ones((self.k_fe + self.k_re2 + self.k_vc, 6))
# Coefficient estimates
sdf[0:self.k_fe, 0] = self.fe_params
# Standard errors
sdf[0:self.k_fe, 1] = np.sqrt(np.diag(self.cov_params()[0:self.k_fe]))
# Z-scores
sdf[0:self.k_fe, 2] = sdf[0:self.k_fe, 0] / sdf[0:self.k_fe, 1]
# p-values
sdf[0:self.k_fe, 3] = 2 * norm.cdf(-np.abs(sdf[0:self.k_fe, 2]))
# Confidence intervals
qm = -norm.ppf(alpha / 2)
sdf[0:self.k_fe, 4] = sdf[0:self.k_fe, 0] - qm * sdf[0:self.k_fe, 1]
sdf[0:self.k_fe, 5] = sdf[0:self.k_fe, 0] + qm * sdf[0:self.k_fe, 1]
# All random effects variances and covariances
jj = self.k_fe
for i in range(self.k_re):
for j in range(i + 1):
sdf[jj, 0] = self.cov_re[i, j]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
# Variance components
for i in range(self.k_vc):
sdf[jj, 0] = self.vcomp[i]
sdf[jj, 1] = np.sqrt(self.scale) * self.bse[jj]
jj += 1
sdf = pd.DataFrame(index=param_names, data=sdf)
sdf.columns = ['Coef.', 'Std.Err.', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
for col in sdf.columns:
sdf[col] = [float_fmt % x if np.isfinite(x) else ""
for x in sdf[col]]
smry.add_df(sdf, align='r')
return smry
@cache_readonly
def llf(self):
return self.model.loglike(self.params_object, profile_fe=False)
@cache_readonly
def aic(self):
"""Akaike information criterion"""
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * (self.llf - df)
@cache_readonly
def bic(self):
"""Bayesian information criterion"""
if self.reml:
return np.nan
if self.freepat is not None:
df = self.freepat.get_packed(use_sqrt=False, has_fe=True).sum() + 1
else:
df = self.params.size + 1
return -2 * self.llf + np.log(self.nobs) * df
def profile_re(self, re_ix, vtype, num_low=5, dist_low=1., num_high=5,
dist_high=1.):
"""
Profile-likelihood inference for variance parameters.
Parameters
----------
re_ix : integer
If vtype is `re`, this value is the index of the variance
parameter for which to construct a profile likelihood. If
`vtype` is 'vc' then `re_ix` is the name of the variance
parameter to be profiled.
vtype : string
Either 're' or 'vc', depending on whether the profile
analysis is for a random effect or a variance component.
num_low : integer
The number of points at which to calculate the likelihood
below the MLE of the parameter of interest.
dist_low : float
The distance below the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
num_high : integer
The number of points at which to calculate the likelihood
above the MLE of the parameter of interest.
dist_high : float
The distance above the MLE of the parameter of interest to
begin calculating points on the profile likelihood.
Returns
-------
An array with two columns. The first column contains the
values to which the parameter of interest is constrained. The
second column contains the corresponding likelihood values.
Notes
-----
Only variance parameters can be profiled.
"""
pmodel = self.model
k_fe = pmodel.k_fe
k_re = pmodel.k_re
k_vc = pmodel.k_vc
endog, exog = pmodel.endog, pmodel.exog
# Need to permute the columns of the random effects design
# matrix so that the profiled variable is in the first column.
if vtype =='re':
ix = np.arange(k_re)
ix[0] = re_ix
ix[re_ix] = 0
exog_re = pmodel.exog_re.copy()[:, ix]
# Permute the covariance structure to match the permuted
# design matrix.
params = self.params_object.copy()
cov_re_unscaled = params.cov_re
cov_re_unscaled = cov_re_unscaled[np.ix_(ix, ix)]
params.cov_re = cov_re_unscaled
ru0 = cov_re_unscaled[0, 0]
# Convert dist_low and dist_high to the profile
# parameterization
cov_re = self.scale * cov_re_unscaled
low = (cov_re[0, 0] - dist_low) / self.scale
high = (cov_re[0, 0] + dist_high) / self.scale
elif vtype == 'vc':
re_ix = self.model.exog_vc.names.index(re_ix)
params = self.params_object.copy()
vcomp = self.vcomp
low = (vcomp[re_ix] - dist_low) / self.scale
high = (vcomp[re_ix] + dist_high) / self.scale
ru0 = vcomp[re_ix] / self.scale
# Define the sequence of values to which the parameter of
# interest will be constrained.
if low <= 0:
raise ValueError("dist_low is too large and would result in a "
"negative variance. Try a smaller value.")
left = np.linspace(low, ru0, num_low + 1)
right = np.linspace(ru0, high, num_high+1)[1:]
rvalues = np.concatenate((left, right))
# Indicators of which parameters are free and fixed.
free = MixedLMParams(k_fe, k_re, k_vc)
if self.freepat is None:
free.fe_params = np.ones(k_fe)
vcomp = np.ones(k_vc)
mat = np.ones((k_re, k_re))
else:
# If a freepat already has been specified, we add the
# constraint to it.
free.fe_params = self.freepat.fe_params
vcomp = self.freepat.vcomp
mat = self.freepat.cov_re
if vtype =='re':
mat = mat[np.ix_(ix, ix)]
if vtype =='re':
mat[0, 0] = 0
else:
vcomp[re_ix] = 0
free.cov_re = mat
free.vcomp = vcomp
klass = self.model.__class__
init_kwargs = pmodel._get_init_kwds()
if vtype =='re':
init_kwargs['exog_re'] = exog_re
likev = []
for x in rvalues:
model = klass(endog, exog, **init_kwargs)
if vtype =='re':
cov_re = params.cov_re.copy()
cov_re[0, 0] = x
params.cov_re = cov_re
else:
params.vcomp[re_ix] = x
# TODO should use fit_kwargs
rslt = model.fit(start_params=params, free=free,
reml=self.reml, cov_pen=self.cov_pen)._results
likev.append([x * rslt.scale, rslt.llf])
likev = np.asarray(likev)
return likev
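# Sketch (not part of the original source): profile the first random-effect
# variance of a fitted model. `result` is assumed to be a MixedLMResults (or
# its wrapper) returned by MixedLM(...).fit(); `half_width` must be smaller
# than the estimated variance, otherwise profile_re raises the ValueError
# documented above.
def _example_profile_first_re(result, half_width=0.2):
    # Two columns: constrained variance value, corresponding log-likelihood.
    return result.profile_re(0, vtype='re', num_low=5, dist_low=half_width,
                             num_high=5, dist_high=half_width)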
class MixedLMResultsWrapper(base.LikelihoodResultsWrapper):
_attrs = {'bse_re': ('generic_columns', 'exog_re_names_full'),
'fe_params': ('generic_columns', 'xnames'),
'bse_fe': ('generic_columns', 'xnames'),
'cov_re': ('generic_columns_2d', 'exog_re_names'),
'cov_re_unscaled': ('generic_columns_2d', 'exog_re_names'),
}
_upstream_attrs = base.LikelihoodResultsWrapper._wrap_attrs
_wrap_attrs = base.wrap.union_dicts(_attrs, _upstream_attrs)
_methods = {}
_upstream_methods = base.LikelihoodResultsWrapper._wrap_methods
_wrap_methods = base.wrap.union_dicts(_methods, _upstream_methods)
def _handle_missing(data, groups, formula, re_formula, vc_formula):
tokens = set([])
forms = [formula]
if re_formula is not None:
forms.append(re_formula)
if vc_formula is not None:
forms.extend(vc_formula.values())
import tokenize
from statsmodels.compat.python import StringIO, asunicode
skiptoks = {"(", ")", "*", ":", "+", "-", "**", "/"}
for fml in forms:
# Unicode conversion is for Py2 compatibility
rl = StringIO(fml)
def rlu():
line = rl.readline()
return asunicode(line, 'ascii')
g = tokenize.generate_tokens(rlu)
for tok in g:
if tok.string not in skiptoks:
tokens.add(tok.string)
tokens = sorted(tokens & set(data.columns))
data = data[tokens]
ii = pd.notnull(data).all(1)
if not isinstance(groups, str):
ii &= pd.notnull(groups)
return data.loc[ii, :], groups[np.asarray(ii)] |
|
statsmodels__statsmodels | optimization.rst | Description / Module doc | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/optimization.rst | [
"statsmodels__statsmodels/statsmodels/base/optimizer.py"
] | Optimization
statsmodels uses three types of algorithms for the estimation of the
parameters of a model.
1. Basic linear models such as WLS and OLS <regression> are directly
estimated using appropriate linear algebra.
2. RLM <rlm> and GLM <glm>, use iteratively re-weighted least
squares. However, you can optionally select one of the scipy
optimizers discussed below.
3. For all other models, we use optimizers from scipy.
Where practical, certain models allow for the optional selection of a
scipy optimizer. A particular scipy optimizer might be default or an
option. Depending on the model and the data, choosing an appropriate
scipy optimizer can help avoid local minima, fit models in
less time, or fit a model with less memory.
statsmodels supports the following optimizers along with keyword
arguments associated with that specific optimizer:
- newton - Newton-Raphson iteration. While not directly from scipy, we
consider it an optimizer because only the score and hessian are
required.
tol : float
Relative error in params acceptable for convergence.
- nm - Nelder-Mead optimization, scipy's fmin.
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for convergence
maxfun : int
Maximum number of function evaluations to make.
- bfgs - Broyden–Fletcher–Goldfarb–Shanno optimization, scipy's
fmin_bfgs.
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step size.
Only relevant if LikelihoodModel.score is None.
- lbfgs - A more memory-efficient (limited memory) implementation of
bfgs. Scipy's fmin_l_bfgs_b.
m : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many
terms in an approximation to it.)
pgtol : float
The iteration will stop when
max{|proj g_i | i = 1, ..., n} <= pgtol where pg_i is the i-th
component of the projected gradient.
factr : float
The iteration stops when
(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps, where
eps is the machine precision, which is automatically generated
by the code. Typical values for factr are: 1e12 for low
accuracy; 1e7 for moderate accuracy; 10.0 for extremely high
accuracy. See Notes for relationship to ftol, which is exposed
(instead of factr) by the scipy.optimize.minimize interface to
L-BFGS-B.
maxfun : int
Maximum number of iterations.
epsilon : float
Step size used when approx_grad is True, for numerically
calculating the gradient
approx_grad : bool
Whether to approximate the gradient numerically (in which case
func returns only the function value).
- cg - Conjugate gradient optimization. Scipy's fmin_cg.
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step size.
Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
- ncg - Newton conjugate gradient. Scipy's fmin_ncg.
fhess_p : callable f'(x, *args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if LikelihoodModel.hessian
is None.
avextol : float
Stop when the average relative error in the minimizer falls
below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
- powell - Powell's method. Scipy's fmin_powell.
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
- basinhopping - Basin hopping. This is part of scipy's basinhopping
tools.
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the same
for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps in
function value will be accepted. For best results T should be
comparable to the separation (in function value) between local
minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the stepsize.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
scipy.optimize.minimize(), for example 'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- args <- fargs
- jac <- score
- hess <- hess
- minimize - Allows the use of any scipy optimizer.
min_method : str, optional
Name of minimization method to use. Any method specific
arguments can be passed directly. For a list of methods and
their arguments, see documentation of scipy.optimize.minimize.
If no method is specified, then BFGS is used.
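For example (a minimal sketch with illustrative variable names, not taken
from the statsmodels documentation), the optimizer and its keyword arguments
are passed straight to a model's fit method::

    import statsmodels.api as sm

    spector = sm.datasets.spector.load_pandas()
    exog = sm.add_constant(spector.exog)
    model = sm.Logit(spector.endog, exog)

    # Nelder-Mead with a tighter tolerance and a larger evaluation budget
    res_nm = model.fit(method='nm', maxiter=5000, xtol=1e-6, maxfun=5000)

    # Any scipy.optimize.minimize method via the generic wrapper
    res_lb = model.fit(method='minimize', min_method='L-BFGS-B')
    print(res_nm.params, res_lb.params)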
Model Class
Generally, there is no need for an end-user to directly call these
functions and classes. However, we provide the class because the
different optimization techniques have unique keyword arguments that may
be useful to the user.
| """
Functions that are general enough to use for any model fitting. The idea is
to untie these from LikelihoodModel so that they may be re-used generally.
"""
import numpy as np
from scipy import optimize
def _check_method(method, methods):
if method not in methods:
message = "Unknown fit method %s" % method
raise ValueError(message)
class Optimizer(object):
def _fit(self, objective, gradient, start_params, fargs, kwargs,
hessian=None, method='newton', maxiter=100, full_output=True,
disp=True, callback=None, retall=False):
"""
Fit function for any model with an objective function.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is an array of zeros.
method : str {'newton','nm','bfgs','powell','cg','ncg','basinhopping',
'minimize'}
Method can be 'newton' for Newton-Raphson, 'nm' for Nelder-Mead,
'bfgs' for Broyden-Fletcher-Goldfarb-Shanno, 'powell' for modified
Powell's method, 'cg' for conjugate gradient, 'ncg' for Newton-
conjugate gradient, 'basinhopping' for global basin-hopping
solver, if available, or a generic 'minimize' which is a wrapper for
scipy.optimize.minimize. `method` determines which solver from
scipy.optimize is used. The explicit arguments in `fit` are passed
to the solver, with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int
The maximum number of iterations to perform.
full_output : bool
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : bool
Set to True to print convergence messages.
fargs : tuple
Extra arguments passed to the likelihood function, i.e.,
loglike(x,*args)
callback : callable callback(xk)
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
retall : bool
Set to True to return list of solutions at each iteration.
Available in Results object's mle_retvals attribute.
Returns
-------
xopt : array
The solution to the objective function
retvals : dict, None
If `full_output` is True then this is a dictionary which holds
information returned from the solver used. If it is False, this is
None.
optim_settings : dict
A dictionary that contains the parameters passed to the solver.
Notes
-----
The 'basinhopping' solver ignores `maxiter`, `retall`, `full_output`
explicit arguments.
Optional arguments for the solvers (available in Results.mle_settings)::
'newton'
tol : float
Relative error in params acceptable for convergence.
'nm' -- Nelder Mead
xtol : float
Relative error in params acceptable for convergence
ftol : float
Relative error in loglike(params) acceptable for
convergence
maxfun : int
Maximum number of function evaluations to make.
'bfgs'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon
If fprime is approximated, use this value for the step
size. Only relevant if LikelihoodModel.score is None.
'lbfgs'
m : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many
terms in an approximation to it.)
pgtol : float
The iteration will stop when
``max{|proj g_i | i = 1,..., n} <= pgtol`` where pg_i is
the i-th component of the projected gradient.
factr : float
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where eps is the machine precision, which is automatically
generated by the code. Typical values for factr are: 1e12
for low accuracy; 1e7 for moderate accuracy; 10.0 for
extremely high accuracy. See Notes for relationship to
ftol, which is exposed (instead of factr) by the
scipy.optimize.minimize interface to L-BFGS-B.
maxfun : int
Maximum number of iterations.
epsilon : float
Step size used when approx_grad is True, for numerically
calculating the gradient
approx_grad : bool
Whether to approximate the gradient numerically (in which
case func returns only the function value).
'cg'
gtol : float
Stop when norm of gradient is less than gtol.
norm : float
Order of norm (np.Inf is max, -np.Inf is min)
epsilon : float
If fprime is approximated, use this value for the step
size. Can be scalar or vector. Only relevant if
Likelihoodmodel.score is None.
'ncg'
fhess_p : callable f'(x,*args)
Function which computes the Hessian of f times an arbitrary
vector, p. Should only be supplied if
LikelihoodModel.hessian is None.
avextol : float
Stop when the average relative error in the minimizer
falls below this amount.
epsilon : float or ndarray
If fhess is approximated, use this value for the step size.
Only relevant if Likelihoodmodel.hessian is None.
'powell'
xtol : float
Line-search error tolerance
ftol : float
Relative error in loglike(params) acceptable for
convergence.
maxfun : int
Maximum number of function evaluations to make.
start_direc : ndarray
Initial direction set.
'basinhopping'
niter : integer
The number of basin hopping iterations.
niter_success : integer
Stop the run if the global minimum candidate remains the
same for this number of iterations.
T : float
The "temperature" parameter for the accept or reject
criterion. Higher "temperatures" mean that larger jumps
in function value will be accepted. For best results
`T` should be comparable to the separation (in function
value) between local minima.
stepsize : float
Initial step size for use in the random displacement.
interval : integer
The interval for how often to update the `stepsize`.
minimizer : dict
Extra keyword arguments to be passed to the minimizer
`scipy.optimize.minimize()`, for example'method' - the
minimization method (e.g. 'L-BFGS-B'), or 'tol' - the
tolerance for termination. Other arguments are mapped from
explicit argument of `fit`:
- `args` <- `fargs`
- `jac` <- `score`
- `hess` <- `hess`
'minimize'
min_method : str, optional
Name of minimization method to use.
Any method specific arguments can be passed directly.
For a list of methods and their arguments, see
documentation of `scipy.optimize.minimize`.
If no method is specified, then BFGS is used.
"""
#TODO: generalize the regularization stuff
# Extract kwargs specific to fit_regularized calling fit
extra_fit_funcs = kwargs.setdefault('extra_fit_funcs', dict())
methods = ['newton', 'nm', 'bfgs', 'lbfgs', 'powell', 'cg', 'ncg',
'basinhopping','minimize']
methods += extra_fit_funcs.keys()
method = method.lower()
_check_method(method, methods)
fit_funcs = {
'newton': _fit_newton,
'nm': _fit_nm, # Nelder-Mead
'bfgs': _fit_bfgs,
'lbfgs': _fit_lbfgs,
'cg': _fit_cg,
'ncg': _fit_ncg,
'powell': _fit_powell,
'basinhopping': _fit_basinhopping,
'minimize': _fit_minimize # wrapper for scipy.optimize.minimize
}
#NOTE: fit_regularized checks the methods for these but it should be
# moved up probably
if extra_fit_funcs:
fit_funcs.update(extra_fit_funcs)
func = fit_funcs[method]
xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
disp=disp, maxiter=maxiter, callback=callback,
retall=retall, full_output=full_output,
hess=hessian)
optim_settings = {'optimizer': method,'start_params': start_params,
'maxiter': maxiter, 'full_output': full_output,
'disp': disp, 'fargs': fargs, 'callback': callback,
'retall': retall}
optim_settings.update(kwargs)
# set as attributes or return?
return xopt, retvals, optim_settings
def _fit_constrained(self, params):
"""
TODO: how to add constraints?
Something like
sm.add_constraint(Model, func)
or
model_instance.add_constraint(func)
model_instance.add_constraint("x1 + x2 = 2")
result = model_instance.fit()
"""
raise NotImplementedError
def _fit_regularized(self, params):
# TODO: code won't necessarily be general here. 3 options.
# 1) setup for scipy.optimize.fmin_sqlsqp
# 2) setup for cvxopt
# 3) setup for openopt
raise NotImplementedError
########################################
# Helper functions to fit
def _fit_minimize(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
kwargs.setdefault('min_method', 'BFGS')
# prepare options dict for minimize
filter_opts = ['extra_fit_funcs', 'niter','min_method', 'tol']
options = dict((k,v) for k,v in kwargs.items() if k not in filter_opts)
options['disp'] = disp
options['maxiter'] = maxiter
# Use Hessian/Jacobian only if they're required by the method
no_hess = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'COBYLA', 'SLSQP']
no_jac = ['Nelder-Mead', 'Powell', 'COBYLA']
if kwargs['min_method'] in no_hess:
hess = None
if kwargs['min_method'] in no_jac:
score = None
res = optimize.minimize(f, start_params, args=fargs, method=kwargs['min_method'],
jac=score, hess=hess, callback=callback, options=options)
xopt = res.x
retvals = None
if full_output:
nit = getattr(res, 'nit', np.nan) # scipy 0.14 compat
retvals = {'fopt': res.fun, 'iterations': nit,
'fcalls': res.nfev, 'warnflag': res.status,
'converged': res.success}
if retall:
retvals.update({'allvecs': res.values()})
return xopt, retvals
def _fit_newton(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None, ridge_factor=1e-10):
tol = kwargs.setdefault('tol', 1e-8)
iterations = 0
oldparams = np.inf
newparams = np.asarray(start_params)
if retall:
history = [oldparams, newparams]
while (iterations < maxiter and np.any(np.abs(newparams -
oldparams) > tol)):
H = np.asarray(hess(newparams))
# regularize Hessian, not clear what ridge factor should be
# keyword option with absolute default 1e-10, see #1847
if not np.all(ridge_factor == 0):
H[np.diag_indices(H.shape[0])] += ridge_factor
oldparams = newparams
newparams = oldparams - np.dot(np.linalg.inv(H),
score(oldparams))
if retall:
history.append(newparams)
if callback is not None:
callback(newparams)
iterations += 1
fval = f(newparams, *fargs) # this is the negative likelihood
if iterations == maxiter:
warnflag = 1
if disp:
print("Warning: Maximum number of iterations has been "
"exceeded.")
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
else:
warnflag = 0
if disp:
print("Optimization terminated successfully.")
print(" Current function value: %f" % fval)
print(" Iterations %d" % iterations)
if full_output:
(xopt, fopt, niter,
gopt, hopt) = (newparams, f(newparams, *fargs),
iterations, score(newparams),
hess(newparams))
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter,'score': gopt,
'Hessian': hopt, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': history})
else:
xopt = newparams
retvals = None
return xopt, retvals
def _fit_bfgs(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.Inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_bfgs(f, start_params, score, args=fargs,
gtol=gtol, norm=norm, epsilon=epsilon,
maxiter=maxiter, full_output=full_output,
disp=disp, retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, gopt, Hinv, fcalls, gcalls, warnflag = retvals
else:
(xopt, fopt, gopt, Hinv, fcalls,
gcalls, warnflag, allvecs) = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'gopt': gopt, 'Hinv': Hinv,
'fcalls': fcalls, 'gcalls': gcalls, 'warnflag':
warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_lbfgs(f, score, start_params, fargs, kwargs, disp=True, maxiter=100,
callback=None, retall=False, full_output=True, hess=None):
"""
Fit model using L-BFGS algorithm
Parameters
----------
f : function
Returns negative log likelihood given parameters.
score : function
Returns gradient of negative log likelihood with respect to params.
Notes
-----
Within the mle part of statsmodels, the log likelihood function and
its gradient with respect to the parameters do not have notationally
consistent sign.
"""
# Use unconstrained optimization by default.
bounds = kwargs.setdefault('bounds', [(None, None)] * len(start_params))
kwargs.setdefault('iprint', 0)
# Pass the following keyword argument names through to fmin_l_bfgs_b
# if they are present in kwargs, otherwise use the fmin_l_bfgs_b
# default values.
names = ('m', 'pgtol', 'factr','maxfun', 'epsilon', 'approx_grad')
extra_kwargs = dict((x, kwargs[x]) for x in names if x in kwargs)
# Extract values for the options related to the gradient.
approx_grad = kwargs.get('approx_grad', False)
loglike_and_score = kwargs.get('loglike_and_score', None)
epsilon = kwargs.get('epsilon', None)
# The approx_grad flag has superpowers nullifying the score function arg.
if approx_grad:
score = None
# Choose among three options for dealing with the gradient (the gradient
# of a log likelihood function with respect to its parameters
# is more specifically called the score in statistics terminology).
# The first option is to use the finite-differences
# approximation that is built into the fmin_l_bfgs_b optimizer.
# The second option is to use the provided score function.
# The third option is to use the score component of a provided
# function that simultaneously evaluates the log likelihood and score.
if epsilon and not approx_grad:
raise ValueError('a finite-differences epsilon was provided '
'even though we are not using approx_grad')
if approx_grad and loglike_and_score:
raise ValueError('gradient approximation was requested '
'even though an analytic loglike_and_score function '
'was given')
if loglike_and_score:
func = lambda p, *a : tuple(-x for x in loglike_and_score(p, *a))
elif score:
func = f
extra_kwargs['fprime'] = score
elif approx_grad:
func = f
retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
callback=callback, args=fargs,
bounds=bounds, disp=disp,
**extra_kwargs)
if full_output:
xopt, fopt, d = retvals
# The warnflag is
# 0 if converged
# 1 if too many function evaluations or too many iterations
# 2 if stopped for another reason, given in d['task']
warnflag = d['warnflag']
converged = (warnflag == 0)
gopt = d['grad']
fcalls = d['funcalls']
iterations = d['nit']
retvals = {'fopt': fopt, 'gopt': gopt, 'fcalls': fcalls,
'warnflag': warnflag, 'converged': converged,
'iterations': iterations}
else:
xopt = retvals[0]
retvals = None
return xopt, retvals
def _fit_nm(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
retvals = optimize.fmin(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp, retall=retall,
callback=callback)
if full_output:
if not retall:
xopt, fopt, niter, fcalls, warnflag = retvals
else:
xopt, fopt, niter, fcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_cg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
gtol = kwargs.setdefault('gtol', 1.0000000000000001e-05)
norm = kwargs.setdefault('norm', np.Inf)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_cg(f, start_params, score, gtol=gtol, norm=norm,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, warnflag, allvecs = retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'warnflag': warnflag, 'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_ncg(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
fhess_p = kwargs.setdefault('fhess_p', None)
avextol = kwargs.setdefault('avextol', 1.0000000000000001e-05)
epsilon = kwargs.setdefault('epsilon', 1.4901161193847656e-08)
retvals = optimize.fmin_ncg(f, start_params, score, fhess_p=fhess_p,
fhess=hess, args=fargs, avextol=avextol,
epsilon=epsilon, maxiter=maxiter,
full_output=full_output, disp=disp,
retall=retall, callback=callback)
if full_output:
if not retall:
xopt, fopt, fcalls, gcalls, hcalls, warnflag = retvals
else:
xopt, fopt, fcalls, gcalls, hcalls, warnflag, allvecs =\
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'fcalls': fcalls, 'gcalls': gcalls,
'hcalls': hcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_powell(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
xtol = kwargs.setdefault('xtol', 0.0001)
ftol = kwargs.setdefault('ftol', 0.0001)
maxfun = kwargs.setdefault('maxfun', None)
start_direc = kwargs.setdefault('start_direc', None)
retvals = optimize.fmin_powell(f, start_params, args=fargs, xtol=xtol,
ftol=ftol, maxiter=maxiter, maxfun=maxfun,
full_output=full_output, disp=disp,
retall=retall, callback=callback,
direc=start_direc)
if full_output:
if not retall:
xopt, fopt, direc, niter, fcalls, warnflag = retvals
else:
xopt, fopt, direc, niter, fcalls, warnflag, allvecs =\
retvals
converged = not warnflag
retvals = {'fopt': fopt, 'direc': direc, 'iterations': niter,
'fcalls': fcalls, 'warnflag': warnflag,
'converged': converged}
if retall:
retvals.update({'allvecs': allvecs})
else:
xopt = retvals
retvals = None
return xopt, retvals
def _fit_basinhopping(f, score, start_params, fargs, kwargs, disp=True,
maxiter=100, callback=None, retall=False,
full_output=True, hess=None):
from copy import copy
kwargs = copy(kwargs)
niter = kwargs.setdefault('niter', 100)
niter_success = kwargs.setdefault('niter_success', None)
T = kwargs.setdefault('T', 1.0)
stepsize = kwargs.setdefault('stepsize', 0.5)
interval = kwargs.setdefault('interval', 50)
minimizer_kwargs = kwargs.get('minimizer', {})
minimizer_kwargs['args'] = fargs
minimizer_kwargs['jac'] = score
method = minimizer_kwargs.get('method', None)
if method and method!= 'L-BFGS-B': # l_bfgs_b doesn't take a hessian
minimizer_kwargs['hess'] = hess
retvals = optimize.basinhopping(f, start_params,
minimizer_kwargs=minimizer_kwargs,
niter=niter, niter_success=niter_success,
T=T, stepsize=stepsize, disp=disp,
callback=callback, interval=interval)
if full_output:
xopt, fopt, niter, fcalls = map(lambda x : getattr(retvals, x),
['x', 'fun', 'nit', 'nfev'])
converged = 'completed successfully' in retvals.message[0]
retvals = {'fopt': fopt, 'iterations': niter,
'fcalls': fcalls, 'converged': converged}
else:
xopt = retvals.x
retvals = None
return xopt, retvals |
|
statsmodels__statsmodels | regression.rst | Description / Module doc | Generate description to this module | BSD 3-Clause New or Revised License | statsmodels__statsmodels/docs/source/regression.rst | [
"statsmodels__statsmodels/statsmodels/regression/quantile_regression.py",
"statsmodels__statsmodels/statsmodels/regression/linear_model.py",
"statsmodels__statsmodels/statsmodels/regression/dimred.py",
"statsmodels__statsmodels/statsmodels/regression/process_regression.py",
"statsmodels__statsmodels/statsmodels/regression/recursive_ls.py"
] | Linear Regression
Linear models with independently and identically distributed errors, and
for errors with heteroscedasticity or autocorrelation. This module
allows estimation by ordinary least squares (OLS), weighted least
squares (WLS), generalized least squares (GLS), and feasible generalized
least squares with autocorrelated AR(p) errors.
Examples

    # Load modules and data
    import numpy as np
    import statsmodels.api as sm

    spector_data = sm.datasets.spector.load(as_pandas=False)
    spector_data.exog = sm.add_constant(spector_data.exog, prepend=False)

    # Fit and summarize OLS model
    mod = sm.OLS(spector_data.endog, spector_data.exog)
    res = mod.fit()
    print(res.summary())
Detailed examples can be found here:
- OLS
- WLS
- GLS
- Recursive LS
Technical Documentation
The statistical model is assumed to be
Y = Xβ + μ, where μ ∼ N(0,Σ).
Depending on the properties of Σ, we have currently four classes
available:
- GLS : generalized least squares for arbitrary covariance Σ
- OLS : ordinary least squares for i.i.d. errors Σ = I
- WLS : weighted least squares for heteroskedastic errors diag(Σ)
- GLSAR : feasible generalized least squares with autocorrelated AR(p)
errors Σ = Σ(ρ)
All regression models define the same methods and follow the same
structure, and can be used in a similar fashion. Some of them contain
additional model specific methods and attributes.
GLS is the superclass of the other regression classes except for
RecursiveLS. | #!/usr/bin/env python
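As a minimal sketch (synthetic data and illustrative names, not taken from
the statsmodels documentation), a WLS fit with known heteroskedastic error
variances looks like::

    import numpy as np
    import statsmodels.api as sm

    rng = np.random.default_rng(0)
    x = rng.uniform(0, 10, size=100)
    sigma = 0.5 + 0.2 * x                  # error standard deviation grows with x
    y = 1.0 + 3.0 * x + sigma * rng.normal(size=100)

    exog = sm.add_constant(x)
    wls_res = sm.WLS(y, exog, weights=1.0 / sigma**2).fit()
    print(wls_res.summary())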
'''
Quantile regression model
Model parameters are estimated using iterated reweighted least squares. The
asymptotic covariance matrix is estimated using kernel density estimation.
Author: Vincent Arel-Bundock
License: BSD-3
Created: 2013-03-19
The original IRLS function was written for Matlab by Shapour Mohammadi,
University of Tehran, 2008 ([email protected]), with some lines based on
code written by James P. Lesage in Applied Econometrics Using MATLAB (1999),
pp. 73-4. Translated to Python with permission from the original author by Christian
Prinoth (christian at prinoth dot name).
'''
from statsmodels.compat.python import range
import numpy as np
import warnings
import scipy.stats as stats
from scipy.linalg import pinv
from scipy.stats import norm
from statsmodels.tools.tools import chain_dot
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import (RegressionModel,
RegressionResults,
RegressionResultsWrapper)
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
class QuantReg(RegressionModel):
'''Quantile Regression
Estimate a quantile regression model using iterative reweighted least
squares.
Parameters
----------
endog : array or dataframe
endogenous/response variable
exog : array or dataframe
exogenous/explanatory variable(s)
Notes
-----
The Least Absolute Deviation (LAD) estimator is a special case where
quantile is set to 0.5 (q argument of the fit method).
The asymptotic covariance matrix is estimated following the procedure in
Greene (2008, p.407-408), using either the logistic or gaussian kernels
(kernel argument of the fit method).
References
----------
General:
* Birkes, D. and Y. Dodge (1993). Alternative Methods of Regression, John Wiley and Sons.
* Greene, W. H. (2008). Econometric Analysis. Sixth Edition. International Student Edition.
* Koenker, R. (2005). Quantile Regression. New York: Cambridge University Press.
* LeSage, J. P. (1999). Applied Econometrics Using MATLAB,
Kernels (used by the fit method):
* Green (2008) Table 14.2
Bandwidth selection (used by the fit method):
* Bofinger, E. (1975). Estimation of a density function using order statistics. Australian Journal of Statistics 17: 1-17.
* Chamberlain, G. (1994). Quantile regression, censoring, and the structure of wages. In Advances in Econometrics, Vol. 1: Sixth World Congress, ed. C. A. Sims, 171-209. Cambridge: Cambridge University Press.
* Hall, P., and S. Sheather. (1988). On the distribution of the Studentized quantile. Journal of the Royal Statistical Society, Series B 50: 381-391.
Keywords: Least Absolute Deviation (LAD) Regression, Quantile Regression,
Regression, Robust Estimation.
'''
def __init__(self, endog, exog, **kwargs):
super(QuantReg, self).__init__(endog, exog, **kwargs)
def whiten(self, data):
"""
QuantReg model whitener does nothing: returns data.
"""
return data
def fit(self, q=.5, vcov='robust', kernel='epa', bandwidth='hsheather',
max_iter=1000, p_tol=1e-6, **kwargs):
'''Solve by Iterative Weighted Least Squares
Parameters
----------
q : float
Quantile must be between 0 and 1
vcov : string, method used to calculate the variance-covariance matrix
of the parameters. Default is ``robust``:
- robust : heteroskedasticity robust standard errors (as suggested
in Greene 6th edition)
- iid : iid errors (as in Stata 12)
kernel : string, kernel to use in the kernel density estimation for the
asymptotic covariance matrix:
- epa: Epanechnikov
- cos: Cosine
- gau: Gaussian
- par: Parzen
bandwidth: string, Bandwidth selection method in kernel density
estimation for asymptotic covariance estimate (full
references in QuantReg docstring):
- hsheather: Hall-Sheather (1988)
- bofinger: Bofinger (1975)
- chamberlain: Chamberlain (1994)
'''
if q < 0 or q > 1:
raise Exception('q must be between 0 and 1')
kern_names = ['biw', 'cos', 'epa', 'gau', 'par']
if kernel not in kern_names:
raise Exception("kernel must be one of " + ', '.join(kern_names))
else:
kernel = kernels[kernel]
if bandwidth == 'hsheather':
bandwidth = hall_sheather
elif bandwidth == 'bofinger':
bandwidth = bofinger
elif bandwidth == 'chamberlain':
bandwidth = chamberlain
else:
raise Exception("bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'")
endog = self.endog
exog = self.exog
nobs = self.nobs
exog_rank = np.linalg.matrix_rank(self.exog)
self.rank = exog_rank
self.df_model = float(self.rank - self.k_constant)
self.df_resid = self.nobs - self.rank
n_iter = 0
xstar = exog
beta = np.ones(exog_rank)
# TODO: better start, initial beta is used only for convergence check
# Note the following doesn't work yet,
# the iteration loop always starts with OLS as initial beta
# if start_params is not None:
# if len(start_params)!= rank:
# raise ValueError('start_params has wrong length')
# beta = start_params
# else:
# # start with OLS
# beta = np.dot(np.linalg.pinv(exog), endog)
diff = 10
cycle = False
history = dict(params = [], mse=[])
while n_iter < max_iter and diff > p_tol and not cycle:
n_iter += 1
beta0 = beta
xtx = np.dot(xstar.T, exog)
xty = np.dot(xstar.T, endog)
beta = np.dot(pinv(xtx), xty)
resid = endog - np.dot(exog, beta)
mask = np.abs(resid) <.000001
resid[mask] = ((resid[mask] >= 0) * 2 - 1) *.000001
resid = np.where(resid < 0, q * resid, (1-q) * resid)
resid = np.abs(resid)
xstar = exog / resid[:, np.newaxis]
diff = np.max(np.abs(beta - beta0))
history['params'].append(beta)
history['mse'].append(np.mean(resid*resid))
if (n_iter >= 300) and (n_iter % 100 == 0):
# check for a convergence cycle, shouldn't happen
for ii in range(2, 10):
if np.all(beta == history['params'][-ii]):
cycle = True
warnings.warn("Convergence cycle detected", ConvergenceWarning)
break
if n_iter == max_iter:
warnings.warn("Maximum number of iterations (" + str(max_iter) +
") reached.", IterationLimitWarning)
e = endog - np.dot(exog, beta)
# Greene (2008, p.407) writes that Stata 6 uses this bandwidth:
# h = 0.9 * np.std(e) / (nobs**0.2)
# Instead, we calculate bandwidth as in Stata 12
iqre = stats.scoreatpercentile(e, 75) - stats.scoreatpercentile(e, 25)
h = bandwidth(nobs, q)
h = min(np.std(endog),
iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
fhat0 = 1. / (nobs * h) * np.sum(kernel(e / h))
if vcov == 'robust':
d = np.where(e > 0, (q/fhat0)**2, ((1-q)/fhat0)**2)
xtxi = pinv(np.dot(exog.T, exog))
xtdx = np.dot(exog.T * d[np.newaxis, :], exog)
vcov = chain_dot(xtxi, xtdx, xtxi)
elif vcov == 'iid':
vcov = (1. / fhat0)**2 * q * (1 - q) * pinv(np.dot(exog.T, exog))
else:
raise Exception("vcov must be 'robust' or 'iid'")
lfit = QuantRegResults(self, beta, normalized_cov_params=vcov)
lfit.q = q
lfit.iterations = n_iter
lfit.sparsity = 1. / fhat0
lfit.bandwidth = h
lfit.history = history
return RegressionResultsWrapper(lfit)
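# Minimal usage sketch (not part of the original statsmodels source; names are
# illustrative). It runs a median regression on synthetic heavy-tailed data
# with the Epanechnikov kernel and Hall-Sheather bandwidth documented above.
def _example_quantreg_fit():
    import numpy as np
    import statsmodels.api as sm

    rng = np.random.default_rng(0)
    x = rng.normal(size=200)
    y = 1.0 + 2.0 * x + rng.standard_t(df=3, size=200)   # heavy-tailed noise
    exog = sm.add_constant(x)
    res = sm.QuantReg(y, exog).fit(q=0.5, kernel='epa', bandwidth='hsheather')
    return res.params                                     # intercept and slope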
def _parzen(u):
z = np.where(np.abs(u) <=.5, 4./3 - 8. * u**2 + 8. * np.abs(u)**3,
8. * (1 - np.abs(u))**3 / 3.)
z[np.abs(u) > 1] = 0
return z
kernels = {}
kernels['biw'] = lambda u: 15. / 16 * (1 - u**2)**2 * np.where(np.abs(u) <= 1, 1, 0)
kernels['cos'] = lambda u: np.where(np.abs(u) <=.5, 1 + np.cos(2 * np.pi * u), 0)
kernels['epa'] = lambda u: 3. / 4 * (1-u**2) * np.where(np.abs(u) <= 1, 1, 0)
kernels['gau'] = lambda u: norm.pdf(u)
kernels['par'] = _parzen
#kernels['bet'] = lambda u: np.where(np.abs(u) <= 1,.75 * (1 - u) * (1 + u), 0)
#kernels['log'] = lambda u: logistic.pdf(u) * (1 - logistic.pdf(u))
#kernels['tri'] = lambda u: np.where(np.abs(u) <= 1, 1 - np.abs(u), 0)
#kernels['trw'] = lambda u: 35. / 32 * (1 - u**2)**3 * np.where(np.abs(u) <= 1, 1, 0)
#kernels['uni'] = lambda u: 1. / 2 * np.where(np.abs(u) <= 1, 1, 0)
def hall_sheather(n, q, alpha=.05):
z = norm.ppf(q)
num = 1.5 * norm.pdf(z)**2.
den = 2. * z**2. + 1.
h = n**(-1. / 3) * norm.ppf(1. - alpha / 2.)**(2./3) * (num / den)**(1./3)
return h
def bofinger(n, q):
num = 9. / 2 * norm.pdf(2 * norm.ppf(q))**4
den = (2 * norm.ppf(q)**2 + 1)**2
h = n**(-1. / 5) * (num / den)**(1. / 5)
return h
def chamberlain(n, q, alpha=.05):
return norm.ppf(1 - alpha / 2) * np.sqrt(q*(1 - q) / n)
class QuantRegResults(RegressionResults):
'''Results instance for the QuantReg model'''
@cache_readonly
def prsquared(self):
q = self.q
endog = self.model.endog
e = self.resid
e = np.where(e < 0, (1 - q) * e, q * e)
e = np.abs(e)
ered = endog - stats.scoreatpercentile(endog, q * 100)
ered = np.where(ered < 0, (1 - q) * ered, q * ered)
ered = np.abs(ered)
return 1 - np.sum(e) / np.sum(ered)
#@cache_readonly
def scale(self):
return 1.
@cache_readonly
def bic(self):
return np.nan
@cache_readonly
def aic(self):
return np.nan
@cache_readonly
def llf(self):
return np.nan
@cache_readonly
def rsquared(self):
return np.nan
@cache_readonly
def rsquared_adj(self):
return np.nan
@cache_readonly
def mse(self):
return np.nan
@cache_readonly
def mse_model(self):
return np.nan
@cache_readonly
def mse_total(self):
return np.nan
@cache_readonly
def centered_tss(self):
return np.nan
@cache_readonly
def uncentered_tss(self):
return np.nan
@cache_readonly
def HC0_se(self):
raise NotImplementedError
@cache_readonly
def HC1_se(self):
raise NotImplementedError
@cache_readonly
def HC2_se(self):
raise NotImplementedError
@cache_readonly
def HC3_se(self):
raise NotImplementedError
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
eigvals = self.eigenvals
condno = self.condition_number
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None)
]
top_right = [('Pseudo R-squared:', ["%#8.4g" % self.prsquared]),
('Bandwidth:', ["%#8.4g" % self.bandwidth]),
('Sparsity:', ["%#8.4g" % self.sparsity]),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None)
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
# add warnings/notes, added to text format only
etext = []
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
smry.add_extra_txt(etext)
return smry
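# Usage sketch (illustrative, not from the original source): QuantRegResults
# is produced by fitting the QuantReg model this results class documents.
# Data and the helper name are made up; numpy and statsmodels are assumed
# to be importable.
def _quantreg_summary_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x = sm.add_constant(rng.uniform(size=200))
    y = np.dot(x, np.array([1.0, 2.0])) + rng.standard_normal(200)
    res = sm.QuantReg(y, x).fit(q=0.5)  # median regression
    return res.summary()                # uses the summary() method above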
# TODO: Determine which tests are valid for GLSAR, and under what conditions
# TODO: Fix issue with constant and GLS
# TODO: GLS: add options Iterative GLS, for iterative fgls if sigma is None
# TODO: GLS: default if sigma is none should be two-step GLS
# TODO: Check nesting when performing model based tests, lr, wald, lm
"""
This module implements standard regression models:
Generalized Least Squares (GLS)
Ordinary Least Squares (OLS)
Weighted Least Squares (WLS)
Generalized Least Squares with autoregressive error terms GLSAR(p)
Models are specified with an endogenous response variable and an
exogenous design matrix and are fit using their `fit` method.
Subclasses that have more complicated covariance matrices
should write over the 'whiten' method as the fit method
prewhitens the response by calling 'whiten'.
General reference for regression models:
D. C. Montgomery and E.A. Peck. "Introduction to Linear Regression
Analysis." 2nd. Ed., Wiley, 1992.
Econometrics references for regression models:
R. Davidson and J.G. MacKinnon. "Econometric Theory and Methods," Oxford,
2004.
W. Greene. "Econometric Analysis," 5th ed., Pearson, 2003.
"""
from statsmodels.compat.python import lrange, lzip, range
import numpy as np
from scipy.linalg import toeplitz
from scipy import stats
from scipy import optimize
from statsmodels.tools.tools import chain_dot, pinv_extended
from statsmodels.tools.decorators import (cache_readonly,
cache_writable)
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.emplike.elregress import _ELRegOpts
import warnings
from statsmodels.tools.sm_exceptions import InvalidTestWarning
# need import in module instead of lazily to copy `__doc__`
from statsmodels.regression._prediction import PredictionResults
from . import _prediction as pred
__docformat__ = 'restructuredtext en'
__all__ = ['GLS', 'WLS', 'OLS', 'GLSAR', 'PredictionResults']
_fit_regularized_doc =\
r"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : string
'elastic_net' and 'sqrt_lasso' are currently implemented.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
L1_wt: scalar
The fraction of the penalty given to the L1 penalty term.
Must be between 0 and 1 (inclusive). If 0, the fit is a
ridge fit, if 1 it is a lasso fit.
start_params : array_like
Starting values for ``params``.
profile_scale : bool
If True the penalized fit is computed using the profile
(concentrated) log-likelihood for the Gaussian model.
Otherwise the fit uses the residual sum of squares.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
distributed : bool
If True, the model uses distributed methods for fitting,
will raise an error if True and partitions is None.
generator : function
generator used to partition the model, allows for handling
of out of memory/parallel computing.
partitions : scalar
The number of partitions desired for the distributed
estimation.
threshold : scalar or array_like
The threshold below which coefficients are zeroed out,
only used for distributed estimation
Returns
-------
A RegularizedResults instance.
Notes
-----
The elastic net uses a combination of L1 and L2 penalties.
The implementation closely follows the glmnet package in R.
The function that is minimized is:
.. math::
0.5*RSS/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where RSS is the usual regression sum of squares, n is the
sample size, and :math:`|*|_1` and :math:`|*|_2` are the L1 and L2
norms.
For WLS and GLS, the RSS is calculated using the whitened endog and
exog data.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
cnvrg_tol : float
Convergence threshold for line searches
zero_tol : float
Coefficients below this threshold are treated as zero.
The square root lasso approach is a variation of the Lasso
that is largely self-tuning (the optimal tuning parameter
does not depend on the standard deviation of the regression
errors). If the errors are Gaussian, the tuning parameter
can be taken to be
alpha = 1.1 * np.sqrt(n) * norm.ppf(1 - 0.05 / (2 * p))
where n is the sample size and p is the number of predictors.
The square root lasso uses the following keyword arguments:
zero_tol : float
Coefficients below this threshold are treated as zero.
References
----------
Friedman, Hastie, Tibshirani (2008). Regularization paths for
generalized linear models via coordinate descent. Journal of
Statistical Software 33(1), 1-22 Feb 2010.
A Belloni, V Chernozhukov, L Wang (2011). Square-root Lasso:
pivotal recovery of sparse signals via conic programming.
Biometrika 98(4), 791-806.
https://arxiv.org/pdf/1009.5689.pdf
"""
def _get_sigma(sigma, nobs):
"""
Returns sigma (matrix, nobs by nobs) for GLS and the inverse of its
Cholesky decomposition. Handles dimensions and checks integrity.
If sigma is None, returns None, None. Otherwise returns sigma,
cholsigmainv.
"""
if sigma is None:
return None, None
sigma = np.asarray(sigma).squeeze()
if sigma.ndim == 0:
sigma = np.repeat(sigma, nobs)
if sigma.ndim == 1:
if sigma.shape!= (nobs,):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = 1/np.sqrt(sigma)
else:
if sigma.shape!= (nobs, nobs):
raise ValueError("Sigma must be a scalar, 1d of length %s or a 2d "
"array of shape %s x %s" % (nobs, nobs, nobs))
cholsigmainv = np.linalg.cholesky(np.linalg.inv(sigma)).T
return sigma, cholsigmainv
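# Minimal sketch (not part of the original module) of the three shapes
# `_get_sigma` accepts: a scalar, a length-n vector, or an n x n matrix.
# The helper name is invented for illustration only.
def _sigma_shapes_demo(nobs=4):
    import numpy as np
    scalar = _get_sigma(2.0, nobs)                       # broadcast to a vector
    vector = _get_sigma(np.arange(1., nobs + 1), nobs)   # element-wise 1/sqrt
    matrix = _get_sigma(2.0 * np.eye(nobs), nobs)        # Cholesky of inverse
    return scalar, vector, matrix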
class RegressionModel(base.LikelihoodModel):
"""
Base class for linear regression models. Should not be directly called.
Intended for subclassing.
"""
def __init__(self, endog, exog, **kwargs):
super(RegressionModel, self).__init__(endog, exog, **kwargs)
self._data_attr.extend(['pinv_wexog', 'wendog', 'wexog', 'weights'])
def initialize(self):
self.wexog = self.whiten(self.exog)
self.wendog = self.whiten(self.endog)
# overwrite nobs from class Model:
self.nobs = float(self.wexog.shape[0])
self._df_model = None
self._df_resid = None
self.rank = None
@property
def df_model(self):
"""
The model degree of freedom, defined as the rank of the regressor
matrix minus 1 if a constant is included.
"""
if self._df_model is None:
if self.rank is None:
self.rank = np.linalg.matrix_rank(self.exog)
self._df_model = float(self.rank - self.k_constant)
return self._df_model
@df_model.setter
def df_model(self, value):
self._df_model = value
@property
def df_resid(self):
"""
The residual degree of freedom, defined as the number of observations
minus the rank of the regressor matrix.
"""
if self._df_resid is None:
if self.rank is None:
self.rank = np.linalg.matrix_rank(self.exog)
self._df_resid = self.nobs - self.rank
return self._df_resid
@df_resid.setter
def df_resid(self, value):
self._df_resid = value
def whiten(self, X):
raise NotImplementedError("Subclasses should implement.")
def fit(self, method="pinv", cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Full fit of the model.
The results include an estimate of covariance matrix, (whitened)
residuals and an estimate of scale.
Parameters
----------
method : str, optional
Can be "pinv", "qr". "pinv" uses the Moore-Penrose pseudoinverse
to solve the least squares problem. "qr" uses the QR
factorization.
cov_type : str, optional
See `regression.linear_model.RegressionResults` for a description
of the available covariance estimators
cov_kwds : list or None, optional
See `linear_model.RegressionResults.get_robustcov_results` for a
description of required keywords for alternative covariance estimators
use_t : bool, optional
Flag indicating to use the Student's t distribution when computing
p-values. Default behavior depends on cov_type. See
`linear_model.RegressionResults.get_robustcov_results` for
implementation details.
Returns
-------
A RegressionResults class instance.
See Also
--------
regression.linear_model.RegressionResults
regression.linear_model.RegressionResults.get_robustcov_results
Notes
-----
The fit method uses the pseudoinverse of the design/exogenous variables
to solve the least squares minimization.
"""
if method == "pinv":
if not (hasattr(self, 'pinv_wexog') and
hasattr(self, 'normalized_cov_params') and
hasattr(self, 'rank')):
self.pinv_wexog, singular_values = pinv_extended(self.wexog)
self.normalized_cov_params = np.dot(
self.pinv_wexog, np.transpose(self.pinv_wexog))
# Cache these singular values for use later.
self.wexog_singular_values = singular_values
self.rank = np.linalg.matrix_rank(np.diag(singular_values))
beta = np.dot(self.pinv_wexog, self.wendog)
elif method == "qr":
if not (hasattr(self, 'exog_Q') and
hasattr(self, 'exog_R') and
hasattr(self, 'normalized_cov_params') and
hasattr(self, 'rank')):
Q, R = np.linalg.qr(self.wexog)
self.exog_Q, self.exog_R = Q, R
self.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
# Cache singular values from R.
self.wexog_singular_values = np.linalg.svd(R, 0, 0)
self.rank = np.linalg.matrix_rank(R)
else:
Q, R = self.exog_Q, self.exog_R
# used in ANOVA
self.effects = effects = np.dot(Q.T, self.wendog)
beta = np.linalg.solve(R, effects)
else:
raise ValueError('method has to be "pinv" or "qr"')
if self._df_model is None:
self._df_model = float(self.rank - self.k_constant)
if self._df_resid is None:
self.df_resid = self.nobs - self.rank
if isinstance(self, OLS):
lfit = OLSResults(
self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t)
else:
lfit = RegressionResults(
self, beta,
normalized_cov_params=self.normalized_cov_params,
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t,
**kwargs)
return RegressionResultsWrapper(lfit)
def predict(self, params, exog=None):
"""
Return linear predicted values from a design matrix.
Parameters
----------
params : array_like
Parameters of a linear model
exog : array_like, optional.
Design / exogenous data. Model exog is used if None.
Returns
-------
An array of fitted values
Notes
-----
If the model has not yet been fit, params is not optional.
"""
# JP: this doesn't look correct for GLMAR
# SS: it needs its own predict method
if exog is None:
exog = self.exog
return np.dot(exog, params)
def get_distribution(self, params, scale, exog=None, dist_class=None):
"""
Returns a random number generator for the predictive distribution.
Parameters
----------
params : array_like
The model parameters (regression coefficients).
scale : scalar
The variance parameter.
exog : array_like
The predictor variable matrix.
dist_class : class
A random number generator class. Must take 'loc' and'scale'
as arguments and return a random number generator implementing
an ``rvs`` method for simulating random values. Defaults to Gaussian.
Returns
-------
gen
Frozen random number generator object with mean and variance
determined by the fitted linear model. Use the ``rvs`` method
to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions objects``,
the returned random number generator must be called with
``gen.rvs(n)`` where ``n`` is the number of observations in
the data set used to fit the model. If any other value is
used for ``n``, misleading results will be produced.
"""
fit = self.predict(params, exog)
if dist_class is None:
from scipy.stats.distributions import norm
dist_class = norm
gen = dist_class(loc=fit, scale=np.sqrt(scale))
return gen
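# Illustrative sketch (not in the original source): drawing simulated
# responses from the predictive distribution returned by get_distribution.
# As the docstring notes, rvs must be called with n equal to nobs.
# Synthetic data; the helper name is invented.
def _predictive_simulation_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x = sm.add_constant(rng.standard_normal(100))
    y = np.dot(x, np.array([0.5, 1.5])) + rng.standard_normal(100)
    res = sm.OLS(y, x).fit()
    gen = res.model.get_distribution(res.params, scale=res.scale, exog=x)
    return gen.rvs(len(y))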
class GLS(RegressionModel):
__doc__ = r"""
Generalized least squares model with a general covariance structure.
%(params)s
sigma : scalar or array
`sigma` is the weighting matrix of the covariance.
The default is None for no scaling. If `sigma` is a scalar, it is
assumed that `sigma` is an n x n diagonal matrix with the given
scalar, `sigma` as the value of each diagonal element. If `sigma`
is an n-length vector, then `sigma` is assumed to be a diagonal
matrix with the given `sigma` on the diagonal. This should be the
same as WLS.
%(extra_params)s
Attributes
----------
pinv_wexog : array
`pinv_wexog` is the p x n Moore-Penrose pseudoinverse of `wexog`.
cholsigmainv : array
The transpose of the Cholesky decomposition of the pseudoinverse.
df_model : float
p - 1, where p is the number of regressors including the intercept.
df_resid : float
Number of observations n less the number of parameters p.
llf : float
The value of the likelihood function of the fitted model.
nobs : float
The number of observations n.
normalized_cov_params : array
p x p array :math:`(X^{T}\Sigma^{-1}X)^{-1}`
results : RegressionResults instance
A property that returns the RegressionResults class if fit.
sigma : array
`sigma` is the n x n covariance structure of the error terms.
wexog : array
Design matrix whitened by `cholsigmainv`
wendog : array
Response variable whitened by `cholsigmainv`
Notes
-----
If sigma is a function of the data making one of the regressors
a constant, then the current postestimation statistics will not be correct.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load(as_pandas=False)
>>> data.exog = sm.add_constant(data.exog)
>>> ols_resid = sm.OLS(data.endog, data.exog).fit().resid
>>> res_fit = sm.OLS(ols_resid[1:], ols_resid[:-1]).fit()
>>> rho = res_fit.params
`rho` is a consistent estimator of the correlation of the residuals from
an OLS fit of the longley data. It is assumed that this is the true rho
of the AR process data.
>>> from scipy.linalg import toeplitz
>>> order = toeplitz(np.arange(16))
>>> sigma = rho**order
`sigma` is an n x n matrix of the autocorrelation structure of the
data.
>>> gls_model = sm.GLS(data.endog, data.exog, sigma=sigma)
>>> gls_results = gls_model.fit()
>>> print(gls_results.summary())
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None,
**kwargs):
# TODO: add options igls, for iterative fgls if sigma is None
# TODO: default if sigma is none should be two-step GLS
sigma, cholsigmainv = _get_sigma(sigma, len(endog))
super(GLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, sigma=sigma,
cholsigmainv=cholsigmainv, **kwargs)
# store attribute names for data arrays
self._data_attr.extend(['sigma', 'cholsigmainv'])
def whiten(self, X):
"""
GLS whiten method.
Parameters
----------
X : array_like
Data to be whitened.
Returns
-------
np.dot(cholsigmainv,X)
See Also
--------
regression.GLS
"""
X = np.asarray(X)
if self.sigma is None or self.sigma.shape == ():
return X
elif self.sigma.ndim == 1:
if X.ndim == 1:
return X * self.cholsigmainv
else:
return X * self.cholsigmainv[:, None]
else:
return np.dot(self.cholsigmainv, X)
def loglike(self, params):
"""
Returns the value of the Gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `endog`.
Parameters
----------
params : array_like
The parameter estimates
Returns
-------
loglike : float
The value of the log-likelihood function for a GLS Model.
Notes
-----
The log-likelihood function for the normal distribution is
.. math:: -\\frac{n}{2}\\log\\left(\\left(Y-\\hat{Y}\\right)^{\\prime}\\left(Y-\\hat{Y}\\right)\\right)-\\frac{n}{2}\\left(1+\\log\\left(\\frac{2\\pi}{n}\\right)\\right)-\\frac{1}{2}\\log\\left(\\left|\\Sigma\\right|\\right)
Y and Y-hat are whitened.
"""
# TODO: combine this with OLS/WLS loglike and add _det_sigma argument
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with likelihood constant
if np.any(self.sigma):
# FIXME: robust-enough check? unneeded if _det_sigma gets defined
if self.sigma.ndim == 2:
det = np.linalg.slogdet(self.sigma)
llf -=.5*det[1]
else:
llf -= 0.5*np.sum(np.log(self.sigma))
# with error covariance matrix
return llf
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
if self.sigma is None or self.sigma.shape == ():
return np.ones(self.exog.shape[0])
elif self.sigma.ndim == 1:
return self.cholsigmainv
else:
return np.diag(self.cholsigmainv)
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
# Docstring attached below
# Need to adjust since RSS/n term in elastic net uses nominal
# n in denominator
if self.sigma is not None:
alpha = alpha * np.sum(1 / np.diag(self.sigma)) / len(self.endog)
rslt = OLS(self.wendog, self.wexog).fit_regularized(
method=method, alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
profile_scale=profile_scale,
refit=refit, **kwargs)
from statsmodels.base.elastic_net import (
RegularizedResults, RegularizedResultsWrapper)
rrslt = RegularizedResults(self, rslt.params)
return RegularizedResultsWrapper(rrslt)
fit_regularized.__doc__ = _fit_regularized_doc
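# Minimal sketch (not part of the original source): as the GLS docstring above
# notes, a length-n `sigma` behaves like WLS; here GLS with a diagonal sigma
# reproduces WLS with weights 1/sigma up to floating point.  Synthetic data,
# invented helper name.
def _gls_diagonal_equals_wls_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x = sm.add_constant(rng.standard_normal(100))
    sigma = rng.uniform(0.5, 2.0, size=100)              # error variances
    y = np.dot(x, np.array([1.0, 2.0])) + np.sqrt(sigma) * rng.standard_normal(100)
    gls_params = sm.GLS(y, x, sigma=sigma).fit().params
    wls_params = sm.WLS(y, x, weights=1.0 / sigma).fit().params
    return gls_params, wls_params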
class WLS(RegressionModel):
__doc__ = """
A regression model with diagonal but non-identity covariance structure.
The weights are presumed to be (proportional to) the inverse of
the variance of the observations. That is, if the variables are
to be transformed by 1/sqrt(W) you must supply weights = 1/W.
%(params)s
weights : array_like, optional
1d array of weights. If you supply 1/W then the variables are
pre-multiplied by 1/sqrt(W). If no weights are supplied the
default value is 1 and WLS results are the same as OLS.
%(extra_params)s
Attributes
----------
weights : array
The stored weights supplied as an argument.
See Also
--------
regression.GLS
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> wls_model = sm.WLS(Y,X, weights=list(range(1,8)))
>>> results = wls_model.fit()
>>> results.params
array([ 2.91666667, 0.0952381 ])
>>> results.tvalues
array([ 2.0652652, 0.35684428])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5>
>>> print(results.f_test([0, 1]))
<F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1>
Notes
-----
If the weights are a function of the data, then the post estimation
statistics such as fvalue and mse_model might not be correct, as the
package does not yet support no-constant regression.
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog, weights=1., missing='none', hasconst=None,
**kwargs):
weights = np.array(weights)
if weights.shape == ():
if (missing == 'drop' and 'missing_idx' in kwargs and
kwargs['missing_idx'] is not None):
# patsy may have truncated endog
weights = np.repeat(weights, len(kwargs['missing_idx']))
else:
weights = np.repeat(weights, len(endog))
# handle case that endog might be of len == 1
if len(weights) == 1:
weights = np.array([weights.squeeze()])
else:
weights = weights.squeeze()
super(WLS, self).__init__(endog, exog, missing=missing,
weights=weights, hasconst=hasconst, **kwargs)
nobs = self.exog.shape[0]
weights = self.weights
# Experimental normalization of weights
weights = weights / np.sum(weights) * nobs
if weights.size!= nobs and weights.shape[0]!= nobs:
raise ValueError('Weights must be scalar or same length as design')
def whiten(self, X):
"""
Whitener for WLS model, multiplies each column by sqrt(self.weights)
Parameters
----------
X : array_like
Data to be whitened
Returns
-------
whitened : array_like
sqrt(weights)*X
"""
X = np.asarray(X)
if X.ndim == 1:
return X * np.sqrt(self.weights)
elif X.ndim == 2:
return np.sqrt(self.weights)[:, None]*X
def loglike(self, params):
r"""
Returns the value of the gaussian log-likelihood function at params.
Given the whitened design matrix, the log-likelihood is evaluated
at the parameter vector `params` for the dependent variable `Y`.
Parameters
----------
params : array_like
The parameter estimates.
Returns
-------
llf : float
The value of the log-likelihood function for a WLS Model.
Notes
--------
.. math:: -\frac{n}{2}\log SSR -\frac{n}{2}\left(1+\log\left(\frac{2\pi}{n}\right)\right)-\frac{1}{2}\log\left(\left|W\right|\right)
where :math:`W` is a diagonal weight matrix and
:math:`SSR=\left(Y-\hat{Y}\right)^\prime W \left(Y-\hat{Y}\right)` is
the sum of the squared weighted residuals.
"""
nobs2 = self.nobs / 2.0
SSR = np.sum((self.wendog - np.dot(self.wexog, params))**2, axis=0)
llf = -np.log(SSR) * nobs2 # concentrated likelihood
llf -= (1+np.log(np.pi/nobs2))*nobs2 # with constant
llf += 0.5 * np.sum(np.log(self.weights))
return llf
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
return self.weights
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
# Docstring attached below
# Need to adjust since RSS/n in elastic net uses nominal n in
# denominator
alpha = alpha * np.sum(self.weights) / len(self.weights)
rslt = OLS(self.wendog, self.wexog).fit_regularized(
method=method, alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
profile_scale=profile_scale,
refit=refit, **kwargs)
from statsmodels.base.elastic_net import (
RegularizedResults, RegularizedResultsWrapper)
rrslt = RegularizedResults(self, rslt.params)
return RegularizedResultsWrapper(rrslt)
fit_regularized.__doc__ = _fit_regularized_doc
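# Illustrative sketch (not in the original source): WLS estimates agree with
# an OLS fit on data pre-multiplied by sqrt(weights), which mirrors what
# WLS.whiten does internally.  Synthetic data, invented helper name.
def _wls_whitening_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x = sm.add_constant(rng.standard_normal(50))
    y = np.dot(x, np.array([1.0, -0.5])) + rng.standard_normal(50)
    w = rng.uniform(0.5, 2.0, size=50)
    wls_params = sm.WLS(y, x, weights=w).fit().params
    ols_params = sm.OLS(np.sqrt(w) * y, np.sqrt(w)[:, None] * x).fit().params
    return wls_params, ols_params   # equal up to floating point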
class OLS(WLS):
__doc__ = """
A simple ordinary least squares model.
%(params)s
%(extra_params)s
Attributes
----------
weights : scalar
Has an attribute weights = array(1.0) due to inheritance from WLS.
See Also
--------
GLS
Examples
--------
>>> import numpy as np
>>>
>>> import statsmodels.api as sm
>>>
>>> Y = [1,3,4,5,2,3,4]
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>>
>>> model = sm.OLS(Y,X)
>>> results = model.fit()
>>> results.params
array([ 2.14285714, 0.25 ])
>>> results.tvalues
array([ 1.87867287, 0.98019606])
>>> print(results.t_test([1, 0]))
<T test: effect=array([ 2.14285714]), sd=array([[ 1.14062282]]), t=array([[ 1.87867287]]), p=array([[ 0.05953974]]), df_denom=5>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 19.46078431]]), p=[[ 0.00437251]], df_denom=5, df_num=2>
Notes
-----
No constant is added by the model unless you are using formulas.
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
# TODO: change example to use datasets. This was the point of datasets!
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
super(OLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
if "weights" in self._init_keys:
self._init_keys.remove("weights")
def loglike(self, params, scale=None):
"""
The likelihood function for the OLS model.
Parameters
----------
params : array_like
The coefficients with which to estimate the log-likelihood.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The likelihood function evaluated at params.
"""
nobs2 = self.nobs / 2.0
nobs = float(self.nobs)
resid = self.endog - np.dot(self.exog, params)
if hasattr(self, 'offset'):
resid -= self.offset
ssr = np.sum(resid**2)
if scale is None:
# profile log likelihood
llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2
else:
# log-likelihood
llf = -nobs2 * np.log(2 * np.pi * scale) - ssr / (2*scale)
return llf
def whiten(self, Y):
"""
OLS model whitener does nothing: returns Y.
"""
return Y
def score(self, params, scale=None):
"""
Evaluate the score function at a given point.
The score corresponds to the profile (concentrated)
log-likelihood in which the scale parameter has been profiled
out.
Parameters
----------
params : array_like
The parameter vector at which the score function is
computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The score vector.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
sdr = -self._wexog_x_wendog + xtxb
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
params)
ssr += np.dot(params, xtxb)
return -self.nobs * sdr / ssr
else:
return -sdr / scale
def _setup_score_hess(self):
y = self.wendog
if hasattr(self, 'offset'):
y = y - self.offset
self._wendog_xprod = np.sum(y * y)
self._wexog_xprod = np.dot(self.wexog.T, self.wexog)
self._wexog_x_wendog = np.dot(self.wexog.T, y)
def hessian(self, params, scale=None):
"""
Evaluate the Hessian function at a given point.
Parameters
----------
params : array_like
The parameter vector at which the Hessian is computed.
scale : float or None
If None, return the profile (concentrated) log likelihood
(profiled over the scale parameter), else return the
log-likelihood using the given scale value.
Returns
-------
The Hessian matrix.
"""
if not hasattr(self, "_wexog_xprod"):
self._setup_score_hess()
xtxb = np.dot(self._wexog_xprod, params)
if scale is None:
ssr = self._wendog_xprod - 2 * np.dot(self._wexog_x_wendog.T,
params)
ssr += np.dot(params, xtxb)
ssrp = -2*self._wexog_x_wendog + 2*xtxb
hm = self._wexog_xprod / ssr - np.outer(ssrp, ssrp) / ssr**2
return -self.nobs * hm / 2
else:
return -self._wexog_xprod / scale
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
return np.ones(self.exog.shape[0])
def fit_regularized(self, method="elastic_net", alpha=0.,
L1_wt=1., start_params=None, profile_scale=False,
refit=False, **kwargs):
# Docstring attached below
# In the future we could add support for other penalties, e.g. SCAD.
if method not in ("elastic_net", "sqrt_lasso"):
msg = "Unknown method '%s' for fit_regularized" % method
raise ValueError(msg)
# Set default parameters.
defaults = {"maxiter": 50, "cnvrg_tol": 1e-10,
"zero_tol": 1e-8}
defaults.update(kwargs)
if method == "sqrt_lasso":
from statsmodels.base.elastic_net import (
RegularizedResults, RegularizedResultsWrapper
)
params = self._sqrt_lasso(alpha, refit, defaults["zero_tol"])
results = RegularizedResults(self, params)
return RegularizedResultsWrapper(results)
from statsmodels.base.elastic_net import fit_elasticnet
if L1_wt == 0:
return self._fit_ridge(alpha)
# If a scale parameter is passed in, the non-profile
# likelihood (residual sum of squares divided by -2) is used,
# otherwise the profile likelihood is used.
if profile_scale:
loglike_kwds = {}
score_kwds = {}
hess_kwds = {}
else:
loglike_kwds = {"scale": 1}
score_kwds = {"scale": 1}
hess_kwds = {"scale": 1}
return fit_elasticnet(self, method=method,
alpha=alpha,
L1_wt=L1_wt,
start_params=start_params,
loglike_kwds=loglike_kwds,
score_kwds=score_kwds,
hess_kwds=hess_kwds,
refit=refit,
check_step=False,
**defaults)
fit_regularized.__doc__ = _fit_regularized_doc
def _sqrt_lasso(self, alpha, refit, zero_tol):
try:
import cvxopt
except ImportError:
msg = "sqrt_lasso fitting requires the cvxopt module to be installed"
raise ValueError(msg)
n = len(self.endog)
p = self.exog.shape[1]
h0 = cvxopt.matrix(0., (2*p+1, 1))
h1 = cvxopt.matrix(0., (n+1, 1))
h1[1:, 0] = cvxopt.matrix(self.endog, (n, 1))
G0 = cvxopt.spmatrix([], [], [], (2*p+1, 2*p+1))
for i in range(1, 2*p+1):
G0[i, i] = -1
G1 = cvxopt.matrix(0., (n+1, 2*p+1))
G1[0, 0] = -1
G1[1:, 1:p+1] = self.exog
G1[1:, p+1:] = -self.exog
c = cvxopt.matrix(alpha / n, (2*p + 1, 1))
c[0] = 1 / np.sqrt(n)
from cvxopt import solvers
solvers.options["show_progress"] = False
rslt = solvers.socp(c, Gl=G0, hl=h0, Gq=[G1], hq=[h1])
x = np.asarray(rslt['x']).flat
bp = x[1:p+1]
bn = x[p+1:]
params = bp - bn
if not refit:
return params
ii = np.flatnonzero(np.abs(params) > zero_tol)
rfr = OLS(self.endog, self.exog[:, ii]).fit()
params *= 0
params[ii] = rfr.params
return params
def _fit_ridge(self, alpha):
"""
Fit a linear model using ridge regression.
Parameters
----------
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
Notes
-----
Equivalent to fit_regularized with L1_wt = 0 (but implemented
more efficiently).
"""
u, s, vt = np.linalg.svd(self.exog, 0)
v = vt.T
q = np.dot(u.T, self.endog) * s
s2 = s * s
if np.isscalar(alpha):
sd = s2 + alpha * self.nobs
params = q / sd
params = np.dot(v, params)
else:
vtav = self.nobs * np.dot(vt, alpha[:, None] * v)
d = np.diag(vtav) + s2
np.fill_diagonal(vtav, d)
r = np.linalg.solve(vtav, q)
params = np.dot(v, r)
from statsmodels.base.elastic_net import RegularizedResults
return RegularizedResults(self, params)
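# Usage sketch (illustrative): with L1_wt=0, fit_regularized takes the
# dedicated ridge path in _fit_ridge above; with L1_wt=1 it is a lasso fit
# via the elastic net routine.  Synthetic data, invented helper name.
def _regularized_ols_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x = rng.standard_normal((200, 5))
    y = x[:, 0] - 2.0 * x[:, 1] + rng.standard_normal(200)
    ridge = sm.OLS(y, x).fit_regularized(alpha=0.1, L1_wt=0.0)
    lasso = sm.OLS(y, x).fit_regularized(alpha=0.1, L1_wt=1.0)
    return ridge.params, lasso.params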
class GLSAR(GLS):
__doc__ = """
A regression model with an AR(p) covariance structure.
%(params)s
rho : int
Order of the autoregressive covariance
%(extra_params)s
Examples
--------
>>> import statsmodels.api as sm
>>> X = range(1,8)
>>> X = sm.add_constant(X)
>>> Y = [1,3,4,5,8,10,9]
>>> model = sm.GLSAR(Y, X, rho=2)
>>> for i in range(6):
... results = model.fit()
... print("AR coefficients: {0}".format(model.rho))
... rho, sigma = sm.regression.yule_walker(results.resid,
... order=model.order)
... model = sm.GLSAR(Y, X, rho)
...
AR coefficients: [ 0. 0.]
AR coefficients: [-0.52571491 -0.84496178]
AR coefficients: [-0.6104153 -0.86656458]
AR coefficients: [-0.60439494 -0.857867 ]
AR coefficients: [-0.6048218 -0.85846157]
AR coefficients: [-0.60479146 -0.85841922]
>>> results.params
array([-0.66661205, 1.60850853])
>>> results.tvalues
array([ -2.10304127, 21.8047269 ])
>>> print(results.t_test([1, 0]))
<T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3>
>>> print(results.f_test(np.identity(2)))
<F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2>
Or, equivalently
>>> model2 = sm.GLSAR(Y, X, rho=2)
>>> res = model2.iterative_fit(maxiter=6)
>>> model2.rho
array([-0.60479146, -0.85841922])
Notes
-----
GLSAR is considered to be experimental.
The linear autoregressive process of order p--AR(p)--is defined as:
TODO
""" % {'params': base._model_params_doc,
'extra_params': base._missing_param_doc + base._extra_param_doc}
def __init__(self, endog, exog=None, rho=1, missing='none', **kwargs):
# this looks strange, interpreting rho as order if it is int
if isinstance(rho, np.int):
self.order = rho
self.rho = np.zeros(self.order, np.float64)
else:
self.rho = np.squeeze(np.asarray(rho))
if len(self.rho.shape) not in [0, 1]:
raise ValueError("AR parameters must be a scalar or a vector")
if self.rho.shape == ():
self.rho.shape = (1,)
self.order = self.rho.shape[0]
if exog is None:
# JP this looks wrong, should be a regression on constant
# results for rho estimate now identical to yule-walker on y
# super(AR, self).__init__(endog, add_constant(endog))
super(GLSAR, self).__init__(endog, np.ones((endog.shape[0], 1)),
missing=missing, **kwargs)
else:
super(GLSAR, self).__init__(endog, exog, missing=missing,
**kwargs)
def iterative_fit(self, maxiter=3, rtol=1e-4, **kwds):
"""
Perform an iterative two-stage procedure to estimate a GLS model.
The model is assumed to have AR(p) errors, AR(p) parameters and
regression coefficients are estimated iteratively.
Parameters
----------
maxiter : integer, optional
the number of iterations
rtol : float, optional
Relative tolerance between estimated coefficients to stop the
estimation. Stops if
max(abs(last - current) / abs(last)) < rtol
"""
# TODO: update this after going through example.
converged = False
i = -1 # need to initialize for maxiter < 1 (skip loop)
history = {'params': [], 'rho': [self.rho]}
for i in range(maxiter - 1):
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
results = self.fit()
history['params'].append(results.params)
if i == 0:
last = results.params
else:
diff = np.max(np.abs(last - results.params) / np.abs(last))
if diff < rtol:
converged = True
break
last = results.params
self.rho, _ = yule_walker(results.resid,
order=self.order, df=None)
history['rho'].append(self.rho)
# why not another call to self.initialize
# Use kwarg to insert history
if not converged and maxiter > 0:
# maxiter <= 0 just does OLS
if hasattr(self, 'pinv_wexog'):
del self.pinv_wexog
self.initialize()
# if converged then this is a duplicate fit, because we didn't
# update rho
results = self.fit(history=history, **kwds)
results.iter = i + 1
# add last fit to history, not if duplicate fit
if not converged:
results.history['params'].append(results.params)
results.iter += 1
results.converged = converged
return results
def whiten(self, X):
"""
Whiten a series of columns according to an AR(p)
covariance structure. This drops initial p observations.
Parameters
----------
X : array_like
The data to be whitened,
Returns
-------
whitened array
"""
# TODO: notation for AR process
X = np.asarray(X, np.float64)
_X = X.copy()
# the following loops over the first axis, works for 1d and nd
for i in range(self.order):
_X[(i+1):] = _X[(i+1):] - self.rho[i] * X[0:-(i+1)]
return _X[self.order:]
def yule_walker(X, order=1, method="unbiased", df=None, inv=False,
demean=True):
"""
Estimate AR(p) parameters from a sequence X using Yule-Walker equation.
Unbiased or maximum-likelihood estimator (mle)
See, for example:
https://en.wikipedia.org/wiki/Autoregressive_moving_average_model
Parameters
----------
X : array_like
1d array
order : integer, optional
The order of the autoregressive process. Default is 1.
method : string, optional
Method can be 'unbiased' or 'mle' and this determines
denominator in estimate of autocorrelation function (ACF) at
lag k. If 'mle', the denominator is n=X.shape[0], if 'unbiased'
the denominator is n-k. The default is unbiased.
df : integer, optional
Specifies the degrees of freedom. If `df` is supplied, then it
is assumed the X has `df` degrees of freedom rather than `n`.
Default is None.
inv : bool
If inv is True the inverse of R is also returned. Default is
False.
demean : bool
True, the mean is subtracted from `X` before estimation.
Returns
-------
rho
The autoregressive coefficients
sigma
TODO
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.datasets.sunspots import load
>>> data = load(as_pandas=False)
>>> rho, sigma = sm.regression.yule_walker(data.endog,
... order=4, method="mle")
>>> rho
array([ 1.28310031, -0.45240924, -0.20770299, 0.04794365])
>>> sigma
16.808022730464351
"""
# TODO: define R better, look back at notes and technical notes on YW.
# First link here is useful
# http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YuleWalkerAndMore.htm
method = str(method).lower()
if method not in ["unbiased", "mle"]:
raise ValueError("ACF estimation method must be 'unbiased' or 'MLE'")
X = np.array(X, dtype=np.float64)
if demean:
X -= X.mean()  # automatically demeans X
n = df or X.shape[0]
if method == "unbiased": # this is df_resid ie., n - p
denom = lambda k: n - k
else:
denom = lambda k: n
if X.ndim > 1 and X.shape[1]!= 1:
raise ValueError("expecting a vector to estimate AR parameters")
r = np.zeros(order+1, np.float64)
r[0] = (X**2).sum() / denom(0)
for k in range(1, order+1):
r[k] = (X[0:-k] * X[k:]).sum() / denom(k)
R = toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
sigmasq = r[0] - (r[1:]*rho).sum()
if inv:
return rho, np.sqrt(sigmasq), np.linalg.inv(R)
else:
return rho, np.sqrt(sigmasq)
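# Usage sketch (illustrative): recovering AR(2) coefficients from a simulated
# series with yule_walker.  The simulation loop is hand-rolled so the only
# extra dependency is numpy; numbers and the helper name are arbitrary.
def _yule_walker_demo(nobs=5000):
    import numpy as np
    rng = np.random.RandomState(0)
    true_rho = np.array([0.6, -0.3])
    e = rng.standard_normal(nobs)
    x = np.zeros(nobs)
    for t in range(2, nobs):
        x[t] = true_rho[0] * x[t - 1] + true_rho[1] * x[t - 2] + e[t]
    rho, sigma = yule_walker(x, order=2, method="mle")
    return rho, sigma   # rho should be close to true_rho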
def burg(endog, order=1, demean=True):
"""
Burg's AR(p) parameter estimator
Parameters
----------
endog : array_like
The endogenous variable
order : int, optional
Order of the AR. Default is 1.
demean : bool, optional
Flag indicating to subtract the mean from endog before estimation
Returns
-------
rho : ndarray
AR(p) coefficients computed using Burg's algorithm
sigma2 : float
Estimate of the residual variance
Notes
-----
AR model estimated includes a constant estimated using the sample mean.
This value is not reported.
References
----------
.. [1] Brockwell, P.J. and Davis, R.A., 2016. Introduction to time series
and forecasting. Springer.
"""
# Avoid circular imports
from statsmodels.tsa.stattools import levinson_durbin_pacf, pacf_burg
endog = np.squeeze(np.asarray(endog))
if endog.ndim!= 1:
raise ValueError('endog must be 1-d or squeezable to 1-d.')
order = int(order)
if order < 1:
raise ValueError('order must be a positive integer')
if demean:
endog = endog - endog.mean()
pacf, sigma = pacf_burg(endog, order, demean=demean)
ar, _ = levinson_durbin_pacf(pacf)
return ar, sigma[-1]
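# Illustrative sketch (not in the original source): Burg estimates on a short
# simulated AR(1) series next to the Yule-Walker estimates from the function
# above.  Synthetic data, invented helper name.
def _burg_vs_yule_walker_demo(nobs=500):
    import numpy as np
    rng = np.random.RandomState(1)
    e = rng.standard_normal(nobs)
    x = np.zeros(nobs)
    for t in range(1, nobs):
        x[t] = 0.7 * x[t - 1] + e[t]
    rho_burg, sigma2_burg = burg(x, order=1)
    rho_yw, sigma_yw = yule_walker(x, order=1)
    return rho_burg, rho_yw   # both should be near 0.7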
class RegressionResults(base.LikelihoodModelResults):
r"""
This class summarizes the fit of a linear regression model.
It handles the output of contrasts, estimates of covariance, etc.
Attributes
----------
pinv_wexog
See specific model class docstring
cov_HC0
Heteroscedasticity robust covariance matrix. See HC0_se below.
cov_HC1
Heteroscedasticity robust covariance matrix. See HC1_se below.
cov_HC2
Heteroscedasticity robust covariance matrix. See HC2_se below.
cov_HC3
Heteroscedasticity robust covariance matrix. See HC3_se below.
cov_type
Parameter covariance estimator used for standard errors and t-stats
df_model
Model degrees of freedom. The number of regressors `p`. Does not
include the constant if one is present
df_resid
Residual degrees of freedom. `n - p - 1`, if a constant is present.
`n - p` if a constant is not included.
het_scale
adjusted squared residuals for heteroscedasticity robust standard
errors. Is only available after `HC#_se` or `cov_HC#` is called.
See HC#_se for more information.
history
Estimation history for iterative estimators
HC0_se
White's (1980) heteroskedasticity robust standard errors.
Defined as sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1)
where e_i = resid[i]
HC0_se is a cached property.
When HC0_se or cov_HC0 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is just
resid**2.
HC1_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as sqrt(diag(n/(n-p)*HC_0)
HC1_se is a cached property.
When HC1_se or cov_HC1 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
n/(n-p)*resid**2.
HC2_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
HC2_se is a cached property.
When HC2_se or cov_HC2 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii).
HC3_se
MacKinnon and White's (1985) alternative heteroskedasticity robust
standard errors.
Defined as (X.T X)^(-1)X.T diag(e_i^(2)/(1-h_ii)^(2)) X(X.T X)^(-1)
where h_ii = x_i(X.T X)^(-1)x_i.T
HC3_se is a cached property.
When HC3_se or cov_HC3 is called the RegressionResults instance will
then have another attribute `het_scale`, which in this case is
resid^(2)/(1-h_ii)^(2).
model
A pointer to the model instance that called fit() or results.
params
The linear coefficients that minimize the least squares
criterion. This is usually called Beta for the classical
linear model.
resid_pearson
`wresid` normalized to have unit variance.
"""
_cache = {} # needs to be a class attribute for scale setter?
def __init__(self, model, params, normalized_cov_params=None, scale=1.,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
super(RegressionResults, self).__init__(
model, params, normalized_cov_params, scale)
self._cache = {}
if hasattr(model, 'wexog_singular_values'):
self._wexog_singular_values = model.wexog_singular_values
else:
self._wexog_singular_values = None
self.df_model = model.df_model
self.df_resid = model.df_resid
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {
'description': 'Standard Errors assume that the ' +
'covariance matrix of the errors is correctly ' +
'specified.'}
if use_t is None:
use_t = True # TODO: class default
self.use_t = use_t
else:
if cov_kwds is None:
cov_kwds = {}
if 'use_t' in cov_kwds:
# TODO: we want to get rid of 'use_t' in cov_kwds
use_t_2 = cov_kwds.pop('use_t')
if use_t is None:
use_t = use_t_2
# TODO: warn or not?
self.get_robustcov_results(cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
for key in kwargs:
setattr(self, key, kwargs[key])
def __str__(self):
# summary() returns a Summary object; __str__ must return a string
return str(self.summary())
def conf_int(self, alpha=.05, cols=None):
"""
Returns the confidence interval of the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval.
ie., The default `alpha` =.05 returns a 95% confidence interval.
cols : array_like, optional
`cols` specifies which confidence intervals to return
Notes
-----
The confidence interval is based on Student's t-distribution.
"""
# keep method for docstring for now
ci = super(RegressionResults, self).conf_int(alpha=alpha, cols=cols)
return ci
@cache_readonly
def nobs(self):
"""Number of observations n."""
return float(self.model.wexog.shape[0])
@cache_readonly
def fittedvalues(self):
"""The predicted values for the original (unwhitened) design."""
return self.model.predict(self.params, self.model.exog)
@cache_readonly
def wresid(self):
"""
The residuals of the transformed/whitened regressand and regressor(s)
"""
return self.model.wendog - self.model.predict(
self.params, self.model.wexog)
@cache_readonly
def resid(self):
"""The residuals of the model."""
return self.model.endog - self.model.predict(
self.params, self.model.exog)
# TODO: fix writable example
@cache_writable()
def scale(self):
"""
A scale factor for the covariance matrix. Default value is
ssr/(n-p). Note that the square root of `scale` is often
called the standard error of the regression.
"""
wresid = self.wresid
return np.dot(wresid, wresid) / self.df_resid
@cache_readonly
def ssr(self):
"""Sum of squared (whitened) residuals."""
wresid = self.wresid
return np.dot(wresid, wresid)
@cache_readonly
def centered_tss(self):
"""The total (weighted) sum of squares centered about the mean."""
model = self.model
weights = getattr(model, 'weights', None)
sigma = getattr(model,'sigma', None)
if weights is not None:
mean = np.average(model.endog, weights=weights)
return np.sum(weights * (model.endog - mean)**2)
elif sigma is not None:
# Exactly matches WLS when sigma is diagonal
iota = np.ones_like(model.endog)
iota = model.whiten(iota)
mean = model.wendog.dot(iota) / iota.dot(iota)
err = model.endog - mean
err = model.whiten(err)
return np.sum(err**2)
else:
centered_endog = model.wendog - model.wendog.mean()
return np.dot(centered_endog, centered_endog)
@cache_readonly
def uncentered_tss(self):
"""
Uncentered sum of squares. Sum of the squared values of the
(whitened) endogenous response variable.
"""
wendog = self.model.wendog
return np.dot(wendog, wendog)
@cache_readonly
def ess(self):
"""Explained sum of squares. If a constant is present, the centered
total sum of squares minus the sum of squared residuals. If there is no
constant, the uncentered total sum of squares is used."""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
"""
R-squared of a model with an intercept. This is defined here
as 1 - `ssr`/`centered_tss` if the constant is included in the
model and 1 - `ssr`/`uncentered_tss` if the constant is
omitted.
"""
if self.k_constant:
return 1 - self.ssr/self.centered_tss
else:
return 1 - self.ssr/self.uncentered_tss
@cache_readonly
def rsquared_adj(self):
"""
Adjusted R-squared. This is defined here as 1 -
(`nobs`-1)/`df_resid` * (1-`rsquared`) if a constant is
included and 1 - `nobs`/`df_resid` * (1-`rsquared`) if no
constant is included.
"""
return 1 - (np.divide(self.nobs - self.k_constant, self.df_resid)
* (1 - self.rsquared))
@cache_readonly
def mse_model(self):
"""
Mean squared error the model. This is the explained sum of
squares divided by the model degrees of freedom.
"""
return self.ess/self.df_model
@cache_readonly
def mse_resid(self):
"""
Mean squared error of the residuals. The sum of squared
residuals divided by the residual degrees of freedom.
"""
return self.ssr/self.df_resid
@cache_readonly
def mse_total(self):
"""
Total mean squared error. Defined as the uncentered total sum
of squares divided by n the number of observations.
"""
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
@cache_readonly
def fvalue(self):
"""F-statistic of the fully specified model. Calculated as the mean
squared error of the model divided by the mean squared error of the
residuals."""
if hasattr(self, 'cov_type') and self.cov_type!= 'nonrobust':
# with heteroscedasticity or correlation robustness
k_params = self.normalized_cov_params.shape[0]
mat = np.eye(k_params)
const_idx = self.model.data.const_idx
# TODO: What if model includes implicit constant, e.g. all
# dummies but no constant regressor?
# TODO: Restats as LM test by projecting orthogonalizing
# to constant?
if self.model.data.k_constant == 1:
# if constant is implicit, return nan see #2444
if const_idx is None:
return np.nan
idx = lrange(k_params)
idx.pop(const_idx)
mat = mat[idx] # remove constant
if mat.size == 0: # see #3642
return np.nan
ft = self.f_test(mat)
# using backdoor to set another attribute that we already have
self._cache['f_pvalue'] = ft.pvalue
return ft.fvalue
else:
# for standard homoscedastic case
return self.mse_model/self.mse_resid
@cache_readonly
def f_pvalue(self):
"""p-value of the F-statistic"""
return stats.f.sf(self.fvalue, self.df_model, self.df_resid)
@cache_readonly
def bse(self):
"""The standard errors of the parameter estimates."""
return np.sqrt(np.diag(self.cov_params()))
@cache_readonly
def aic(self):
r"""Akaike's information criteria. For a model with a constant
:math:`-2llf + 2(df\_model + 1)`. For a model without a constant
:math:`-2llf + 2(df\_model)`."""
return -2 * self.llf + 2 * (self.df_model + self.k_constant)
@cache_readonly
def bic(self):
r"""Bayes' information criteria. For a model with a constant
:math:`-2llf + \log(n)(df\_model+1)`. For a model without a constant
:math:`-2llf + \log(n)(df\_model)`"""
return (-2 * self.llf + np.log(self.nobs) * (self.df_model +
self.k_constant))
@cache_readonly
def eigenvals(self):
"""
Return eigenvalues sorted in decreasing order.
"""
if self._wexog_singular_values is not None:
eigvals = self._wexog_singular_values ** 2
else:
eigvals = np.linalg.linalg.eigvalsh(np.dot(self.model.wexog.T,
self.model.wexog))
return np.sort(eigvals)[::-1]
@cache_readonly
def condition_number(self):
"""
Return condition number of exogenous matrix.
Calculated as ratio of largest to smallest eigenvalue.
"""
eigvals = self.eigenvals
return np.sqrt(eigvals[0]/eigvals[-1])
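# Illustrative sketch (not in the original source): the eigenvalue-based
# diagnostics above in action; a nearly duplicated regressor inflates the
# condition number that summary() also warns about.  Synthetic data,
# invented helper name.
def _condition_number_demo():
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    x1 = rng.standard_normal(200)
    x2 = x1 + 1e-6 * rng.standard_normal(200)   # almost a copy of x1
    X = sm.add_constant(np.column_stack([x1, x2]))
    y = x1 + rng.standard_normal(200)
    res = sm.OLS(y, X).fit()
    return res.condition_number, res.eigenvals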
# TODO: make these properties reset bse
def _HCCM(self, scale):
H = np.dot(self.model.pinv_wexog,
scale[:, None] * self.model.pinv_wexog.T)
return H
@cache_readonly
def cov_HC0(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.wresid**2
cov_HC0 = self._HCCM(self.het_scale)
return cov_HC0
@cache_readonly
def cov_HC1(self):
"""
See statsmodels.RegressionResults
"""
self.het_scale = self.nobs/(self.df_resid)*(self.wresid**2)
cov_HC1 = self._HCCM(self.het_scale)
return cov_HC1
@cache_readonly
def cov_HC2(self):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(chain_dot(self.model.wexog,
self.normalized_cov_params,
self.model.wexog.T))
self.het_scale = self.wresid**2/(1-h)
cov_HC2 = self._HCCM(self.het_scale)
return cov_HC2
@cache_readonly
def cov_HC3(self):
"""
See statsmodels.RegressionResults
"""
h = np.diag(chain_dot(
self.model.wexog, self.normalized_cov_params, self.model.wexog.T))
self.het_scale = (self.wresid / (1 - h))**2
cov_HC3 = self._HCCM(self.het_scale)
return cov_HC3
@cache_readonly
def HC0_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC0))
@cache_readonly
def HC1_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC1))
@cache_readonly
def HC2_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC2))
@cache_readonly
def HC3_se(self):
"""
See statsmodels.RegressionResults
"""
return np.sqrt(np.diag(self.cov_HC3))
@cache_readonly
def resid_pearson(self):
"""
Residuals, normalized to have unit variance.
Returns
-------
An array of wresid standardized by the square root of scale
"""
if not hasattr(self,'resid'):
raise ValueError('Method requires residuals.')
eps = np.finfo(self.wresid.dtype).eps
if np.sqrt(self.scale) < 10 * eps * self.model.endog.mean():
# don't divide if scale is zero close to numerical precision
from warnings import warn
warn("All residuals are 0, cannot compute normed residuals.",
RuntimeWarning)
return self.wresid
else:
return self.wresid / np.sqrt(self.scale)
def _is_nested(self, restricted):
"""
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current
model. The result instance of the restricted model is required to
have two attributes, residual sum of squares, `ssr`, residual
degrees of freedom, `df_resid`.
Returns
-------
nested : bool
True if nested, otherwise false
Notes
-----
A model nests another model if the regressors in the smaller
model are spanned by the regressors in the larger model and
the regressand is identical.
"""
if self.model.nobs!= restricted.model.nobs:
return False
full_rank = self.model.rank
restricted_rank = restricted.model.rank
if full_rank <= restricted_rank:
return False
restricted_exog = restricted.model.wexog
full_wresid = self.wresid
scores = restricted_exog * full_wresid[:, None]
score_l2 = np.sqrt(np.mean(scores.mean(0) ** 2))
# TODO: Could be improved, and may fail depending on scale of
# regressors
return np.allclose(score_l2, 0)
def compare_lm_test(self, restricted, demean=True, use_lr=False):
"""Use Lagrange Multiplier test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
demean : bool
Flag indicating whether to demean the scores based on the
residuals from the restricted model. If True, the
covariance of the scores is used and the LM test is
identical to the large sample version of the LR test.
Returns
-------
lm_value : float
test statistic, chi2 distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df
between models
Notes
-----
TODO: explain LM test
"""
import statsmodels.stats.sandwich_covariance as sw
from numpy.linalg import inv
if not self._is_nested(restricted):
raise ValueError("Restricted model is not nested by full model.")
wresid = restricted.wresid
wexog = self.model.wexog
scores = wexog * wresid[:, None]
n = self.nobs
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
s = scores.mean(axis=0)
if use_lr:
scores = wexog * self.wresid[:, None]
demean = False
if demean:
scores = scores - scores.mean(0)[None, :]
# Form matters here. If homoskedastic, this can be sigma^2 (X'X)^-1
# If heteroskedastic, then the form below is fine
# If HAC, then need to use the HAC covariance
# If cluster, should use the cluster-robust covariance
cov_type = getattr(self, 'cov_type', 'nonrobust')
if cov_type == 'nonrobust':
sigma2 = np.mean(wresid**2)
XpX = np.dot(wexog.T, wexog) / n
Sinv = inv(sigma2 * XpX)
elif cov_type in ('HC0', 'HC1', 'HC2', 'HC3'):
Sinv = inv(np.dot(scores.T, scores) / n)
elif cov_type == 'HAC':
maxlags = self.cov_kwds['maxlags']
Sinv = inv(sw.S_hac_simple(scores, maxlags) / n)
elif cov_type == 'cluster':
# cluster robust standard errors
groups = self.cov_kwds['groups']
# TODO: Might need demean option in S_crosssection by group?
Sinv = inv(sw.S_crosssection(scores, groups))
else:
raise ValueError('Only nonrobust, HC, HAC and cluster are ' +
'currently connected')
lm_value = n * chain_dot(s, Sinv, s.T)
p_value = stats.chi2.sf(lm_value, df_diff)
return lm_value, p_value, df_diff
def compare_f_test(self, restricted):
"""use F test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the
current model. The result instance of the restricted model
is required to have two attributes, residual sum of
squares, `ssr`, residual degrees of freedom, `df_resid`.
Returns
-------
f_value : float
test statistic, F distributed
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in
df between models
Notes
-----
See mailing list discussion October 17,
This test compares the residual sum of squares of the two
models. This is not a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results under
the assumption of homoscedasticity and no autocorrelation
(sphericity).
"""
has_robust1 = getattr(self, 'cov_type', 'nonrobust')!= 'nonrobust'
has_robust2 = (getattr(restricted, 'cov_type', 'nonrobust')!=
'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('F test for comparison is likely invalid with ' +
'robust covariance, proceeding anyway',
InvalidTestWarning)
ssr_full = self.ssr
ssr_restr = restricted.ssr
df_full = self.df_resid
df_restr = restricted.df_resid
df_diff = (df_restr - df_full)
f_value = (ssr_restr - ssr_full) / df_diff / ssr_full * df_full
p_value = stats.f.sf(f_value, df_diff, df_full)
return f_value, p_value, df_diff
def compare_lr_test(self, restricted, large_sample=False):
"""
Likelihood ratio test to test whether restricted model is correct
Parameters
----------
restricted : Result instance
The restricted model is assumed to be nested in the current model.
The result instance of the restricted model is required to have two
attributes, residual sum of squares, `ssr`, residual degrees of
freedom, `df_resid`.
large_sample : bool
Flag indicating whether to use a heteroskedasticity robust version
of the LR test, which is a modified LM test.
Returns
-------
lr_stat : float
likelihood ratio, chisquare distributed with df_diff degrees of
freedom
p_value : float
p-value of the test statistic
df_diff : int
degrees of freedom of the restriction, i.e. difference in df
between models
Notes
-----
The exact likelihood ratio is valid for homoskedastic data,
and is defined as
.. math:: D=-2\\log\\left(\\frac{\\mathcal{L}_{null}}
{\\mathcal{L}_{alternative}}\\right)
where :math:`\\mathcal{L}` is the likelihood of the
model. With :math:`D` distributed as chisquare with df equal
to difference in number of parameters or equivalently
difference in residual degrees of freedom.
The large sample version of the likelihood ratio is defined as
.. math:: D=n s^{\\prime}S^{-1}s
where :math:`s=n^{-1}\\sum_{i=1}^{n} s_{i}`
.. math:: s_{i} = x_{i,alternative} \\epsilon_{i,null}
is the average score of the model evaluated using the
residuals from null model and the regressors from the
alternative model and :math:`S` is the covariance of the
scores, :math:`s_{i}`. The covariance of the scores is
estimated using the same estimator as in the alternative
model.
This test compares the loglikelihood of the two models. This
may not be a valid test, if there is unspecified
heteroscedasticity or correlation. This method will issue a
warning if this is detected but still return the results
without taking unspecified heteroscedasticity or correlation
into account.
TODO: put into separate function, needs tests
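Examples
--------
A small synthetic sketch (variable names and the data-generating
process are illustrative); ``large_sample=True`` delegates to the
heteroskedasticity robust LM variant:
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(0)
>>> x = sm.add_constant(np.random.normal(size=(100, 3)))
>>> y = x[:, 1] + np.random.normal(size=100)
>>> full = sm.OLS(y, x).fit()
>>> restricted = sm.OLS(y, x[:, :2]).fit()
>>> lr_stat, p_value, df_diff = full.compare_lr_test(restricted)
>>> lm_stat, p_value, df_diff = full.compare_lr_test(
...     restricted, large_sample=True)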
"""
# See mailing list discussion October 17,
if large_sample:
return self.compare_lm_test(restricted, use_lr=True)
has_robust1 = (getattr(self, 'cov_type', 'nonrobust') != 'nonrobust')
has_robust2 = (
getattr(restricted, 'cov_type', 'nonrobust') != 'nonrobust')
if has_robust1 or has_robust2:
warnings.warn('Likelihood Ratio test is likely invalid with '
'robust covariance, proceeding anyway',
InvalidTestWarning)
llf_full = self.llf
llf_restr = restricted.llf
df_full = self.df_resid
df_restr = restricted.df_resid
lrdf = (df_restr - df_full)
lrstat = -2*(llf_restr - llf_full)
lr_pvalue = stats.chi2.sf(lrstat, lrdf)
return lrstat, lr_pvalue, lrdf
def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwds):
"""create new results instance with robust covariance as default
Parameters
----------
cov_type : string
the type of robust sandwich estimator to use. see Notes below
use_t : bool
If true, then the t distribution is used for inference.
If false, then the normal distribution is used.
If `use_t` is None, then an appropriate default is used, which is
`true` if the cov_type is nonrobust, and `false` in all other
cases.
kwds : depends on cov_type
Required or optional arguments for robust covariance calculation.
see Notes below
Returns
-------
results : results instance
This method creates a new results instance with the
requested robust covariance as the default covariance of
the parameters. Inferential statistics like p-values and
hypothesis tests will be based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'fixed scale' and optional keyword argument 'scale' which uses
a predefined scale estimate with default equal to one.
- 'HC0', 'HC1', 'HC2', 'HC3' and no keyword arguments:
heteroscedasticity robust covariance
- 'HAC' and keywords
- `maxlag` integer (required) : number of lags to use
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
- `use_correction` bool (optional) : If true, use small sample
correction
- 'cluster' and required keyword `groups`, integer group indicator
- `groups` array_like, integer (required) :
index of clusters or groups
- `use_correction` bool (optional) :
If True the sandwich covariance is calculated with a small
sample correction.
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
If True (default), then the degrees of freedom for the
inferential statistics and hypothesis tests, such as
pvalues, f_pvalue, conf_int, and t_test and f_test, are
based on the number of groups minus one instead of the
total number of observations minus the number of explanatory
variables. `df_resid` of the results instance is adjusted.
If False, then `df_resid` of the results instance is not
adjusted.
- 'hac-groupsum' Driscoll and Kraay, heteroscedasticity and
autocorrelation robust standard errors in panel data
keywords
- `time` array_like (required) : index of time periods
- `maxlag` integer (required) : number of lags to use
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
If `use_correction = 'cluster'` (default), then the same
small sample correction as in the case of `cov_type='cluster'`
is used.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
# TODO: we need more options here
- 'hac-panel' heteroscedasticity and autocorrelation robust standard
errors in panel data.
The data needs to be sorted in this case, the time series
for each panel unit or cluster need to be stacked. The
membership to a timeseries of an individual or group can
be either specified by group indicators or by increasing
time periods.
keywords
- either `groups` or `time` : array_like (required)
`groups` : indicator for groups
`time` : index of time periods
- `maxlag` integer (required) : number of lags to use
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett
- `use_correction` False or string in ['hac', 'cluster'] (optional) :
If False the sandwich covariance is calculated without
small sample correction.
- `df_correction` bool (optional)
adjustment to df_resid, see cov_type 'cluster' above
# TODO: we need more options here
Reminder:
`use_correction` in "hac-groupsum" and "hac-panel" is not bool,
needs to be in [False, 'hac', 'cluster']
TODO: Currently there is no check for extra or misspelled keywords,
except in the case of cov_type `HCx`
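Examples
--------
A brief sketch of the calling pattern (the dataset is illustrative);
extra keywords are passed through as the ``cov_kwds`` of the chosen
``cov_type``:
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load_pandas().data
>>> exog = sm.add_constant(data[['GNP', 'POP']])
>>> res = sm.OLS(data['TOTEMP'], exog).fit()
>>> res_hc3 = res.get_robustcov_results(cov_type='HC3')
>>> res_hac = res.get_robustcov_results(cov_type='HAC', maxlags=2)
>>> res_hac.bse  # standard errors under the HAC covariance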
"""
import statsmodels.stats.sandwich_covariance as sw
from statsmodels.base.covtype import normalize_cov_type, descriptions
cov_type = normalize_cov_type(cov_type)
if 'kernel' in kwds:
kwds['weights_func'] = kwds.pop('kernel')
if 'weights_func' in kwds and not callable(kwds['weights_func']):
kwds['weights_func'] = sw.kernel_dict[kwds['weights_func']]
# TODO: make separate function that returns a robust cov plus info
use_self = kwds.pop('use_self', False)
if use_self:
res = self
else:
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
res.cov_type = cov_type
# use_t might already be defined by the class, and already set
if use_t is None:
use_t = self.use_t
res.cov_kwds = {'use_t': use_t} # store for information
res.use_t = use_t
adjust_df = False
if cov_type in ['cluster', 'hac-panel', 'hac-groupsum']:
df_correction = kwds.get('df_correction', None)
# TODO: check also use_correction, do I need all combinations?
if df_correction is not False: # i.e. in [None, True]:
# user didn't explicitly set it to False
adjust_df = True
res.cov_kwds['adjust_df'] = adjust_df
# verify and set kwds, and calculate cov
# TODO: this should be outsourced in a function so we can reuse it in
# other models
# TODO: make it DRYer repeated code for checking kwds
if cov_type in ['fixed scale', 'fixed_scale']:
res.cov_kwds['description'] = descriptions['fixed_scale']
res.cov_kwds['scale'] = scale = kwds.get('scale', 1.)
res.cov_params_default = scale * res.normalized_cov_params
elif cov_type.upper() in ('HC0', 'HC1', 'HC2', 'HC3'):
if kwds:
raise ValueError('heteroscedasticity robust covariance '
'does not use keywords')
res.cov_kwds['description'] = descriptions[cov_type.upper()]
res.cov_params_default = getattr(self, 'cov_' + cov_type.upper())
elif cov_type.lower() == 'hac':
maxlags = kwds['maxlags'] # required?, default in cov_hac_simple
res.cov_kwds['maxlags'] = maxlags
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
use_correction = kwds.get('use_correction', False)
res.cov_kwds['use_correction'] = use_correction
res.cov_kwds['description'] = descriptions['HAC'].format(
maxlags=maxlags,
correction=['without', 'with'][use_correction])
res.cov_params_default = sw.cov_hac_simple(
self, nlags=maxlags, weights_func=weights_func,
use_correction=use_correction)
elif cov_type.lower() == 'cluster':
# cluster robust standard errors, one- or two-way
groups = kwds['groups']
if not hasattr(groups,'shape'):
groups = np.asarray(groups).T
if groups.ndim >= 2:
groups = groups.squeeze()
res.cov_kwds['groups'] = groups
use_correction = kwds.get('use_correction', True)
res.cov_kwds['use_correction'] = use_correction
if groups.ndim == 1:
if adjust_df:
# need to find number of groups
# duplicate work
self.n_groups = n_groups = len(np.unique(groups))
res.cov_params_default = sw.cov_cluster(
self, groups, use_correction=use_correction)
elif groups.ndim == 2:
if hasattr(groups, 'values'):
groups = groups.values
if adjust_df:
# need to find number of groups
# duplicate work
n_groups0 = len(np.unique(groups[:, 0]))
n_groups1 = len(np.unique(groups[:, 1]))
self.n_groups = (n_groups0, n_groups1)
n_groups = min(n_groups0, n_groups1) # use for adjust_df
# Note: sw.cov_cluster_2groups has 3 returns
res.cov_params_default = sw.cov_cluster_2groups(
self, groups, use_correction=use_correction)[0]
else:
raise ValueError('only two groups are supported')
res.cov_kwds['description'] = descriptions['cluster']
elif cov_type.lower() == 'hac-panel':
# cluster robust standard errors
res.cov_kwds['time'] = time = kwds.get('time', None)
res.cov_kwds['groups'] = groups = kwds.get('groups', None)
# TODO: nlags is currently required
# nlags = kwds.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'hac')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if groups is not None:
groups = np.asarray(groups)
tt = (np.nonzero(groups[:-1]!= groups[1:])[0] + 1).tolist()
nobs_ = len(groups)
elif time is not None:
time = np.asarray(time)
# TODO: clumsy time index in cov_nw_panel
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1).tolist()
nobs_ = len(time)
else:
raise ValueError('either time or groups needs to be given')
groupidx = lzip([0] + tt, tt + [nobs_])
self.n_groups = n_groups = len(groupidx)
res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx,
weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Panel']
elif cov_type.lower() == 'hac-groupsum':
# Driscoll-Kraay standard errors
res.cov_kwds['time'] = time = kwds['time']
# TODO: nlags is currently required
# nlags = kwds.get('nlags', True)
# res.cov_kwds['nlags'] = nlags
# TODO: `nlags` or `maxlags`
res.cov_kwds['maxlags'] = maxlags = kwds['maxlags']
use_correction = kwds.get('use_correction', 'cluster')
res.cov_kwds['use_correction'] = use_correction
weights_func = kwds.get('weights_func', sw.weights_bartlett)
res.cov_kwds['weights_func'] = weights_func
if adjust_df:
# need to find number of groups
tt = (np.nonzero(time[1:] < time[:-1])[0] + 1)
self.n_groups = n_groups = len(tt) + 1
res.cov_params_default = sw.cov_nw_groupsum(
self, maxlags, time, weights_func=weights_func,
use_correction=use_correction)
res.cov_kwds['description'] = descriptions['HAC-Groupsum']
else:
raise ValueError('cov_type not recognized. See docstring for '
'available options and spelling')
if adjust_df:
# Note: df_resid is used for scale and others, add new attribute
res.df_resid_inference = n_groups - 1
return res
def get_prediction(self, exog=None, transform=True, weights=None,
row_labels=None, **kwds):
return pred.get_prediction(
self, exog=exog, transform=transform, weights=weights,
row_labels=row_labels, **kwds)
get_prediction.__doc__ = pred.get_prediction.__doc__
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
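Examples
--------
A minimal sketch (dataset choice is illustrative):
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load_pandas().data
>>> res = sm.OLS(data['TOTEMP'], sm.add_constant(data['GNP'])).fit()
>>> print(res.summary())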
"""
from statsmodels.stats.stattools import (
jarque_bera, omni_normtest, durbin_watson)
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
# TODO: Avoid adding attributes in non-__init__
self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
omni=omni, omnipv=omnipv, condno=condno,
mineigval=eigvals[-1])
# TODO not used yet
# diagn_left_header = ['Models stats']
# diagn_right_header = ['Residual stats']
# TODO: requiring list/iterable is a bit annoying
# need more control over formatting
# TODO: default don't work if it's not identically spelled
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Least Squares']),
('Date:', None),
('Time:', None),
('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
rsquared_type = '' if self.k_constant else '(uncentered)'
top_right = [('R-squared' + rsquared_type + ':', ["%#8.3f" % self.rsquared]),
('Adj. R-squared' + rsquared_type + ':', ["%#8.3f" % self.rsquared_adj]),
('F-statistic:', ["%#8.4g" % self.fvalue]),
('Prob (F-statistic):', ["%#6.3g" % self.f_pvalue]),
('Log-Likelihood:', None),
('AIC:', ["%#8.4g" % self.aic]),
('BIC:', ["%#8.4g" % self.bic])
]
diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
('Prob(Omnibus):', ["%#6.3f" % omnipv]),
('Skew:', ["%#6.3f" % skew]),
('Kurtosis:', ["%#6.3f" % kurtosis])
]
diagn_right = [('Durbin-Watson:',
["%#8.3f" % durbin_watson(self.wresid)]
),
('Jarque-Bera (JB):', ["%#8.3f" % jb]),
('Prob(JB):', ["%#8.3g" % jbpv]),
('Cond. No.', ["%#8.3g" % condno])
]
if title is None:
title = self.model.__class__.__name__ + ' ' + "Regression Results"
# create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
yname=yname, xname=xname,
title="")
# add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type'):
etext.append(self.cov_kwds['description'])
if self.model.exog.shape[0] < self.model.exog.shape[1]:
wstr = "The input rank is higher than the number of observations."
etext.append(wstr)
if eigvals[-1] < 1e-10:
wstr = "The smallest eigenvalue is %6.3g. This might indicate "
wstr += "that there are\n"
wstr += "strong multicollinearity problems or that the design "
wstr += "matrix is singular."
wstr = wstr % eigvals[-1]
etext.append(wstr)
elif condno > 1000: # TODO: what is recommended?
wstr = "The condition number is large, %6.3g. This might "
wstr += "indicate that there are\n"
wstr += "strong multicollinearity or other numerical "
wstr += "problems."
wstr = wstr % condno
etext.append(wstr)
if etext:
etext = ["[{0}] {1}".format(i + 1, text)
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
smry.add_extra_txt(etext)
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary function to summarize the regression results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables. Default is `var_##` for ## in
the number of regressors. Must match the number of parameters
in the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
# Diagnostics
from statsmodels.stats.stattools import (jarque_bera,
omni_normtest,
durbin_watson)
from collections import OrderedDict
jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
omni, omnipv = omni_normtest(self.wresid)
dw = durbin_watson(self.wresid)
eigvals = self.eigenvals
condno = self.condition_number
eigvals = np.sort(eigvals) # in increasing order
diagnostic = OrderedDict([
('Omnibus:', "%.3f" % omni),
('Prob(Omnibus):', "%.3f" % omnipv),
('Skew:', "%.3f" % skew),
('Kurtosis:', "%.3f" % kurtosis),
('Durbin-Watson:', "%.3f" % dw),
('Jarque-Bera (JB):', "%.3f" % jb),
('Prob(JB):', "%.3f" % jbpv),
('Condition No.:', "%.0f" % condno)
])
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
smry.add_dict(diagnostic)
# Warnings
if eigvals[-1] < 1e-10:
warn = "The smallest eigenvalue is %6.3g. This might indicate that\
there are strong multicollinearity problems or that the design\
matrix is singular." % eigvals[-1]
smry.add_text(warn)
if condno > 1000:
warn = "* The condition number is large (%.g). This might indicate \
strong multicollinearity or other numerical problems." % condno
smry.add_text(warn)
return smry
class OLSResults(RegressionResults):
"""
Results class for an OLS model.
Most of the methods and attributes are inherited from RegressionResults.
The special methods that are only available for OLS are:
- get_influence
- outlier_test
- el_test
- conf_int_el
See Also
--------
RegressionResults
"""
def get_influence(self):
"""
get an instance of Influence with influence and outlier measures
Returns
-------
infl : Influence instance
the instance has methods to calculate the main influence and
outlier measures for the OLS regression
See Also
--------
statsmodels.stats.outliers_influence.OLSInfluence
"""
from statsmodels.stats.outliers_influence import OLSInfluence
return OLSInfluence(self)
def outlier_test(self, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Test observations for outliers according to method
Parameters
----------
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations with
multiple testing corrected p-values strictly below the cutoff. The
returned array or DataFrame can be empty if there are no outlier
candidates at the specified cutoff.
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
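Examples
--------
A short sketch (data and method choice are illustrative); with a
pandas-based model the result is a DataFrame indexed like the
original data, so it can be filtered on the corrected p-values:
>>> import statsmodels.api as sm
>>> data = sm.datasets.longley.load_pandas().data
>>> res = sm.OLS(data['TOTEMP'], sm.add_constant(data['GNP'])).fit()
>>> table = res.outlier_test(method='bonf')
>>> outliers = table[table.iloc[:, -1] < 0.05]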
"""
from statsmodels.stats.outliers_influence import outlier_test
return outlier_test(self, method, alpha, labels=labels,
order=order, cutoff=cutoff)
def el_test(self, b0_vals, param_nums, return_weights=0,
ret_params=0, method='nm',
stochastic_exog=1, return_params=0):
"""
Tests single or joint hypotheses of the regression parameters using
Empirical Likelihood.
Parameters
----------
b0_vals : 1darray
The hypothesized value of the parameter to be tested
param_nums : 1darray
The parameter number to be tested
return_weights : bool
If true, returns the weights that optimize the likelihood
ratio at b0_vals. Default is False
ret_params : bool
If true, returns the parameter vector that maximizes the likelihood
ratio at b0_vals. Also returns the weights. Default is False
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
stochastic_exog : bool
If True, the exogenous variables are assumed to be stochastic.
When the regressors are nonstochastic, moment conditions are
placed on the exogenous variables. Confidence intervals for
stochastic regressors are at least as large as non-stochastic
regressors. Default is True.
Returns
-------
res : tuple
The p-value and -2 times the log-likelihood ratio for the
hypothesized values.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load(as_pandas=False)
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> model = sm.OLS(endog, exog)
>>> fitted = model.fit()
>>> fitted.params
array([-39.91967442, 0.7156402, 1.29528612, -0.15212252])
>>> fitted.rsquared
0.91357690446068196
>>> # Test that the slope on the first variable is 0
>>> fitted.el_test([0], [1])
(27.248146353888796, 1.7894660442330235e-07)
"""
params = np.copy(self.params)
opt_fun_inst = _ELRegOpts() # to store weights
if len(param_nums) == len(params):
llr = opt_fun_inst._opt_nuis_regress(
[],
param_nums=param_nums,
endog=self.model.endog,
exog=self.model.exog,
nobs=self.model.nobs,
nvar=self.model.exog.shape[1],
params=params,
b0_vals=b0_vals,
stochastic_exog=stochastic_exog)
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
x0 = np.delete(params, param_nums)
args = (param_nums, self.model.endog, self.model.exog,
self.model.nobs, self.model.exog.shape[1], params,
b0_vals, stochastic_exog)
if method == 'nm':
llr = optimize.fmin(opt_fun_inst._opt_nuis_regress, x0,
maxfun=10000, maxiter=10000, full_output=1,
disp=0, args=args)[1]
if method == 'powell':
llr = optimize.fmin_powell(opt_fun_inst._opt_nuis_regress, x0,
full_output=1, disp=0,
args=args)[1]
pval = 1 - stats.chi2.cdf(llr, len(param_nums))
if ret_params:
return llr, pval, opt_fun_inst.new_weights, opt_fun_inst.new_params
elif return_weights:
return llr, pval, opt_fun_inst.new_weights
else:
return llr, pval
def conf_int_el(self, param_num, sig=.05, upper_bound=None,
lower_bound=None, method='nm', stochastic_exog=1):
"""
Computes the confidence interval for the parameter given by param_num
using Empirical Likelihood
Parameters
----------
param_num : float
The parameter for which the confidence interval is desired
sig : float
The significance level. Default is .05
upper_bound : float
The maximum value the upper limit can be. Default is the
99.9% confidence value under OLS assumptions.
lower_bound : float
The minimum value the lower limit can be. Default is the 99.9%
confidence value under OLS assumptions.
method : string
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'
Returns
-------
ci : tuple
The confidence interval
See Also
--------
el_test
Notes
-----
This function uses brentq to find the value of beta where
test_beta([beta], param_num)[1] is equal to the critical
value.
The function returns the results of each iteration of brentq at
each value of beta.
The current function value of the last printed optimization
should be the critical value at the desired significance level.
For alpha=.05, the value is 3.841459.
To ensure optimization terminated successfully, it is suggested to
do el_test([lower_limit], [param_num])
If the optimization does not terminate successfully, consider switching
optimization algorithms.
If optimization is still not successful, try changing the values of
start_int_params. If the current function value repeatedly jumps
from a number between 0 and the critical value and a very large number
(>50), the starting parameters of the interior minimization need
to be changed.
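Examples
--------
A sketch mirroring the ``el_test`` example above (the dataset is
illustrative); this can be slow because each bound is located by root
finding over repeated EL tests:
>>> import statsmodels.api as sm
>>> data = sm.datasets.stackloss.load(as_pandas=False)
>>> endog = data.endog
>>> exog = sm.add_constant(data.exog)
>>> fitted = sm.OLS(endog, exog).fit()
>>> lower, upper = fitted.conf_int_el(1)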
"""
r0 = stats.chi2.ppf(1 - sig, 1)
if upper_bound is None:
upper_bound = self.conf_int(.01)[param_num][1]
if lower_bound is None:
lower_bound = self.conf_int(.01)[param_num][0]
f = lambda b0: self.el_test(np.array([b0]), np.array([param_num]),
method=method,
stochastic_exog=stochastic_exog)[0]-r0
lowerl = optimize.brenth(f, lower_bound,
self.params[param_num])
upperl = optimize.brenth(f, self.params[param_num],
upper_bound)
# ^ Seems to be faster than brentq in most cases
return (lowerl, upperl)
class RegressionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'chisq': 'columns',
'sresid': 'rows',
'weights': 'rows',
'wresid': 'rows',
'bcov_unscaled': 'cov',
'bcov_scaled': 'cov',
'HC0_se': 'columns',
'HC1_se': 'columns',
'HC2_se': 'columns',
'HC3_se': 'columns',
'norm_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(base.LikelihoodResultsWrapper._attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(
base.LikelihoodResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RegressionResultsWrapper,
RegressionResults)
import warnings
import numpy as np
import pandas as pd
from statsmodels.base import model
import statsmodels.base.wrapper as wrap
class _DimReductionRegression(model.Model):
"""
A base class for dimension reduction regression methods.
"""
def __init__(self, endog, exog, **kwargs):
super(_DimReductionRegression, self).__init__(endog, exog, **kwargs)
def _prep(self, n_slice):
# Sort the data by endog
ii = np.argsort(self.endog)
x = self.exog[ii, :]
# Whiten the data
x -= x.mean(0)
covx = np.cov(x.T)
covxr = np.linalg.cholesky(covx)
x = np.linalg.solve(covxr, x.T).T
self.wexog = x
self._covxr = covxr
# Split the data into slices
self._split_wexog = np.array_split(x, n_slice)
class SlicedInverseReg(_DimReductionRegression):
"""
Sliced Inverse Regression (SIR)
Parameters
----------
endog : array_like (1d)
The dependent variable
exog : array_like (2d)
The covariates
References
----------
KC Li (1991). Sliced inverse regression for dimension reduction.
JASA 86, 316-342.
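Examples
--------
A synthetic sketch (the data-generating process and sizes are
arbitrary); the leading columns of ``results.params`` span the
estimated EDR directions:
>>> import numpy as np
>>> np.random.seed(0)
>>> x = np.random.normal(size=(200, 4))
>>> y = np.exp(x[:, 0] + 0.5 * x[:, 1]) + 0.1 * np.random.normal(size=200)
>>> results = SlicedInverseReg(y, x).fit(slice_n=20)
>>> direction = results.params[:, 0]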
"""
def fit(self, **kwargs):
"""
Estimate the EDR space.
Parameters
----------
slice_n : int, optional
Number of observations per slice
"""
# Sample size per slice
slice_n = kwargs.get("slice_n", 20)
# Number of slices
n_slice = self.exog.shape[0] // slice_n
self._prep(n_slice)
mn = [z.mean(0) for z in self._split_wexog]
n = [z.shape[0] for z in self._split_wexog]
mn = np.asarray(mn)
n = np.asarray(n)
mnc = np.cov(mn.T, fweights=n)
a, b = np.linalg.eigh(mnc)
jj = np.argsort(-a)
a = a[jj]
b = b[:, jj]
params = np.linalg.solve(self._covxr.T, b)
results = DimReductionResults(self, params, eigs=a)
return DimReductionResultsWrapper(results)
class PrincipalHessianDirections(_DimReductionRegression):
"""
Principal Hessian Directions
Parameters
----------
endog : array_like (1d)
The dependent variable
exog : array_like (2d)
The covariates
References
----------
KC Li (1992). On Principal Hessian Directions for Data
Visualization and Dimension Reduction: Another application
of Stein's lemma. JASA 87:420.
"""
def fit(self, **kwargs):
"""
Estimate the EDR space using PHD.
Parameters
----------
resid : bool, optional
If True, use least squares regression to remove the
linear relationship between each covariate and the
response, before conducting PHD.
"""
resid = kwargs.get("resid", False)
y = self.endog - self.endog.mean()
x = self.exog - self.exog.mean(0)
if resid:
from statsmodels.regression.linear_model import OLS
r = OLS(y, x).fit()
y = r.resid
cm = np.einsum('i,ij,ik->jk', y, x, x)
cm /= len(y)
cx = np.cov(x.T)
cb = np.linalg.solve(cx, cm)
a, b = np.linalg.eig(cb)
jj = np.argsort(-np.abs(a))
a = a[jj]
params = b[:, jj]
results = DimReductionResults(self, params, eigs=a)
return DimReductionResultsWrapper(results)
class SlicedAverageVarianceEstimation(_DimReductionRegression):
"""
Sliced Average Variance Estimation (SAVE)
Parameters
----------
endog : array_like (1d)
The dependent variable
exog : array_like (2d)
The covariates
bc : bool, optional
If True, use the bias-corrected CSAVE method of Li and Zhu.
References
----------
RD Cook. SAVE: A method for dimension reduction and graphics
in regression.
http://www.stat.umn.edu/RegGraph/RecentDev/save.pdf
Y Li, L-X Zhu (2007). Asymptotics for sliced average
variance estimation. The Annals of Statistics.
https://arxiv.org/pdf/0708.0462.pdf
"""
def __init__(self, endog, exog, **kwargs):
super(SAVE, self).__init__(endog, exog, **kwargs)
self.bc = False
if "bc" in kwargs and kwargs["bc"] is True:
self.bc = True
def fit(self, **kwargs):
"""
Estimate the EDR space.
Parameters
----------
slice_n : int
Number of observations per slice
"""
# Sample size per slice
slice_n = kwargs.get("slice_n", 50)
# Number of slices
n_slice = self.exog.shape[0] // slice_n
self._prep(n_slice)
cv = [np.cov(z.T) for z in self._split_wexog]
ns = [z.shape[0] for z in self._split_wexog]
p = self.wexog.shape[1]
if not self.bc:
# Cook's original approach
vm = 0
for w, cvx in zip(ns, cv):
icv = np.eye(p) - cvx
vm += w * np.dot(icv, icv)
vm /= len(cv)
else:
# The bias-corrected approach of Li and Zhu
# \Lambda_n in Li, Zhu
av = 0
for c in cv:
av += np.dot(c, c)
av /= len(cv)
# V_n in Li, Zhu
vn = 0
for x in self._split_wexog:
r = x - x.mean(0)
for i in range(r.shape[0]):
u = r[i, :]
m = np.outer(u, u)
vn += np.dot(m, m)
vn /= self.exog.shape[0]
c = np.mean(ns)
k1 = c * (c - 1) / ((c - 1)**2 + 1)
k2 = (c - 1) / ((c - 1)**2 + 1)
av2 = k1 * av - k2 * vn
vm = np.eye(p) - 2 * sum(cv) / len(cv) + av2
a, b = np.linalg.eigh(vm)
jj = np.argsort(-a)
a = a[jj]
b = b[:, jj]
params = np.linalg.solve(self._covxr.T, b)
results = DimReductionResults(self, params, eigs=a)
return DimReductionResultsWrapper(results)
class DimReductionResults(model.Results):
"""
Results class for a dimension reduction regression.
"""
def __init__(self, model, params, eigs):
super(DimReductionResults, self).__init__(
model, params)
self.eigs = eigs
class DimReductionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'params': 'columns',
}
_wrap_attrs = _attrs
wrap.populate_wrapper(DimReductionResultsWrapper, # noqa:E305
DimReductionResults)
class CovarianceReduction(_DimReductionRegression):
"""
Dimension reduction for covariance matrices (CORE).
Parameters
----------
endog : array_like
The dependent variable, treated as group labels
exog : array_like
The independent variables.
dim : integer
The dimension of the subspace onto which the covariance
matrices are projected.
Returns
-------
An orthogonal matrix P such that replacing each group's
covariance matrix C with P'CP optimally preserves the
differences among these matrices.
Notes
-----
This is a likelihood-based dimension reduction procedure based
on Wishart models for sample covariance matrices. The goal
is to find a projection matrix P so that C_i | P'C_iP and
C_j | P'C_jP are equal in distribution for all i, j, where
the C_i are the within-group covariance matrices.
The model and methodology are as described in Cook and Forzani,
but the optimization method follows Edelman et al.
References
----------
DR Cook, L Forzani (2008). Covariance reducing models: an alternative
to spectral modeling of covariance matrices. Biometrika 95:4.
A Edelman, TA Arias, ST Smith (1998). The geometry of algorithms with
orthogonality constraints. SIAM J Matrix Anal Appl.
http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf
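Examples
--------
A small synthetic sketch (group structure and dimensions are
arbitrary); ``endog`` holds group labels and ``exog`` the observed
vectors:
>>> import numpy as np
>>> np.random.seed(0)
>>> groups = np.repeat([0, 1], 100)
>>> x = np.random.normal(size=(200, 3))
>>> x[groups == 1, 0] *= 3  # the groups differ in one variance
>>> results = CovarianceReduction(groups, x, dim=1).fit()
>>> proj = results.params  # 3 x 1 projection matrix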
"""
def __init__(self, endog, exog, dim):
super(CovarianceReduction, self).__init__(endog, exog)
covs, ns = [], []
df = pd.DataFrame(self.exog, index=self.endog)
for _, v in df.groupby(df.index):
covs.append(v.cov().values)
ns.append(v.shape[0])
self.nobs = len(endog)
# The marginal covariance
covm = 0
for i, _ in enumerate(covs):
covm += covs[i] * ns[i]
covm /= self.nobs
self.covm = covm
self.covs = covs
self.ns = ns
self.dim = dim
def loglike(self, params):
"""
Evaluate the log-likelihood
Parameters
----------
params : array_like
The projection matrix used to reduce the covariances, flattened
to 1d.
Returns the log-likelihood.
"""
p = self.covm.shape[0]
proj = params.reshape((p, self.dim))
c = np.dot(proj.T, np.dot(self.covm, proj))
_, ldet = np.linalg.slogdet(c)
f = self.nobs * ldet / 2
for j, c in enumerate(self.covs):
c = np.dot(proj.T, np.dot(c, proj))
_, ldet = np.linalg.slogdet(c)
f -= self.ns[j] * ldet / 2
return f
def score(self, params):
"""
Evaluate the score function.
Parameters
----------
params : array_like
The projection matrix used to reduce the covariances,
flattened to 1d.
Returns the score function evaluated at 'params'.
"""
p = self.covm.shape[0]
proj = params.reshape((p, self.dim))
c0 = np.dot(proj.T, np.dot(self.covm, proj))
cP = np.dot(self.covm, proj)
g = self.nobs * np.linalg.solve(c0, cP.T).T
for j, c in enumerate(self.covs):
c0 = np.dot(proj.T, np.dot(c, proj))
cP = np.dot(c, proj)
g -= self.ns[j] * np.linalg.solve(c0, cP.T).T
return g.ravel()
def fit(self, start_params=None, maxiter=100, gtol=1e-4):
"""
Fit the covariance reduction model.
Parameters
----------
start_params : array_like
Starting value for the projection matrix. May be
rectangular, or flattened.
maxiter : integer
The maximum number of gradient steps to take.
gtol : float
Convergence criterion for the gradient norm.
Returns
-------
An orthogonal p x d matrix P that optimizes the likelihood.
"""
p = self.covm.shape[0]
d = self.dim
# Starting value for params
if start_params is None:
params = np.zeros((p, d))
params[0:d, 0:d] = np.eye(d)
params = params.ravel()
else:
params = start_params.ravel()
llf = self.loglike(params)
for _ in range(maxiter):
g = self.score(params)
g -= np.dot(g, params) * params / np.dot(params, params)
if np.sqrt(np.sum(g * g)) < gtol:
break
gm = g.reshape((p, d))
u, s, vt = np.linalg.svd(gm, 0)
paramsm = params.reshape((p, d))
pa0 = np.dot(paramsm, vt.T)
def geo(t):
# Parameterize the geodesic path in the direction
# of the gradient as a function of t (real).
pa = pa0 * np.cos(s * t) + u * np.sin(s * t)
return np.dot(pa, vt).ravel()
# Try to find an uphill step along the geodesic path.
step = 2.
while step > 1e-10:
pa = geo(step)
llf1 = self.loglike(pa)
if llf1 > llf:
params = pa
llf = llf1
break
step /= 2
if step <= 1e-10:
msg = "CovReduce optimization did not converge"
warnings.warn(msg)
break
params = params.reshape((p, d))
results = DimReductionResults(self, params, eigs=None)
results.llf = llf
return DimReductionResultsWrapper(results)
# aliases for expert users
SIR = SlicedInverseReg
PHD = PrincipalHessianDirections
SAVE = SlicedAverageVarianceEstimation
CORE = CovarianceReduction
# -*- coding: utf-8 -*-
"""
This module implements maximum likelihood-based estimation (MLE) of
Gaussian models for finite-dimensional observations made on
infinite-dimensional processes.
The ProcessMLE class supports regression analyses on grouped data,
where the observations within a group are dependent (they are made on
the same underlying process). The main application is repeated
measures regression for temporal (longitudinal) data, in which the
repeated measures occur at arbitrary real-valued time points.
The mean structure is specified as a linear model. The covariance
parameters depend on covariates via a link function.
"""
import numpy as np
import pandas as pd
import patsy
import statsmodels.base.model as base
import statsmodels.api as sm
import collections
from statsmodels.compat.python import string_types
from scipy.optimize import minimize
from statsmodels.iolib import summary2
from statsmodels.tools.numdiff import approx_fprime
import warnings
class ProcessCovariance(object):
r"""
A covariance model for a process indexed by a real parameter.
An implementation of this class is based on a positive definite
correlation function h that maps real numbers to the interval [0,
1], such as the Gaussian (squared exponential) correlation
function :math:`\exp(-x^2)`. It also depends on a positive
scaling function `s` and a positive smoothness function `u`.
"""
def get_cov(self, time, sc, sm):
"""
Returns the covariance matrix for given time values.
Parameters
----------
time : array_like
The time points for the observations. If len(time) = p,
a pxp covariance matrix is returned.
sc : array_like
The scaling parameters for the observations.
sm : array_like
The smoothness parameters for the observation. See class
docstring for details.
"""
raise NotImplementedError
def jac(self, time, sc, sm):
"""
The Jacobian of the covariance respect to the parameters.
See get_cov for parameters.
Returns
-------
jsc : list-like
jsc[i] is the derivative of the covariance matrix
with respect to the i^th scaling parameter.
jsm : list-like
jsm[i] is the derivative of the covariance matrix
with respect to the i^th smoothness parameter.
"""
raise NotImplementedError
class GaussianCovariance(ProcessCovariance):
r"""
An implementation of ProcessCovariance using the Gaussian kernel.
This class represents a parametric covariance model for a Gaussian
process as described in the work of Paciorek et al. cited below.
Following Paciorek et al [1]_, the covariance between observations with
index `i` and `j` is given by:
.. math::
s[i] \cdot s[j] \cdot h(|time[i] - time[j]| / \sqrt{(u[i] + u[j]) /
2}) \cdot \frac{u[i]^{1/4}u[j]^{1/4}}{\sqrt{(u[i] + u[j])/2}}
The ProcessMLE class allows linear models with this covariance
structure to be fit using maximum likelihood (ML), which is
equivalent to generalized least squares (GLS) in this setting.
The mean and covariance parameters of the model are fit jointly.
The mean, scaling, and smoothing parameters can be linked to
covariates. The mean parameters are linked linearly, and the
scaling and smoothing parameters use an exponential link to
preserve positivity.
The reference of Paciorek et al. below provides more details.
Note that here we only implement the 1-dimensional version of
their approach.
References
----------
.. [1] Paciorek, C. J. and Schervish, M. J. (2006). Spatial modeling using
a new class of nonstationary covariance functions. Environmetrics,
17:483–506.
https://papers.nips.cc/paper/2350-nonstationary-covariance-functions-for-gaussian-process-regression.pdf
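Examples
--------
A tiny numerical sketch (values chosen only for illustration); with
unit scaling and smoothness the result reduces to the squared
exponential kernel of the time differences:
>>> import numpy as np
>>> gc = GaussianCovariance()
>>> time = np.array([0., 1., 2.])
>>> cov = gc.get_cov(time, np.ones(3), np.ones(3))
>>> cov.shape
(3, 3)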
"""
def get_cov(self, time, sc, sm):
da = np.subtract.outer(time, time)
ds = np.add.outer(sm, sm) / 2
qmat = da * da / ds
cm = np.exp(-qmat / 2) / np.sqrt(ds)
cm *= np.outer(sm, sm)**0.25
cm *= np.outer(sc, sc)
return cm
def jac(self, time, sc, sm):
da = np.subtract.outer(time, time)
ds = np.add.outer(sm, sm) / 2
sds = np.sqrt(ds)
daa = da * da
qmat = daa / ds
p = len(time)
eqm = np.exp(-qmat / 2)
sm4 = np.outer(sm, sm)**0.25
cmx = eqm * sm4 / sds
dq0 = -daa / ds**2
di = np.zeros((p, p))
fi = np.zeros((p, p))
scc = np.outer(sc, sc)
# Derivatives with respect to the smoothing parameters.
jsm = []
for i, _ in enumerate(sm):
di *= 0
di[i, :] += 0.5
di[:, i] += 0.5
dbottom = 0.5 * di / sds
dtop = -0.5 * eqm * dq0 * di
b = dtop / sds - eqm * dbottom / ds
c = eqm / sds
v = 0.25 * sm**0.25 / sm[i]**0.75
fi *= 0
fi[i, :] = v
fi[:, i] = v
fi[i, i] = 0.5 / sm[i]**0.5
b = c * fi + b * sm4
b *= scc
jsm.append(b)
# Derivatives with respect to the scaling parameters.
jsc = []
for i in range(0, len(sc)):
b = np.zeros((p, p))
b[i, :] = cmx[i, :] * sc
b[:, i] += cmx[:, i] * sc
jsc.append(b)
return jsc, jsm
def _check_args(endog, exog, exog_scale, exog_smooth, exog_noise, time,
groups):
v = [
len(endog),
exog.shape[0],
exog_scale.shape[0],
exog_smooth.shape[0],
exog_noise.shape[0],
len(time),
len(groups)
]
if min(v)!= max(v):
msg = ("The leading dimensions of all array arguments " +
"must be equal.")
raise ValueError(msg)
class ProcessMLE(base.LikelihoodModel):
"""
Fit a Gaussian mean/variance regression model.
This class fits a one-dimensional Gaussian process model with
parameterized mean and covariance structures to grouped data. For
each group, there is an independent realization of a latent
Gaussian process indexed by an observed real-valued time
variable. The data consist of the Gaussian process observed at a
finite number of `time` values.
The process mean and variance can be linked to covariates. The
mean structure is linear in the covariates. The covariance
structure is non-stationary, and is defined parametrically through
'scaling' and 'smoothing' parameters. The covariance of the
process between two observations in the same group is a function
of the distance between the time values of the two observations.
The scaling and smoothing parameters can be linked to covariates.
The observed data are modeled as the sum of the Gaussian process
realization and independent white noise. The standard deviation
of the white noise can be linked to covariates.
The data should be provided in 'long form', with a group label to
indicate which observations belong to the same group.
Observations in different groups are always independent.
Parameters
----------
endog : array_like
The dependent variable.
exog : array_like
The design matrix for the mean structure
exog_scale : array_like
The design matrix for the scaling structure
exog_smooth : array_like
The design matrix for the smoothness structure
exog_noise : array_like
The design matrix for the white noise structure. The
linear predictor is the log of the white noise standard
deviation.
time : array_like (1-dimensional)
The univariate index values, used to calculate distances
between observations in the same group, which determines
their correlations.
groups : array_like (1-dimensional)
The group values.
cov : a ProcessCovariance instance
Defaults to GaussianCovariance.
"""
def __init__(self,
endog,
exog,
exog_scale,
exog_smooth,
exog_noise,
time,
groups,
cov=None,
**kwargs):
super(ProcessMLE, self).__init__(
endog,
exog,
exog_scale=exog_scale,
exog_smooth=exog_smooth,
exog_noise=exog_noise,
time=time,
groups=groups,
**kwargs)
# Create parameter names
xnames = []
if hasattr(exog, "columns"):
xnames = list(exog.columns)
else:
xnames = ["Mean%d" % j for j in range(exog.shape[1])]
if hasattr(exog_scale, "columns"):
xnames += list(exog_scale.columns)
else:
xnames += ["Scale%d" % j for j in range(exog_scale.shape[1])]
if hasattr(exog_smooth, "columns"):
xnames += list(exog_smooth.columns)
else:
xnames += ["Smooth%d" % j for j in range(exog_smooth.shape[1])]
if hasattr(exog_noise, "columns"):
xnames += list(exog_noise.columns)
else:
xnames += ["Noise%d" % j for j in range(exog_noise.shape[1])]
self.data.param_names = xnames
if cov is None:
cov = GaussianCovariance()
self.cov = cov
_check_args(endog, exog, exog_scale, exog_smooth, exog_noise,
time, groups)
groups_ix = collections.defaultdict(lambda: [])
for i, g in enumerate(groups):
groups_ix[g].append(i)
self._groups_ix = groups_ix
# Default, can be set in call to fit.
self.verbose = False
self.k_exog = self.exog.shape[1]
self.k_scale = self.exog_scale.shape[1]
self.k_smooth = self.exog_smooth.shape[1]
self.k_noise = self.exog_noise.shape[1]
def _split_param_names(self):
xnames = self.data.param_names
q = 0
mean_names = xnames[q:q+self.k_exog]
q += self.k_exog
scale_names = xnames[q:q+self.k_scale]
q += self.k_scale
smooth_names = xnames[q:q+self.k_smooth]
q += self.k_smooth
noise_names = xnames[q:q+self.k_noise]
return mean_names, scale_names, smooth_names, noise_names
@classmethod
def from_formula(cls,
formula,
data,
subset=None,
drop_cols=None,
*args,
**kwargs):
if "scale_formula" in kwargs:
scale_formula = kwargs["scale_formula"]
else:
raise ValueError("scale_formula is a required argument")
if "smooth_formula" in kwargs:
smooth_formula = kwargs["smooth_formula"]
else:
raise ValueError("smooth_formula is a required argument")
if "noise_formula" in kwargs:
noise_formula = kwargs["noise_formula"]
else:
raise ValueError("noise_formula is a required argument")
if "time" in kwargs:
time = kwargs["time"]
else:
raise ValueError("time is a required argument")
if "groups" in kwargs:
groups = kwargs["groups"]
else:
raise ValueError("groups is a required argument")
if subset is not None:
warnings.warn("'subset' is ignored")
if drop_cols is not None:
warnings.warn("'drop_cols' is ignored")
if isinstance(time, string_types):
time = np.asarray(data[time])
if isinstance(groups, string_types):
groups = np.asarray(data[groups])
exog_scale = patsy.dmatrix(scale_formula, data)
scale_design_info = exog_scale.design_info
scale_names = scale_design_info.column_names
exog_scale = np.asarray(exog_scale)
exog_smooth = patsy.dmatrix(smooth_formula, data)
smooth_design_info = exog_smooth.design_info
smooth_names = smooth_design_info.column_names
exog_smooth = np.asarray(exog_smooth)
exog_noise = patsy.dmatrix(noise_formula, data)
noise_design_info = exog_noise.design_info
noise_names = noise_design_info.column_names
exog_noise = np.asarray(exog_noise)
mod = super(ProcessMLE, cls).from_formula(
formula,
data=data,
subset=None,
exog_scale=exog_scale,
exog_smooth=exog_smooth,
exog_noise=exog_noise,
time=time,
groups=groups)
mod.data.scale_design_info = scale_design_info
mod.data.smooth_design_info = smooth_design_info
mod.data.noise_design_info = noise_design_info
mod.data.param_names = (mod.exog_names + scale_names +
smooth_names + noise_names)
return mod
def unpack(self, z):
"""
Split the packed parameter vector into blocks.
"""
# Mean parameters
pm = self.exog.shape[1]
mnpar = z[0:pm]
# Standard deviation parameters
pv = self.exog_scale.shape[1]
scpar = z[pm:pm + pv]
# Smoothness parameters
ps = self.exog_smooth.shape[1]
smpar = z[pm + pv:pm + pv + ps]
# Observation white noise standard deviation
nopar = z[pm + pv + ps:]
return mnpar, scpar, smpar, nopar
def _get_start(self):
# Use OLS to get starting values for mean structure parameters
model = sm.OLS(self.endog, self.exog)
result = model.fit()
m = self.exog_scale.shape[1] + self.exog_smooth.shape[1]
m += self.exog_noise.shape[1]
return np.concatenate((result.params, np.zeros(m)))
def loglike(self, params):
"""
Calculate the log-likelihood function for the model.
Parameters
----------
params : array_like
The packed parameters for the model.
Returns
-------
The log-likelihood value at the given parameter point.
Notes
-----
The mean, scaling, and smoothing parameters are packed into
a vector. Use `unpack` to access the component vectors.
"""
mnpar, scpar, smpar, nopar = self.unpack(params)
# Residuals
resid = self.endog - np.dot(self.exog, mnpar)
# Scaling parameters
sc = np.exp(np.dot(self.exog_scale, scpar))
# Smoothness parameters
sm = np.exp(np.dot(self.exog_smooth, smpar))
# White noise standard deviation
no = np.exp(np.dot(self.exog_noise, nopar))
# Get the log-likelihood
ll = 0.
for _, ix in self._groups_ix.items():
# Get the covariance matrix for this person.
cm = self.cov.get_cov(self.time[ix], sc[ix], sm[ix])
cm.flat[::cm.shape[0] + 1] += no[ix]**2
re = resid[ix]
ll -= 0.5 * np.linalg.slogdet(cm)[1]
ll -= 0.5 * np.dot(re, np.linalg.solve(cm, re))
if self.verbose:
print("L=", ll)
return ll
def score(self, params):
"""
Calculate the score function for the model.
Parameters
----------
params : array_like
The packed parameters for the model.
Returns
-------
The score vector at the given parameter point.
Notes
-----
The mean, scaling, and smoothing parameters are packed into
a vector. Use `unpack` to access the component vectors.
"""
mnpar, scpar, smpar, nopar = self.unpack(params)
pm, pv, ps = len(mnpar), len(scpar), len(smpar)
# Residuals
resid = self.endog - np.dot(self.exog, mnpar)
# Scaling
sc = np.exp(np.dot(self.exog_scale, scpar))
# Smoothness
sm = np.exp(np.dot(self.exog_smooth, smpar))
# White noise standard deviation
no = np.exp(np.dot(self.exog_noise, nopar))
# Get the log-likelihood
score = np.zeros(len(mnpar) + len(scpar) + len(smpar) + len(nopar))
for _, ix in self._groups_ix.items():
sc_i = sc[ix]
sm_i = sm[ix]
no_i = no[ix]
resid_i = resid[ix]
time_i = self.time[ix]
exog_i = self.exog[ix, :]
exog_scale_i = self.exog_scale[ix, :]
exog_smooth_i = self.exog_smooth[ix, :]
exog_noise_i = self.exog_noise[ix, :]
# Get the covariance matrix for this person.
cm = self.cov.get_cov(time_i, sc_i, sm_i)
cm.flat[::cm.shape[0] + 1] += no[ix]**2
cmi = np.linalg.inv(cm)
jacv, jacs = self.cov.jac(time_i, sc_i, sm_i)
# The derivatives for the mean parameters.
dcr = np.linalg.solve(cm, resid_i)
score[0:pm] += np.dot(exog_i.T, dcr)
# The derivatives for the scaling parameters.
rx = np.outer(resid_i, resid_i)
qm = np.linalg.solve(cm, rx)
qm = 0.5 * np.linalg.solve(cm, qm.T)
scx = sc_i[:, None] * exog_scale_i
for i, _ in enumerate(ix):
jq = np.sum(jacv[i] * qm)
score[pm:pm + pv] += jq * scx[i, :]
score[pm:pm + pv] -= 0.5 * np.sum(jacv[i] * cmi) * scx[i, :]
# The derivatives for the smoothness parameters.
smx = sm_i[:, None] * exog_smooth_i
for i, _ in enumerate(ix):
jq = np.sum(jacs[i] * qm)
score[pm + pv:pm + pv + ps] += jq * smx[i, :]
score[pm + pv:pm + pv + ps] -= (
0.5 * np.sum(jacs[i] * cmi) * smx[i, :])
# The derivatives with respect to the standard deviation parameters
sno = no_i[:, None]**2 * exog_noise_i
score[pm + pv + ps:] -= np.dot(cmi.flat[::cm.shape[0] + 1], sno)
bm = np.dot(cmi, np.dot(rx, cmi))
score[pm + pv + ps:] += np.dot(bm.flat[::bm.shape[0] + 1], sno)
if self.verbose:
print("|G|=", np.sqrt(np.sum(score * score)))
return score
def hessian(self, params):
hess = approx_fprime(params, self.score)
return hess
def fit(self, start_params=None, method=None, maxiter=None,
**kwargs):
"""
Fit a grouped Gaussian process regression using MLE.
Parameters
----------
start_params : array_like
Optional starting values.
method : string or array of strings
Method or sequence of methods for scipy optimize.
maxiter : int
The maximum number of iterations in the optimization.
Returns
-------
An instance of ProcessMLEResults.
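Examples
--------
A compact synthetic sketch (sizes, designs and the seed are
arbitrary); the scale, smoothness and noise structures are
intercept-only, and the fit may warn about convergence on such a
small noisy sample:
>>> import numpy as np
>>> np.random.seed(0)
>>> groups = np.repeat(np.arange(20), 5)
>>> time = np.tile(np.arange(5, dtype=float), 20)
>>> exog = np.random.normal(size=(100, 2))
>>> endog = exog[:, 0] + np.random.normal(size=100)
>>> ones = np.ones((100, 1))
>>> model = ProcessMLE(endog, exog, ones, ones, ones, time, groups)
>>> result = model.fit()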
"""
if "verbose" in kwargs:
self.verbose = kwargs["verbose"]
minim_opts = {}
if "minim_opts" in kwargs:
minim_opts = kwargs["minim_opts"]
if start_params is None:
start_params = self._get_start()
if isinstance(method, str):
method = [method]
elif method is None:
method = ["powell", "bfgs"]
for j, meth in enumerate(method):
if meth not in ("powell",):
def jac(x):
return -self.score(x)
else:
jac = None
if maxiter is not None:
if np.isscalar(maxiter):
minim_opts["maxiter"] = maxiter
else:
minim_opts["maxiter"] = maxiter[j % len(maxiter)]
f = minimize(
lambda x: -self.loglike(x),
method=meth,
x0=start_params,
jac=jac,
options=minim_opts)
if not f.success:
msg = "Fitting did not converge"
if jac is not None:
msg += ", |gradient|=%.6f" % np.sqrt(np.sum(f.jac**2))
if j < len(method) - 1:
msg += ", trying %s next..." % method[j+1]
warnings.warn(msg)
if np.isfinite(f.x).all():
start_params = f.x
hess = self.hessian(f.x)
try:
cov_params = -np.linalg.inv(hess)
except Exception:
cov_params = None
class rslt:
pass
r = rslt()
r.params = f.x
r.normalized_cov_params = cov_params
r.optim_retvals = f
r.scale = 1
rslt = ProcessMLEResults(self, r)
return rslt
def covariance(self, time, scale_params, smooth_params, scale_data,
smooth_data):
"""
Returns a Gaussian process covariance matrix.
Parameters
----------
time : array_like
The time points at which the fitted covariance matrix is
calculated.
scale_params : array_like
The regression parameters for the scaling part
of the covariance structure.
smooth_params : array_like
The regression parameters for the smoothing part
of the covariance structure.
scale_data : Dataframe
The data used to determine the scale parameter,
must have len(time) rows.
smooth_data: Dataframe
The data used to determine the smoothness parameter,
must have len(time) rows.
Returns
-------
A covariance matrix.
Notes
-----
If the model was fit using formulas, `scale` and `smooth` should
be Dataframes, containing all variables that were present in the
respective scaling and smoothing formulas used to fit the model.
Otherwise, `scale` and `smooth` should contain data arrays whose
columns align with the fitted scaling and smoothing parameters.
The covariance is only for the Gaussian process and does not include
the white noise variance.
"""
if not hasattr(self.data, "scale_design_info"):
sca = np.dot(scale_data, scale_params)
smo = np.dot(smooth_data, smooth_params)
else:
sc = patsy.dmatrix(self.data.scale_design_info, scale_data)
sm = patsy.dmatrix(self.data.smooth_design_info, smooth_data)
sca = np.exp(np.dot(sc, scale_params))
smo = np.exp(np.dot(sm, smooth_params))
return self.cov.get_cov(time, sca, smo)
def predict(self, params, exog=None, *args, **kwargs):
"""
Obtain predictions of the mean structure.
Parameters
----------
params : array_like
The model parameters, may be truncated to include only mean
parameters.
exog : array_like
The design matrix for the mean structure. If not provided,
the model's design matrix is used.
"""
if exog is None:
exog = self.exog
elif hasattr(self.data, "design_info"):
# Run the provided data through the formula if present
exog = patsy.dmatrix(self.data.design_info, exog)
if len(params) > exog.shape[1]:
params = params[0:exog.shape[1]]
return np.dot(exog, params)
class ProcessMLEResults(base.GenericLikelihoodModelResults):
"""
Results class for Gaussian process regression models.
"""
def __init__(self, model, mlefit):
super(ProcessMLEResults, self).__init__(
model, mlefit)
pa = model.unpack(mlefit.params)
self.mean_params = pa[0]
self.scale_params = pa[1]
self.smooth_params = pa[2]
self.no_params = pa[3]
self.df_resid = model.endog.shape[0] - len(mlefit.params)
self.k_exog = self.model.exog.shape[1]
self.k_scale = self.model.exog_scale.shape[1]
self.k_smooth = self.model.exog_smooth.shape[1]
self.k_noise = self.model.exog_noise.shape[1]
def predict(self, exog=None, transform=True, *args, **kwargs):
if not transform:
warnings.warn("'transform=False' is ignored in predict")
if len(args) > 0 or len(kwargs) > 0:
warnings.warn("extra arguments ignored in 'predict'")
return self.model.predict(self.params, exog)
def covariance(self, time, scale, smooth):
"""
Returns a fitted covariance matrix.
Parameters
----------
time : array_like
The time points at which the fitted covariance
matrix is calculated.
scale : array_like
The data used to determine the scale parameter,
must have len(time) rows.
smooth: array_like
The data used to determine the smoothness parameter,
must have len(time) rows.
Returns
-------
A covariance matrix.
Notes
-----
If the model was fit using formulas, `scale` and `smooth` should
be Dataframes, containing all variables that were present in the
respective scaling and smoothing formulas used to fit the model.
Otherwise, `scale` and `smooth` should be data arrays whose
columns align with the fitted scaling and smoothing parameters.
"""
return self.model.covariance(time, self.scale_params,
self.smooth_params, scale, smooth)
def covariance_group(self, group):
# Check if the group exists, since _groups_ix is a
# DefaultDict use len instead of catching a KeyError.
ix = self.model._groups_ix[group]
if len(ix) == 0:
msg = "Group '%s' does not exist" % str(group)
raise ValueError(msg)
scale_data = self.model.exog_scale[ix, :]
smooth_data = self.model.exog_smooth[ix, :]
_, scale_names, smooth_names, _ = self.model._split_param_names()
scale_data = pd.DataFrame(scale_data, columns=scale_names)
smooth_data = pd.DataFrame(smooth_data, columns=smooth_names)
time = self.model.time[ix]
return self.model.covariance(time,
self.scale_params,
self.smooth_params,
scale_data,
smooth_data)
def summary(self, yname=None, xname=None, title=None, alpha=0.05):
df = pd.DataFrame()
df["Type"] = (["Mean"] * self.k_exog + ["Scale"] * self.k_scale +
["Smooth"] * self.k_smooth + ["SD"] * self.k_noise)
df["coef"] = self.params
try:
df["std err"] = np.sqrt(np.diag(self.cov_params()))
except Exception:
df["std err"] = np.nan
from scipy.stats.distributions import norm
df["tvalues"] = df.coef / df["std err"]
df["P>|t|"] = 2 * norm.sf(np.abs(df.tvalues))
f = norm.ppf(1 - alpha / 2)
df["[%.3f" % (alpha / 2)] = df.coef - f * df["std err"]
df["%.3f]" % (1 - alpha / 2)] = df.coef + f * df["std err"]
df.index = self.model.data.param_names
summ = summary2.Summary()
if title is None:
title = "Gaussian process regression results"
summ.add_title(title)
summ.add_df(df)
return summ
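# Illustrative usage sketch (not part of the original module): assuming
# ``rslt`` is a fitted ProcessMLEResults instance obtained from a
# formula-based fit, the methods defined above might be used roughly as
# follows, where ``time_points``, ``scale_df`` and ``smooth_df`` are
# hypothetical arrays/DataFrames aligned with the scaling and smoothing
# formulas used to fit the model:
#
#     cov = rslt.covariance(time_points, scale_df, smooth_df)
#     cov_g = rslt.covariance_group("subject_1")
#     print(rslt.summary())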
"""
Recursive least squares model
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
import pandas as pd
from statsmodels.compat import unicode
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.statespace.mlemodel import (
MLEModel, MLEResults, MLEResultsWrapper, PredictionResults,
PredictionResultsWrapper)
from statsmodels.tsa.statespace.tools import concat
from statsmodels.tools.tools import Bunch
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.wrapper as wrap
# Columns are alpha = 0.1, 0.05, 0.025, 0.01, 0.005
_cusum_squares_scalars = np.array([
[1.0729830, 1.2238734, 1.3581015, 1.5174271, 1.6276236],
[-0.6698868, -0.6700069, -0.6701218, -0.6702672, -0.6703724],
[-0.5816458, -0.7351697, -0.8858694, -1.0847745, -1.2365861]
])
class RecursiveLS(MLEModel):
r"""
Recursive least squares
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like
Array of exogenous regressors, shaped nobs x k.
constraints : array_like, str, or tuple
- array : An r x k array where r is the number of restrictions to
test and k is the number of regressors. It is assumed that the
linear combination is equal to zero.
- str : The full hypotheses to test can be given as a string.
See the examples.
- tuple : A tuple of arrays in the form (R, q), ``q`` can be
either a scalar or a length p row vector.
Notes
-----
Recursive least squares (RLS) corresponds to expanding window ordinary
least squares (OLS).
This model applies the Kalman filter to compute recursive estimates of the
coefficients and recursive residuals.
References
----------
.. [*] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
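    Examples
    --------
    A minimal, illustrative sketch (the data below are synthetic and not
    part of the original docstring)::
        import numpy as np
        import statsmodels.api as sm
        nobs = 100
        x = sm.add_constant(np.random.normal(size=nobs))
        y = np.dot(x, [1.0, 0.5]) + np.random.normal(size=nobs)
        res = sm.RecursiveLS(y, x).fit()
        print(res.summary())
        res.plot_cusum()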
"""
def __init__(self, endog, exog, constraints=None, **kwargs):
# Standardize data
endog_using_pandas = _is_using_pandas(endog, None)
if not endog_using_pandas:
endog = np.asanyarray(endog)
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Handle constraints
self.k_constraints = 0
self._r_matrix = self._q_matrix = None
if constraints is not None:
from patsy import DesignInfo
from statsmodels.base.data import handle_data
data = handle_data(endog, exog, **kwargs)
names = data.param_names
LC = DesignInfo(names).linear_constraint(constraints)
self._r_matrix, self._q_matrix = LC.coefs, LC.constants
self.k_constraints = self._r_matrix.shape[0]
constraint_endog = np.zeros((len(endog), len(self._r_matrix)))
if endog_using_pandas:
constraint_endog = pd.DataFrame(constraint_endog,
index=endog.index)
endog = concat([endog, constraint_endog], axis=1)
endog.values[:, 1:] = self._q_matrix[:, 0]
else:
endog[:, 1:] = self._q_matrix[:, 0]
# Handle coefficient initialization
kwargs.setdefault('initialization', 'diffuse')
# Initialize the state space representation
super(RecursiveLS, self).__init__(
endog, k_states=self.k_exog, exog=exog, **kwargs)
# Use univariate filtering by default
self.ssm.filter_univariate = True
# Concentrate the scale out of the likelihood function
self.ssm.filter_concentrated = True
# Setup the state space representation
self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
self['design', 0] = self.exog[:, :, None].T
if self._r_matrix is not None:
self['design', 1:, :] = self._r_matrix[:, :, None]
self['transition'] = np.eye(self.k_states)
# Notice that the filter output does not depend on the measurement
# variance, so we set it here to 1
self['obs_cov', 0, 0] = 1.
self['transition'] = np.eye(self.k_states)
# Linear constraints are technically imposed by adding "fake" endog
# variables that are used during filtering, but for all model- and
# results-based purposes we want k_endog = 1.
if self._r_matrix is not None:
self.k_endog = 1
@classmethod
def from_formula(cls, formula, data, subset=None, constraints=None):
return super(MLEModel, cls).from_formula(formula, data, subset,
constraints=constraints)
def fit(self):
"""
Fits the model by application of the Kalman filter
Returns
-------
RecursiveLSResults
"""
smoother_results = self.smooth(return_ssm=True)
with self.ssm.fixed_scale(smoother_results.scale):
res = self.smooth()
return res
def filter(self, return_ssm=False, **kwargs):
# Get the state space output
result = super(RecursiveLS, self).filter([], transformed=True,
cov_type='none',
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
params = result.filtered_state[:, -1]
cov_kwds = {
'custom_cov_type': 'nonrobust',
'custom_cov_params': result.filtered_state_cov[:, :, -1],
                'custom_description': ('Parameters and covariance matrix '
                                       'estimates are RLS estimates '
                                       'conditional on the entire sample.')
}
result = RecursiveLSResultsWrapper(
RecursiveLSResults(self, params, result, cov_type='custom',
cov_kwds=cov_kwds)
)
return result
def smooth(self, return_ssm=False, **kwargs):
# Get the state space output
result = super(RecursiveLS, self).smooth([], transformed=True,
cov_type='none',
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
params = result.filtered_state[:, -1]
cov_kwds = {
'custom_cov_type': 'nonrobust',
'custom_cov_params': result.filtered_state_cov[:, :, -1],
                'custom_description': ('Parameters and covariance matrix '
                                       'estimates are RLS estimates '
                                       'conditional on the entire sample.')
}
result = RecursiveLSResultsWrapper(
RecursiveLSResults(self, params, result, cov_type='custom',
cov_kwds=cov_kwds)
)
return result
@property
def endog_names(self):
endog_names = super(RecursiveLS, self).endog_names
return endog_names[0] if isinstance(endog_names, list) else endog_names
@property
def param_names(self):
return self.exog_names
@property
def start_params(self):
# Only parameter is the measurement disturbance standard deviation
return np.zeros(0)
def update(self, params, **kwargs):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
pass
class RecursiveLSResults(MLEResults):
"""
Class to hold results from fitting a recursive least squares model.
Parameters
----------
model : RecursiveLS instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the recursive least squares
model instance.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg',
**kwargs):
super(RecursiveLSResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
# Since we are overriding params with things that aren't MLE params,
# need to adjust df's
q = max(self.loglikelihood_burn, self.k_diffuse_states)
self.df_model = q - self.model.k_constraints
self.df_resid = self.nobs_effective - self.df_model
# Save _init_kwds
self._init_kwds = self.model._get_init_kwds()
# Save the model specification
self.specification = Bunch(**{
'k_exog': self.model.k_exog,
'k_constraints': self.model.k_constraints})
# Adjust results to remove "faux" endog from the constraints
if self.model._r_matrix is not None:
for name in ['forecasts', 'forecasts_error',
'forecasts_error_cov','standardized_forecasts_error',
'forecasts_error_diffuse_cov']:
setattr(self, name, getattr(self, name)[0:1])
@property
def recursive_coefficients(self):
"""
Estimates of regression coefficients, recursively estimated
Returns
-------
out: Bunch
Has the following attributes:
- `filtered`: a time series array with the filtered estimate of
the component
- `filtered_cov`: a time series array with the filtered estimate of
the variance/covariance of the component
- `smoothed`: a time series array with the smoothed estimate of
the component
- `smoothed_cov`: a time series array with the smoothed estimate of
the variance/covariance of the component
- `offset`: an integer giving the offset in the state vector where
this component begins
"""
out = None
spec = self.specification
start = offset = 0
end = offset + spec.k_exog
out = Bunch(
filtered=self.filtered_state[start:end],
filtered_cov=self.filtered_state_cov[start:end, start:end],
smoothed=None, smoothed_cov=None,
offset=offset
)
if self.smoothed_state is not None:
out.smoothed = self.smoothed_state[start:end]
if self.smoothed_state_cov is not None:
out.smoothed_cov = (
self.smoothed_state_cov[start:end, start:end])
return out
@cache_readonly
def resid_recursive(self):
r"""
Recursive residuals
Returns
-------
resid_recursive : array_like
An array of length `nobs` holding the recursive
residuals.
Notes
-----
These quantities are defined in, for example, Harvey (1989)
section 5.4. In fact, there he defines the standardized innovations in
equation 5.4.1, but in his version they have non-unit variance, whereas
the standardized forecast errors computed by the Kalman filter here
assume unit variance. To convert to Harvey's definition, we need to
multiply by the standard deviation.
Harvey notes that in smaller samples, "although the second moment
of the :math:`\tilde \sigma_*^{-1} \tilde v_t`'s is unity, the
variance is not necessarily equal to unity as the mean need not be
equal to zero", and he defines an alternative version (which are
not provided here).
"""
return (self.filter_results.standardized_forecasts_error[0] *
self.scale**0.5)
@cache_readonly
def cusum(self):
r"""
Cumulative sum of standardized recursive residuals statistics
Returns
-------
cusum : array_like
An array of length `nobs - k_exog` holding the
CUSUM statistics.
Notes
-----
The CUSUM statistic takes the form:
.. math::
W_t = \frac{1}{\hat \sigma} \sum_{j=k+1}^t w_j
where :math:`w_j` is the recursive residual at time :math:`j` and
:math:`\hat \sigma` is the estimate of the standard deviation
from the full sample.
Excludes the first `k_exog` datapoints.
Due to differences in the way :math:`\hat \sigma` is calculated, the
output of this function differs slightly from the output in the
        R package strucchange and the Stata contributed .ado file cusum6. The
calculation in this package is consistent with the description of
Brown et al. (1975)
References
----------
.. [*] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
"""
d = max(self.nobs_diffuse, self.loglikelihood_burn)
return (np.cumsum(self.resid_recursive[d:]) /
np.std(self.resid_recursive[d:], ddof=1))
@cache_readonly
def cusum_squares(self):
r"""
Cumulative sum of squares of standardized recursive residuals
statistics
Returns
-------
cusum_squares : array_like
An array of length `nobs - k_exog` holding the
CUSUM of squares statistics.
Notes
-----
The CUSUM of squares statistic takes the form:
.. math::
s_t = \left ( \sum_{j=k+1}^t w_j^2 \right ) \Bigg /
\left ( \sum_{j=k+1}^T w_j^2 \right )
where :math:`w_j` is the recursive residual at time :math:`j`.
Excludes the first `k_exog` datapoints.
References
----------
.. [*] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
"""
d = max(self.nobs_diffuse, self.loglikelihood_burn)
numer = np.cumsum(self.resid_recursive[d:]**2)
denom = numer[-1]
return numer / denom
@cache_readonly
def llf_recursive_obs(self):
"""
(float) Loglikelihood at observation, computed from recursive residuals
"""
from scipy.stats import norm
return np.log(norm.pdf(self.resid_recursive, loc=0,
scale=self.scale**0.5))
@cache_readonly
def llf_recursive(self):
"""
(float) Loglikelihood defined by recursive residuals, equivalent to OLS
"""
return np.sum(self.llf_recursive_obs)
@cache_readonly
def ssr(self):
"""ssr"""
d = max(self.nobs_diffuse, self.loglikelihood_burn)
return (self.nobs - d) * self.filter_results.obs_cov[0, 0, 0]
@cache_readonly
def centered_tss(self):
"""Centered tss"""
return np.sum((self.filter_results.endog[0] -
np.mean(self.filter_results.endog))**2)
@cache_readonly
def uncentered_tss(self):
"""uncentered tss"""
return np.sum((self.filter_results.endog[0])**2)
@cache_readonly
def ess(self):
"""esss"""
if self.k_constant:
return self.centered_tss - self.ssr
else:
return self.uncentered_tss - self.ssr
@cache_readonly
def rsquared(self):
"""rsquared"""
if self.k_constant:
return 1 - self.ssr / self.centered_tss
else:
return 1 - self.ssr / self.uncentered_tss
@cache_readonly
def mse_model(self):
"""mse_model"""
return self.ess / self.df_model
@cache_readonly
def mse_resid(self):
"""mse_resid"""
return self.ssr / self.df_resid
@cache_readonly
def mse_total(self):
"""mse_total"""
if self.k_constant:
return self.centered_tss / (self.df_resid + self.df_model)
else:
return self.uncentered_tss / (self.df_resid + self.df_model)
def get_prediction(self, start=None, end=None, dynamic=False,
index=None, **kwargs):
# Note: need to override this, because we currently don't support
# dynamic prediction or forecasts when there are constraints.
if start is None:
start = self.model._index[0]
# Handle start, end, dynamic
start, end, out_of_sample, prediction_index = (
self.model._get_prediction_index(start, end, index))
# Handle `dynamic`
if isinstance(dynamic, (bytes, unicode)):
dynamic, _, _ = self.model._get_index_loc(dynamic)
if self.model._r_matrix is not None and (out_of_sample or dynamic):
            raise NotImplementedError('Cannot yet perform out-of-sample or '
                                      'dynamic prediction in models with '
                                      'constraints.')
# Perform the prediction
# This is a (k_endog x npredictions) array; don't want to squeeze in
# case of npredictions = 1
prediction_results = self.filter_results.predict(
start, end + out_of_sample + 1, dynamic, **kwargs)
# Return a new mlemodel.PredictionResults object
return PredictionResultsWrapper(PredictionResults(
self, prediction_results, row_labels=prediction_index))
get_prediction.__doc__ = MLEResults.get_prediction.__doc__
def plot_recursive_coefficient(self, variables=0, alpha=0.05,
legend_loc='upper left', fig=None,
figsize=None):
r"""
Plot the recursively estimated coefficients on a given variable
Parameters
----------
variables : int or str or iterable of int or string, optional
Integer index or string name of the variable whose coefficient will
be plotted. Can also be an iterable of integers or strings. Default
is the first variable.
alpha : float, optional
The confidence intervals for the coefficient are (1 - alpha) %
legend_loc : string, optional
The location of the legend in the plot. Default is upper left.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
All plots contain (1 - `alpha`) % confidence intervals.
"""
# Get variables
if isinstance(variables, (int, str)):
variables = [variables]
k_variables = len(variables)
# If a string was given for `variable`, try to get it from exog names
exog_names = self.model.exog_names
for i in range(k_variables):
variable = variables[i]
if isinstance(variable, str):
variables[i] = exog_names.index(variable)
# Create the plot
from scipy.stats import norm
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
plt = _import_mpl()
fig = create_mpl_fig(fig, figsize)
for i in range(k_variables):
variable = variables[i]
ax = fig.add_subplot(k_variables, 1, i + 1)
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(self.nobs)
d = max(self.nobs_diffuse, self.loglikelihood_burn)
# Plot the coefficient
coef = self.recursive_coefficients
ax.plot(dates[d:], coef.filtered[variable, d:],
label='Recursive estimates: %s' % exog_names[variable])
# Legend
handles, labels = ax.get_legend_handles_labels()
# Get the critical value for confidence intervals
if alpha is not None:
critical_value = norm.ppf(1 - alpha / 2.)
# Plot confidence intervals
std_errors = np.sqrt(coef.filtered_cov[variable, variable, :])
ci_lower = (
coef.filtered[variable] - critical_value * std_errors)
ci_upper = (
coef.filtered[variable] + critical_value * std_errors)
ci_poly = ax.fill_between(
dates[d:], ci_lower[d:], ci_upper[d:], alpha=0.2
)
ci_label = ('$%.3g \\%%$ confidence interval'
% ((1 - alpha)*100))
# Only add CI to legend for the first plot
if i == 0:
# Proxy artist for fill_between legend entry
# See https://matplotlib.org/1.3.1/users/legend_guide.html
p = plt.Rectangle((0, 0), 1, 1,
fc=ci_poly.get_facecolor()[0])
handles.append(p)
labels.append(ci_label)
ax.legend(handles, labels, loc=legend_loc)
# Remove xticks for all but the last plot
if i < k_variables - 1:
ax.xaxis.set_ticklabels([])
fig.tight_layout()
return fig
def _cusum_significance_bounds(self, alpha, ddof=0, points=None):
"""
Parameters
----------
alpha : float, optional
The significance bound is alpha %.
ddof : int, optional
The number of periods additional to `k_exog` to exclude in
constructing the bounds. Default is zero. This is usually used
only for testing purposes.
points : iterable, optional
The points at which to evaluate the significance bounds. Default is
two points, beginning and end of the sample.
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lw, uw) because they burn the first k_exog + 1 periods instead of the
first k_exog. If this change is performed
(so that `tmp = (self.nobs - d - 1)**0.5`), then the output here
matches cusum6.
The cusum6 behavior does not seem to be consistent with
Brown et al. (1975); it is likely they did that because they needed
three initial observations to get the initial OLS estimates, whereas
we do not need to do that.
"""
# Get the constant associated with the significance level
if alpha == 0.01:
scalar = 1.143
elif alpha == 0.05:
scalar = 0.948
elif alpha == 0.10:
scalar = 0.950
else:
raise ValueError('Invalid significance level.')
# Get the points for the significance bound lines
d = max(self.nobs_diffuse, self.loglikelihood_burn)
tmp = (self.nobs - d - ddof)**0.5
def upper_line(x):
return scalar * tmp + 2 * scalar * (x - d) / tmp
if points is None:
points = np.array([d, self.nobs])
return -upper_line(points), upper_line(points)
def plot_cusum(self, alpha=0.05, legend_loc='upper left',
fig=None, figsize=None):
r"""
Plot the CUSUM statistic and significance bounds.
Parameters
----------
alpha : float, optional
The plotted significance bounds are alpha %.
legend_loc : string, optional
The location of the legend in the plot. Default is upper left.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Evidence of parameter instability may be found if the CUSUM statistic
moves out of the significance bounds.
References
----------
.. [*] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
"""
# Create the plot
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
ax = fig.add_subplot(1, 1, 1)
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(self.nobs)
d = max(self.nobs_diffuse, self.loglikelihood_burn)
# Plot cusum series and reference line
ax.plot(dates[d:], self.cusum, label='CUSUM')
ax.hlines(0, dates[d], dates[-1], color='k', alpha=0.3)
# Plot significance bounds
lower_line, upper_line = self._cusum_significance_bounds(alpha)
ax.plot([dates[d], dates[-1]], upper_line, 'k--',
label='%d%% significance' % (alpha * 100))
ax.plot([dates[d], dates[-1]], lower_line, 'k--')
ax.legend(loc=legend_loc)
return fig
def _cusum_squares_significance_bounds(self, alpha, points=None):
"""
Notes
-----
Comparing against the cusum6 package for Stata, this does not produce
exactly the same confidence bands (which are produced in cusum6 by
lww, uww) because they use a different method for computing the
critical value; in particular, they use tabled values from
Table C, pp. 364-365 of "The Econometric Analysis of Time Series"
Harvey, (1990), and use the value given to 99 observations for any
larger number of observations. In contrast, we use the approximating
critical values suggested in Edgerton and Wells (1994) which allows
computing relatively good approximations for any number of
observations.
"""
# Get the approximate critical value associated with the significance
# level
d = max(self.nobs_diffuse, self.loglikelihood_burn)
n = 0.5 * (self.nobs - d) - 1
try:
ix = [0.1, 0.05, 0.025, 0.01, 0.005].index(alpha / 2)
except ValueError:
raise ValueError('Invalid significance level.')
scalars = _cusum_squares_scalars[:, ix]
crit = scalars[0] / n**0.5 + scalars[1] / n + scalars[2] / n**1.5
# Get the points for the significance bound lines
if points is None:
points = np.array([d, self.nobs])
line = (points - d) / (self.nobs - d)
return line - crit, line + crit
def plot_cusum_squares(self, alpha=0.05, legend_loc='upper left',
fig=None, figsize=None):
r"""
Plot the CUSUM of squares statistic and significance bounds.
Parameters
----------
alpha : float, optional
The plotted significance bounds are alpha %.
legend_loc : string, optional
The location of the legend in the plot. Default is upper left.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Evidence of parameter instability may be found if the CUSUM of squares
statistic moves out of the significance bounds.
Critical values used in creating the significance bounds are computed
using the approximate formula of [1]_.
References
----------
.. [*] Brown, R. L., J. Durbin, and J. M. Evans. 1975.
"Techniques for Testing the Constancy of
Regression Relationships over Time."
Journal of the Royal Statistical Society.
Series B (Methodological) 37 (2): 149-92.
.. [1] Edgerton, David, and Curt Wells. 1994.
"Critical Values for the Cusumsq Statistic
in Medium and Large Sized Samples."
Oxford Bulletin of Economics and Statistics 56 (3): 355-65.
"""
# Create the plot
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
ax = fig.add_subplot(1, 1, 1)
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(self.nobs)
d = max(self.nobs_diffuse, self.loglikelihood_burn)
# Plot cusum series and reference line
ax.plot(dates[d:], self.cusum_squares, label='CUSUM of squares')
ref_line = (np.arange(d, self.nobs) - d) / (self.nobs - d)
ax.plot(dates[d:], ref_line, 'k', alpha=0.3)
# Plot significance bounds
lower_line, upper_line = self._cusum_squares_significance_bounds(alpha)
ax.plot([dates[d], dates[-1]], upper_line, 'k--',
label='%d%% significance' % (alpha * 100))
ax.plot([dates[d], dates[-1]], lower_line, 'k--')
ax.legend(loc=legend_loc)
return fig
class RecursiveLSResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(RecursiveLSResultsWrapper, # noqa:E305
RecursiveLSResults) |
|
sqlalchemy__sqlalchemy | collection_api.rst | Module doc | Generate documentation for this module | MIT License | sqlalchemy__sqlalchemy/doc/build/orm/collection_api.rst | [
"sqlalchemy__sqlalchemy/lib/sqlalchemy/orm/collections.py"
] | Collection Customization and API Details
The _orm.relationship function defines a linkage between two classes.
When the linkage defines a one-to-many or many-to-many relationship,
it's represented as a Python collection when objects are loaded and
manipulated. This section presents additional information about
collection configuration and techniques.
Customizing Collection Access
Mapping a one-to-many or many-to-many relationship results in a
collection of values accessible through an attribute on the parent
instance. The two common collection types for these are list and set. In
Declarative <orm_declarative_styles_toplevel> mappings that use _orm.Mapped,
the collection type is established by placing it within the
_orm.Mapped container, as demonstrated in the Parent.children collection
below where list is used:
from typing import List
from sqlalchemy import ForeignKey
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
class Base(DeclarativeBase):
pass
class Parent(Base):
__tablename__ = "parent"
parent_id: Mapped[int] = mapped_column(primary_key=True)
# use a list
children: Mapped[List["Child"]] = relationship()
class Child(Base):
__tablename__ = "child"
child_id: Mapped[int] = mapped_column(primary_key=True)
    parent_id: Mapped[int] = mapped_column(ForeignKey("parent.parent_id"))
Or for a set, illustrated in the same Parent.children collection:
from typing import Set
from sqlalchemy import ForeignKey
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
class Base(DeclarativeBase):
pass
class Parent(Base):
__tablename__ = "parent"
parent_id: Mapped[int] = mapped_column(primary_key=True)
# use a set
children: Mapped[Set["Child"]] = relationship()
class Child(Base):
__tablename__ = "child"
child_id: Mapped[int] = mapped_column(primary_key=True)
    parent_id: Mapped[int] = mapped_column(ForeignKey("parent.parent_id"))
Note
If using Python 3.7 or 3.8, annotations for collections need to use
typing.List or typing.Set, e.g. Mapped[List["Child"]] or
Mapped[Set["Child"]]; the list and set Python built-ins don't yet
support generic annotation in these Python versions, such as:
from typing import List
class Parent(Base):
__tablename__ = "parent"
parent_id: Mapped[int] = mapped_column(primary_key=True)
    # use a List, Python 3.8 and earlier
    children: Mapped[List["Child"]] = relationship()
When using mappings without the _orm.Mapped annotation, such as when
using imperative mappings <orm_imperative_mapping> or untyped Python
code, as well as in a few special cases, the collection class for a
_orm.relationship can always be specified directly using the
_orm.relationship.collection_class parameter:
# non-annotated mapping
class Parent(Base):
__tablename__ = "parent"
parent_id = mapped_column(Integer, primary_key=True)
children = relationship("Child", collection_class=set)
class Child(Base):
__tablename__ = "child"
child_id = mapped_column(Integer, primary_key=True)
    parent_id = mapped_column(ForeignKey("parent.parent_id"))
In the absence of _orm.relationship.collection_class or _orm.Mapped, the
default collection type is list.
Beyond the list and set builtins, there is also support for two varieties of
dictionary, described below at orm_dictionary_collection. Any arbitrary
mutable sequence type can also be set up as the target collection, with
some additional configuration steps; this is
described in the section orm_custom_collection.
Dictionary Collections
A little extra detail is needed when using a dictionary as a collection.
This is because objects are always loaded from the database as lists, and a
key-generation strategy must be available to populate the dictionary
correctly. The .attribute_keyed_dict function is by far the most common
way to achieve a simple dictionary collection. It produces a dictionary
class that will apply a particular attribute of the mapped class as a
key. Below we map an Item class containing a dictionary of Note items
keyed to the Note.keyword attribute. When using .attribute_keyed_dict,
the _orm.Mapped annotation may be typed using the _orm.KeyFuncDict or
just plain dict as illustrated in the following example. However, the
_orm.relationship.collection_class parameter is required in this case so
that the .attribute_keyed_dict may be appropriately parametrized:
from typing import Dict
from typing import Optional
from sqlalchemy import ForeignKey
from sqlalchemy.orm import attribute_keyed_dict
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import relationship
class Base(DeclarativeBase):
pass
class Item(Base):
__tablename__ = "item"
id: Mapped[int] = mapped_column(primary_key=True)
notes: Mapped[Dict[str, "Note"]] = relationship(
collection_class=attribute_keyed_dict("keyword"),
cascade="all, delete-orphan",
)
class Note(Base):
__tablename__ = "note"
id: Mapped[int] = mapped_column(primary_key=True)
item_id: Mapped[int] = mapped_column(ForeignKey("item.id"))
keyword: Mapped[str]
text: Mapped[Optional[str]]
def __init__(self, keyword: str, text: str):
self.keyword = keyword
self.text = text
Item.notes is then a dictionary:
>>> item = Item()
>>> item.notes["a"] = Note("a", "atext")
>>> item.notes.items()
{'a': <__main__.Note object at 0x2eaaf0>}
.attribute_keyed_dict will ensure that the .keyword attribute of each
Note complies with the key in the dictionary. For example, when assigning to
Item.notes, the dictionary key we supply must match that of the actual
Note object:
item = Item()
item.notes = {
"a": Note("a", "atext"),
"b": Note("b", "btext"),
}
The attribute which .attribute_keyed_dict uses as a key does not need to
be mapped at all! Using a regular Python @property allows virtually any
detail or combination of details about the object to be used as the key,
as below when we establish it as a tuple of Note.keyword and the first
ten letters of the Note.text field:
class Item(Base):
__tablename__ = "item"
id: Mapped[int] = mapped_column(primary_key=True)
notes: Mapped[Dict[str, "Note"]] = relationship(
collection_class=attribute_keyed_dict("note_key"),
back_populates="item",
cascade="all, delete-orphan",
)
class Note(Base):
__tablename__ = "note"
id: Mapped[int] = mapped_column(primary_key=True)
item_id: Mapped[int] = mapped_column(ForeignKey("item.id"))
keyword: Mapped[str]
text: Mapped[str]
item: Mapped["Item"] = relationship()
@property
def note_key(self):
return (self.keyword, self.text[0:10])
def __init__(self, keyword: str, text: str):
self.keyword = keyword
self.text = text
Above we added a Note.item relationship, with a bi-directional
_orm.relationship.back_populates configuration. Assigning to this
reverse relationship, the Note is added to the Item.notes dictionary and
the key is generated for us automatically:
>>> item = Item()
>>> n1 = Note("a", "atext")
>>> n1.item = item
>>> item.notes
{('a', 'atext'): <__main__.Note object at 0x2eaaf0>}
Other built-in dictionary types include .column_keyed_dict, which is
almost like .attribute_keyed_dict except given the _schema.Column object
directly:
from sqlalchemy.orm import column_keyed_dict
class Item(Base):
__tablename__ = "item"
id: Mapped[int] = mapped_column(primary_key=True)
notes: Mapped[Dict[str, "Note"]] = relationship(
collection_class=column_keyed_dict(Note.__table__.c.keyword),
cascade="all, delete-orphan",
)
as well as .mapped_collection which is passed any callable function.
Note that it's usually easier to use .attribute_keyed_dict along with a
@property as mentioned earlier:
from sqlalchemy.orm import mapped_collection
class Item(Base):
__tablename__ = "item"
id: Mapped[int] = mapped_column(primary_key=True)
notes: Mapped[Dict[str, "Note"]] = relationship(
collection_class=mapped_collection(lambda note: note.text[0:10]),
cascade="all, delete-orphan",
)
Dictionary mappings are often combined with the "Association Proxy"
extension to produce streamlined dictionary views. See
proxying_dictionaries and composite_association_proxy for examples.
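As a brief, hedged sketch of that combination (the note_values attribute
name below is illustrative and not taken from the original examples), an
association proxy can expose the keyword-keyed Item.notes collection as a
dictionary of plain text strings:
from sqlalchemy.ext.associationproxy import association_proxy
class Item(Base):
    __tablename__ = "item"
    id: Mapped[int] = mapped_column(primary_key=True)
    notes: Mapped[Dict[str, "Note"]] = relationship(
        collection_class=attribute_keyed_dict("keyword"),
        cascade="all, delete-orphan",
    )
    # dictionary view keyed by keyword, whose values are each Note's text
    note_values = association_proxy("notes", "text")
With this in place, reading item.note_values["a"] returns the text of the
Note keyed "a"; to support assignment through the proxy, a creator callable
is typically passed to association_proxy, omitted here for brevity.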
Dealing with Key Mutations and back-populating for Dictionary collections
When using .attribute_keyed_dict, the "key" for the dictionary is taken
from an attribute on the target object. Changes to this key are not
tracked. This means that the key must already be assigned by the time the
object is first added to the collection, and if the key changes afterwards,
the collection will not be mutated.
A typical example where this might be an issue is when relying upon
backrefs to populate an attribute mapped collection. Given the
following:
class A(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
bs: Mapped[Dict[str, "B"]] = relationship(
collection_class=attribute_keyed_dict("data"),
back_populates="a",
)
class B(Base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True)
a_id: Mapped[int] = mapped_column(ForeignKey("a.id"))
data: Mapped[str]
a: Mapped["A"] = relationship(back_populates="bs")
Above, if we create a B() that refers to a specific A(), the back
populates will then add the B() to the A.bs collection; however, if the
value of B.data is not set yet, the key will be None:
>>> a1 = A()
>>> b1 = B(a=a1)
>>> a1.bs
{None: <test3.B object at 0x7f7b1023ef70>}
Setting b1.data after the fact does not update the collection:
>>> b1.data = "the key"
>>> a1.bs
{None: <test3.B object at 0x7f7b1023ef70>}
This can also be seen if one attempts to set up B() in the constructor.
The order of arguments changes the result:
>>> B(a=a1, data="the key")
<test3.B object at 0x7f7b10114280>
>>> a1.bs
{None: <test3.B object at 0x7f7b10114280>}
vs:
>>> B(data="the key", a=a1)
<test3.B object at 0x7f7b10114340>
>>> a1.bs
{'the key': <test3.B object at 0x7f7b10114340>}
If backrefs are being used in this way, ensure that attributes are
populated in the correct order using an __init__ method.
An event handler such as the following may also be used to track changes
in the collection:
from sqlalchemy import event
from sqlalchemy.orm import attributes
@event.listens_for(B.data, "set")
def set_item(obj, value, previous, initiator):
if obj.a is not None:
previous = None if previous == attributes.NO_VALUE else previous
obj.a.bs[value] = obj
obj.a.bs.pop(previous)
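With a listener like this in place, assigning to B.data after the objects
are associated should re-key the collection entry; an illustrative
expectation following the handler's logic above:
>>> a1 = A()
>>> b1 = B(a=a1)
>>> b1.data = "the key"
>>> a1.bs
{'the key': <test3.B object at ...>}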
Custom Collection Implementations
You can use your own types for collections as well. In simple cases,
inheriting from list or set, adding custom behavior, is all that's
needed. In other cases, special decorators are needed to tell SQLAlchemy
more detail about how the collection operates.
Do I need a custom collection implementation?
In most cases not at all! The most common use cases for a "custom"
collection is one that validates or marshals incoming values into a new
form, such as a string that becomes a class instance, or one which goes
a step beyond and represents the data internally in some fashion,
presenting a "view" of that data on the outside of a different form.
For the first use case, the _orm.validates decorator is by far the
simplest way to intercept incoming values in all cases for the purposes
of validation and simple marshaling. See simple_validators for an
example of this.
For the second use case, the associationproxy_toplevel extension is a
well-tested, widely used system that provides a read/write "view" of a
collection in terms of some attribute present on the target object. As
the target attribute can be a @property that returns virtually anything,
a wide array of "alternative" views of a collection can be constructed
with just a few functions. This approach leaves the underlying mapped
collection unaffected and avoids the need to carefully tailor collection
behavior on a method-by-method basis.
Customized collections are useful when the collection needs to have
special behaviors upon access or mutation operations that can't
otherwise be modeled externally to the collection. They can of course be
combined with the above two approaches.
Collections in SQLAlchemy are transparently instrumented.
Instrumentation means that normal operations on the collection are
tracked and result in changes being written to the database at flush
time. Additionally, collection operations can fire events which indicate
some secondary operation must take place. Examples of a secondary
operation include saving the child item in the parent's
~sqlalchemy.orm.session.Session (i.e. the save-update cascade), as well
as synchronizing the state of a bi-directional relationship (i.e. a
.backref).
The collections package understands the basic interface of lists, sets
and dicts and will automatically apply instrumentation to those built-in
types and their subclasses. Object-derived types that implement a basic
collection interface are detected and instrumented via duck-typing:
class ListLike:
def __init__(self):
self.data = []
def append(self, item):
self.data.append(item)
def remove(self, item):
self.data.remove(item)
def extend(self, items):
self.data.extend(items)
def __iter__(self):
return iter(self.data)
def foo(self):
return "foo"
append, remove, and extend are known members of list, and will be
instrumented automatically. __iter__ is not a mutator method and won't
be instrumented, and foo won't be either.
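A class like ListLike can then be used as the collection class in the same
way as the built-ins; a minimal sketch reusing the non-annotated mapping
style shown earlier (the Parent and Child names are repeated purely for
illustration):
class Parent(Base):
    __tablename__ = "parent"
    parent_id = mapped_column(Integer, primary_key=True)
    children = relationship("Child", collection_class=ListLike)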
Duck-typing (i.e. guesswork) isn't rock-solid, of course, so you can be
explicit about the interface you are implementing by providing an
__emulates__ class attribute:
class SetLike:
__emulates__ = set
def __init__(self):
self.data = set()
def append(self, item):
self.data.add(item)
def remove(self, item):
self.data.remove(item)
def __iter__(self):
return iter(self.data)
This class looks similar to a Python list (i.e. "list-like") as it has
an append method, but the __emulates__ attribute forces it to be treated
as a set. remove is known to be part of the set interface and will be
instrumented.
But this class won't work quite yet: a little glue is needed to adapt it
for use by SQLAlchemy. The ORM needs to know which methods to use to
append, remove and iterate over members of the collection. When using a
type like list or set, the appropriate methods are well-known and used
automatically when present. However the class above, which only roughly
resembles a set, does not provide the expected add method, so we must
indicate to the ORM the method that will instead take the place of the
add method, in this case using a decorator @collection.appender; this is
illustrated in the next section.
Annotating Custom Collections via Decorators
Decorators can be used to tag the individual methods the ORM needs to
manage collections. Use them when your class doesn't quite meet the
regular interface for its container type, or when you otherwise would
like to use a different method to get the job done.
from sqlalchemy.orm.collections import collection
class SetLike:
__emulates__ = set
def __init__(self):
self.data = set()
@collection.appender
def append(self, item):
self.data.add(item)
def remove(self, item):
self.data.remove(item)
def __iter__(self):
return iter(self.data)
And that's all that's needed to complete the example. SQLAlchemy will
add instances via the append method. remove and __iter__ are the default
methods for sets and will be used for removing and iteration. Default
methods can be changed as well:
from sqlalchemy.orm.collections import collection
class MyList(list):
@collection.remover
def zark(self, item):
# do something special...
...
@collection.iterator
def hey_use_this_instead_for_iteration(self):
...
There is no requirement to be "list-like" or "set-like" at all.
Collection classes can be any shape, so long as they have the append,
remove and iterate interface marked for SQLAlchemy's use. Append and
remove methods will be called with a mapped entity as the single
argument, and iterator methods are called with no arguments and must
return an iterator.
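For example, a collection with a completely custom shape might look like the
following sketch (the Bucket class and its method names are invented for
illustration and are not part of SQLAlchemy):
from sqlalchemy.orm.collections import collection
class Bucket:
    def __init__(self):
        self._storage = {}
    @collection.appender
    def put(self, entity):
        # the ORM calls this with the mapped entity as the single argument
        self._storage[id(entity)] = entity
    @collection.remover
    def take(self, entity):
        self._storage.pop(id(entity), None)
    @collection.iterator
    def contents(self):
        # called with no arguments; must return an iterator
        return iter(self._storage.values())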
Custom Dictionary-Based Collections
The .KeyFuncDict class can be used as a base class for your custom types
or as a mix-in to quickly add dict collection support to other classes.
It uses a keying function to delegate to __setitem__ and __delitem__:
from sqlalchemy.orm.collections import KeyFuncDict
class MyNodeMap(KeyFuncDict):
"""Holds 'Node' objects, keyed by the 'name' attribute."""
def __init__(self, *args, **kw):
super().__init__(keyfunc=lambda node: node.name)
dict.__init__(self, *args, **kw)
When subclassing .KeyFuncDict, user-defined versions of __setitem__() or
__delitem__() should be decorated with
.collection.internally_instrumented, if they call down to those same
methods on .KeyFuncDict. This is because the methods on .KeyFuncDict are
already instrumented - calling them from within an already instrumented
call can cause events to be fired off repeatedly, or inappropriately,
leading to internal state corruption in rare cases:
from sqlalchemy.orm.collections import KeyFuncDict, collection
class MyKeyFuncDict(KeyFuncDict):
"""Use @internally_instrumented when your methods
call down to already-instrumented methods.
"""
@collection.internally_instrumented
def __setitem__(self, key, value, _sa_initiator=None):
# do something with key, value
super(MyKeyFuncDict, self).__setitem__(key, value, _sa_initiator)
@collection.internally_instrumented
def __delitem__(self, key, _sa_initiator=None):
# do something with key
super(MyKeyFuncDict, self).__delitem__(key, _sa_initiator)
The ORM understands the dict interface just like lists and sets, and
will automatically instrument all "dict-like" methods if you choose to
subclass dict or provide dict-like collection behavior in a duck-typed
class. You must decorate appender and remover methods, however; there
are no compatible methods in the basic dictionary interface for
SQLAlchemy to use by default. Iteration will go through values() unless
otherwise decorated.
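For example, a minimal dict subclass keyed by an attribute of the mapped
entity might look like the following sketch (the NodesByName name and the
assumption of a "name" attribute on the entity are illustrative):
from sqlalchemy.orm.collections import collection
class NodesByName(dict):
    """Holds entities keyed by their 'name' attribute."""
    @collection.appender
    def _append(self, node):
        self[node.name] = node
    @collection.remover
    def _remove(self, node):
        del self[node.name]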
Instrumentation and Custom Types
Many custom types and existing library classes can be used as an entity
collection type as-is without further ado. However, it is important to
note that the instrumentation process will modify the type, adding
decorators around methods automatically.
The decorations are lightweight and no-op outside of relationships, but
they do add unneeded overhead when triggered elsewhere. When using a
library class as a collection, it can be good practice to use the
"trivial subclass" trick to restrict the decorations to just your usage
in relationships. For example:
class MyAwesomeList(some.great.library.AwesomeList):
pass
# ... relationship(..., collection_class=MyAwesomeList)
The ORM uses this approach for built-ins, quietly substituting a trivial
subclass when a list, set or dict is used directly.
| # orm/collections.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: allow-untyped-defs, allow-untyped-calls
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass:
#...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
from __future__ import annotations
import operator
import threading
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Iterable
from typing import List
from typing import NoReturn
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import weakref
from.base import NO_KEY
from.. import exc as sa_exc
from.. import util
from..sql.base import NO_ARG
from..util.compat import inspect_getfullargspec
from..util.typing import Protocol
if typing.TYPE_CHECKING:
from.attributes import AttributeEventToken
from.attributes import CollectionAttributeImpl
from.mapped_collection import attribute_keyed_dict
from.mapped_collection import column_keyed_dict
from.mapped_collection import keyfunc_mapping
from.mapped_collection import KeyFuncDict # noqa: F401
from.state import InstanceState
__all__ = [
"collection",
"collection_adapter",
"keyfunc_mapping",
"column_keyed_dict",
"attribute_keyed_dict",
"column_keyed_dict",
"attribute_keyed_dict",
"MappedCollection",
"KeyFuncDict",
]
__instrumentation_mutex = threading.Lock()
_CollectionFactoryType = Callable[[], "_AdaptedCollectionProtocol"]
_T = TypeVar("_T", bound=Any)
_KT = TypeVar("_KT", bound=Any)
_VT = TypeVar("_VT", bound=Any)
_COL = TypeVar("_COL", bound="Collection[Any]")
_FN = TypeVar("_FN", bound="Callable[..., Any]")
class _CollectionConverterProtocol(Protocol):
def __call__(self, collection: _COL) -> _COL:
...
class _AdaptedCollectionProtocol(Protocol):
_sa_adapter: CollectionAdapter
_sa_appender: Callable[..., Any]
_sa_remover: Callable[..., Any]
_sa_iterator: Callable[..., Iterable[Any]]
_sa_converter: _CollectionConverterProtocol
class collection:
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append):...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity):...
@collection.removes_return()
def popitem(self):...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append):...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append):...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "appender"
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity):...
# or, equivalently
@collection.remover
@collection.removes_return()
def zap(self, ):...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "remover"
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self):...
"""
fn._sa_instrument_role = "iterator"
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items):...
"""
fn._sa_instrumented = True
return fn
@staticmethod
@util.deprecated(
"1.3",
"The :meth:`.collection.converter` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":class:`.AttributeEvents.bulk_replace` listener interface in "
"conjunction with the :func:`.event.listen` function.",
)
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
method. A converter must not assign values or mutate the collection,
its sole job is to adapt the value the user provides into an iterable
of values for the ORM's use.
The default converter implementation will use duck-typing to do the
        conversion. A dict-like collection will be converted into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other):...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
"""
fn._sa_instrument_role = "converter"
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item):...
@collection.adds('entity')
def do_stuff(self, thing, entity=None):...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
holds the SQLAlchemy-relevant value to be added, and return value, if
any will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item):...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
@staticmethod
def removes(arg):
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value to be removed. Arguments can be specified positionally (i.e.
integer) or by name::
@collection.removes(1)
def zap(self, item):...
For methods where the value to remove is not known at call-time, use
collection.removes_return.
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_remove_event", arg)
return fn
return decorator
@staticmethod
def removes_return():
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The return
value of the method, if any, is considered the value to remove. The
method arguments are not inspected::
@collection.removes_return()
def pop(self):...
For methods where the value to remove is known at call-time, use
collection.removes.
"""
def decorator(fn):
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
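# --- Editor's illustration (standalone sketch, not part of this module) ---
# The decorators above are normally combined on a user-defined collection
# class; the class name and storage attribute below are illustrative only.
from sqlalchemy.orm.collections import collection

class MyBag:
    """A minimal list-like collection class usable as ``collection_class``."""

    def __init__(self):
        self._data = []

    @collection.appender
    def append(self, item):
        self._data.append(item)

    @collection.remover
    def remove(self, item):
        self._data.remove(item)

    @collection.iterator
    def __iter__(self):
        return iter(self._data)

# used e.g. as relationship(Child, collection_class=MyBag)
# --- end of editor's illustration ---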
if TYPE_CHECKING:
def collection_adapter(collection: Collection[Any]) -> CollectionAdapter:
"""Fetch the :class:`.CollectionAdapter` for a collection."""
else:
collection_adapter = operator.attrgetter("_sa_adapter")
class CollectionAdapter:
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"_converter",
"invalidated",
"empty",
)
attr: CollectionAttributeImpl
_key: str
# this is actually a weakref; see note in constructor
_data: Callable[..., _AdaptedCollectionProtocol]
owner_state: InstanceState[Any]
_converter: _CollectionConverterProtocol
invalidated: bool
empty: bool
def __init__(
self,
attr: CollectionAttributeImpl,
owner_state: InstanceState[Any],
data: _AdaptedCollectionProtocol,
):
self.attr = attr
self._key = attr.key
# this weakref stays referenced throughout the lifespan of
# CollectionAdapter. so while the weakref can return None, this
# is realistically only during garbage collection of this object, so
# we type this as a callable that returns _AdaptedCollectionProtocol
# in all cases.
self._data = weakref.ref(data) # type: ignore
self.owner_state = owner_state
data._sa_adapter = self
self._converter = data._sa_converter
self.invalidated = False
self.empty = False
def _warn_invalidated(self) -> None:
util.warn("This collection has been invalidated.")
@property
def data(self) -> _AdaptedCollectionProtocol:
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self) -> bool:
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(
self, item: Any, initiator: Optional[AttributeEventToken] = None
) -> None:
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def _set_empty(self, user_data):
assert (
not self.empty
), "This collection adapter is already in the 'empty' state"
self.empty = True
self.owner_state._empty_collections[self._key] = user_data
def _reset_empty(self) -> None:
assert (
self.empty
), "This collection adapter is not in the 'empty' state"
self.empty = False
self.owner_state.dict[
self._key
] = self.owner_state._empty_collections.pop(self._key)
def _refuse_empty(self) -> NoReturn:
raise sa_exc.InvalidRequestError(
"This is a special 'empty' collection which cannot accommodate "
"internal mutation operations"
)
def append_without_event(self, item: Any) -> None:
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items: Iterable[Any]) -> None:
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(
self, item: Any, initiator: Optional[AttributeEventToken] = None
) -> None:
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item: Any) -> None:
"""Remove an entity from the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(
self, initiator: Optional[AttributeEventToken] = None
) -> None:
"""Empty the collection, firing a mutation event for each entity."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self) -> None:
"""Empty the collection, firing no events."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
def _fire_append_wo_mutation_event_bulk(
self, items, initiator=None, key=NO_KEY
):
if not items:
return
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
for item in items:
self.attr.fire_append_wo_mutation_event(
self.owner_state,
self.owner_state.dict,
item,
initiator,
key,
)
def fire_append_wo_mutation_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity is entering the collection but is already
present.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
.. versionadded:: 1.4.15
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_wo_mutation_event(
self.owner_state, self.owner_state.dict, item, initiator, key
)
else:
return item
def fire_append_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator, key
)
else:
return item
def _fire_remove_event_bulk(self, items, initiator=None, key=NO_KEY):
if not items:
return
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
for item in items:
self.attr.fire_remove_event(
self.owner_state,
self.owner_state.dict,
item,
initiator,
key,
)
def fire_remove_event(self, item, initiator=None, key=NO_KEY):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator, key
)
def fire_pre_remove_event(self, initiator=None, key=NO_KEY):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state,
self.owner_state.dict,
initiator=initiator,
key=key,
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
"empty": self.empty,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
# see note in constructor regarding this type: ignore
self._data = weakref.ref(d["data"]) # type: ignore
self._converter = d["data"]._sa_converter
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
self.empty = d.get("empty", False)
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
existing_adapter._fire_append_wo_mutation_event_bulk(
constants, initiator=initiator
)
existing_adapter._fire_remove_event_bulk(removals, initiator=initiator)
def prepare_instrumentation(
factory: Union[Type[Collection[Any]], _CollectionFactoryType],
) -> _CollectionFactoryType:
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
"""
impl_factory: _CollectionFactoryType
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
impl_factory = __canned_instrumentation[factory]
else:
impl_factory = cast(_CollectionFactoryType, factory)
cls: Union[_CollectionFactoryType, Type[Collection[Any]]]
# Create a specimen
cls = type(impl_factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# if so, just convert.
# in previous major releases, this codepath wasn't working and was
# not covered by tests. prior to that it supplied a "wrapper"
# function that would return the class, though the rationale for this
# case is not known
impl_factory = __canned_instrumentation[cls]
cls = type(impl_factory())
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None)!= id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return impl_factory
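# --- Editor's illustration (standalone sketch, not part of this module) ---
# prepare_instrumentation() maps the plain builtins onto their instrumented
# counterparts defined later in this module.
from sqlalchemy.orm.collections import InstrumentedList, prepare_instrumentation

factory = prepare_instrumentation(list)
assert factory is InstrumentedList      # builtin list -> InstrumentedList
assert isinstance(factory(), list)      # instances still behave like lists
# --- end of editor's illustration ---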
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles: Dict[str, str] = {}
methods: Dict[str, Tuple[Optional[str], Optional[int], Optional[str]]] = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in (
"appender",
"remover",
"iterator",
"converter",
)
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before: Optional[Tuple[str, int]] = None
after: Optional[str] = None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
assert collection_type is not None
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
"""ensure all roles are present, and apply implicit instrumentation if
needed
"""
if "appender" not in roles or not hasattr(cls, roles["appender"]):
raise sa_exc.ArgumentError(
"Type %s must elect an appender method to be "
"a collection class" % cls.__name__
)
elif roles["appender"] not in methods and not hasattr(
getattr(cls, roles["appender"]), "_sa_instrumented"
):
methods[roles["appender"]] = ("fire_append_event", 1, None)
if "remover" not in roles or not hasattr(cls, roles["remover"]):
raise sa_exc.ArgumentError(
"Type %s must elect a remover method to be "
"a collection class" % cls.__name__
)
elif roles["remover"] not in methods and not hasattr(
getattr(cls, roles["remover"]), "_sa_instrumented"
):
methods[roles["remover"]] = ("fire_remove_event", 1, None)
if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
raise sa_exc.ArgumentError(
"Type %s must elect an iterator method to be "
"a collection class" % cls.__name__
)
def _set_collection_attributes(cls, roles, methods):
"""apply ad-hoc instrumentation from decorators, class-level defaults
and implicit role declarations
"""
for method_name, (before, argument, after) in methods.items():
setattr(
cls,
method_name,
_instrument_membership_mutator(
getattr(cls, method_name), before, argument, after
),
)
# intern the role map
for role, method_name in roles.items():
setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
cls._sa_adapter = None
if not hasattr(cls, "_sa_converter"):
cls._sa_converter = None
cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
"""Route method args and/or return value through the collection
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
fn_args = list(
util.flatten_iterator(inspect_getfullargspec(method)[0])
)
if isinstance(argument, int):
pos_arg = argument
named_arg = len(fn_args) > argument and fn_args[argument] or None
else:
if argument in fn_args:
pos_arg = fn_args.index(argument)
else:
pos_arg = None
named_arg = argument
del fn_args
def wrapper(*args, **kw):
if before:
if pos_arg is None:
if named_arg not in kw:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
value = kw[named_arg]
else:
if len(args) > pos_arg:
value = args[pos_arg]
elif named_arg in kw:
value = kw[named_arg]
else:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
initiator = kw.pop("_sa_initiator", None)
if initiator is False:
executor = None
else:
executor = args[0]._sa_adapter
if before and executor:
getattr(executor, before)(value, initiator)
if not after or not executor:
return method(*args, **kw)
else:
res = method(*args, **kw)
if res is not None:
getattr(executor, after)(res, initiator)
return res
wrapper._sa_instrumented = True # type: ignore[attr-defined]
if hasattr(method, "_sa_instrument_role"):
wrapper._sa_instrument_role = method._sa_instrument_role # type: ignore[attr-defined] # noqa: E501
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
return wrapper
def __set_wo_mutation(collection, item, _sa_initiator=None):
"""Run set wo mutation events.
The collection is not mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_append_wo_mutation_event(
item, _sa_initiator, key=None
)
def __set(collection, item, _sa_initiator, key):
"""Run set events.
This event always occurs before the collection is actually mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
item = executor.fire_append_event(item, _sa_initiator, key=key)
return item
def __del(collection, item, _sa_initiator, key):
"""Run del events.
This event occurs before the collection is actually mutated, *except*
in the case of a pop operation, in which case it occurs afterwards.
For pop operations, the __before_pop hook is called before the
operation occurs.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_remove_event(item, _sa_initiator, key=key)
def __before_pop(collection, _sa_initiator=None):
"""An event which occurs on a before a pop() operation occurs."""
executor = collection._sa_adapter
if executor:
executor.fire_pre_remove_event(_sa_initiator)
def _list_decorators() -> Dict[str, Callable[[_FN], _FN]]:
"""Tailored instrumentation wrappers for any list-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(list, fn.__name__).__doc__
def append(fn):
def append(self, item, _sa_initiator=None):
item = __set(self, item, _sa_initiator, NO_KEY)
fn(self, item)
_tidy(append)
return append
def remove(fn):
def remove(self, value, _sa_initiator=None):
__del(self, value, _sa_initiator, NO_KEY)
# testlib.pragma exempt:__eq__
fn(self, value)
_tidy(remove)
return remove
def insert(fn):
def insert(self, index, value):
value = __set(self, value, None, index)
fn(self, index, value)
_tidy(insert)
return insert
def __setitem__(fn):
def __setitem__(self, index, value):
if not isinstance(index, slice):
existing = self[index]
if existing is not None:
__del(self, existing, None, index)
value = __set(self, value, None, index)
fn(self, index, value)
else:
# slice assignment requires __delitem__, insert, __len__
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
if index.stop is not None:
stop = index.stop
else:
stop = len(self)
if stop < 0:
stop += len(self)
if step == 1:
if value is self:
return
for i in range(start, stop, step):
if len(self) > start:
del self[start]
for i, item in enumerate(value):
self.insert(i + start, item)
else:
rng = list(range(start, stop, step))
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(value), len(rng))
)
for i, item in zip(rng, value):
self.__setitem__(i, item)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, index):
if not isinstance(index, slice):
item = self[index]
__del(self, item, None, index)
fn(self, index)
else:
# slice deletion requires __getslice__ and a slice-groking
# __getitem__ for stepped deletion
# note: not breaking this into atomic dels
for item in self[index]:
__del(self, item, None, index)
fn(self, index)
_tidy(__delitem__)
return __delitem__
def extend(fn):
def extend(self, iterable):
for value in list(iterable):
self.append(value)
_tidy(extend)
return extend
def __iadd__(fn):
def __iadd__(self, iterable):
# list.__iadd__ takes any iterable and seems to let TypeError
# raise as-is instead of returning NotImplemented
for value in list(iterable):
self.append(value)
return self
_tidy(__iadd__)
return __iadd__
def pop(fn):
def pop(self, index=-1):
__before_pop(self)
item = fn(self, index)
__del(self, item, None, index)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self, index=-1):
for item in self:
__del(self, item, None, index)
fn(self)
_tidy(clear)
return clear
# __imul__ : not wrapping this. all members of the collection are already
# present, so no need to fire appends... wrapping it with an explicit
# decorator is still possible, so events on *= can be had if they're
# desired. hard to imagine a use case for __imul__, though.
l = locals().copy()
l.pop("_tidy")
return l
def _dict_decorators() -> Dict[str, Callable[[_FN], _FN]]:
"""Tailored instrumentation wrappers for any dict-like mapping class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(dict, fn.__name__).__doc__
def __setitem__(fn):
def __setitem__(self, key, value, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator, key)
value = __set(self, value, _sa_initiator, key)
fn(self, key, value)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, key, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator, key)
fn(self, key)
_tidy(__delitem__)
return __delitem__
def clear(fn):
def clear(self):
for key in self:
__del(self, self[key], None, key)
fn(self)
_tidy(clear)
return clear
def pop(fn):
def pop(self, key, default=NO_ARG):
__before_pop(self)
_to_del = key in self
if default is NO_ARG:
item = fn(self, key)
else:
item = fn(self, key, default)
if _to_del:
__del(self, item, None, key)
return item
_tidy(pop)
return pop
def popitem(fn):
def popitem(self):
__before_pop(self)
item = fn(self)
__del(self, item[1], None, 1)
return item
_tidy(popitem)
return popitem
def setdefault(fn):
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return default
else:
value = self.__getitem__(key)
if value is default:
__set_wo_mutation(self, value, None)
return value
_tidy(setdefault)
return setdefault
def update(fn):
def update(self, __other=NO_ARG, **kw):
if __other is not NO_ARG:
if hasattr(__other, "keys"):
for key in list(__other):
if key not in self or self[key] is not __other[key]:
self[key] = __other[key]
else:
__set_wo_mutation(self, __other[key], None)
else:
for key, value in __other:
if key not in self or self[key] is not value:
self[key] = value
else:
__set_wo_mutation(self, value, None)
for key in kw:
if key not in self or self[key] is not kw[key]:
self[key] = kw[key]
else:
__set_wo_mutation(self, kw[key], None)
_tidy(update)
return update
l = locals().copy()
l.pop("_tidy")
return l
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self: Any, obj: Any) -> bool:
"""Allow only set, frozenset and self.__class__-derived
objects in binops."""
return isinstance(obj, _set_binop_bases + (self.__class__,))
def _set_binops_check_loose(self: Any, obj: Any) -> bool:
"""Allow anything set-like to participate in set binops."""
return (
isinstance(obj, _set_binop_bases + (self.__class__,))
or util.duck_type_collection(obj) == set
)
def _set_decorators() -> Dict[str, Callable[[_FN], _FN]]:
"""Tailored instrumentation wrappers for any set-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(set, fn.__name__).__doc__
def add(fn):
def add(self, value, _sa_initiator=None):
if value not in self:
value = __set(self, value, _sa_initiator, NO_KEY)
else:
__set_wo_mutation(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(add)
return add
def discard(fn):
def discard(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator, NO_KEY)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(discard)
return discard
def remove(fn):
def remove(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator, NO_KEY)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(remove)
return remove
def pop(fn):
def pop(self):
__before_pop(self)
item = fn(self)
# for set in particular, we have no way to access the item
# that will be popped before pop is called.
__del(self, item, None, NO_KEY)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self):
for item in list(self):
self.remove(item)
_tidy(clear)
return clear
def update(fn):
def update(self, value):
for item in value:
self.add(item)
_tidy(update)
return update
def __ior__(fn):
def __ior__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.add(item)
return self
_tidy(__ior__)
return __ior__
def difference_update(fn):
def difference_update(self, value):
for item in value:
self.discard(item)
_tidy(difference_update)
return difference_update
def __isub__(fn):
def __isub__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.discard(item)
return self
_tidy(__isub__)
return __isub__
def intersection_update(fn):
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(intersection_update)
return intersection_update
def __iand__(fn):
def __iand__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__iand__)
return __iand__
def symmetric_difference_update(fn):
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(symmetric_difference_update)
return symmetric_difference_update
def __ixor__(fn):
def __ixor__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__ixor__)
return __ixor__
l = locals().copy()
l.pop("_tidy")
return l
class InstrumentedList(List[_T]):
"""An instrumented version of the built-in list."""
class InstrumentedSet(Set[_T]):
"""An instrumented version of the built-in set."""
class InstrumentedDict(Dict[_KT, _VT]):
"""An instrumented version of the built-in dict."""
__canned_instrumentation: util.immutabledict[
Any, _CollectionFactoryType
] = util.immutabledict(
{
list: InstrumentedList,
set: InstrumentedSet,
dict: InstrumentedDict,
}
)
__interfaces: util.immutabledict[
Any,
Tuple[
Dict[str, str],
Dict[str, Callable[..., Any]],
],
] = util.immutabledict(
{
list: (
{
"appender": "append",
"remover": "remove",
"iterator": "__iter__",
},
_list_decorators(),
),
set: (
{"appender": "add", "remover": "remove", "iterator": "__iter__"},
_set_decorators(),
),
# decorators are required for dicts and object collections.
dict: ({"iterator": "values"}, _dict_decorators()),
}
)
def __go(lcls):
global keyfunc_mapping, mapped_collection
global column_keyed_dict, column_mapped_collection
global MappedCollection, KeyFuncDict
global attribute_keyed_dict, attribute_mapped_collection
from .mapped_collection import keyfunc_mapping
from .mapped_collection import column_keyed_dict
from .mapped_collection import attribute_keyed_dict
from .mapped_collection import KeyFuncDict
from .mapped_collection import mapped_collection
from .mapped_collection import column_mapped_collection
from .mapped_collection import attribute_mapped_collection
from .mapped_collection import MappedCollection
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
_instrument_class(KeyFuncDict)
__go(locals()) |
|
sqlalchemy__sqlalchemy | events.rst | Module doc | Generate documentation for this module | MIT License | sqlalchemy__sqlalchemy/doc/build/orm/events.rst | [
"sqlalchemy__sqlalchemy/lib/sqlalchemy/orm/instrumentation.py"
] | ORM Events
The ORM includes a wide variety of hooks available for subscription.
For an introduction to the most commonly used ORM events, see the
section session_events_toplevel. The event system in general is
discussed at event_toplevel. Non-ORM events such as those regarding
connections and low-level statement execution are described in
core_event_toplevel.
Session Events
The most basic event hooks are available at the level of the ORM
_orm.Session object. The types of things that are intercepted here
include:
- Persistence Operations - the ORM flush process that sends changes to
the database can be extended using events that fire off at different
parts of the flush, to augment or modify the data being sent to the
database or to allow other things to happen when persistence occurs.
Read more about persistence events at session_persistence_events.
- Object lifecycle events - hooks when objects are added, persisted,
deleted from sessions. Read more about these at
session_lifecycle_events.
- Execution Events - Part of the 2.0 style execution model, all SELECT
statements emitted against ORM entities, as well as bulk UPDATE and
DELETE statements outside of the flush process, are intercepted from
the _orm.Session.execute method using the
_orm.SessionEvents.do_orm_execute method. Read more about this event
at session_execute_events.
Be sure to read the session_events_toplevel chapter for context on these
events.
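As a brief illustration (a sketch only; the listener bodies are arbitrary),
both a flush-level hook and a 2.0-style execution hook can be registered
against the _orm.Session class using the event.listens_for decorator::

    from sqlalchemy import event
    from sqlalchemy.orm import Session

    @event.listens_for(Session, "before_flush")
    def log_flush(session, flush_context, instances):
        # fires before the flush sends pending changes to the database
        print("flushing:", session.new, session.dirty, session.deleted)

    @event.listens_for(Session, "do_orm_execute")
    def intercept_execute(orm_execute_state):
        # fires for ORM-level statement executions (2.0 style)
        if orm_execute_state.is_select:
            print("issuing a SELECT")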
Mapper Events
Mapper event hooks encompass things that happen as related to individual
or multiple _orm.Mapper objects, which are the central configurational
object that maps a user-defined class to a _schema.Table object. Types
of things which occur at the _orm.Mapper level include:
- Per-object persistence operations - the most popular mapper hooks
are the unit-of-work hooks such as _orm.MapperEvents.before_insert,
_orm.MapperEvents.after_update, etc. (a brief sketch follows this list).
These events are contrasted with the more coarse-grained session-level
events such as _orm.SessionEvents.before_flush in that they occur within
the flush process on a per-object basis; while finer-grained activity on an
object is more straightforward, availability of _orm.Session
features is limited.
- Mapper configuration events - the other major class of mapper hooks
are those which occur as a class is mapped, as a mapper is
finalized, and when sets of mappers are configured to refer to each
other. These events include _orm.MapperEvents.instrument_class,
_orm.MapperEvents.before_mapper_configured and
_orm.MapperEvents.mapper_configured at the individual _orm.Mapper
level, and _orm.MapperEvents.before_configured and
_orm.MapperEvents.after_configured at the level of collections of
_orm.Mapper objects.
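The sketch below shows a per-object unit-of-work hook; the mapped class,
table and column names are illustrative only::

    import datetime

    from sqlalchemy import Column, DateTime, Integer, String, event
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class User(Base):  # illustrative mapped class
        __tablename__ = "user_account"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        created_at = Column(DateTime)

    @event.listens_for(User, "before_insert")
    def set_created_at(mapper, connection, target):
        # runs within the flush, once per object about to be INSERTed;
        # only modify attributes of the target here, not the Session.
        target.created_at = datetime.datetime.utcnow()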
Instance Events
Instance events are focused on the construction of ORM mapped instances,
including when they are instantiated as transient objects, when they are
loaded from the database and become persistent objects, as well as when
database refresh or expiration operations occur on the object.
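For example, the _orm.InstanceEvents.load hook fires each time an object is
constructed from a database row; a sketch, reusing the illustrative User
class from the previous example::

    from sqlalchemy import event

    @event.listens_for(User, "load")
    def receive_load(target, context):
        # __init__ is not called for loaded objects, so per-load
        # setup can be performed here instead.
        print("loaded", target)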
Attribute Events
Attribute events are triggered as things occur on individual attributes
of ORM mapped objects. These events form the basis for things like
custom validation functions <simple_validators> as well as
backref handlers <relationships_backref>. | # orm/instrumentation.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: allow-untyped-defs, allow-untyped-calls
"""Defines SQLAlchemy's system of class instrumentation.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
instrumentation.py deals with registration of end-user classes
for state tracking. It interacts closely with state.py
and attributes.py which establish per-instance and per-class-attribute
instrumentation, respectively.
The class instrumentation system can be customized on a per-class
or global basis using the :mod:`sqlalchemy.ext.instrumentation`
module, which provides the means to build and specify
alternate instrumentation forms.
.. versionchanged: 0.8
The instrumentation extension system was moved out of the
ORM and into the external :mod:`sqlalchemy.ext.instrumentation`
package. When that package is imported, it installs
itself within sqlalchemy.orm so that its more comprehensive
resolution mechanics take effect.
"""
from __future__ import annotations
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import weakref
from . import base
from . import collections
from . import exc
from . import interfaces
from . import state
from ._typing import _O
from .attributes import _is_collection_attribute_impl
from .. import util
from ..event import EventTarget
from ..util import HasMemoized
from ..util.typing import Literal
from ..util.typing import Protocol
if TYPE_CHECKING:
from ._typing import _RegistryType
from .attributes import AttributeImpl
from .attributes import QueryableAttribute
from .collections import _AdaptedCollectionProtocol
from .collections import _CollectionFactoryType
from .decl_base import _MapperConfig
from .events import InstanceEvents
from .mapper import Mapper
from .state import InstanceState
from ..event import dispatcher
_T = TypeVar("_T", bound=Any)
DEL_ATTR = util.symbol("DEL_ATTR")
class _ExpiredAttributeLoaderProto(Protocol):
def __call__(
self,
state: state.InstanceState[Any],
toload: Set[str],
passive: base.PassiveFlag,
) -> None:
...
class _ManagerFactory(Protocol):
def __call__(self, class_: Type[_O]) -> ClassManager[_O]:
...
class ClassManager(
HasMemoized,
Dict[str, "QueryableAttribute[Any]"],
Generic[_O],
EventTarget,
):
"""Tracks state information at the class level."""
dispatch: dispatcher[ClassManager[_O]]
MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR
STATE_ATTR = base.DEFAULT_STATE_ATTR
_state_setter = staticmethod(util.attrsetter(STATE_ATTR))
expired_attribute_loader: _ExpiredAttributeLoaderProto
"previously known as deferred_scalar_loader"
init_method: Optional[Callable[..., None]]
original_init: Optional[Callable[..., None]] = None
factory: Optional[_ManagerFactory]
declarative_scan: Optional[weakref.ref[_MapperConfig]] = None
registry: _RegistryType
if not TYPE_CHECKING:
# starts as None during setup
registry = None
class_: Type[_O]
_bases: List[ClassManager[Any]]
@property
@util.deprecated(
"1.4",
message="The ClassManager.deferred_scalar_loader attribute is now "
"named expired_attribute_loader",
)
def deferred_scalar_loader(self):
return self.expired_attribute_loader
@deferred_scalar_loader.setter # type: ignore[no-redef]
@util.deprecated(
"1.4",
message="The ClassManager.deferred_scalar_loader attribute is now "
"named expired_attribute_loader",
)
def deferred_scalar_loader(self, obj):
self.expired_attribute_loader = obj
def __init__(self, class_):
self.class_ = class_
self.info = {}
self.new_init = None
self.local_attrs = {}
self.originals = {}
self._finalized = False
self.factory = None
self.init_method = None
self._bases = [
mgr
for mgr in cast(
"List[Optional[ClassManager[Any]]]",
[
opt_manager_of_class(base)
for base in self.class_.__bases__
if isinstance(base, type)
],
)
if mgr is not None
]
for base_ in self._bases:
self.update(base_)
cast(
"InstanceEvents", self.dispatch._events
)._new_classmanager_instance(class_, self)
for basecls in class_.__mro__:
mgr = opt_manager_of_class(basecls)
if mgr is not None:
self.dispatch._update(mgr.dispatch)
self.manage()
if "__del__" in class_.__dict__:
util.warn(
"__del__() method on class %s will "
"cause unreachable cycles and memory leaks, "
"as SQLAlchemy instrumentation often creates "
"reference cycles. Please remove this method." % class_
)
def _update_state(
self,
finalize: bool = False,
mapper: Optional[Mapper[_O]] = None,
registry: Optional[_RegistryType] = None,
declarative_scan: Optional[_MapperConfig] = None,
expired_attribute_loader: Optional[
_ExpiredAttributeLoaderProto
] = None,
init_method: Optional[Callable[..., None]] = None,
) -> None:
if mapper:
self.mapper = mapper # type: ignore[assignment]
if registry:
registry._add_manager(self)
if declarative_scan:
self.declarative_scan = weakref.ref(declarative_scan)
if expired_attribute_loader:
self.expired_attribute_loader = expired_attribute_loader
if init_method:
assert not self._finalized, (
"class is already instrumented, "
"init_method %s can't be applied" % init_method
)
self.init_method = init_method
if not self._finalized:
self.original_init = (
self.init_method
if self.init_method is not None
and self.class_.__init__ is object.__init__
else self.class_.__init__
)
if finalize and not self._finalized:
self._finalize()
def _finalize(self) -> None:
if self._finalized:
return
self._finalized = True
self._instrument_init()
_instrumentation_factory.dispatch.class_instrument(self.class_)
def __hash__(self) -> int: # type: ignore[override]
return id(self)
def __eq__(self, other: Any) -> bool:
return other is self
@property
def is_mapped(self) -> bool:
return "mapper" in self.__dict__
@HasMemoized.memoized_attribute
def _all_key_set(self):
return frozenset(self)
@HasMemoized.memoized_attribute
def _collection_impl_keys(self):
return frozenset(
[attr.key for attr in self.values() if attr.impl.collection]
)
@HasMemoized.memoized_attribute
def _scalar_loader_impls(self):
return frozenset(
[
attr.impl
for attr in self.values()
if attr.impl.accepts_scalar_loader
]
)
@HasMemoized.memoized_attribute
def _loader_impls(self):
return frozenset([attr.impl for attr in self.values()])
@util.memoized_property
def mapper(self) -> Mapper[_O]:
# raises unless self.mapper has been assigned
raise exc.UnmappedClassError(self.class_)
def _all_sqla_attributes(self, exclude=None):
"""return an iterator of all classbound attributes that are
implement :class:`.InspectionAttr`.
This includes :class:`.QueryableAttribute` as well as extension
types such as :class:`.hybrid_property` and
:class:`.AssociationProxy`.
"""
found: Dict[str, Any] = {}
# constraints:
# 1. yield keys in cls.__dict__ order
# 2. if a subclass has the same key as a superclass, include that
# key as part of the ordering of the superclass, because an
# overridden key is usually installed by the mapper which is going
# on a different ordering
# 3. don't use getattr() as this fires off descriptors
for supercls in self.class_.__mro__[0:-1]:
inherits = supercls.__mro__[1]
for key in supercls.__dict__:
found.setdefault(key, supercls)
if key in inherits.__dict__:
continue
val = found[key].__dict__[key]
if (
isinstance(val, interfaces.InspectionAttr)
and val.is_attribute
):
yield key, val
def _get_class_attr_mro(self, key, default=None):
"""return an attribute on the class without tripping it."""
for supercls in self.class_.__mro__:
if key in supercls.__dict__:
return supercls.__dict__[key]
else:
return default
def _attr_has_impl(self, key: str) -> bool:
"""Return True if the given attribute is fully initialized.
i.e. has an impl.
"""
return key in self and self[key].impl is not None
def _subclass_manager(self, cls: Type[_T]) -> ClassManager[_T]:
"""Create a new ClassManager for a subclass of this ClassManager's
class.
This is called automatically when attributes are instrumented so that
the attributes can be propagated to subclasses against their own
class-local manager, without the need for mappers etc. to have already
pre-configured managers for the full class hierarchy. Mappers
can post-configure the auto-generated ClassManager when needed.
"""
return register_class(cls, finalize=False)
def _instrument_init(self):
self.new_init = _generate_init(self.class_, self, self.original_init)
self.install_member("__init__", self.new_init)
@util.memoized_property
def _state_constructor(self) -> Type[state.InstanceState[_O]]:
self.dispatch.first_init(self, self.class_)
return state.InstanceState
def manage(self):
"""Mark this instance as the manager for its class."""
setattr(self.class_, self.MANAGER_ATTR, self)
@util.hybridmethod
def manager_getter(self):
return _default_manager_getter
@util.hybridmethod
def state_getter(self):
"""Return a (instance) -> InstanceState callable.
"state getter" callables should raise either KeyError or
AttributeError if no InstanceState could be found for the
instance.
"""
return _default_state_getter
@util.hybridmethod
def dict_getter(self):
return _default_dict_getter
def instrument_attribute(
self,
key: str,
inst: QueryableAttribute[Any],
propagated: bool = False,
) -> None:
if propagated:
if key in self.local_attrs:
return # don't override local attr with inherited attr
else:
self.local_attrs[key] = inst
self.install_descriptor(key, inst)
self._reset_memoizations()
self[key] = inst
for cls in self.class_.__subclasses__():
manager = self._subclass_manager(cls)
manager.instrument_attribute(key, inst, True)
def subclass_managers(self, recursive):
for cls in self.class_.__subclasses__():
mgr = opt_manager_of_class(cls)
if mgr is not None and mgr is not self:
yield mgr
if recursive:
yield from mgr.subclass_managers(True)
def post_configure_attribute(self, key):
_instrumentation_factory.dispatch.attribute_instrument(
self.class_, key, self[key]
)
def uninstrument_attribute(self, key, propagated=False):
if key not in self:
return
if propagated:
if key in self.local_attrs:
return # don't get rid of local attr
else:
del self.local_attrs[key]
self.uninstall_descriptor(key)
self._reset_memoizations()
del self[key]
for cls in self.class_.__subclasses__():
manager = opt_manager_of_class(cls)
if manager:
manager.uninstrument_attribute(key, True)
def unregister(self) -> None:
"""remove all instrumentation established by this ClassManager."""
for key in list(self.originals):
self.uninstall_member(key)
self.mapper = None # type: ignore
self.dispatch = None # type: ignore
self.new_init = None
self.info.clear()
for key in list(self):
if key in self.local_attrs:
self.uninstrument_attribute(key)
if self.MANAGER_ATTR in self.class_.__dict__:
delattr(self.class_, self.MANAGER_ATTR)
def install_descriptor(
self, key: str, inst: QueryableAttribute[Any]
) -> None:
if key in (self.STATE_ATTR, self.MANAGER_ATTR):
raise KeyError(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % key
)
setattr(self.class_, key, inst)
def uninstall_descriptor(self, key: str) -> None:
delattr(self.class_, key)
def install_member(self, key: str, implementation: Any) -> None:
if key in (self.STATE_ATTR, self.MANAGER_ATTR):
raise KeyError(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % key
)
self.originals.setdefault(key, self.class_.__dict__.get(key, DEL_ATTR))
setattr(self.class_, key, implementation)
def uninstall_member(self, key: str) -> None:
original = self.originals.pop(key, None)
if original is not DEL_ATTR:
setattr(self.class_, key, original)
else:
delattr(self.class_, key)
def instrument_collection_class(
self, key: str, collection_class: Type[Collection[Any]]
) -> _CollectionFactoryType:
return collections.prepare_instrumentation(collection_class)
def initialize_collection(
self,
key: str,
state: InstanceState[_O],
factory: _CollectionFactoryType,
) -> Tuple[collections.CollectionAdapter, _AdaptedCollectionProtocol]:
user_data = factory()
impl = self.get_impl(key)
assert _is_collection_attribute_impl(impl)
adapter = collections.CollectionAdapter(impl, state, user_data)
return adapter, user_data
def is_instrumented(self, key: str, search: bool = False) -> bool:
if search:
return key in self
else:
return key in self.local_attrs
def get_impl(self, key: str) -> AttributeImpl:
return self[key].impl
@property
def attributes(self) -> Iterable[Any]:
return iter(self.values())
# InstanceState management
def new_instance(self, state: Optional[InstanceState[_O]] = None) -> _O:
# here, we would prefer _O to be bound to "object"
# so that mypy sees that __new__ is present. currently
# it's bound to Any as there were other problems not having
# it that way but these can be revisited
instance = self.class_.__new__(self.class_) # type: ignore
if state is None:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
return instance # type: ignore[no-any-return]
def setup_instance(
self, instance: _O, state: Optional[InstanceState[_O]] = None
) -> None:
if state is None:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
def teardown_instance(self, instance: _O) -> None:
delattr(instance, self.STATE_ATTR)
def _serialize(
self, state: InstanceState[_O], state_dict: Dict[str, Any]
) -> _SerializeManager:
return _SerializeManager(state, state_dict)
def _new_state_if_none(
self, instance: _O
) -> Union[Literal[False], InstanceState[_O]]:
"""Install a default InstanceState if none is present.
A private convenience method used by the __init__ decorator.
"""
if hasattr(instance, self.STATE_ATTR):
return False
elif self.class_ is not instance.__class__ and self.is_mapped:
# this will create a new ClassManager for the
# subclass, without a mapper. This is likely a
# user error situation but allow the object
# to be constructed, so that it is usable
# in a non-ORM context at least.
return self._subclass_manager(
instance.__class__
)._new_state_if_none(instance)
else:
state = self._state_constructor(instance, self)
self._state_setter(instance, state)
return state
def has_state(self, instance: _O) -> bool:
return hasattr(instance, self.STATE_ATTR)
def has_parent(
self, state: InstanceState[_O], key: str, optimistic: bool = False
) -> bool:
"""TODO"""
return self.get_impl(key).hasparent(state, optimistic=optimistic)
def __bool__(self) -> bool:
"""All ClassManagers are non-zero regardless of attribute state."""
return True
def __repr__(self) -> str:
return "<%s of %r at %x>" % (
self.__class__.__name__,
self.class_,
id(self),
)
class _SerializeManager:
"""Provide serialization of a :class:`.ClassManager`.
The :class:`.InstanceState` uses ``__init__()`` on serialize
and ``__call__()`` on deserialize.
"""
def __init__(self, state: state.InstanceState[Any], d: Dict[str, Any]):
self.class_ = state.class_
manager = state.manager
manager.dispatch.pickle(state, d)
def __call__(self, state, inst, state_dict):
state.manager = manager = opt_manager_of_class(self.class_)
if manager is None:
raise exc.UnmappedInstanceError(
inst,
"Cannot deserialize object of type %r - "
"no mapper() has "
"been configured for this class within the current "
"Python process!" % self.class_,
)
elif manager.is_mapped and not manager.mapper.configured:
manager.mapper._check_configure()
# setup _sa_instance_state ahead of time so that
# unpickle events can access the object normally.
# see [ticket:2362]
if inst is not None:
manager.setup_instance(inst, state)
manager.dispatch.unpickle(state, state_dict)
class InstrumentationFactory(EventTarget):
"""Factory for new ClassManager instances."""
dispatch: dispatcher[InstrumentationFactory]
def create_manager_for_cls(self, class_: Type[_O]) -> ClassManager[_O]:
assert class_ is not None
assert opt_manager_of_class(class_) is None
# give a more complicated subclass
# a chance to do what it wants here
manager, factory = self._locate_extended_factory(class_)
if factory is None:
factory = ClassManager
manager = ClassManager(class_)
else:
assert manager is not None
self._check_conflicts(class_, factory)
manager.factory = factory
return manager
def _locate_extended_factory(
self, class_: Type[_O]
) -> Tuple[Optional[ClassManager[_O]], Optional[_ManagerFactory]]:
"""Overridden by a subclass to do an extended lookup."""
return None, None
def _check_conflicts(
self, class_: Type[_O], factory: Callable[[Type[_O]], ClassManager[_O]]
) -> None:
"""Overridden by a subclass to test for conflicting factories."""
def unregister(self, class_: Type[_O]) -> None:
manager = manager_of_class(class_)
manager.unregister()
self.dispatch.class_uninstrument(class_)
# this attribute is replaced by sqlalchemy.ext.instrumentation
# when imported.
_instrumentation_factory = InstrumentationFactory()
# these attributes are replaced by sqlalchemy.ext.instrumentation
# when a non-standard InstrumentationManager class is first
# used to instrument a class.
instance_state = _default_state_getter = base.instance_state
instance_dict = _default_dict_getter = base.instance_dict
manager_of_class = _default_manager_getter = base.manager_of_class
opt_manager_of_class = _default_opt_manager_getter = base.opt_manager_of_class
def register_class(
class_: Type[_O],
finalize: bool = True,
mapper: Optional[Mapper[_O]] = None,
registry: Optional[_RegistryType] = None,
declarative_scan: Optional[_MapperConfig] = None,
expired_attribute_loader: Optional[_ExpiredAttributeLoaderProto] = None,
init_method: Optional[Callable[..., None]] = None,
) -> ClassManager[_O]:
"""Register class instrumentation.
Returns the existing or newly created class manager.
"""
manager = opt_manager_of_class(class_)
if manager is None:
manager = _instrumentation_factory.create_manager_for_cls(class_)
manager._update_state(
mapper=mapper,
registry=registry,
declarative_scan=declarative_scan,
expired_attribute_loader=expired_attribute_loader,
init_method=init_method,
finalize=finalize,
)
return manager
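# --- Editor's illustration (standalone sketch, not part of this module) ---
# register_class() is normally invoked indirectly by the mapper machinery;
# calling it directly instruments an otherwise plain class.
from sqlalchemy.orm import instrumentation

class Plain:
    pass

manager = instrumentation.register_class(Plain)
assert instrumentation.manager_of_class(Plain) is manager
assert not manager.is_mapped   # instrumented, but no mapper configured yet
# --- end of editor's illustration ---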
def unregister_class(class_):
"""Unregister class instrumentation."""
_instrumentation_factory.unregister(class_)
def is_instrumented(instance, key):
"""Return True if the given attribute on the given instance is
instrumented by the attributes package.
This function may be used regardless of instrumentation
applied directly to the class, i.e. no descriptors are required.
"""
return manager_of_class(instance.__class__).is_instrumented(
key, search=True
)
def _generate_init(class_, class_manager, original_init):
"""Build an __init__ decorator that triggers ClassManager events."""
# TODO: we should use the ClassManager's notion of the
# original '__init__' method, once ClassManager is fixed
# to always reference that.
if original_init is None:
original_init = class_.__init__
# Go through some effort here and don't change the user's __init__
# calling signature, including the unlikely case that it has
# a return value.
# FIXME: need to juggle local names to avoid constructor argument
# clashes.
func_body = """\
def __init__(%(apply_pos)s):
new_state = class_manager._new_state_if_none(%(self_arg)s)
if new_state:
return new_state._initialize_instance(%(apply_kw)s)
else:
return original_init(%(apply_kw)s)
"""
func_vars = util.format_argspec_init(original_init, grouped=False)
func_text = func_body % func_vars
func_defaults = getattr(original_init, "__defaults__", None)
func_kw_defaults = getattr(original_init, "__kwdefaults__", None)
env = locals().copy()
env["__name__"] = __name__
exec(func_text, env)
__init__ = env["__init__"]
__init__.__doc__ = original_init.__doc__
__init__._sa_original_init = original_init
if func_defaults:
__init__.__defaults__ = func_defaults
if func_kw_defaults:
__init__.__kwdefaults__ = func_kw_defaults
return __init__ |
|
sqlalchemy__sqlalchemy | visitors.rst | Module doc | Generate documentation for this module | MIT License | sqlalchemy__sqlalchemy/doc/build/core/visitors.rst | [
"sqlalchemy__sqlalchemy/lib/sqlalchemy/sql/visitors.py"
] | Visitor and Traversal Utilities
The sqlalchemy.sql.visitors module consists of classes and functions
that serve the purpose of generically traversing a Core SQL expression
structure. This is not unlike the Python ast module in that it presents
a system by which a program can operate upon each component of a SQL
expression. Common purposes this serves are locating various kinds of
elements such as _schema.Table or .BindParameter objects, as well as
altering the state of the structure such as replacing certain FROM
clauses with others.
Note
the sqlalchemy.sql.visitors module is an internal API and is not fully
public. It is subject to change and may additionally not function as
expected for use patterns that aren't considered within SQLAlchemy's own
internals.
The sqlalchemy.sql.visitors module is part of the internals of
SQLAlchemy and it is not usually used by calling application code. It is
however used in certain edge cases such as when constructing caching
routines as well as when building out custom SQL expressions using the
Custom SQL Constructs and Compilation Extension <sqlalchemy.ext.compiler_toplevel>.
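As a brief, hedged illustration (the statement itself is an arbitrary
example), the iterate and traverse functions defined in this module can be
used to walk an expression and collect elements by their visit name::

    from sqlalchemy import column, select
    from sqlalchemy.sql import visitors

    stmt = select(column("q")).where(column("q") == 5)

    # iterate() yields every element of the structure
    for elem in visitors.iterate(stmt, {}):
        print(type(elem).__name__)

    # traverse() dispatches on each element's __visit_name__
    found = []
    visitors.traverse(stmt, {}, {"bindparam": found.append})
    print(found)  # the BindParameter produced by "== 5"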
| # sql/visitors.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
"""
from __future__ import annotations
from collections import deque
from enum import Enum
import itertools
import operator
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import ClassVar
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import overload
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from .. import exc
from .. import util
from ..util import langhelpers
from ..util._has_cy import HAS_CYEXTENSION
from ..util.typing import Literal
from ..util.typing import Protocol
from ..util.typing import Self
if TYPE_CHECKING:
from .annotation import _AnnotationDict
from .elements import ColumnElement
if typing.TYPE_CHECKING or not HAS_CYEXTENSION:
from ._py_util import prefix_anon_map as prefix_anon_map
from ._py_util import cache_anon_map as anon_map
else:
from sqlalchemy.cyextension.util import ( # noqa: F401,E501
prefix_anon_map as prefix_anon_map,
)
from sqlalchemy.cyextension.util import ( # noqa: F401,E501
cache_anon_map as anon_map,
)
__all__ = [
"iterate",
"traverse_using",
"traverse",
"cloned_traverse",
"replacement_traverse",
"Visitable",
"ExternalTraversal",
"InternalTraversal",
"anon_map",
]
class _CompilerDispatchType(Protocol):
def __call__(_self, self: Visitable, visitor: Any, **kw: Any) -> Any:
...
class Visitable:
"""Base class for visitable objects.
:class:`.Visitable` is used to implement the SQL compiler dispatch
functions. Other forms of traversal such as for cache key generation
are implemented separately using the :class:`.HasTraverseInternals`
interface.
.. versionchanged:: 2.0 The :class:`.Visitable` class was named
:class:`.Traversible` in the 1.4 series; the name is changed back
to :class:`.Visitable` in 2.0 which is what it was prior to 1.4.
Both names remain importable in both 1.4 and 2.0 versions.
"""
__slots__ = ()
__visit_name__: str
_original_compiler_dispatch: _CompilerDispatchType
if typing.TYPE_CHECKING:
def _compiler_dispatch(self, visitor: Any, **kw: Any) -> str:
...
def __init_subclass__(cls) -> None:
if "__visit_name__" in cls.__dict__:
cls._generate_compiler_dispatch()
super().__init_subclass__()
@classmethod
def _generate_compiler_dispatch(cls) -> None:
visit_name = cls.__visit_name__
if "_compiler_dispatch" in cls.__dict__:
# class has a fixed _compiler_dispatch() method.
# copy it to "original" so that we can get it back if
# sqlalchemy.ext.compiles overrides it.
cls._original_compiler_dispatch = cls._compiler_dispatch
return
if not isinstance(visit_name, str):
raise exc.InvalidRequestError(
f"__visit_name__ on class {cls.__name__} must be a string "
"at the class level"
)
name = "visit_%s" % visit_name
getter = operator.attrgetter(name)
def _compiler_dispatch(
self: Visitable, visitor: Any, **kw: Any
) -> str:
"""Look for an attribute named "visit_<visit_name>" on the
visitor, and call it with the same kw params.
"""
try:
meth = getter(visitor)
except AttributeError as err:
return visitor.visit_unsupported_compilation(self, err, **kw) # type: ignore # noqa: E501
else:
return meth(self, **kw) # type: ignore # noqa: E501
cls._compiler_dispatch = ( # type: ignore
cls._original_compiler_dispatch
) = _compiler_dispatch
def __class_getitem__(cls, key: Any) -> Any:
# allow generic classes in py3.9+
return cls
class InternalTraversal(Enum):
r"""Defines visitor symbols used for internal traversal.
The :class:`.InternalTraversal` class is used in two ways. One is that
it can serve as the superclass for an object that implements the
various visit methods of the class. The other is that the symbols
themselves of :class:`.InternalTraversal` are used within
the ``_traverse_internals`` collection. Such as, the :class:`.Case`
object defines ``_traverse_internals`` as ::
_traverse_internals = [
("value", InternalTraversal.dp_clauseelement),
("whens", InternalTraversal.dp_clauseelement_tuples),
("else_", InternalTraversal.dp_clauseelement),
]
Above, the :class:`.Case` class indicates its internal state as the
attributes named ``value``, ``whens``, and ``else_``. They each
link to an :class:`.InternalTraversal` method which indicates the type
of datastructure referred towards.
Using the ``_traverse_internals`` structure, objects of type
:class:`.HasTraverseInternals` will have the following methods automatically
implemented:
* :meth:`.HasTraverseInternals.get_children`
* :meth:`.HasTraverseInternals._copy_internals`
* :meth:`.HasCacheKey._gen_cache_key`
Subclasses can also implement these methods directly, particularly for the
:meth:`.HasTraverseInternals._copy_internals` method, when special steps
are needed.
.. versionadded:: 1.4
"""
dp_has_cache_key = "HC"
"""Visit a :class:`.HasCacheKey` object."""
dp_has_cache_key_list = "HL"
"""Visit a list of :class:`.HasCacheKey` objects."""
dp_clauseelement = "CE"
"""Visit a :class:`_expression.ClauseElement` object."""
dp_fromclause_canonical_column_collection = "FC"
"""Visit a :class:`_expression.FromClause` object in the context of the
``columns`` attribute.
The column collection is "canonical", meaning it is the originally
defined location of the :class:`.ColumnClause` objects. Right now
this means that the object being visited is a
:class:`_expression.TableClause`
or :class:`_schema.Table` object only.
"""
dp_clauseelement_tuples = "CTS"
"""Visit a list of tuples which contain :class:`_expression.ClauseElement`
objects.
"""
dp_clauseelement_list = "CL"
"""Visit a list of :class:`_expression.ClauseElement` objects.
"""
dp_clauseelement_tuple = "CT"
"""Visit a tuple of :class:`_expression.ClauseElement` objects.
"""
dp_executable_options = "EO"
dp_with_context_options = "WC"
dp_fromclause_ordered_set = "CO"
"""Visit an ordered set of :class:`_expression.FromClause` objects. """
dp_string = "S"
"""Visit a plain string value.
Examples include table and column names, bound parameter keys, special
keywords such as "UNION", "UNION ALL".
The string value is considered to be significant for cache key
generation.
"""
dp_string_list = "SL"
"""Visit a list of strings."""
dp_anon_name = "AN"
"""Visit a potentially "anonymized" string value.
The string value is considered to be significant for cache key
generation.
"""
dp_boolean = "B"
"""Visit a boolean value.
The boolean value is considered to be significant for cache key
generation.
"""
dp_operator = "O"
"""Visit an operator.
The operator is a function from the :mod:`sqlalchemy.sql.operators`
module.
The operator value is considered to be significant for cache key
generation.
"""
dp_type = "T"
"""Visit a :class:`.TypeEngine` object
The type object is considered to be significant for cache key
generation.
"""
dp_plain_dict = "PD"
"""Visit a dictionary with string keys.
The keys of the dictionary should be strings, the values should
be immutable and hashable. The dictionary is considered to be
significant for cache key generation.
"""
dp_dialect_options = "DO"
"""Visit a dialect options structure."""
dp_string_clauseelement_dict = "CD"
"""Visit a dictionary of string keys to :class:`_expression.ClauseElement`
objects.
"""
dp_string_multi_dict = "MD"
"""Visit a dictionary of string keys to values which may either be
plain immutable/hashable or :class:`.HasCacheKey` objects.
"""
dp_annotations_key = "AK"
"""Visit the _annotations_cache_key element.
This is a dictionary of additional information about a ClauseElement
that modifies its role. It should be included when comparing or caching
objects, however generating this key is relatively expensive. Visitors
should check the "_annotations" dict for non-None first before creating
this key.
"""
dp_plain_obj = "PO"
"""Visit a plain python object.
The value should be immutable and hashable, such as an integer.
The value is considered to be significant for cache key generation.
"""
dp_named_ddl_element = "DD"
"""Visit a simple named DDL element.
The current object used by this method is the :class:`.Sequence`.
The object is only considered to be important for cache key generation
as far as its name, but not any other aspects of it.
"""
dp_prefix_sequence = "PS"
"""Visit the sequence represented by :class:`_expression.HasPrefixes`
or :class:`_expression.HasSuffixes`.
"""
dp_table_hint_list = "TH"
"""Visit the ``_hints`` collection of a :class:`_expression.Select`
object.
"""
dp_setup_join_tuple = "SJ"
dp_memoized_select_entities = "ME"
dp_statement_hint_list = "SH"
"""Visit the ``_statement_hints`` collection of a
:class:`_expression.Select`
object.
"""
dp_unknown_structure = "UK"
"""Visit an unknown structure.
"""
dp_dml_ordered_values = "DML_OV"
"""Visit the values() ordered tuple list of an
:class:`_expression.Update` object."""
dp_dml_values = "DML_V"
"""Visit the values() dictionary of a :class:`.ValuesBase`
(e.g. Insert or Update) object.
"""
dp_dml_multi_values = "DML_MV"
"""Visit the values() multi-valued list of dictionaries of an
:class:`_expression.Insert` object.
"""
dp_propagate_attrs = "PA"
"""Visit the propagate attrs dict. This hardcodes to the particular
elements we care about right now."""
"""Symbols that follow are additional symbols that are useful in
caching applications.
Traversals for :class:`_expression.ClauseElement` objects only need to use
those symbols present in :class:`.InternalTraversal`. However, for
additional caching use cases within the ORM, symbols dealing with the
:class:`.HasCacheKey` class are added here.
"""
dp_ignore = "IG"
"""Specify an object that should be ignored entirely.
This currently applies function call argument caching where some
arguments should not be considered to be part of a cache key.
"""
dp_inspectable = "IS"
"""Visit an inspectable object where the return value is a
:class:`.HasCacheKey` object."""
dp_multi = "M"
"""Visit an object that may be a :class:`.HasCacheKey` or may be a
plain hashable object."""
dp_multi_list = "MT"
"""Visit a tuple containing elements that may be :class:`.HasCacheKey` or
may be a plain hashable object."""
dp_has_cache_key_tuples = "HT"
"""Visit a list of tuples which contain :class:`.HasCacheKey`
objects.
"""
dp_inspectable_list = "IL"
"""Visit a list of inspectable objects which upon inspection are
HasCacheKey objects."""
_TraverseInternalsType = List[Tuple[str, InternalTraversal]]
"""a structure that defines how a HasTraverseInternals should be
traversed.
This structure consists of a list of (attributename, internaltraversal)
tuples, where the "attributename" refers to the name of an attribute on an
instance of the HasTraverseInternals object, and "internaltraversal" refers
to an :class:`.InternalTraversal` enumeration symbol defining what kind
of data this attribute stores, which indicates to the traverser how it should
be handled.
"""
class HasTraverseInternals:
"""base for classes that have a "traverse internals" element,
which defines all kinds of ways of traversing the elements of an object.
Compared to :class:`.Visitable`, which relies upon an external visitor to
define how the object is traversed (i.e. the :class:`.SQLCompiler`), the
:class:`.HasTraverseInternals` interface allows classes to define their own
traversal, that is, what attributes are accessed and in what order.
"""
__slots__ = ()
_traverse_internals: _TraverseInternalsType
_is_immutable: bool = False
@util.preload_module("sqlalchemy.sql.traversals")
def get_children(
self, *, omit_attrs: Tuple[str,...] = (), **kw: Any
) -> Iterable[HasTraverseInternals]:
r"""Return immediate child :class:`.visitors.HasTraverseInternals`
elements of this :class:`.visitors.HasTraverseInternals`.
This is used for visit traversal.
\**kw may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
traversals = util.preloaded.sql_traversals
try:
traverse_internals = self._traverse_internals
except AttributeError:
# user-defined classes may not have a _traverse_internals
return []
dispatch = traversals._get_children.run_generated_dispatch
return itertools.chain.from_iterable(
meth(obj, **kw)
for attrname, obj, meth in dispatch(
self, traverse_internals, "_generated_get_children_traversal"
)
if attrname not in omit_attrs and obj is not None
)
class _InternalTraversalDispatchType(Protocol):
def __call__(s, self: object, visitor: HasTraversalDispatch) -> Any:
...
class HasTraversalDispatch:
r"""Define infrastructure for classes that perform internal traversals
.. versionadded:: 2.0
"""
__slots__ = ()
_dispatch_lookup: ClassVar[Dict[Union[InternalTraversal, str], str]] = {}
def dispatch(self, visit_symbol: InternalTraversal) -> Callable[..., Any]:
"""Given a method from :class:`.HasTraversalDispatch`, return the
corresponding method on a subclass.
"""
name = _dispatch_lookup[visit_symbol]
return getattr(self, name, None) # type: ignore
def run_generated_dispatch(
self,
target: object,
internal_dispatch: _TraverseInternalsType,
generate_dispatcher_name: str,
) -> Any:
dispatcher: _InternalTraversalDispatchType
try:
dispatcher = target.__class__.__dict__[generate_dispatcher_name]
except KeyError:
# traversals.py -> _preconfigure_traversals()
# may be used to run these ahead of time, but
# is not enabled right now.
# this block will generate any remaining dispatchers.
dispatcher = self.generate_dispatch(
target.__class__, internal_dispatch, generate_dispatcher_name
)
return dispatcher(target, self)
def generate_dispatch(
self,
target_cls: Type[object],
internal_dispatch: _TraverseInternalsType,
generate_dispatcher_name: str,
) -> _InternalTraversalDispatchType:
dispatcher = self._generate_dispatcher(
internal_dispatch, generate_dispatcher_name
)
# assert isinstance(target_cls, type)
setattr(target_cls, generate_dispatcher_name, dispatcher)
return dispatcher
def _generate_dispatcher(
self, internal_dispatch: _TraverseInternalsType, method_name: str
) -> _InternalTraversalDispatchType:
names = []
for attrname, visit_sym in internal_dispatch:
meth = self.dispatch(visit_sym)
if meth is not None:
visit_name = _dispatch_lookup[visit_sym]
names.append((attrname, visit_name))
code = (
(" return [\n")
+ (
", \n".join(
" (%r, self.%s, visitor.%s)"
% (attrname, attrname, visit_name)
for attrname, visit_name in names
)
)
+ ("\n ]\n")
)
meth_text = ("def %s(self, visitor):\n" % method_name) + code + "\n"
return cast(
_InternalTraversalDispatchType,
langhelpers._exec_code_in_env(meth_text, {}, method_name),
)
ExtendedInternalTraversal = InternalTraversal
def _generate_traversal_dispatch() -> None:
lookup = _dispatch_lookup
for sym in InternalTraversal:
key = sym.name
if key.startswith("dp_"):
visit_key = key.replace("dp_", "visit_")
sym_name = sym.value
assert sym_name not in lookup, sym_name
lookup[sym] = lookup[sym_name] = visit_key
_dispatch_lookup = HasTraversalDispatch._dispatch_lookup
_generate_traversal_dispatch()
class ExternallyTraversible(HasTraverseInternals, Visitable):
__slots__ = ()
_annotations: Mapping[Any, Any] = util.EMPTY_DICT
if typing.TYPE_CHECKING:
def _annotate(self, values: _AnnotationDict) -> Self:
...
def get_children(
self, *, omit_attrs: Tuple[str,...] = (), **kw: Any
) -> Iterable[ExternallyTraversible]:
...
def _clone(self, **kw: Any) -> Self:
"""clone this element"""
raise NotImplementedError()
def _copy_internals(
self, *, omit_attrs: Tuple[str,...] = (), **kw: Any
) -> None:
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
raise NotImplementedError()
_ET = TypeVar("_ET", bound=ExternallyTraversible)
_CE = TypeVar("_CE", bound="ColumnElement[Any]")
_TraverseCallableType = Callable[[_ET], None]
class _CloneCallableType(Protocol):
def __call__(self, element: _ET, **kw: Any) -> _ET:
...
class _TraverseTransformCallableType(Protocol[_ET]):
def __call__(self, element: _ET, **kw: Any) -> Optional[_ET]:
...
_ExtT = TypeVar("_ExtT", bound="ExternalTraversal")
class ExternalTraversal(util.MemoizedSlots):
"""Base class for visitor objects which can traverse externally using
the :func:`.visitors.traverse` function.
Direct usage of the :func:`.visitors.traverse` function is usually
preferred.
"""
__slots__ = ("_visitor_dict", "_next")
__traverse_options__: Dict[str, Any] = {}
_next: Optional[ExternalTraversal]
def traverse_single(self, obj: Visitable, **kw: Any) -> Any:
for v in self.visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj, **kw)
def iterate(
self, obj: Optional[ExternallyTraversible]
) -> Iterator[ExternallyTraversible]:
"""Traverse the given expression structure, returning an iterator
of all elements.
"""
return iterate(obj, self.__traverse_options__)
@overload
def traverse(self, obj: Literal[None]) -> None:
...
@overload
def traverse(self, obj: ExternallyTraversible) -> ExternallyTraversible:
...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
def _memoized_attr__visitor_dict(
self,
) -> Dict[str, _TraverseCallableType[Any]]:
visitors = {}
for name in dir(self):
if name.startswith("visit_"):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def visitor_iterator(self) -> Iterator[ExternalTraversal]:
"""Iterate through this visitor and each 'chained' visitor."""
v: Optional[ExternalTraversal] = self
while v:
yield v
v = getattr(v, "_next", None)
def chain(self: _ExtT, visitor: ExternalTraversal) -> _ExtT:
"""'Chain' an additional ExternalTraversal onto this ExternalTraversal
The chained visitor will receive all visit events after this one.
"""
tail = list(self.visitor_iterator)[-1]
tail._next = visitor
return self
class CloningExternalTraversal(ExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.cloned_traverse` function.
Direct usage of the :func:`.visitors.cloned_traverse` function is usually
preferred.
"""
__slots__ = ()
def copy_and_process(
self, list_: List[ExternallyTraversible]
) -> List[ExternallyTraversible]:
"""Apply cloned traversal to the given list of elements, and return
the new list.
"""
return [self.traverse(x) for x in list_]
@overload
def traverse(self, obj: Literal[None]) -> None:
...
@overload
def traverse(self, obj: ExternallyTraversible) -> ExternallyTraversible:
...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
return cloned_traverse(
obj, self.__traverse_options__, self._visitor_dict
)
class ReplacingExternalTraversal(CloningExternalTraversal):
"""Base class for visitor objects which can traverse using
the :func:`.visitors.replacement_traverse` function.
Direct usage of the :func:`.visitors.replacement_traverse` function is
usually preferred.
"""
__slots__ = ()
def replace(
self, elem: ExternallyTraversible
) -> Optional[ExternallyTraversible]:
"""Receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
@overload
def traverse(self, obj: Literal[None]) -> None:
...
@overload
def traverse(self, obj: ExternallyTraversible) -> ExternallyTraversible:
...
def traverse(
self, obj: Optional[ExternallyTraversible]
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure."""
def replace(
element: ExternallyTraversible,
**kw: Any,
) -> Optional[ExternallyTraversible]:
for v in self.visitor_iterator:
e = cast(ReplacingExternalTraversal, v).replace(element)
if e is not None:
return e
return None
return replacement_traverse(obj, self.__traverse_options__, replace)
# backwards compatibility
Traversible = Visitable
ClauseVisitor = ExternalTraversal
CloningVisitor = CloningExternalTraversal
ReplacingCloningVisitor = ReplacingExternalTraversal
def iterate(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any] = util.EMPTY_DICT,
) -> Iterator[ExternallyTraversible]:
r"""Traverse the given expression structure, returning an iterator.
Traversal is configured to be breadth-first.
The central API feature used by the :func:`.visitors.iterate`
function is the
:meth:`_expression.ClauseElement.get_children` method of
:class:`_expression.ClauseElement` objects. This method should return all
the :class:`_expression.ClauseElement` objects which are associated with a
particular :class:`_expression.ClauseElement` object. For example, a
:class:`.Case` structure will refer to a series of
:class:`_expression.ColumnElement` objects within its "whens" and "else\_"
member variables.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
"""
if obj is None:
return
yield obj
children = obj.get_children(**opts)
if not children:
return
stack = deque([children])
while stack:
t_iterator = stack.popleft()
for t in t_iterator:
yield t
stack.append(t.get_children(**opts))
@overload
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: Literal[None],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None:
...
@overload
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: ExternallyTraversible,
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> ExternallyTraversible:
...
def traverse_using(
iterator: Iterable[ExternallyTraversible],
obj: Optional[ExternallyTraversible],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Visit the given expression structure using the given iterator of
objects.
:func:`.visitors.traverse_using` is usually called internally as the result
of the :func:`.visitors.traverse` function.
:param iterator: an iterable or sequence which will yield
:class:`_expression.ClauseElement`
structures; the iterator is assumed to be the
product of the :func:`.visitors.iterate` function.
:param obj: the :class:`_expression.ClauseElement`
that was used as the target of the
:func:`.iterate` function.
:param visitors: dictionary of visit functions. See :func:`.traverse`
for details on this dictionary.
.. seealso::
:func:`.traverse`
"""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
@overload
def traverse(
obj: Literal[None],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None:
...
@overload
def traverse(
obj: ExternallyTraversible,
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> ExternallyTraversible:
...
def traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Traverse and visit the given expression structure using the default
iterator.
e.g.::
from sqlalchemy.sql import visitors
stmt = select(some_table).where(some_table.c.foo == 'bar')
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
The iteration of objects uses the :func:`.visitors.iterate` function,
which does a breadth-first traversal using a stack.
:param obj: :class:`_expression.ClauseElement` structure to be traversed
:param opts: dictionary of iteration options. This dictionary is usually
empty in modern usage.
:param visitors: dictionary of visit functions. The dictionary should
have strings as keys, each of which would correspond to the
``__visit_name__`` of a particular kind of SQL expression object, and
callable functions as values, each of which represents a visitor function
for that kind of object.
"""
return traverse_using(iterate(obj, opts), obj, visitors)
@overload
def cloned_traverse(
obj: Literal[None],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> None:
...
# a bit of controversy here, as the clone of the lead element
# *could* in theory replace with an entirely different kind of element.
# however this is really not how cloned_traverse is ever used internally
# at least.
@overload
def cloned_traverse(
obj: _ET,
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> _ET:
...
def cloned_traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
visitors: Mapping[str, _TraverseCallableType[Any]],
) -> Optional[ExternallyTraversible]:
"""Clone the given expression structure, allowing modifications by
visitors for mutable objects.
Traversal usage is the same as that of :func:`.visitors.traverse`.
The visitor functions present in the ``visitors`` dictionary may also
modify the internals of the given structure as the traversal proceeds.
The :func:`.cloned_traverse` function does **not** provide objects that are
part of the :class:`.Immutable` interface to the visit methods (this
primarily includes :class:`.ColumnClause`, :class:`.Column`,
:class:`.TableClause` and :class:`.Table` objects). As this traversal is
only intended to allow in-place mutation of objects, :class:`.Immutable`
objects are skipped. The :meth:`.Immutable._clone` method is still called
on each object to allow for objects to replace themselves with a different
object based on a clone of their sub-internals (e.g. a
:class:`.ColumnClause` that clones its subquery to return a new
:class:`.ColumnClause`).
.. versionchanged:: 2.0 The :func:`.cloned_traverse` function omits
objects that are part of the :class:`.Immutable` interface.
The central API feature used by the :func:`.visitors.cloned_traverse`
and :func:`.visitors.replacement_traverse` functions, in addition to the
:meth:`_expression.ClauseElement.get_children`
function that is used to achieve
the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
method.
For a :class:`_expression.ClauseElement`
structure to support cloning and replacement
traversals correctly, it needs to be able to pass a cloning function into
its internal members in order to make copies of them.
.. seealso::
:func:`.visitors.traverse`
:func:`.visitors.replacement_traverse`
"""
cloned: Dict[int, ExternallyTraversible] = {}
stop_on = set(opts.get("stop_on", []))
def deferred_copy_internals(
obj: ExternallyTraversible,
) -> ExternallyTraversible:
return cloned_traverse(obj, opts, visitors)
def clone(elem: ExternallyTraversible, **kw: Any) -> ExternallyTraversible:
if elem in stop_on:
return elem
else:
if id(elem) not in cloned:
if "replace" in kw:
newelem = cast(
Optional[ExternallyTraversible], kw["replace"](elem)
)
if newelem is not None:
cloned[id(elem)] = newelem
return newelem
# the _clone method for immutable normally returns "self".
# however, the method is still allowed to return a
# different object altogether; ColumnClause._clone() will
# based on options clone the subquery to which it is associated
# and return the new corresponding column.
cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw)
newelem._copy_internals(clone=clone, **kw)
# however, visit methods which are tasked with in-place
# mutation of the object should not get access to the immutable
# object.
if not elem._is_immutable:
meth = visitors.get(newelem.__visit_name__, None)
if meth:
meth(newelem)
return cloned[id(elem)]
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # type: ignore[assignment] # remove gc cycles
return obj
@overload
def replacement_traverse(
obj: Literal[None],
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> None:
...
@overload
def replacement_traverse(
obj: _CE,
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> _CE:
...
@overload
def replacement_traverse(
obj: ExternallyTraversible,
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> ExternallyTraversible:
...
def replacement_traverse(
obj: Optional[ExternallyTraversible],
opts: Mapping[str, Any],
replace: _TraverseTransformCallableType[Any],
) -> Optional[ExternallyTraversible]:
"""Clone the given expression structure, allowing element
replacement by a given replacement function.
This function is very similar to the :func:`.visitors.cloned_traverse`
function, except instead of being passed a dictionary of visitors, all
elements are unconditionally passed into the given replace function.
The replace function then has the option to return an entirely new object
which will replace the one given. If it returns ``None``, then the object
is kept in place.
The difference in usage between :func:`.visitors.cloned_traverse` and
:func:`.visitors.replacement_traverse` is that in the former case, an
already-cloned object is passed to the visitor function, and the visitor
function can then manipulate the internal state of the object.
In the case of the latter, the visitor function should only return an
entirely different object, or do nothing.
The use case for :func:`.visitors.replacement_traverse` is that of
replacing a FROM clause inside of a SQL structure with a different one,
as is a common use case within the ORM.
"""
cloned = {}
stop_on = {id(x) for x in opts.get("stop_on", [])}
def deferred_copy_internals(
obj: ExternallyTraversible,
) -> ExternallyTraversible:
return replacement_traverse(obj, opts, replace)
def clone(elem: ExternallyTraversible, **kw: Any) -> ExternallyTraversible:
if (
id(elem) in stop_on
or "no_replacement_traverse" in elem._annotations
):
return elem
else:
newelem = replace(elem)
if newelem is not None:
stop_on.add(id(newelem))
return newelem # type: ignore
else:
# base "already seen" on id(), not hash, so that we don't
# replace an Annotated element with its non-annotated one, and
# vice versa
id_elem = id(elem)
if id_elem not in cloned:
if "replace" in kw:
newelem = kw["replace"](elem)
if newelem is not None:
cloned[id_elem] = newelem
return newelem # type: ignore
cloned[id_elem] = newelem = elem._clone(**kw)
newelem._copy_internals(clone=clone, **kw)
return cloned[id_elem] # type: ignore
if obj is not None:
obj = clone(
obj, deferred_copy_internals=deferred_copy_internals, **opts
)
clone = None # type: ignore[assignment] # remove gc cycles
return obj |
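# A minimal usage sketch of the traversal functions above; ``some_table`` and
# the column names are assumptions for illustration, not part of this module:
#
#     from sqlalchemy import column, select, table
#     from sqlalchemy.sql import visitors
#
#     some_table = table("some_table", column("foo"))
#     stmt = select(some_table).where(some_table.c.foo == "bar")
#
#     # breadth-first iteration over every element in the statement
#     names = [elem.__visit_name__ for elem in visitors.iterate(stmt, {})]
#
#     # read-only visit of each bound parameter in the statement
#     visitors.traverse(
#         stmt, {}, {"bindparam": lambda bind: print(bind.value)}
#     )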
|
sqlalchemy__alembic | commands.rst | Module doc | Generate documentation for this module | MIT License | sqlalchemy__alembic/docs/build/api/commands.rst | [
"sqlalchemy__alembic/alembic/command.py"
] | Commands
Note
this section discusses the internal API of Alembic as regards its
command invocation system. This section is only useful for developers
who wish to extend the capabilities of Alembic. For documentation on
using Alembic commands, please see /tutorial.
Alembic commands are all represented by functions in the
alembic.command.toplevel package. They all accept the same style of
usage, being sent the .Config object as the first argument.
Commands can be run programmatically, by first constructing a .Config
object, as in:
from alembic.config import Config
from alembic import command
alembic_cfg = Config("/path/to/yourapp/alembic.ini")
command.upgrade(alembic_cfg, "head")
In many cases, an application will wish to call upon a series of Alembic
commands and/or other features. It is usually a good idea to link multiple
commands along a single connection and transaction, if feasible. This can
be achieved by using the .Config.attributes dictionary to share a
connection:
with engine.begin() as connection:
    alembic_cfg.attributes['connection'] = connection
    command.upgrade(alembic_cfg, "head")
This recipe requires that env.py consumes this connection argument; see
the example in connection_sharing for details.
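A minimal sketch of the consuming side, assuming the standard online-migration
template in env.py (the helper name and the target_metadata=None placeholder
are illustrative, and the offline-mode branch is omitted):

from sqlalchemy import engine_from_config, pool
from alembic import context

config = context.config

def do_run_migrations(connection):
    context.configure(connection=connection, target_metadata=None)
    with context.begin_transaction():
        context.run_migrations()

def run_migrations_online():
    # prefer a Connection handed to us via Config.attributes
    connection = config.attributes.get('connection', None)

    if connection is None:
        # no outside connection; build an Engine from alembic.ini as usual
        connectable = engine_from_config(
            config.get_section(config.config_ini_section),
            prefix='sqlalchemy.',
            poolclass=pool.NullPool,
        )
        with connectable.connect() as connection:
            do_run_migrations(connection)
    else:
        do_run_migrations(connection)

run_migrations_online()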
To write small API functions that make direct use of database and script
directory information, rather than just running one of the built-in
commands, use the .ScriptDirectory and .MigrationContext classes
directly.
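For example, a small helper that reports whether a database is fully upgraded
might look like the following sketch (the engine and the alembic.ini path are
assumptions):

from alembic.config import Config
from alembic.runtime.migration import MigrationContext
from alembic.script import ScriptDirectory

def database_is_current(engine, ini_path='alembic.ini'):
    # compare the revision(s) stamped in the database against the
    # head revision(s) present in the script directory
    script = ScriptDirectory.from_config(Config(ini_path))
    with engine.connect() as connection:
        context = MigrationContext.configure(connection)
        return set(context.get_current_heads()) == set(script.get_heads())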
| from __future__ import annotations
import os
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union
from . import autogenerate as autogen
from . import util
from .runtime.environment import EnvironmentContext
from .script import ScriptDirectory
if TYPE_CHECKING:
from alembic.config import Config
from alembic.script.base import Script
from.runtime.environment import ProcessRevisionDirectiveFn
def list_templates(config: Config):
"""List available templates.
:param config: a :class:`.Config` object.
"""
config.print_stdout("Available templates:\n")
for tempname in os.listdir(config.get_template_directory()):
with open(
os.path.join(config.get_template_directory(), tempname, "README")
) as readme:
synopsis = next(readme).rstrip()
config.print_stdout("%s - %s", tempname, synopsis)
config.print_stdout("\nTemplates are used via the 'init' command, e.g.:")
config.print_stdout("\n alembic init --template generic./scripts")
def init(
config: Config,
directory: str,
template: str = "generic",
package: bool = False,
) -> None:
"""Initialize a new scripts directory.
:param config: a :class:`.Config` object.
:param directory: string path of the target directory
:param template: string name of the migration environment template to
use.
:param package: when True, write ``__init__.py`` files into the
environment location as well as the versions/ location.
"""
if os.access(directory, os.F_OK) and os.listdir(directory):
raise util.CommandError(
"Directory %s already exists and is not empty" % directory
)
template_dir = os.path.join(config.get_template_directory(), template)
if not os.access(template_dir, os.F_OK):
raise util.CommandError("No such template %r" % template)
if not os.access(directory, os.F_OK):
with util.status(
f"Creating directory {os.path.abspath(directory)!r}",
**config.messaging_opts,
):
os.makedirs(directory)
versions = os.path.join(directory, "versions")
with util.status(
f"Creating directory {os.path.abspath(versions)!r}",
**config.messaging_opts,
):
os.makedirs(versions)
script = ScriptDirectory(directory)
config_file: str | None = None
for file_ in os.listdir(template_dir):
file_path = os.path.join(template_dir, file_)
if file_ == "alembic.ini.mako":
assert config.config_file_name is not None
config_file = os.path.abspath(config.config_file_name)
if os.access(config_file, os.F_OK):
util.msg(
f"File {config_file!r} already exists, skipping",
**config.messaging_opts,
)
else:
script._generate_template(
file_path, config_file, script_location=directory
)
elif os.path.isfile(file_path):
output_file = os.path.join(directory, file_)
script._copy_file(file_path, output_file)
if package:
for path in [
os.path.join(os.path.abspath(directory), "__init__.py"),
os.path.join(os.path.abspath(versions), "__init__.py"),
]:
with util.status(f"Adding {path!r}", **config.messaging_opts):
with open(path, "w"):
pass
assert config_file is not None
util.msg(
"Please edit configuration/connection/logging "
f"settings in {config_file!r} before proceeding.",
**config.messaging_opts,
)
def revision(
config: Config,
message: Optional[str] = None,
autogenerate: bool = False,
sql: bool = False,
head: str = "head",
splice: bool = False,
branch_label: Optional[str] = None,
version_path: Optional[str] = None,
rev_id: Optional[str] = None,
depends_on: Optional[str] = None,
process_revision_directives: Optional[ProcessRevisionDirectiveFn] = None,
) -> Union[Optional[Script], List[Optional[Script]]]:
"""Create a new revision file.
:param config: a :class:`.Config` object.
:param message: string message to apply to the revision; this is the
``-m`` option to ``alembic revision``.
:param autogenerate: whether or not to autogenerate the script from
the database; this is the ``--autogenerate`` option to
``alembic revision``.
:param sql: whether to dump the script out as a SQL string; when specified,
the script is dumped to stdout. This is the ``--sql`` option to
``alembic revision``.
:param head: head revision to build the new revision upon as a parent;
this is the ``--head`` option to ``alembic revision``.
:param splice: whether or not the new revision should be made into a
new head of its own; is required when the given ``head`` is not itself
a head. This is the ``--splice`` option to ``alembic revision``.
:param branch_label: string label to apply to the branch; this is the
``--branch-label`` option to ``alembic revision``.
:param version_path: string symbol identifying a specific version path
from the configuration; this is the ``--version-path`` option to
``alembic revision``.
:param rev_id: optional revision identifier to use instead of having
one generated; this is the ``--rev-id`` option to ``alembic revision``.
:param depends_on: optional list of "depends on" identifiers; this is the
``--depends-on`` option to ``alembic revision``.
:param process_revision_directives: this is a callable that takes the
same form as the callable described at
:paramref:`.EnvironmentContext.configure.process_revision_directives`;
will be applied to the structure generated by the revision process
where it can be altered programmatically. Note that unlike all
the other parameters, this option is only available via programmatic
use of :func:`.command.revision`
"""
script_directory = ScriptDirectory.from_config(config)
command_args = dict(
message=message,
autogenerate=autogenerate,
sql=sql,
head=head,
splice=splice,
branch_label=branch_label,
version_path=version_path,
rev_id=rev_id,
depends_on=depends_on,
)
revision_context = autogen.RevisionContext(
config,
script_directory,
command_args,
process_revision_directives=process_revision_directives,
)
environment = util.asbool(config.get_main_option("revision_environment"))
if autogenerate:
environment = True
if sql:
raise util.CommandError(
"Using --sql with --autogenerate does not make any sense"
)
def retrieve_migrations(rev, context):
revision_context.run_autogenerate(rev, context)
return []
elif environment:
def retrieve_migrations(rev, context):
revision_context.run_no_autogenerate(rev, context)
return []
elif sql:
raise util.CommandError(
"Using --sql with the revision command when "
"revision_environment is not configured does not make any sense"
)
if environment:
with EnvironmentContext(
config,
script_directory,
fn=retrieve_migrations,
as_sql=sql,
template_args=revision_context.template_args,
revision_context=revision_context,
):
script_directory.run_env()
# the revision_context now has MigrationScript structure(s) present.
# these could theoretically be further processed / rewritten *here*,
# in addition to the hooks present within each run_migrations() call,
# or at the end of env.py run_migrations_online().
scripts = [script for script in revision_context.generate_scripts()]
if len(scripts) == 1:
return scripts[0]
else:
return scripts
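# Programmatic-usage sketch (the ini path and message are assumptions; an
# autogenerate revision additionally requires env.py to be able to connect):
#
#     from alembic.config import Config
#     from alembic import command
#
#     cfg = Config("alembic.ini")
#     command.revision(cfg, message="add account table", autogenerate=True)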
def check(
config: "Config",
) -> None:
"""Check if revision command with autogenerate has pending upgrade ops.
:param config: a :class:`.Config` object.
.. versionadded:: 1.9.0
"""
script_directory = ScriptDirectory.from_config(config)
command_args = dict(
message=None,
autogenerate=True,
sql=False,
head="head",
splice=False,
branch_label=None,
version_path=None,
rev_id=None,
depends_on=None,
)
revision_context = autogen.RevisionContext(
config,
script_directory,
command_args,
)
def retrieve_migrations(rev, context):
revision_context.run_autogenerate(rev, context)
return []
with EnvironmentContext(
config,
script_directory,
fn=retrieve_migrations,
as_sql=False,
template_args=revision_context.template_args,
revision_context=revision_context,
):
script_directory.run_env()
# the revision_context now has MigrationScript structure(s) present.
migration_script = revision_context.generated_revisions[-1]
diffs = migration_script.upgrade_ops.as_diffs()
if diffs:
raise util.AutogenerateDiffsDetected(
f"New upgrade operations detected: {diffs}"
)
else:
config.print_stdout("No new upgrade operations detected.")
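# Sketch of using check() as a CI gate (the ini path is an assumption):
#
#     from alembic import command, util
#     from alembic.config import Config
#
#     try:
#         command.check(Config("alembic.ini"))
#     except util.AutogenerateDiffsDetected as err:
#         raise SystemExit(f"models and migrations are out of sync: {err}")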
def merge(
config: Config,
revisions: str,
message: Optional[str] = None,
branch_label: Optional[str] = None,
rev_id: Optional[str] = None,
) -> Optional[Script]:
"""Merge two revisions together. Creates a new migration file.
:param config: a :class:`.Config` instance
:param message: string message to apply to the revision
:param branch_label: string label name to apply to the new revision
:param rev_id: hardcoded revision identifier instead of generating a new
one.
.. seealso::
:ref:`branches`
"""
script = ScriptDirectory.from_config(config)
template_args = {
"config": config # Let templates use config for
# e.g. multiple databases
}
environment = util.asbool(config.get_main_option("revision_environment"))
if environment:
def nothing(rev, context):
return []
with EnvironmentContext(
config,
script,
fn=nothing,
as_sql=False,
template_args=template_args,
):
script.run_env()
return script.generate_revision(
rev_id or util.rev_id(),
message,
refresh=True,
head=revisions,
branch_labels=branch_label,
**template_args, # type:ignore[arg-type]
)
def upgrade(
config: Config,
revision: str,
sql: bool = False,
tag: Optional[str] = None,
) -> None:
"""Upgrade to a later version.
:param config: a :class:`.Config` instance.
:param revision: string revision target or range for --sql mode
:param sql: if True, use ``--sql`` mode
:param tag: an arbitrary "tag" that can be intercepted by custom
``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
method.
"""
script = ScriptDirectory.from_config(config)
starting_rev = None
if ":" in revision:
if not sql:
raise util.CommandError("Range revision not allowed")
starting_rev, revision = revision.split(":", 2)
def upgrade(rev, context):
return script._upgrade_revs(revision, rev)
with EnvironmentContext(
config,
script,
fn=upgrade,
as_sql=sql,
starting_rev=starting_rev,
destination_rev=revision,
tag=tag,
):
script.run_env()
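# Programmatic-usage sketch (the ini path is an assumption):
#
#     from alembic.config import Config
#     from alembic import command
#
#     cfg = Config("alembic.ini")
#     command.upgrade(cfg, "head")                  # migrate to the latest revision
#     command.upgrade(cfg, "base:head", sql=True)   # emit the SQL to stdout instead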
def downgrade(
config: Config,
revision: str,
sql: bool = False,
tag: Optional[str] = None,
) -> None:
"""Revert to a previous version.
:param config: a :class:`.Config` instance.
:param revision: string revision target or range for --sql mode
:param sql: if True, use ``--sql`` mode
:param tag: an arbitrary "tag" that can be intercepted by custom
``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument`
method.
"""
script = ScriptDirectory.from_config(config)
starting_rev = None
if ":" in revision:
if not sql:
raise util.CommandError("Range revision not allowed")
starting_rev, revision = revision.split(":", 2)
elif sql:
raise util.CommandError(
"downgrade with --sql requires <fromrev>:<torev>"
)
def downgrade(rev, context):
return script._downgrade_revs(revision, rev)
with EnvironmentContext(
config,
script,
fn=downgrade,
as_sql=sql,
starting_rev=starting_rev,
destination_rev=revision,
tag=tag,
):
script.run_env()
def show(config, rev):
"""Show the revision(s) denoted by the given symbol.
:param config: a :class:`.Config` instance.
:param revision: string revision target
"""
script = ScriptDirectory.from_config(config)
if rev == "current":
def show_current(rev, context):
for sc in script.get_revisions(rev):
config.print_stdout(sc.log_entry)
return []
with EnvironmentContext(config, script, fn=show_current):
script.run_env()
else:
for sc in script.get_revisions(rev):
config.print_stdout(sc.log_entry)
def history(
config: Config,
rev_range: Optional[str] = None,
verbose: bool = False,
indicate_current: bool = False,
) -> None:
"""List changeset scripts in chronological order.
:param config: a :class:`.Config` instance.
:param rev_range: string revision range
:param verbose: output in verbose mode.
:param indicate_current: indicate current revision.
"""
base: Optional[str]
head: Optional[str]
script = ScriptDirectory.from_config(config)
if rev_range is not None:
if ":" not in rev_range:
raise util.CommandError(
"History range requires [start]:[end], " "[start]:, or :[end]"
)
base, head = rev_range.strip().split(":")
else:
base = head = None
environment = (
util.asbool(config.get_main_option("revision_environment"))
or indicate_current
)
def _display_history(config, script, base, head, currents=()):
for sc in script.walk_revisions(
base=base or "base", head=head or "heads"
):
if indicate_current:
sc._db_current_indicator = sc.revision in currents
config.print_stdout(
sc.cmd_format(
verbose=verbose,
include_branches=True,
include_doc=True,
include_parents=True,
)
)
def _display_history_w_current(config, script, base, head):
def _display_current_history(rev, context):
if head == "current":
_display_history(config, script, base, rev, rev)
elif base == "current":
_display_history(config, script, rev, head, rev)
else:
_display_history(config, script, base, head, rev)
return []
with EnvironmentContext(config, script, fn=_display_current_history):
script.run_env()
if base == "current" or head == "current" or environment:
_display_history_w_current(config, script, base, head)
else:
_display_history(config, script, base, head)
def heads(config, verbose=False, resolve_dependencies=False):
"""Show current available heads in the script directory.
:param config: a :class:`.Config` instance.
:param verbose: output in verbose mode.
:param resolve_dependencies: treat dependency version as down revisions.
"""
script = ScriptDirectory.from_config(config)
if resolve_dependencies:
heads = script.get_revisions("heads")
else:
heads = script.get_revisions(script.get_heads())
for rev in heads:
config.print_stdout(
rev.cmd_format(
verbose, include_branches=True, tree_indicators=False
)
)
def branches(config, verbose=False):
"""Show current branch points.
:param config: a :class:`.Config` instance.
:param verbose: output in verbose mode.
"""
script = ScriptDirectory.from_config(config)
for sc in script.walk_revisions():
if sc.is_branch_point:
config.print_stdout(
"%s\n%s\n",
sc.cmd_format(verbose, include_branches=True),
"\n".join(
"%s -> %s"
% (
" " * len(str(sc.revision)),
rev_obj.cmd_format(
False, include_branches=True, include_doc=verbose
),
)
for rev_obj in (
script.get_revision(rev) for rev in sc.nextrev
)
),
)
def current(config: Config, verbose: bool = False) -> None:
"""Display the current revision for a database.
:param config: a :class:`.Config` instance.
:param verbose: output in verbose mode.
"""
script = ScriptDirectory.from_config(config)
def display_version(rev, context):
if verbose:
config.print_stdout(
"Current revision(s) for %s:",
util.obfuscate_url_pw(context.connection.engine.url),
)
for rev in script.get_all_current(rev):
config.print_stdout(rev.cmd_format(verbose))
return []
with EnvironmentContext(
config, script, fn=display_version, dont_mutate=True
):
script.run_env()
def stamp(
config: Config,
revision: str,
sql: bool = False,
tag: Optional[str] = None,
purge: bool = False,
) -> None:
"""'stamp' the revision table with the given revision; don't
run any migrations.
:param config: a :class:`.Config` instance.
:param revision: target revision or list of revisions. May be a list
to indicate stamping of multiple branch heads.
.. note:: this parameter is called "revisions" in the command line
interface.
:param sql: use ``--sql`` mode
:param tag: an arbitrary "tag" that can be intercepted by custom
``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument`
method.
:param purge: delete all entries in the version table before stamping.
"""
script = ScriptDirectory.from_config(config)
if sql:
destination_revs = []
starting_rev = None
for _revision in util.to_list(revision):
if ":" in _revision:
srev, _revision = _revision.split(":", 2)
if starting_rev!= srev:
if starting_rev is None:
starting_rev = srev
else:
raise util.CommandError(
"Stamp operation with --sql only supports a "
"single starting revision at a time"
)
destination_revs.append(_revision)
else:
destination_revs = util.to_list(revision)
def do_stamp(rev, context):
return script._stamp_revs(util.to_tuple(destination_revs), rev)
with EnvironmentContext(
config,
script,
fn=do_stamp,
as_sql=sql,
starting_rev=starting_rev if sql else None,
destination_rev=util.to_tuple(destination_revs),
tag=tag,
purge=purge,
):
script.run_env()
def edit(config: Config, rev: str) -> None:
"""Edit revision script(s) using $EDITOR.
:param config: a :class:`.Config` instance.
:param rev: target revision.
"""
script = ScriptDirectory.from_config(config)
if rev == "current":
def edit_current(rev, context):
if not rev:
raise util.CommandError("No current revisions")
for sc in script.get_revisions(rev):
util.open_in_editor(sc.path)
return []
with EnvironmentContext(config, script, fn=edit_current):
script.run_env()
else:
revs = script.get_revisions(rev)
if not revs:
raise util.CommandError(
"No revision files indicated by symbol '%s'" % rev
)
for sc in revs:
assert sc
util.open_in_editor(sc.path)
def ensure_version(config: Config, sql: bool = False) -> None:
"""Create the alembic version table if it doesn't exist already.
:param config: a :class:`.Config` instance.
:param sql: use ``--sql`` mode
.. versionadded:: 1.7.6
"""
script = ScriptDirectory.from_config(config)
def do_ensure_version(rev, context):
context._ensure_version_table()
return []
with EnvironmentContext(
config,
script,
fn=do_ensure_version,
as_sql=sql,
):
script.run_env() |
|
sqlalchemy__alembic | operations.rst | Module doc | Generate documentation for this module | MIT License | sqlalchemy__alembic/docs/build/api/operations.rst | [
"sqlalchemy__alembic/alembic/operations/ops.py"
] | Operation Directives
Within migration scripts, actual database migration operations are
handled via an instance of .Operations. The .Operations class lists out
available migration operations that are linked to a .MigrationContext,
which communicates instructions originated by the .Operations object
into SQL that is sent to a database or SQL output stream.
Most methods on the .Operations class are generated dynamically using a
"plugin" system, described in the next section operation_plugins.
Additionally, when Alembic migration scripts actually run, the methods
on the current .Operations object are proxied out to the alembic.op
module, so that they are available using module-style access.
For an overview of how to use an .Operations object directly in
programs, as well as a reference for the standard operation methods and
the "batch" methods, see ops.
Operation Plugins
The Operations object is extensible using a plugin system. This system
allows one to add new op.<some_operation> methods at runtime. The steps
to use this system are to first create a subclass of .MigrateOperation,
register it using the .Operations.register_operation class decorator,
then build a default "implementation" function which is established
using the .Operations.implementation_for decorator.
Below we illustrate a very simple operation CreateSequenceOp which will
implement a new method op.create_sequence() for use in migration
scripts:
from alembic.operations import Operations, MigrateOperation

@Operations.register_operation("create_sequence")
class CreateSequenceOp(MigrateOperation):
    """Create a SEQUENCE."""

    def __init__(self, sequence_name, schema=None):
        self.sequence_name = sequence_name
        self.schema = schema

    @classmethod
    def create_sequence(cls, operations, sequence_name, **kw):
        """Issue a "CREATE SEQUENCE" instruction."""
        op = CreateSequenceOp(sequence_name, **kw)
        return operations.invoke(op)

    def reverse(self):
        # only needed to support autogenerate
        return DropSequenceOp(self.sequence_name, schema=self.schema)

@Operations.register_operation("drop_sequence")
class DropSequenceOp(MigrateOperation):
    """Drop a SEQUENCE."""

    def __init__(self, sequence_name, schema=None):
        self.sequence_name = sequence_name
        self.schema = schema

    @classmethod
    def drop_sequence(cls, operations, sequence_name, **kw):
        """Issue a "DROP SEQUENCE" instruction."""
        op = DropSequenceOp(sequence_name, **kw)
        return operations.invoke(op)

    def reverse(self):
        # only needed to support autogenerate
        return CreateSequenceOp(self.sequence_name, schema=self.schema)
Above, the CreateSequenceOp and DropSequenceOp classes represent new
operations that will be available as op.create_sequence() and
op.drop_sequence(). The reason the operations are represented as
stateful classes is so that an operation and a specific set of arguments
can be represented generically; the state can then correspond to
different kinds of operations, such as invoking the instruction against
a database, or autogenerating Python code for the operation into a
script.
In order to establish the migrate-script behavior of the new operations,
we use the .Operations.implementation_for decorator:
@Operations.implementation_for(CreateSequenceOp)
def create_sequence(operations, operation):
    if operation.schema is not None:
        name = "%s.%s" % (operation.schema, operation.sequence_name)
    else:
        name = operation.sequence_name
    operations.execute("CREATE SEQUENCE %s" % name)

@Operations.implementation_for(DropSequenceOp)
def drop_sequence(operations, operation):
    if operation.schema is not None:
        name = "%s.%s" % (operation.schema, operation.sequence_name)
    else:
        name = operation.sequence_name
    operations.execute("DROP SEQUENCE %s" % name)
Above, we use the simplest possible technique of invoking our DDL, which
is just to call .Operations.execute with literal SQL. If this is all a
custom operation needs, then this is fine. However, options for more
comprehensive support include building out a custom SQL construct, as
documented at sqlalchemy.ext.compiler_toplevel.
With the above two steps, a migration script can now use new methods
op.create_sequence() and op.drop_sequence() that will proxy to our
object as a classmethod:
def upgrade():
    op.create_sequence("my_sequence")

def downgrade():
    op.drop_sequence("my_sequence")
New operations only need to be registered by the time the env.py script
invokes .MigrationContext.run_migrations; registering them at the module
level of the env.py script is sufficient.
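For example, assuming the two operation classes above live in a module named
myapp.migration_ops (a hypothetical name), a plain import at the top of env.py
is enough to register them:

# env.py
from myapp import migration_ops  # noqa: F401 - importing registers op.create_sequence() / op.drop_sequence()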
autogen_custom_ops - how to add autogenerate support to custom
operations.
Built-in Operation Objects
The migration operations present on .Operations are themselves delivered
via operation objects that represent an operation and its arguments. All
operations descend from the .MigrateOperation class, and are registered
with the .Operations class using the .Operations.register_operation
class decorator. The .MigrateOperation objects also serve as the basis
for how the autogenerate system renders new migration scripts.
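Continuing the sequence example above, one piece of that autogenerate support
is a render function registered with alembic.autogenerate.renderers; a brief
sketch (the full recipe is described in autogen_custom_ops):

from alembic.autogenerate import renderers

@renderers.dispatch_for(CreateSequenceOp)
def render_create_sequence(autogen_context, op):
    # emit the Python source line that will appear in the generated script
    return "op.create_sequence(%r, schema=%r)" % (op.sequence_name, op.schema)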
| from __future__ import annotations
from abc import abstractmethod
import re
from typing import Any
from typing import Callable
from typing import cast
from typing import FrozenSet
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import Union
from sqlalchemy.types import NULLTYPE
from . import schemaobj
from .base import BatchOperations
from .base import Operations
from .. import util
from ..util import sqla_compat
if TYPE_CHECKING:
from typing import Literal
from sqlalchemy.sql.dml import Insert
from sqlalchemy.sql.dml import Update
from sqlalchemy.sql.elements import BinaryExpression
from sqlalchemy.sql.elements import ColumnElement
from sqlalchemy.sql.elements import conv
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.functions import Function
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Computed
from sqlalchemy.sql.schema import Constraint
from sqlalchemy.sql.schema import ForeignKeyConstraint
from sqlalchemy.sql.schema import Identity
from sqlalchemy.sql.schema import Index
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.schema import PrimaryKeyConstraint
from sqlalchemy.sql.schema import SchemaItem
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.schema import UniqueConstraint
from sqlalchemy.sql.selectable import TableClause
from sqlalchemy.sql.type_api import TypeEngine
from..autogenerate.rewriter import Rewriter
from..runtime.migration import MigrationContext
from..script.revision import _RevIdType
class MigrateOperation:
"""base class for migration command and organization objects.
This system is part of the operation extensibility API.
.. seealso::
:ref:`operation_objects`
:ref:`operation_plugins`
:ref:`customizing_revision`
"""
@util.memoized_property
def info(self):
"""A dictionary that may be used to store arbitrary information
along with this :class:`.MigrateOperation` object.
"""
return {}
_mutations: FrozenSet[Rewriter] = frozenset()
def reverse(self) -> MigrateOperation:
raise NotImplementedError
def to_diff_tuple(self) -> Tuple[Any,...]:
raise NotImplementedError
class AddConstraintOp(MigrateOperation):
"""Represent an add constraint operation."""
add_constraint_ops = util.Dispatcher()
@property
def constraint_type(self):
raise NotImplementedError()
@classmethod
def register_add_constraint(cls, type_: str) -> Callable:
def go(klass):
cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint)
return klass
return go
@classmethod
def from_constraint(cls, constraint: Constraint) -> AddConstraintOp:
return cls.add_constraint_ops.dispatch(constraint.__visit_name__)(
constraint
)
@abstractmethod
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> Constraint:
pass
def reverse(self) -> DropConstraintOp:
return DropConstraintOp.from_constraint(self.to_constraint())
def to_diff_tuple(self) -> Tuple[str, Constraint]:
return ("add_constraint", self.to_constraint())
@Operations.register_operation("drop_constraint")
@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
class DropConstraintOp(MigrateOperation):
"""Represent a drop constraint operation."""
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
type_: Optional[str] = None,
*,
schema: Optional[str] = None,
_reverse: Optional[AddConstraintOp] = None,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.constraint_type = type_
self.schema = schema
self._reverse = _reverse
def reverse(self) -> AddConstraintOp:
return AddConstraintOp.from_constraint(self.to_constraint())
def to_diff_tuple(
self,
) -> Tuple[str, SchemaItem]:
if self.constraint_type == "foreignkey":
return ("remove_fk", self.to_constraint())
else:
return ("remove_constraint", self.to_constraint())
@classmethod
def from_constraint(cls, constraint: Constraint) -> DropConstraintOp:
types = {
"unique_constraint": "unique",
"foreign_key_constraint": "foreignkey",
"primary_key_constraint": "primary",
"check_constraint": "check",
"column_check_constraint": "check",
"table_or_column_check_constraint": "check",
}
constraint_table = sqla_compat._table_for_constraint(constraint)
return cls(
sqla_compat.constraint_name_or_none(constraint.name),
constraint_table.name,
schema=constraint_table.schema,
type_=types.get(constraint.__visit_name__),
_reverse=AddConstraintOp.from_constraint(constraint),
)
def to_constraint(self) -> Constraint:
if self._reverse is not None:
constraint = self._reverse.to_constraint()
constraint.name = self.constraint_name
constraint_table = sqla_compat._table_for_constraint(constraint)
constraint_table.name = self.table_name
constraint_table.schema = self.schema
return constraint
else:
raise ValueError(
"constraint cannot be produced; "
"original constraint is not present"
)
@classmethod
def drop_constraint(
cls,
operations: Operations,
constraint_name: str,
table_name: str,
type_: Optional[str] = None,
*,
schema: Optional[str] = None,
) -> None:
r"""Drop a constraint of the given name, typically via DROP CONSTRAINT.
:param constraint_name: name of the constraint.
:param table_name: table name.
:param type\_: optional, required on MySQL. can be
'foreignkey', 'primary', 'unique', or 'check'.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, type_=type_, schema=schema)
return operations.invoke(op)
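# A minimal usage sketch: inside a migration script's ``upgrade()`` this is
# normally reached through the ``op`` proxy.  The constraint and table names
# are illustrative, and ``type_`` is shown because MySQL requires it::
#
#     from alembic import op
#
#     op.drop_constraint("fk_user_address", "address", type_="foreignkey")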
@classmethod
def batch_drop_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
type_: Optional[str] = None,
) -> None:
"""Issue a "drop constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.drop_constraint`
"""
op = cls(
constraint_name,
operations.impl.table_name,
type_=type_,
schema=operations.impl.schema,
)
return operations.invoke(op)
@Operations.register_operation("create_primary_key")
@BatchOperations.register_operation(
"create_primary_key", "batch_create_primary_key"
)
@AddConstraintOp.register_add_constraint("primary_key_constraint")
class CreatePrimaryKeyOp(AddConstraintOp):
"""Represent a create primary key operation."""
constraint_type = "primarykey"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.kw = kw
@classmethod
def from_constraint(cls, constraint: Constraint) -> CreatePrimaryKeyOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
pk_constraint = cast("PrimaryKeyConstraint", constraint)
return cls(
sqla_compat.constraint_name_or_none(pk_constraint.name),
constraint_table.name,
pk_constraint.columns.keys(),
schema=constraint_table.schema,
**pk_constraint.dialect_kwargs,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> PrimaryKeyConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.primary_key_constraint(
self.constraint_name,
self.table_name,
self.columns,
schema=self.schema,
**self.kw,
)
@classmethod
def create_primary_key(
cls,
operations: Operations,
constraint_name: Optional[str],
table_name: str,
columns: List[str],
*,
schema: Optional[str] = None,
) -> None:
"""Issue a "create primary key" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_primary_key("pk_my_table", "my_table", ["id", "version"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.PrimaryKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the primary key constraint. The name
is necessary so that an ALTER statement can be emitted. For setups
that use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the target table.
:param columns: a list of string column names to be applied to the
primary key constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, columns, schema=schema)
return operations.invoke(op)
@classmethod
def batch_create_primary_key(
cls,
operations: BatchOperations,
constraint_name: str,
columns: List[str],
) -> None:
"""Issue a "create primary key" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_primary_key`
"""
op = cls(
constraint_name,
operations.impl.table_name,
columns,
schema=operations.impl.schema,
)
return operations.invoke(op)
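# A minimal batch-mode sketch, assuming a table named "account": the batch
# context supplies the table name and schema, so only the constraint name and
# columns are passed::
#
#     with op.batch_alter_table("account") as batch_op:
#         batch_op.create_primary_key("pk_account", ["id"])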
@Operations.register_operation("create_unique_constraint")
@BatchOperations.register_operation(
"create_unique_constraint", "batch_create_unique_constraint"
)
@AddConstraintOp.register_add_constraint("unique_constraint")
class CreateUniqueConstraintOp(AddConstraintOp):
"""Represent a create unique constraint operation."""
constraint_type = "unique"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.kw = kw
@classmethod
def from_constraint(
cls, constraint: Constraint
) -> CreateUniqueConstraintOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
uq_constraint = cast("UniqueConstraint", constraint)
kw: dict = {}
if uq_constraint.deferrable:
kw["deferrable"] = uq_constraint.deferrable
if uq_constraint.initially:
kw["initially"] = uq_constraint.initially
kw.update(uq_constraint.dialect_kwargs)
return cls(
sqla_compat.constraint_name_or_none(uq_constraint.name),
constraint_table.name,
[c.name for c in uq_constraint.columns],
schema=constraint_table.schema,
**kw,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> UniqueConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.unique_constraint(
self.constraint_name,
self.table_name,
self.columns,
schema=self.schema,
**self.kw,
)
@classmethod
def create_unique_constraint(
cls,
operations: Operations,
constraint_name: Optional[str],
table_name: str,
columns: Sequence[str],
*,
schema: Optional[str] = None,
**kw: Any,
) -> Any:
"""Issue a "create unique constraint" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_unique_constraint("uq_user_name", "user", ["name"])
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.UniqueConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the unique constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param columns: a list of string column names in the
source table.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, columns, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_create_unique_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
columns: Sequence[str],
**kw: Any,
) -> Any:
"""Issue a "create unique constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_unique_constraint`
"""
kw["schema"] = operations.impl.schema
op = cls(constraint_name, operations.impl.table_name, columns, **kw)
return operations.invoke(op)
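# A minimal batch-mode sketch, assuming a table named "user"; the batch
# context supplies ``table_name`` and ``schema``::
#
#     with op.batch_alter_table("user") as batch_op:
#         batch_op.create_unique_constraint("uq_user_name", ["name"])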
@Operations.register_operation("create_foreign_key")
@BatchOperations.register_operation(
"create_foreign_key", "batch_create_foreign_key"
)
@AddConstraintOp.register_add_constraint("foreign_key_constraint")
class CreateForeignKeyOp(AddConstraintOp):
"""Represent a create foreign key constraint operation."""
constraint_type = "foreignkey"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
source_table: str,
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.source_table = source_table
self.referent_table = referent_table
self.local_cols = local_cols
self.remote_cols = remote_cols
self.kw = kw
def to_diff_tuple(self) -> Tuple[str, ForeignKeyConstraint]:
return ("add_fk", self.to_constraint())
@classmethod
def from_constraint(cls, constraint: Constraint) -> CreateForeignKeyOp:
fk_constraint = cast("ForeignKeyConstraint", constraint)
kw: dict = {}
if fk_constraint.onupdate:
kw["onupdate"] = fk_constraint.onupdate
if fk_constraint.ondelete:
kw["ondelete"] = fk_constraint.ondelete
if fk_constraint.initially:
kw["initially"] = fk_constraint.initially
if fk_constraint.deferrable:
kw["deferrable"] = fk_constraint.deferrable
if fk_constraint.use_alter:
kw["use_alter"] = fk_constraint.use_alter
if fk_constraint.match:
kw["match"] = fk_constraint.match
(
source_schema,
source_table,
source_columns,
target_schema,
target_table,
target_columns,
onupdate,
ondelete,
deferrable,
initially,
) = sqla_compat._fk_spec(fk_constraint)
kw["source_schema"] = source_schema
kw["referent_schema"] = target_schema
kw.update(fk_constraint.dialect_kwargs)
return cls(
sqla_compat.constraint_name_or_none(fk_constraint.name),
source_table,
target_table,
source_columns,
target_columns,
**kw,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> ForeignKeyConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.foreign_key_constraint(
self.constraint_name,
self.source_table,
self.referent_table,
self.local_cols,
self.remote_cols,
**self.kw,
)
@classmethod
def create_foreign_key(
cls,
operations: Operations,
constraint_name: Optional[str],
source_table: str,
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
*,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
source_schema: Optional[str] = None,
referent_schema: Optional[str] = None,
**dialect_kw: Any,
) -> None:
"""Issue a "create foreign key" instruction using the
current migration context.
e.g.::
from alembic import op
op.create_foreign_key(
"fk_user_address",
"address",
"user",
["user_id"],
["id"],
)
This internally generates a :class:`~sqlalchemy.schema.Table` object
containing the necessary columns, then generates a new
:class:`~sqlalchemy.schema.ForeignKeyConstraint`
object which it then associates with the
:class:`~sqlalchemy.schema.Table`.
Any event listeners associated with this action will be fired
off normally. The :class:`~sqlalchemy.schema.AddConstraint`
construct is ultimately used to generate the ALTER statement.
:param constraint_name: Name of the foreign key constraint. The name
is necessary so that an ALTER statement can be emitted. For setups
that use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param source_table: String name of the source table.
:param referent_table: String name of the destination table.
:param local_cols: a list of string column names in the
source table.
:param remote_cols: a list of string column names in the
remote table.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param source_schema: Optional schema name of the source table.
:param referent_schema: Optional schema name of the destination table.
"""
op = cls(
constraint_name,
source_table,
referent_table,
local_cols,
remote_cols,
onupdate=onupdate,
ondelete=ondelete,
deferrable=deferrable,
source_schema=source_schema,
referent_schema=referent_schema,
initially=initially,
match=match,
**dialect_kw,
)
return operations.invoke(op)
@classmethod
def batch_create_foreign_key(
cls,
operations: BatchOperations,
constraint_name: str,
referent_table: str,
local_cols: List[str],
remote_cols: List[str],
*,
referent_schema: Optional[str] = None,
onupdate: Optional[str] = None,
ondelete: Optional[str] = None,
deferrable: Optional[bool] = None,
initially: Optional[str] = None,
match: Optional[str] = None,
**dialect_kw: Any,
) -> None:
"""Issue a "create foreign key" instruction using the
current batch migration context.
The batch form of this call omits the ``source_table`` and ``source_schema``
arguments from the call.
e.g.::
with batch_alter_table("address") as batch_op:
batch_op.create_foreign_key(
"fk_user_address",
"user",
["user_id"],
["id"],
)
.. seealso::
:meth:`.Operations.create_foreign_key`
"""
op = cls(
constraint_name,
operations.impl.table_name,
referent_table,
local_cols,
remote_cols,
onupdate=onupdate,
ondelete=ondelete,
deferrable=deferrable,
source_schema=operations.impl.schema,
referent_schema=referent_schema,
initially=initially,
match=match,
**dialect_kw,
)
return operations.invoke(op)
@Operations.register_operation("create_check_constraint")
@BatchOperations.register_operation(
"create_check_constraint", "batch_create_check_constraint"
)
@AddConstraintOp.register_add_constraint("check_constraint")
@AddConstraintOp.register_add_constraint("table_or_column_check_constraint")
@AddConstraintOp.register_add_constraint("column_check_constraint")
class CreateCheckConstraintOp(AddConstraintOp):
"""Represent a create check constraint operation."""
constraint_type = "check"
def __init__(
self,
constraint_name: Optional[sqla_compat._ConstraintNameDefined],
table_name: str,
condition: Union[str, TextClause, ColumnElement[Any]],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
self.constraint_name = constraint_name
self.table_name = table_name
self.condition = condition
self.schema = schema
self.kw = kw
@classmethod
def from_constraint(
cls, constraint: Constraint
) -> CreateCheckConstraintOp:
constraint_table = sqla_compat._table_for_constraint(constraint)
ck_constraint = cast("CheckConstraint", constraint)
return cls(
sqla_compat.constraint_name_or_none(ck_constraint.name),
constraint_table.name,
cast("ColumnElement[Any]", ck_constraint.sqltext),
schema=constraint_table.schema,
**ck_constraint.dialect_kwargs,
)
def to_constraint(
self, migration_context: Optional[MigrationContext] = None
) -> CheckConstraint:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.check_constraint(
self.constraint_name,
self.table_name,
self.condition,
schema=self.schema,
**self.kw,
)
@classmethod
def create_check_constraint(
cls,
operations: Operations,
constraint_name: Optional[str],
table_name: str,
condition: Union[str, BinaryExpression, TextClause],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue a "create check constraint" instruction using the
current migration context.
e.g.::
from alembic import op
from sqlalchemy.sql import column, func
op.create_check_constraint(
"ck_user_name_len",
"user",
func.len(column("name")) > 5,
)
CHECK constraints are usually against a SQL expression, so ad-hoc
table metadata is usually needed. The function will convert the given
arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
to an anonymous table in order to emit the CREATE statement.
:param constraint_name: Name of the check constraint. The name is necessary
so that an ALTER statement can be emitted. For setups that
use an automated naming scheme such as that described at
:ref:`sqla:constraint_naming_conventions`,
``name`` here can be ``None``, as the event listener will
apply the name to the constraint object when it is associated
with the table.
:param table_name: String name of the source table.
:param condition: SQL expression that's the condition of the
constraint. Can be a string or SQLAlchemy expression language
structure.
:param deferrable: optional bool. If set, emit DEFERRABLE or
NOT DEFERRABLE when issuing DDL for this constraint.
:param initially: optional string. If set, emit INITIALLY <value>
when issuing DDL for this constraint.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(constraint_name, table_name, condition, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_create_check_constraint(
cls,
operations: BatchOperations,
constraint_name: str,
condition: Union[str, BinaryExpression, TextClause],
**kw: Any,
) -> None:
"""Issue a "create check constraint" instruction using the
current batch migration context.
The batch form of this call omits the ``table_name`` and ``schema``
arguments from the call.
.. seealso::
:meth:`.Operations.create_check_constraint`
"""
op = cls(
constraint_name,
operations.impl.table_name,
condition,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
@Operations.register_operation("create_index")
@BatchOperations.register_operation("create_index", "batch_create_index")
class CreateIndexOp(MigrateOperation):
"""Represent a create index operation."""
def __init__(
self,
index_name: Optional[str],
table_name: str,
columns: Sequence[Union[str, TextClause, ColumnElement[Any]]],
*,
schema: Optional[str] = None,
unique: bool = False,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> None:
self.index_name = index_name
self.table_name = table_name
self.columns = columns
self.schema = schema
self.unique = unique
self.if_not_exists = if_not_exists
self.kw = kw
def reverse(self) -> DropIndexOp:
return DropIndexOp.from_index(self.to_index())
def to_diff_tuple(self) -> Tuple[str, Index]:
return ("add_index", self.to_index())
@classmethod
def from_index(cls, index: Index) -> CreateIndexOp:
assert index.table is not None
return cls(
index.name, # type: ignore[arg-type]
index.table.name,
sqla_compat._get_index_expressions(index),
schema=index.table.schema,
unique=index.unique,
**index.kwargs,
)
def to_index(
self, migration_context: Optional[MigrationContext] = None
) -> Index:
schema_obj = schemaobj.SchemaObjects(migration_context)
idx = schema_obj.index(
self.index_name,
self.table_name,
self.columns,
schema=self.schema,
unique=self.unique,
**self.kw,
)
return idx
@classmethod
def create_index(
cls,
operations: Operations,
index_name: Optional[str],
table_name: str,
columns: Sequence[Union[str, TextClause, Function[Any]]],
*,
schema: Optional[str] = None,
unique: bool = False,
if_not_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "create index" instruction using the current
migration context.
e.g.::
from alembic import op
op.create_index("ik_test", "t1", ["foo", "bar"])
Functional indexes can be produced by using the
:func:`sqlalchemy.sql.expression.text` construct::
from alembic import op
from sqlalchemy import text
op.create_index("ik_test", "t1", [text("lower(foo)")])
:param index_name: name of the index.
:param table_name: name of the owning table.
:param columns: a list consisting of string column names and/or
:func:`~sqlalchemy.sql.expression.text` constructs.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param unique: If True, create a unique index.
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param if_not_exists: If True, adds IF NOT EXISTS operator when
creating the new index.
.. versionadded:: 1.12.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
op = cls(
index_name,
table_name,
columns,
schema=schema,
unique=unique,
if_not_exists=if_not_exists,
**kw,
)
return operations.invoke(op)
@classmethod
def batch_create_index(
cls,
operations: BatchOperations,
index_name: str,
columns: List[str],
**kw: Any,
) -> None:
"""Issue a "create index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.create_index`
"""
op = cls(
index_name,
operations.impl.table_name,
columns,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
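# A minimal batch-mode sketch, assuming a table named "user"; keyword
# arguments such as ``unique`` pass through to the underlying
# :meth:`.Operations.create_index` behavior::
#
#     with op.batch_alter_table("user") as batch_op:
#         batch_op.create_index("ix_user_email", ["email"], unique=True)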
@Operations.register_operation("drop_index")
@BatchOperations.register_operation("drop_index", "batch_drop_index")
class DropIndexOp(MigrateOperation):
"""Represent a drop index operation."""
def __init__(
self,
index_name: Union[quoted_name, str, conv],
table_name: Optional[str] = None,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
_reverse: Optional[CreateIndexOp] = None,
**kw: Any,
) -> None:
self.index_name = index_name
self.table_name = table_name
self.schema = schema
self.if_exists = if_exists
self._reverse = _reverse
self.kw = kw
def to_diff_tuple(self) -> Tuple[str, Index]:
return ("remove_index", self.to_index())
def reverse(self) -> CreateIndexOp:
return CreateIndexOp.from_index(self.to_index())
@classmethod
def from_index(cls, index: Index) -> DropIndexOp:
assert index.table is not None
return cls(
index.name, # type: ignore[arg-type]
table_name=index.table.name,
schema=index.table.schema,
_reverse=CreateIndexOp.from_index(index),
**index.kwargs,
)
def to_index(
self, migration_context: Optional[MigrationContext] = None
) -> Index:
schema_obj = schemaobj.SchemaObjects(migration_context)
# need a dummy column name here since SQLAlchemy
# 0.7.6 and further raises on Index with no columns
return schema_obj.index(
self.index_name,
self.table_name,
self._reverse.columns if self._reverse else ["x"],
schema=self.schema,
**self.kw,
)
@classmethod
def drop_index(
cls,
operations: Operations,
index_name: str,
table_name: Optional[str] = None,
*,
schema: Optional[str] = None,
if_exists: Optional[bool] = None,
**kw: Any,
) -> None:
r"""Issue a "drop index" instruction using the current
migration context.
e.g.::
drop_index("accounts")
:param index_name: name of the index.
:param table_name: name of the owning table. Some
backends such as Microsoft SQL Server require this.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param if_exists: If True, adds IF EXISTS operator when
dropping the index.
.. versionadded:: 1.12.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
op = cls(
index_name,
table_name=table_name,
schema=schema,
if_exists=if_exists,
**kw,
)
return operations.invoke(op)
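# A minimal usage sketch: ``table_name`` is shown because some backends
# (e.g. SQL Server) require it, and ``if_exists`` (Alembic 1.12+) guards the
# drop.  The index and table names are illustrative::
#
#     from alembic import op
#
#     op.drop_index("ix_user_email", table_name="user", if_exists=True)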
@classmethod
def batch_drop_index(
cls, operations: BatchOperations, index_name: str, **kw: Any
) -> None:
"""Issue a "drop index" instruction using the
current batch migration context.
.. seealso::
:meth:`.Operations.drop_index`
"""
op = cls(
index_name,
table_name=operations.impl.table_name,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
@Operations.register_operation("create_table")
class CreateTableOp(MigrateOperation):
"""Represent a create table operation."""
def __init__(
self,
table_name: str,
columns: Sequence[SchemaItem],
*,
schema: Optional[str] = None,
_namespace_metadata: Optional[MetaData] = None,
_constraints_included: bool = False,
**kw: Any,
) -> None:
self.table_name = table_name
self.columns = columns
self.schema = schema
self.info = kw.pop("info", {})
self.comment = kw.pop("comment", None)
self.prefixes = kw.pop("prefixes", None)
self.kw = kw
self._namespace_metadata = _namespace_metadata
self._constraints_included = _constraints_included
def reverse(self) -> DropTableOp:
return DropTableOp.from_table(
self.to_table(), _namespace_metadata=self._namespace_metadata
)
def to_diff_tuple(self) -> Tuple[str, Table]:
return ("add_table", self.to_table())
@classmethod
def from_table(
cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
) -> CreateTableOp:
if _namespace_metadata is None:
_namespace_metadata = table.metadata
return cls(
table.name,
list(table.c) + list(table.constraints), # type:ignore[arg-type]
schema=table.schema,
_namespace_metadata=_namespace_metadata,
# given a Table() object, this Table will contain full Index()
# and UniqueConstraint objects already constructed in response to
# each unique=True / index=True flag on a Column. Carry this
# state along so that when we re-convert back into a Table, we
# skip unique=True/index=True so that these constraints are
# not doubled up. see #844 #848
_constraints_included=True,
comment=table.comment,
info=dict(table.info),
prefixes=list(table._prefixes),
**table.kwargs,
)
def to_table(
self, migration_context: Optional[MigrationContext] = None
) -> Table:
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(
self.table_name,
*self.columns,
schema=self.schema,
prefixes=list(self.prefixes) if self.prefixes else [],
comment=self.comment,
info=self.info.copy() if self.info else {},
_constraints_included=self._constraints_included,
**self.kw,
)
@classmethod
def create_table(
cls,
operations: Operations,
table_name: str,
*columns: SchemaItem,
**kw: Any,
) -> Table:
r"""Issue a "create table" instruction using the current migration
context.
This directive receives an argument list similar to that of the
traditional :class:`sqlalchemy.schema.Table` construct, but without the
metadata::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("name", VARCHAR(50), nullable=False),
Column("description", NVARCHAR(200)),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
Note that :meth:`.create_table` accepts
:class:`~sqlalchemy.schema.Column`
constructs directly from the SQLAlchemy library. In particular,
default values to be created on the database side are
specified using the ``server_default`` parameter, and not
``default`` which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the "timestamp" column
op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
The function also returns a newly created
:class:`~sqlalchemy.schema.Table` object, corresponding to the table
specification given, which is suitable for
immediate SQL operations, in particular
:meth:`.Operations.bulk_insert`::
from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
from alembic import op
account_table = op.create_table(
"account",
Column("id", INTEGER, primary_key=True),
Column("name", VARCHAR(50), nullable=False),
Column("description", NVARCHAR(200)),
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
op.bulk_insert(
account_table,
[
{"name": "A1", "description": "account 1"},
{"name": "A2", "description": "account 2"},
],
)
:param table_name: Name of the table
:param \*columns: collection of :class:`~sqlalchemy.schema.Column`
objects within
the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
objects
and :class:`~.sqlalchemy.schema.Index` objects.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
:return: the :class:`~sqlalchemy.schema.Table` object corresponding
to the parameters given.
"""
op = cls(table_name, columns, **kw)
return operations.invoke(op)
@Operations.register_operation("drop_table")
class DropTableOp(MigrateOperation):
"""Represent a drop table operation."""
def __init__(
self,
table_name: str,
*,
schema: Optional[str] = None,
table_kw: Optional[MutableMapping[Any, Any]] = None,
_reverse: Optional[CreateTableOp] = None,
) -> None:
self.table_name = table_name
self.schema = schema
self.table_kw = table_kw or {}
self.comment = self.table_kw.pop("comment", None)
self.info = self.table_kw.pop("info", None)
self.prefixes = self.table_kw.pop("prefixes", None)
self._reverse = _reverse
def to_diff_tuple(self) -> Tuple[str, Table]:
return ("remove_table", self.to_table())
def reverse(self) -> CreateTableOp:
return CreateTableOp.from_table(self.to_table())
@classmethod
def from_table(
cls, table: Table, *, _namespace_metadata: Optional[MetaData] = None
) -> DropTableOp:
return cls(
table.name,
schema=table.schema,
table_kw={
"comment": table.comment,
"info": dict(table.info),
"prefixes": list(table._prefixes),
**table.kwargs,
},
_reverse=CreateTableOp.from_table(
table, _namespace_metadata=_namespace_metadata
),
)
def to_table(
self, migration_context: Optional[MigrationContext] = None
) -> Table:
if self._reverse:
cols_and_constraints = self._reverse.columns
else:
cols_and_constraints = []
schema_obj = schemaobj.SchemaObjects(migration_context)
t = schema_obj.table(
self.table_name,
*cols_and_constraints,
comment=self.comment,
info=self.info.copy() if self.info else {},
prefixes=list(self.prefixes) if self.prefixes else [],
schema=self.schema,
_constraints_included=self._reverse._constraints_included
if self._reverse
else False,
**self.table_kw,
)
return t
@classmethod
def drop_table(
cls,
operations: Operations,
table_name: str,
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
r"""Issue a "drop table" instruction using the current
migration context.
e.g.::
drop_table("accounts")
:param table_name: Name of the table
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param \**kw: Other keyword arguments are passed to the underlying
:class:`sqlalchemy.schema.Table` object created for the command.
"""
op = cls(table_name, schema=schema, table_kw=kw)
operations.invoke(op)
class AlterTableOp(MigrateOperation):
"""Represent an alter table operation."""
def __init__(
self,
table_name: str,
*,
schema: Optional[str] = None,
) -> None:
self.table_name = table_name
self.schema = schema
@Operations.register_operation("rename_table")
class RenameTableOp(AlterTableOp):
"""Represent a rename table operation."""
def __init__(
self,
old_table_name: str,
new_table_name: str,
*,
schema: Optional[str] = None,
) -> None:
super().__init__(old_table_name, schema=schema)
self.new_table_name = new_table_name
@classmethod
def rename_table(
cls,
operations: Operations,
old_table_name: str,
new_table_name: str,
*,
schema: Optional[str] = None,
) -> None:
"""Emit an ALTER TABLE to rename a table.
:param old_table_name: old name.
:param new_table_name: new name.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(old_table_name, new_table_name, schema=schema)
return operations.invoke(op)
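# A minimal usage sketch; the table and schema names are illustrative::
#
#     from alembic import op
#
#     op.rename_table("account_old", "account", schema="finance")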
@Operations.register_operation("create_table_comment")
@BatchOperations.register_operation(
"create_table_comment", "batch_create_table_comment"
)
class CreateTableCommentOp(AlterTableOp):
"""Represent a COMMENT ON `table` operation."""
def __init__(
self,
table_name: str,
comment: Optional[str],
*,
schema: Optional[str] = None,
existing_comment: Optional[str] = None,
) -> None:
self.table_name = table_name
self.comment = comment
self.existing_comment = existing_comment
self.schema = schema
@classmethod
def create_table_comment(
cls,
operations: Operations,
table_name: str,
comment: Optional[str],
*,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
"""Emit a COMMENT ON operation to set the comment for a table.
:param table_name: string name of the target table.
:param comment: string value of the comment being registered against
the specified table.
:param existing_comment: String value of a comment
already registered on the specified table, used within autogenerate
so that the operation is reversible, but not required for direct
use.
.. seealso::
:meth:`.Operations.drop_table_comment`
:paramref:`.Operations.alter_column.comment`
"""
op = cls(
table_name,
comment,
existing_comment=existing_comment,
schema=schema,
)
return operations.invoke(op)
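# A minimal usage sketch; passing ``existing_comment`` keeps the operation
# reversible under autogenerate.  The table name and comment text are
# illustrative::
#
#     from alembic import op
#
#     op.create_table_comment(
#         "account",
#         "Customer account records",
#         existing_comment=None,
#     )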
@classmethod
def batch_create_table_comment(
cls,
operations: BatchOperations,
comment: Optional[str],
*,
existing_comment: Optional[str] = None,
) -> None:
"""Emit a COMMENT ON operation to set the comment for a table
using the current batch migration context.
:param comment: string value of the comment being registered against
the specified table.
:param existing_comment: String value of a comment
already registered on the specified table, used within autogenerate
so that the operation is reversible, but not required for direct
use.
"""
op = cls(
operations.impl.table_name,
comment,
existing_comment=existing_comment,
schema=operations.impl.schema,
)
return operations.invoke(op)
def reverse(self):
"""Reverses the COMMENT ON operation against a table."""
if self.existing_comment is None:
return DropTableCommentOp(
self.table_name,
existing_comment=self.comment,
schema=self.schema,
)
else:
return CreateTableCommentOp(
self.table_name,
self.existing_comment,
existing_comment=self.comment,
schema=self.schema,
)
def to_table(self, migration_context=None):
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(
self.table_name, schema=self.schema, comment=self.comment
)
def to_diff_tuple(self):
return ("add_table_comment", self.to_table(), self.existing_comment)
@Operations.register_operation("drop_table_comment")
@BatchOperations.register_operation(
"drop_table_comment", "batch_drop_table_comment"
)
class DropTableCommentOp(AlterTableOp):
"""Represent an operation to remove the comment from a table."""
def __init__(
self,
table_name: str,
*,
schema: Optional[str] = None,
existing_comment: Optional[str] = None,
) -> None:
self.table_name = table_name
self.existing_comment = existing_comment
self.schema = schema
@classmethod
def drop_table_comment(
cls,
operations: Operations,
table_name: str,
*,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
"""Issue a "drop table comment" operation to
remove an existing comment set on a table.
:param table_name: string name of the target table.
:param existing_comment: An optional string value of a comment already
registered on the specified table.
.. seealso::
:meth:`.Operations.create_table_comment`
:paramref:`.Operations.alter_column.comment`
"""
op = cls(table_name, existing_comment=existing_comment, schema=schema)
return operations.invoke(op)
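# A minimal usage sketch; ``existing_comment`` is optional and only needed so
# the operation can be reversed.  The names are illustrative::
#
#     from alembic import op
#
#     op.drop_table_comment(
#         "account",
#         existing_comment="Customer account records",
#     )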
@classmethod
def batch_drop_table_comment(
cls,
operations: BatchOperations,
*,
existing_comment: Optional[str] = None,
) -> None:
"""Issue a "drop table comment" operation to
remove an existing comment set on a table using the current
batch operations context.
:param existing_comment: An optional string value of a comment already
registered on the specified table.
"""
op = cls(
operations.impl.table_name,
existing_comment=existing_comment,
schema=operations.impl.schema,
)
return operations.invoke(op)
def reverse(self):
"""Reverses the COMMENT ON operation against a table."""
return CreateTableCommentOp(
self.table_name, self.existing_comment, schema=self.schema
)
def to_table(self, migration_context=None):
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.table(self.table_name, schema=self.schema)
def to_diff_tuple(self):
return ("remove_table_comment", self.to_table())
@Operations.register_operation("alter_column")
@BatchOperations.register_operation("alter_column", "batch_alter_column")
class AlterColumnOp(AlterTableOp):
"""Represent an alter column operation."""
def __init__(
self,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
existing_type: Optional[Any] = None,
existing_server_default: Any = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
modify_nullable: Optional[bool] = None,
modify_comment: Optional[Union[str, Literal[False]]] = False,
modify_server_default: Any = False,
modify_name: Optional[str] = None,
modify_type: Optional[Any] = None,
**kw: Any,
) -> None:
super().__init__(table_name, schema=schema)
self.column_name = column_name
self.existing_type = existing_type
self.existing_server_default = existing_server_default
self.existing_nullable = existing_nullable
self.existing_comment = existing_comment
self.modify_nullable = modify_nullable
self.modify_comment = modify_comment
self.modify_server_default = modify_server_default
self.modify_name = modify_name
self.modify_type = modify_type
self.kw = kw
def to_diff_tuple(self) -> Any:
col_diff = []
schema, tname, cname = self.schema, self.table_name, self.column_name
if self.modify_type is not None:
col_diff.append(
(
"modify_type",
schema,
tname,
cname,
{
"existing_nullable": self.existing_nullable,
"existing_server_default": (
self.existing_server_default
),
"existing_comment": self.existing_comment,
},
self.existing_type,
self.modify_type,
)
)
if self.modify_nullable is not None:
col_diff.append(
(
"modify_nullable",
schema,
tname,
cname,
{
"existing_type": self.existing_type,
"existing_server_default": (
self.existing_server_default
),
"existing_comment": self.existing_comment,
},
self.existing_nullable,
self.modify_nullable,
)
)
if self.modify_server_default is not False:
col_diff.append(
(
"modify_default",
schema,
tname,
cname,
{
"existing_nullable": self.existing_nullable,
"existing_type": self.existing_type,
"existing_comment": self.existing_comment,
},
self.existing_server_default,
self.modify_server_default,
)
)
if self.modify_comment is not False:
col_diff.append(
(
"modify_comment",
schema,
tname,
cname,
{
"existing_nullable": self.existing_nullable,
"existing_type": self.existing_type,
"existing_server_default": (
self.existing_server_default
),
},
self.existing_comment,
self.modify_comment,
)
)
return col_diff
def has_changes(self) -> bool:
hc1 = (
self.modify_nullable is not None
or self.modify_server_default is not False
or self.modify_type is not None
or self.modify_comment is not False
)
if hc1:
return True
for kw in self.kw:
if kw.startswith("modify_"):
return True
else:
return False
def reverse(self) -> AlterColumnOp:
kw = self.kw.copy()
kw["existing_type"] = self.existing_type
kw["existing_nullable"] = self.existing_nullable
kw["existing_server_default"] = self.existing_server_default
kw["existing_comment"] = self.existing_comment
if self.modify_type is not None:
kw["modify_type"] = self.modify_type
if self.modify_nullable is not None:
kw["modify_nullable"] = self.modify_nullable
if self.modify_server_default is not False:
kw["modify_server_default"] = self.modify_server_default
if self.modify_comment is not False:
kw["modify_comment"] = self.modify_comment
# TODO: make this a little simpler
all_keys = {
m.group(1)
for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw]
if m
}
for k in all_keys:
if "modify_%s" % k in kw:
swap = kw["existing_%s" % k]
kw["existing_%s" % k] = kw["modify_%s" % k]
kw["modify_%s" % k] = swap
return self.__class__(
self.table_name, self.column_name, schema=self.schema, **kw
)
@classmethod
def alter_column(
cls,
operations: Operations,
table_name: str,
column_name: str,
*,
nullable: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
server_default: Any = False,
new_column_name: Optional[str] = None,
type_: Optional[Union[TypeEngine, Type[TypeEngine]]] = None,
existing_type: Optional[Union[TypeEngine, Type[TypeEngine]]] = None,
existing_server_default: Optional[
Union[str, bool, Identity, Computed]
] = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
schema: Optional[str] = None,
**kw: Any,
) -> None:
r"""Issue an "alter column" instruction using the
current migration context.
Generally, only that aspect of the column which
is being changed, i.e. name, type, nullability,
default, needs to be specified. Multiple changes
can also be specified at once and the backend should
"do the right thing", emitting each change either
separately or together as the backend allows.
MySQL has special requirements here, since MySQL
cannot ALTER a column without a full specification.
When producing MySQL-compatible migration files,
it is recommended that the ``existing_type``,
``existing_server_default``, and ``existing_nullable``
parameters be present, if not being altered.
Type changes which are against the SQLAlchemy
"schema" types :class:`~sqlalchemy.types.Boolean`
and :class:`~sqlalchemy.types.Enum` may also
add or drop constraints which accompany those
types on backends that don't support them natively.
The ``existing_type`` argument is
used in this case to identify and remove a previous
constraint that was bound to the type object.
:param table_name: string name of the target table.
:param column_name: string name of the target column,
as it exists before the operation begins.
:param nullable: Optional; specify ``True`` or ``False``
to alter the column's nullability.
:param server_default: Optional; specify a string
SQL expression, :func:`~sqlalchemy.sql.expression.text`,
or :class:`~sqlalchemy.schema.DefaultClause` to indicate
an alteration to the column's default value.
Set to ``None`` to have the default removed.
:param comment: optional string text of a new comment to add to the
column.
:param new_column_name: Optional; specify a string name here to
indicate the new name within a column rename operation.
:param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine`
type object to specify a change to the column's type.
For SQLAlchemy types that also indicate a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
the constraint is also generated.
:param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
currently understood by the MySQL dialect.
:param existing_type: Optional; a
:class:`~sqlalchemy.types.TypeEngine`
type object to specify the previous type. This
is required for all MySQL column alter operations that
don't otherwise specify a new type, as well as for
when nullability is being changed on a SQL Server
column. It is also used if the type is a so-called
SQLAlchemy "schema" type which may define a constraint (i.e.
:class:`~sqlalchemy.types.Boolean`,
:class:`~sqlalchemy.types.Enum`),
so that the constraint can be dropped.
:param existing_server_default: Optional; The existing
default value of the column. Required on MySQL if
an existing default is not being changed; else MySQL
removes the default.
:param existing_nullable: Optional; the existing nullability
of the column. Required on MySQL if the existing nullability
is not being changed; else MySQL sets this to NULL.
:param existing_autoincrement: Optional; the existing autoincrement
of the column. Used for MySQL's system of altering a column
that specifies ``AUTO_INCREMENT``.
:param existing_comment: string text of the existing comment on the
column to be maintained. Required on MySQL if the existing comment
on the column is not being changed.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param postgresql_using: String argument which will indicate a
SQL expression to render within the Postgresql-specific USING clause
within ALTER COLUMN. This string is taken directly as raw SQL which
must explicitly include any necessary quoting or escaping of tokens
within the expression.
"""
alt = cls(
table_name,
column_name,
schema=schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
modify_name=new_column_name,
modify_type=type_,
modify_server_default=server_default,
modify_nullable=nullable,
modify_comment=comment,
**kw,
)
return operations.invoke(alt)
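# A minimal usage sketch for a MySQL-friendly rename plus nullability change:
# the ``existing_*`` values are restated so the backend can emit a full column
# specification.  Table, column and type values are illustrative::
#
#     import sqlalchemy as sa
#     from alembic import op
#
#     op.alter_column(
#         "account",
#         "name",
#         new_column_name="full_name",
#         nullable=True,
#         existing_type=sa.String(length=50),
#         existing_nullable=False,
#     )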
@classmethod
def batch_alter_column(
cls,
operations: BatchOperations,
column_name: str,
*,
nullable: Optional[bool] = None,
comment: Optional[Union[str, Literal[False]]] = False,
server_default: Any = False,
new_column_name: Optional[str] = None,
type_: Optional[Union[TypeEngine, Type[TypeEngine]]] = None,
existing_type: Optional[Union[TypeEngine, Type[TypeEngine]]] = None,
existing_server_default: Optional[
Union[str, bool, Identity, Computed]
] = False,
existing_nullable: Optional[bool] = None,
existing_comment: Optional[str] = None,
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue an "alter column" instruction using the current
batch migration context.
Parameters are the same as that of :meth:`.Operations.alter_column`,
as well as the following option(s):
:param insert_before: String name of an existing column which this
column should be placed before, when creating the new table.
:param insert_after: String name of an existing column which this
column should be placed after, when creating the new table. If
both :paramref:`.BatchOperations.alter_column.insert_before`
and :paramref:`.BatchOperations.alter_column.insert_after` are
omitted, the column is inserted after the last existing column
in the table.
.. seealso::
:meth:`.Operations.alter_column`
"""
alt = cls(
operations.impl.table_name,
column_name,
schema=operations.impl.schema,
existing_type=existing_type,
existing_server_default=existing_server_default,
existing_nullable=existing_nullable,
existing_comment=existing_comment,
modify_name=new_column_name,
modify_type=type_,
modify_server_default=server_default,
modify_nullable=nullable,
modify_comment=comment,
insert_before=insert_before,
insert_after=insert_after,
**kw,
)
return operations.invoke(alt)
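# A minimal batch-mode sketch, assuming a "move and copy" batch context on a
# table named "account"; column and type values are illustrative::
#
#     import sqlalchemy as sa
#
#     with op.batch_alter_table("account") as batch_op:
#         batch_op.alter_column(
#             "name",
#             existing_type=sa.String(length=50),
#             nullable=True,
#         )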
@Operations.register_operation("add_column")
@BatchOperations.register_operation("add_column", "batch_add_column")
class AddColumnOp(AlterTableOp):
"""Represent an add column operation."""
def __init__(
self,
table_name: str,
column: Column[Any],
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
super().__init__(table_name, schema=schema)
self.column = column
self.kw = kw
def reverse(self) -> DropColumnOp:
return DropColumnOp.from_column_and_tablename(
self.schema, self.table_name, self.column
)
def to_diff_tuple(
self,
) -> Tuple[str, Optional[str], str, Column[Any]]:
return ("add_column", self.schema, self.table_name, self.column)
def to_column(self) -> Column:
return self.column
@classmethod
def from_column(cls, col: Column) -> AddColumnOp:
return cls(col.table.name, col, schema=col.table.schema)
@classmethod
def from_column_and_tablename(
cls,
schema: Optional[str],
tname: str,
col: Column[Any],
) -> AddColumnOp:
return cls(tname, col, schema=schema)
@classmethod
def add_column(
cls,
operations: Operations,
table_name: str,
column: Column[Any],
*,
schema: Optional[str] = None,
) -> None:
"""Issue an "add column" instruction using the current
migration context.
e.g.::
from alembic import op
from sqlalchemy import Column, String
op.add_column("organization", Column("name", String()))
The :meth:`.Operations.add_column` method typically corresponds
to the SQL command "ALTER TABLE... ADD COLUMN". Within the scope
of this command, the column's name, datatype, nullability,
and optional server-generated defaults may be indicated.
.. note::
With the exception of NOT NULL constraints or single-column FOREIGN
KEY constraints, other kinds of constraints such as PRIMARY KEY,
UNIQUE or CHECK constraints **cannot** be generated using this
method; for these constraints, refer to operations such as
:meth:`.Operations.create_primary_key` and
:meth:`.Operations.create_check_constraint`. In particular, the
following :class:`~sqlalchemy.schema.Column` parameters are
**ignored**:
* :paramref:`~sqlalchemy.schema.Column.primary_key` - SQL databases
typically do not support an ALTER operation that can add
individual columns one at a time to an existing primary key
constraint, therefore it's less ambiguous to use the
:meth:`.Operations.create_primary_key` method, which assumes no
existing primary key constraint is present.
* :paramref:`~sqlalchemy.schema.Column.unique` - use the
:meth:`.Operations.create_unique_constraint` method
* :paramref:`~sqlalchemy.schema.Column.index` - use the
:meth:`.Operations.create_index` method
The provided :class:`~sqlalchemy.schema.Column` object may include a
:class:`~sqlalchemy.schema.ForeignKey` constraint directive,
referencing a remote table name. For this specific type of constraint,
Alembic will automatically emit a second ALTER statement in order to
add the single-column FOREIGN KEY constraint separately::
from alembic import op
from sqlalchemy import Column, INTEGER, ForeignKey
op.add_column(
"organization",
Column("account_id", INTEGER, ForeignKey("accounts.id")),
)
The column argument passed to :meth:`.Operations.add_column` is a
:class:`~sqlalchemy.schema.Column` construct, used in the same way it's
used in SQLAlchemy. In particular, values or functions to be indicated
as producing the column's default value on the database side are
specified using the ``server_default`` parameter, and not ``default``
which only specifies Python-side defaults::
from alembic import op
from sqlalchemy import Column, TIMESTAMP, func
# specify "DEFAULT NOW" along with the column add
op.add_column(
"account",
Column("timestamp", TIMESTAMP, server_default=func.now()),
)
:param table_name: String name of the parent table.
:param column: a :class:`sqlalchemy.schema.Column` object
representing the new column.
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
"""
op = cls(table_name, column, schema=schema)
return operations.invoke(op)
@classmethod
def batch_add_column(
cls,
operations: BatchOperations,
column: Column[Any],
*,
insert_before: Optional[str] = None,
insert_after: Optional[str] = None,
) -> None:
"""Issue an "add column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.add_column`
"""
kw = {}
if insert_before:
kw["insert_before"] = insert_before
if insert_after:
kw["insert_after"] = insert_after
op = cls(
operations.impl.table_name,
column,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
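# A minimal batch-mode sketch; ``insert_after`` only affects column ordering
# in the recreated table.  Table and column names are illustrative::
#
#     import sqlalchemy as sa
#
#     with op.batch_alter_table("account") as batch_op:
#         batch_op.add_column(
#             sa.Column("status", sa.String(length=20)),
#             insert_after="name",
#         )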
@Operations.register_operation("drop_column")
@BatchOperations.register_operation("drop_column", "batch_drop_column")
class DropColumnOp(AlterTableOp):
"""Represent a drop column operation."""
def __init__(
self,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
_reverse: Optional[AddColumnOp] = None,
**kw: Any,
) -> None:
super().__init__(table_name, schema=schema)
self.column_name = column_name
self.kw = kw
self._reverse = _reverse
def to_diff_tuple(
self,
) -> Tuple[str, Optional[str], str, Column[Any]]:
return (
"remove_column",
self.schema,
self.table_name,
self.to_column(),
)
def reverse(self) -> AddColumnOp:
if self._reverse is None:
raise ValueError(
"operation is not reversible; "
"original column is not present"
)
return AddColumnOp.from_column_and_tablename(
self.schema, self.table_name, self._reverse.column
)
@classmethod
def from_column_and_tablename(
cls,
schema: Optional[str],
tname: str,
col: Column[Any],
) -> DropColumnOp:
return cls(
tname,
col.name,
schema=schema,
_reverse=AddColumnOp.from_column_and_tablename(schema, tname, col),
)
def to_column(
self, migration_context: Optional[MigrationContext] = None
) -> Column:
if self._reverse is not None:
return self._reverse.column
schema_obj = schemaobj.SchemaObjects(migration_context)
return schema_obj.column(self.column_name, NULLTYPE)
@classmethod
def drop_column(
cls,
operations: Operations,
table_name: str,
column_name: str,
*,
schema: Optional[str] = None,
**kw: Any,
) -> None:
"""Issue a "drop column" instruction using the current
migration context.
e.g.::
drop_column("organization", "account_id")
:param table_name: name of table
:param column_name: name of column
:param schema: Optional schema name to operate within. To control
quoting of the schema outside of the default behavior, use
the SQLAlchemy construct
:class:`~sqlalchemy.sql.elements.quoted_name`.
:param mssql_drop_check: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the CHECK constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.check_constraints,
then exec's a separate DROP CONSTRAINT for that constraint.
:param mssql_drop_default: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop the DEFAULT constraint on the column using a
SQL-script-compatible
block that selects into a @variable from sys.default_constraints,
then exec's a separate DROP CONSTRAINT for that default.
:param mssql_drop_foreign_key: Optional boolean. When ``True``, on
Microsoft SQL Server only, first
drop a single FOREIGN KEY constraint on the column using a
SQL-script-compatible
block that selects into a @variable from
sys.foreign_keys/sys.foreign_key_columns,
then exec's a separate DROP CONSTRAINT for that foreign key. Only
works if the column has exactly one FK constraint which refers to
it, at the moment.
"""
op = cls(table_name, column_name, schema=schema, **kw)
return operations.invoke(op)
@classmethod
def batch_drop_column(
cls, operations: BatchOperations, column_name: str, **kw: Any
) -> None:
"""Issue a "drop column" instruction using the current
batch migration context.
.. seealso::
:meth:`.Operations.drop_column`
"""
op = cls(
operations.impl.table_name,
column_name,
schema=operations.impl.schema,
**kw,
)
return operations.invoke(op)
@Operations.register_operation("bulk_insert")
class BulkInsertOp(MigrateOperation):
"""Represent a bulk insert operation."""
def __init__(
self,
table: Union[Table, TableClause],
rows: List[dict],
*,
multiinsert: bool = True,
) -> None:
self.table = table
self.rows = rows
self.multiinsert = multiinsert
@classmethod
def bulk_insert(
cls,
operations: Operations,
table: Union[Table, TableClause],
rows: List[dict],
*,
multiinsert: bool = True,
) -> None:
"""Issue a "bulk insert" operation using the current
migration context.
This provides a means of representing an INSERT of multiple rows
which works equally well in the context of executing on a live
connection as well as that of generating a SQL script. In the
case of a SQL script, the values are rendered inline into the
statement.
e.g.::
from alembic import op
from datetime import date
from sqlalchemy.sql import table, column
from sqlalchemy import String, Integer, Date
# Create an ad-hoc table to use for the insert statement.
accounts_table = table(
"account",
column("id", Integer),
column("name", String),
column("create_date", Date),
)
op.bulk_insert(
accounts_table,
[
{
"id": 1,
"name": "John Smith",
"create_date": date(2010, 10, 5),
},
{
"id": 2,
"name": "Ed Williams",
"create_date": date(2007, 5, 27),
},
{
"id": 3,
"name": "Wendy Jones",
"create_date": date(2008, 8, 15),
},
],
)
When using --sql mode, some datatypes may not render inline
automatically, such as dates and other special types. When this
issue is present, :meth:`.Operations.inline_literal` may be used::
op.bulk_insert(
accounts_table,
[
{
"id": 1,
"name": "John Smith",
"create_date": op.inline_literal("2010-10-05"),
},
{
"id": 2,
"name": "Ed Williams",
"create_date": op.inline_literal("2007-05-27"),
},
{
"id": 3,
"name": "Wendy Jones",
"create_date": op.inline_literal("2008-08-15"),
},
],
multiinsert=False,
)
When using :meth:`.Operations.inline_literal` in conjunction with
:meth:`.Operations.bulk_insert`, in order for the statement to work
in "online" (e.g. non --sql) mode, the
:paramref:`~.Operations.bulk_insert.multiinsert`
flag should be set to ``False``, which will have the effect of
individual INSERT statements being emitted to the database, each
with a distinct VALUES clause, so that the "inline" values can
still be rendered, rather than attempting to pass the values
as bound parameters.
:param table: a table object which represents the target of the INSERT.
:param rows: a list of dictionaries indicating rows.
:param multiinsert: when at its default of True and --sql mode is not
enabled, the INSERT statement will be executed using
"executemany()" style, where all elements in the list of
dictionaries are passed as bound parameters in a single
list. Setting this to False results in individual INSERT
statements being emitted per parameter set, and is needed
in those cases where non-literal values are present in the
parameter sets.
"""
op = cls(table, rows, multiinsert=multiinsert)
operations.invoke(op)
@Operations.register_operation("execute")
@BatchOperations.register_operation("execute", "batch_execute")
class ExecuteSQLOp(MigrateOperation):
"""Represent an execute SQL operation."""
def __init__(
self,
sqltext: Union[Update, str, Insert, TextClause],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
self.sqltext = sqltext
self.execution_options = execution_options
@classmethod
def execute(
cls,
operations: Operations,
sqltext: Union[str, TextClause, Update],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
r"""Execute the given SQL using the current migration context.
The given SQL can be a plain string, e.g.::
op.execute("INSERT INTO table (foo) VALUES ('some value')")
Or it can be any kind of Core SQL Expression construct, such as
below where we use an update construct::
from sqlalchemy.sql import table, column
from sqlalchemy import String
from alembic import op
account = table("account", column("name", String))
op.execute(
account.update()
.where(account.c.name == op.inline_literal("account 1"))
.values({"name": op.inline_literal("account 2")})
)
Above, we made use of the SQLAlchemy
:func:`sqlalchemy.sql.expression.table` and
:func:`sqlalchemy.sql.expression.column` constructs to make a brief,
ad-hoc table construct just for our UPDATE statement. A full
:class:`~sqlalchemy.schema.Table` construct of course works perfectly
fine as well, though note it's a recommended practice to at least
ensure the definition of a table is self-contained within the migration
script, rather than imported from a module that may break compatibility
with older migrations.
In a SQL script context, the statement is emitted directly to the
output stream. There is *no* return result, however, as this
function is oriented towards generating a change script
that can run in "offline" mode. Additionally, parameterized
statements are discouraged here, as they *will not work* in offline
mode. Above, we use :meth:`.inline_literal` where parameters are
to be used.
For full interaction with a connected database where parameters can
also be used normally, use the "bind" available from the context::
from alembic import op
connection = op.get_bind()
connection.execute(
account.update()
.where(account.c.name == "account 1")
.values({"name": "account 2"})
)
Additionally, when passing the statement as a plain string, it is first
coerced into a :func:`sqlalchemy.sql.expression.text` construct
before being passed along. In the less likely case that the
literal SQL string contains a colon, it must be escaped with a
backslash, as::
op.execute(r"INSERT INTO table (foo) VALUES ('\:colon_value')")
:param sqltext: Any legal SQLAlchemy expression, including:
* a string
* a :func:`sqlalchemy.sql.expression.text` construct.
* a :func:`sqlalchemy.sql.expression.insert` construct.
* a :func:`sqlalchemy.sql.expression.update`,
:func:`sqlalchemy.sql.expression.insert`,
or :func:`sqlalchemy.sql.expression.delete` construct.
* Any "executable" described in SQLAlchemy Core documentation,
noting that no result set is returned.
.. note:: when passing a plain string, the statement is coerced into
a :func:`sqlalchemy.sql.expression.text` construct. This construct
considers symbols with colons, e.g. ``:foo`` to be bound parameters.
To avoid this, ensure that colon symbols are escaped, e.g.
``\:foo``.
:param execution_options: Optional dictionary of
execution options, will be passed to
:meth:`sqlalchemy.engine.Connection.execution_options`.
"""
op = cls(sqltext, execution_options=execution_options)
return operations.invoke(op)
@classmethod
def batch_execute(
cls,
operations: Operations,
sqltext: Union[str, TextClause, Update],
*,
execution_options: Optional[dict[str, Any]] = None,
) -> None:
"""Execute the given SQL using the current migration context.
.. seealso::
:meth:`.Operations.execute`
"""
return cls.execute(
operations, sqltext, execution_options=execution_options
)
class OpContainer(MigrateOperation):
"""Represent a sequence of operations operation."""
def __init__(self, ops: Sequence[MigrateOperation] = ()) -> None:
self.ops = list(ops)
def is_empty(self) -> bool:
return not self.ops
def as_diffs(self) -> Any:
return list(OpContainer._ops_as_diffs(self))
@classmethod
def _ops_as_diffs(
cls, migrations: OpContainer
) -> Iterator[Tuple[Any,...]]:
for op in migrations.ops:
if hasattr(op, "ops"):
yield from cls._ops_as_diffs(cast("OpContainer", op))
else:
yield op.to_diff_tuple()
class ModifyTableOps(OpContainer):
"""Contains a sequence of operations that all apply to a single Table."""
def __init__(
self,
table_name: str,
ops: Sequence[MigrateOperation],
*,
schema: Optional[str] = None,
) -> None:
super().__init__(ops)
self.table_name = table_name
self.schema = schema
def reverse(self) -> ModifyTableOps:
return ModifyTableOps(
self.table_name,
ops=list(reversed([op.reverse() for op in self.ops])),
schema=self.schema,
)
class UpgradeOps(OpContainer):
"""contains a sequence of operations that would apply to the
'upgrade' stream of a script.
.. seealso::
:ref:`customizing_revision`
"""
def __init__(
self,
ops: Sequence[MigrateOperation] = (),
upgrade_token: str = "upgrades",
) -> None:
super().__init__(ops=ops)
self.upgrade_token = upgrade_token
def reverse_into(self, downgrade_ops: DowngradeOps) -> DowngradeOps:
downgrade_ops.ops[:] = list( # type:ignore[index]
reversed([op.reverse() for op in self.ops])
)
return downgrade_ops
def reverse(self) -> DowngradeOps:
return self.reverse_into(DowngradeOps(ops=[]))
class DowngradeOps(OpContainer):
"""contains a sequence of operations that would apply to the
'downgrade' stream of a script.
.. seealso::
:ref:`customizing_revision`
"""
def __init__(
self,
ops: Sequence[MigrateOperation] = (),
downgrade_token: str = "downgrades",
) -> None:
super().__init__(ops=ops)
self.downgrade_token = downgrade_token
def reverse(self):
return UpgradeOps(
ops=list(reversed([op.reverse() for op in self.ops]))
)
class MigrationScript(MigrateOperation):
"""represents a migration script.
E.g. when autogenerate encounters this object, this corresponds to the
production of an actual script file.
A normal :class:`.MigrationScript` object would contain a single
:class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.
These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops``
attributes.
In the case of an autogenerate operation that runs multiple times,
such as the multiple database example in the "multidb" template,
the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled,
and instead these objects should be accessed via the ``.upgrade_ops_list``
and ``.downgrade_ops_list`` list-based attributes. These latter
attributes are always available at the very least as single-element lists.
.. seealso::
:ref:`customizing_revision`
"""
_needs_render: Optional[bool]
def __init__(
self,
rev_id: Optional[str],
upgrade_ops: UpgradeOps,
downgrade_ops: DowngradeOps,
*,
message: Optional[str] = None,
imports: Set[str] = set(),
head: Optional[str] = None,
splice: Optional[bool] = None,
branch_label: Optional[_RevIdType] = None,
version_path: Optional[str] = None,
depends_on: Optional[_RevIdType] = None,
) -> None:
self.rev_id = rev_id
self.message = message
self.imports = imports
self.head = head
self.splice = splice
self.branch_label = branch_label
self.version_path = version_path
self.depends_on = depends_on
self.upgrade_ops = upgrade_ops
self.downgrade_ops = downgrade_ops
@property
def upgrade_ops(self):
"""An instance of :class:`.UpgradeOps`.
.. seealso::
:attr:`.MigrationScript.upgrade_ops_list`
"""
if len(self._upgrade_ops) > 1:
raise ValueError(
"This MigrationScript instance has a multiple-entry "
"list for UpgradeOps; please use the "
"upgrade_ops_list attribute."
)
elif not self._upgrade_ops:
return None
else:
return self._upgrade_ops[0]
@upgrade_ops.setter
def upgrade_ops(self, upgrade_ops):
self._upgrade_ops = util.to_list(upgrade_ops)
for elem in self._upgrade_ops:
assert isinstance(elem, UpgradeOps)
@property
def downgrade_ops(self):
"""An instance of :class:`.DowngradeOps`.
.. seealso::
:attr:`.MigrationScript.downgrade_ops_list`
"""
if len(self._downgrade_ops) > 1:
raise ValueError(
"This MigrationScript instance has a multiple-entry "
"list for DowngradeOps; please use the "
"downgrade_ops_list attribute."
)
elif not self._downgrade_ops:
return None
else:
return self._downgrade_ops[0]
@downgrade_ops.setter
def downgrade_ops(self, downgrade_ops):
self._downgrade_ops = util.to_list(downgrade_ops)
for elem in self._downgrade_ops:
assert isinstance(elem, DowngradeOps)
@property
def upgrade_ops_list(self) -> List[UpgradeOps]:
"""A list of :class:`.UpgradeOps` instances.
This is used in place of the :attr:`.MigrationScript.upgrade_ops`
attribute when dealing with a revision operation that does
multiple autogenerate passes.
"""
return self._upgrade_ops
@property
def downgrade_ops_list(self) -> List[DowngradeOps]:
"""A list of :class:`.DowngradeOps` instances.
This is used in place of the :attr:`.MigrationScript.downgrade_ops`
attribute when dealing with a revision operation that does
multiple autogenerate passes.
"""
return self._downgrade_ops |
|
scikit-learn__scikit-learn | calibration.rst | Tutorial | Generate tutorial about probability calibration | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/calibration.rst | [
"scikit-learn__scikit-learn/sklearn/calibration.py",
"scikit-learn__scikit-learn/sklearn/naive_bayes.py"
] | scikit-learn__scikit-learn/sklearn/ensemble | Probability calibration
When performing classification you often want not only to predict the
class label, but also obtain a probability of the respective label. This
probability gives you some kind of confidence on the prediction. Some
models can give you poor estimates of the class probabilities, and some
do not even support probability prediction (e.g., some instances of
sklearn.linear_model.SGDClassifier). The calibration module allows you
to better calibrate the probabilities of a given model, or to add
support for probability prediction.
Well calibrated classifiers are probabilistic classifiers for which the
output of the predict_proba method can be directly interpreted as a
confidence level. For instance, a well calibrated (binary) classifier
should classify the samples such that among the samples to which it gave
a predict_proba value close to, say, 0.8, approximately 80% actually
belong to the positive class.
Before we show how to re-calibrate a classifier, we first need a way to
detect how well a classifier is calibrated.
Note
Strictly proper scoring rules for probabilistic predictions like
sklearn.metrics.brier_score_loss and sklearn.metrics.log_loss assess
calibration (reliability) and discriminative power (resolution) of a
model, as well as the randomness of the data (uncertainty) at the same
time. This follows from the well-known Brier score decomposition of
Murphy. As it is not clear which term dominates, the score is of limited
use for assessing calibration alone (unless one computes each term of
the decomposition). A lower Brier loss, for instance, does not
necessarily mean a better calibrated model, it could also mean a worse
calibrated model with much more discriminatory power, e.g. using many
more features.
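To make this concrete, both scores can be computed on held-out data; the
following is a minimal sketch (the synthetic dataset, split and classifier
are illustrative assumptions):
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.model_selection import train_test_split
X, y = make_classification(n_samples=1000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)
y_prob = clf.predict_proba(X_test)[:, 1]
# Both scores mix calibration, resolution and uncertainty, so a lower value
# alone does not prove better calibration.
print(brier_score_loss(y_test, y_prob))
print(log_loss(y_test, clf.predict_proba(X_test)))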
Calibration curves
Calibration curves, also referred to as reliability diagrams (Wilks
1995), compare how well the probabilistic predictions of a binary
classifier are calibrated. It plots the frequency of the positive label
(to be more precise, an estimation of the conditional event probability
P(Y=1|predict_proba)) on the y-axis against the predicted probability
predict_proba of a model on the x-axis. The tricky part is to get values
for the y-axis. In scikit-learn, this is accomplished by binning the
predictions such that the x-axis represents the average predicted
probability in each bin. The y-axis is then the fraction of positives
given the predictions of that bin, i.e. the proportion of samples whose
class is the positive class (in each bin).
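This binning is what calibration_curve computes; a minimal sketch, reusing
the hypothetical y_test and y_prob arrays from the snippet above:
from sklearn.calibration import calibration_curve
prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
# prob_pred holds the average predicted probability per bin (x-axis),
# prob_true the fraction of positives per bin (y-axis).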
The top calibration curve plot is created with
CalibrationDisplay.from_estimator, which uses calibration_curve to
calculate the per bin average predicted probabilities and fraction of
positives. CalibrationDisplay.from_estimator takes as input a fitted
classifier, which is used to calculate the predicted probabilities. The
classifier thus must have a predict_proba method. For the few classifiers
that do not have a predict_proba method, it is possible to use
CalibratedClassifierCV to calibrate the classifier outputs to
probabilities.
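For example, assuming the fitted clf and the held-out data from the earlier
sketch, a single call produces the curve described above:
import matplotlib.pyplot as plt
from sklearn.calibration import CalibrationDisplay
disp = CalibrationDisplay.from_estimator(clf, X_test, y_test, n_bins=10)
plt.show()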
The bottom histogram gives some insight into the behavior of each
classifier by showing the number of samples in each predicted
probability bin.
LogisticRegression returns well calibrated predictions by default as it
has a canonical link function for its loss, i.e. the logit-link for the
log_loss. This leads to the so-called balance property; see the
literature on logistic regression for details. In contrast to that, the
other models shown return biased probabilities, with a different bias per
model.
GaussianNB (Naive Bayes) tends to push probabilities to 0 or 1 (note the
counts in the histograms). This is mainly because it makes the
assumption that features are conditionally independent given the class,
which is not the case in this dataset which contains 2 redundant
features.
RandomForestClassifier shows the opposite behavior: the histograms show
peaks at probabilities approximately 0.2 and 0.9, while probabilities
close to 0 or 1 are very rare. An explanation for this is given by
Niculescu-Mizil and Caruana: "Methods such as bagging and random forests
that average predictions from a base set of models can have difficulty
making predictions near 0 and 1 because variance in the underlying base
models will bias predictions that should be near zero or one away from
these values. Because predictions are restricted to the interval [0,1],
errors caused by variance tend to be one-sided near zero and one. For
example, if a model should predict p = 0 for a case, the only way
bagging can achieve this is if all bagged trees predict zero. If we add
noise to the trees that bagging is averaging over, this noise will cause
some trees to predict values larger than 0 for this case, thus moving
the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level
trees trained with random forests have relatively high variance due to
feature subsetting." As a result, the calibration curve shows a
characteristic sigmoid shape, indicating that the classifier could trust
its "intuition" more and return probabilities closer to 0 or 1
typically.
LinearSVC (SVC) shows an even more sigmoid curve than the random forest,
which is typical for maximum-margin methods (compare Niculescu-Mizil and
Caruana), which focus on difficult to classify samples that are close to
the decision boundary (the support vectors).
Calibrating a classifier
Calibrating a classifier consists of fitting a regressor (called a
calibrator) that maps the output of the classifier (as given by
decision_function or predict_proba) to a calibrated probability in [0,
1]. Denoting the output of the classifier for a given sample by f_(i),
the calibrator tries to predict the conditional event probability
P(y_(i)=1|f_(i)).
Ideally, the calibrator is fit on a dataset independent of the training
data used to fit the classifier in the first place. This is because
performance of the classifier on its training data would be better than
for novel data. Using the classifier output of training data to fit the
calibrator would thus result in a biased calibrator that maps to
probabilities closer to 0 and 1 than it should.
Usage
The CalibratedClassifierCV class is used to calibrate a classifier.
CalibratedClassifierCV uses a cross-validation approach to ensure
unbiased data is always used to fit the calibrator. The data is split
into k (train_set, test_set) couples (as determined by cv). When
ensemble=True (default), the following procedure is repeated
independently for each cross-validation split: a clone of base_estimator
is first trained on the train subset. Then its predictions on the test
subset are used to fit a calibrator (either a sigmoid or isotonic
regressor). This results in an ensemble of k (classifier, calibrator)
couples where each calibrator maps the output of its corresponding
classifier into [0, 1]. Each couple is exposed in the
calibrated_classifiers_ attribute, where each entry is a calibrated
classifier with a predict_proba method that outputs calibrated
probabilities. The output of predict_proba for the main
CalibratedClassifierCV instance corresponds to the average of the
predicted probabilities of the k estimators in the
calibrated_classifiers_ list. The output of predict is the class that
has the highest probability.
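As an illustrative sketch of this default ensemble=True behaviour (the
GaussianNB base classifier and the X_train/X_test arrays are assumptions
carried over from the earlier snippets):
from sklearn.calibration import CalibratedClassifierCV
from sklearn.naive_bayes import GaussianNB
calibrated_clf = CalibratedClassifierCV(GaussianNB(), method="sigmoid", cv=5, ensemble=True)
calibrated_clf.fit(X_train, y_train)
print(len(calibrated_clf.calibrated_classifiers_))  # 5: one (classifier, calibrator) couple per fold
proba = calibrated_clf.predict_proba(X_test)  # average over the 5 calibrated classifiers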
When ensemble=False, cross-validation is used to obtain 'unbiased'
predictions for all the data, via
sklearn.model_selection.cross_val_predict. These unbiased predictions
are then used to train the calibrator. The attribute
calibrated_classifiers_ consists of only one (classifier, calibrator)
couple where the classifier is the base_estimator trained on all the
data. In this case the output of predict_proba for
CalibratedClassifierCV is the predicted probabilities obtained from the
single (classifier, calibrator) couple.
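The corresponding ensemble=False variant, sketched under the same
assumptions:
calibrated_single = CalibratedClassifierCV(GaussianNB(), method="sigmoid", cv=5, ensemble=False)
calibrated_single.fit(X_train, y_train)
print(len(calibrated_single.calibrated_classifiers_))  # 1: the classifier refit on all data plus one calibrator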
The main advantage of ensemble=True is to benefit from the traditional
ensembling effect (similar to bagging). The resulting ensemble should
both be well calibrated and slightly more accurate than with
ensemble=False. The main advantage of using ensemble=False is
computational: it reduces the overall fit time by training only a single
base classifier and calibrator pair, decreases the final model size and
increases prediction speed.
Alternatively an already fitted classifier can be calibrated by setting
cv="prefit". In this case, the data is not split and all of it is used
to fit the regressor. It is up to the user to make sure that the data
used for fitting the classifier is disjoint from the data used for
fitting the regressor.
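A sketch of the prefit workflow, where splitting the data into a fitting
part and a calibration part is the user's responsibility (the variable
names are illustrative):
from sklearn.model_selection import train_test_split
X_fit, X_calib, y_fit, y_calib = train_test_split(X_train, y_train, random_state=0)
base_clf = GaussianNB().fit(X_fit, y_fit)
calibrated_prefit = CalibratedClassifierCV(base_clf, cv="prefit")
calibrated_prefit.fit(X_calib, y_calib)  # only the calibrator is fitted here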
CalibratedClassifierCV supports the use of two regression techniques for
calibration via the method parameter: "sigmoid" and "isotonic".
Sigmoid
The sigmoid regressor, method="sigmoid" is based on Platt's logistic
model:
$$p(y_i = 1 | f_i) = \frac{1}{1 + \exp(A f_i + B)} \,,$$
where y_(i) is the true label of sample i and f_(i) is the output of the
un-calibrated classifier for sample i. A and B are real numbers to be
determined when fitting the regressor via maximum likelihood.
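Written out in code, the mapping is simply the following (a sketch; A and B
stand for the parameters obtained by maximum likelihood):
import numpy as np
def platt_scaling(f, A, B):
    # p(y=1 | f) = 1 / (1 + exp(A * f + B))
    return 1.0 / (1.0 + np.exp(A * f + B))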
The sigmoid method assumes the calibration curve can
be corrected by applying a sigmoid function to the raw predictions. This
assumption has been empirically justified in the case of svm with common
kernel functions on various benchmark datasets in section 2.1 of Platt
1999 but does not necessarily hold in general. Additionally, the
logistic model works best if the calibration error is symmetrical,
meaning the classifier output for each binary class is normally
distributed with the same variance. This can be a problem for highly
imbalanced classification problems, where outputs do not have equal
variance.
In general this method is most effective for small sample sizes or when
the un-calibrated model is under-confident and has similar calibration
errors for both high and low outputs.
Isotonic
The method="isotonic" fits a non-parametric isotonic regressor, which
outputs a step-wise non-decreasing function, see sklearn.isotonic. It
minimizes:
$$\sum_{i=1}^{n} (y_i - \hat{f}_i)^2$$
subject to f̂_(i) ≥ f̂_(j) whenever f_(i) ≥ f_(j). y_(i) is the true label
of sample i and f̂_(i) is the output of the calibrated classifier for
sample i (i.e., the calibrated probability). This method is more general
when compared to 'sigmoid' as the only restriction is that the mapping
function is monotonically increasing. It is thus more powerful as it can
correct any monotonic distortion of the un-calibrated model. However, it
is more prone to overfitting, especially on small datasets.
Overall, 'isotonic' will perform as well as or better than 'sigmoid'
when there is enough data (greater than ~ 1000 samples) to avoid
overfitting.
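Selecting isotonic calibration only requires changing the method parameter;
a sketch under the same assumptions as the previous snippets:
calibrated_iso = CalibratedClassifierCV(GaussianNB(), method="isotonic", cv=5)
calibrated_iso.fit(X_train, y_train)  # prefer isotonic only when enough calibration data is available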
Note
Impact on ranking metrics like AUC
It is generally expected that calibration does not affect ranking
metrics such as ROC-AUC. However, these metrics might differ after
calibration when using method="isotonic" since isotonic regression
introduces ties in the predicted probabilities. This can be seen as
within the uncertainty of the model predictions. In case you strictly
want to keep the ranking and thus the AUC scores, use method="sigmoid",
which is a strictly monotonic transformation and thus keeps the ranking.
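One quick way to check this on your own data is to compare a ranking metric
before and after calibration; a sketch reusing the arrays and the
calibrated_iso model from the previous snippets:
from sklearn.metrics import roc_auc_score
from sklearn.naive_bayes import GaussianNB
base = GaussianNB().fit(X_train, y_train)
auc_raw = roc_auc_score(y_test, base.predict_proba(X_test)[:, 1])
auc_iso = roc_auc_score(y_test, calibrated_iso.predict_proba(X_test)[:, 1])
# The isotonic step function can introduce ties, so the two values may
# differ slightly; a sigmoid calibrator is strictly monotonic and keeps the AUC.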
Multiclass support
Both isotonic and sigmoid regressors only support 1-dimensional data
(e.g., binary classification output) but are extended for multiclass
classification if the base_estimator supports multiclass predictions.
For multiclass predictions, CalibratedClassifierCV calibrates for each
class separately in a One-vs-Rest (OvR) fashion. When predicting
probabilities, the calibrated probabilities for each class are predicted
separately. As those probabilities do not necessarily sum to one, a
postprocessing is performed to normalize them.
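A minimal multiclass sketch (the three-class synthetic dataset and the
random forest base classifier are illustrative assumptions):
from sklearn.datasets import make_classification
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import RandomForestClassifier
X_mc, y_mc = make_classification(n_samples=1000, n_classes=3, n_informative=4, random_state=0)
calibrated_mc = CalibratedClassifierCV(RandomForestClassifier(random_state=0), method="sigmoid", cv=3)
calibrated_mc.fit(X_mc, y_mc)
print(calibrated_mc.predict_proba(X_mc[:3]).sum(axis=1))  # rows are renormalized to sum to one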
| """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
import warnings
from functools import partial
from inspect import signature
from math import log
from numbers import Integral, Real
import numpy as np
from scipy.optimize import minimize
from scipy.special import expit
from sklearn.utils import Bunch
from._loss import HalfBinomialLoss
from.base import (
BaseEstimator,
ClassifierMixin,
MetaEstimatorMixin,
RegressorMixin,
_fit_context,
clone,
)
from.isotonic import IsotonicRegression
from.model_selection import check_cv, cross_val_predict
from.preprocessing import LabelEncoder, label_binarize
from.svm import LinearSVC
from.utils import (
_safe_indexing,
column_or_1d,
indexable,
)
from.utils._param_validation import (
HasMethods,
Hidden,
Interval,
StrOptions,
validate_params,
)
from.utils._plotting import _BinaryClassifierCurveDisplayMixin
from.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_routing_enabled,
process_routing,
)
from.utils.multiclass import check_classification_targets
from.utils.parallel import Parallel, delayed
from.utils.validation import (
_check_method_params,
_check_pos_label_consistency,
_check_sample_weight,
_num_samples,
check_consistent_length,
check_is_fitted,
)
class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
"""Probability calibration with isotonic regression or logistic regression.
This class uses cross-validation to both estimate the parameters of a
classifier and subsequently calibrate a classifier. With default
`ensemble=True`, for each cv split it
fits a copy of the base estimator to the training subset, and calibrates it
using the testing subset. For prediction, predicted probabilities are
averaged across these individual calibrated classifiers. When
`ensemble=False`, cross-validation is used to obtain unbiased predictions,
via :func:`~sklearn.model_selection.cross_val_predict`, which are then
used for calibration. For prediction, the base estimator, trained using all
the data, is used. This is the method implemented when `probabilities=True`
for :mod:`sklearn.svm` estimators.
Already fitted classifiers can be calibrated via the parameter
`cv="prefit"`. In this case, no cross-validation is used and all provided
data is used for calibration. The user has to take care manually that data
for model fitting and calibration are disjoint.
The calibration is based on the :term:`decision_function` method of the
`estimator` if it exists, else on :term:`predict_proba`.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
estimator : estimator instance, default=None
The classifier whose output needs to be calibrated to provide more
accurate `predict_proba` outputs. The default classifier is
a :class:`~sklearn.svm.LinearSVC`.
.. versionadded:: 1.2
method : {'sigmoid', 'isotonic'}, default='sigmoid'
The method to use for calibration. Can be'sigmoid' which
corresponds to Platt's method (i.e. a logistic regression model) or
'isotonic' which is a non-parametric approach. It is not advised to
use isotonic calibration with too few calibration samples
``(<<1000)`` since it tends to overfit.
cv : int, cross-validation generator, iterable or "prefit", \
default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
neither binary nor multiclass, :class:`~sklearn.model_selection.KFold`
is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that `estimator` has been
fitted already and all data is used for calibration.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
Base estimator clones are fitted in parallel across cross-validation
iterations. Therefore parallelism happens only when `cv!= "prefit"`.
See :term:`Glossary <n_jobs>` for more details.
.. versionadded:: 0.24
ensemble : bool, default=True
Determines how the calibrator is fitted when `cv` is not `'prefit'`.
Ignored if `cv='prefit'`.
If `True`, the `estimator` is fitted using training data, and
calibrated using testing data, for each `cv` fold. The final estimator
is an ensemble of `n_cv` fitted classifier and calibrator pairs, where
`n_cv` is the number of cross-validation folds. The output is the
average predicted probabilities of all pairs.
If `False`, `cv` is used to compute unbiased predictions, via
:func:`~sklearn.model_selection.cross_val_predict`, which are then
used for calibration. At prediction time, the classifier used is the
`estimator` trained on all the data.
Note that this method is also internally implemented in
:mod:`sklearn.svm` estimators with the `probabilities=True` parameter.
.. versionadded:: 0.24
base_estimator : estimator instance
This parameter is deprecated. Use `estimator` instead.
.. deprecated:: 1.2
The parameter `base_estimator` is deprecated in 1.2 and will be
removed in 1.4. Use `estimator` instead.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 1.0
calibrated_classifiers_ : list (len() equal to cv or 1 if `cv="prefit"` \
or `ensemble=False`)
The list of classifier and calibrator pairs.
- When `cv="prefit"`, the fitted `estimator` and fitted
calibrator.
- When `cv` is not "prefit" and `ensemble=True`, `n_cv` fitted
`estimator` and calibrator pairs. `n_cv` is the number of
cross-validation folds.
- When `cv` is not "prefit" and `ensemble=False`, the `estimator`,
fitted on all the data, and fitted calibrator.
.. versionchanged:: 0.24
Single calibrated classifier case when `ensemble=False`.
See Also
--------
calibration_curve : Compute true and predicted probabilities
for a calibration curve.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.calibration import CalibratedClassifierCV
>>> X, y = make_classification(n_samples=100, n_features=2,
... n_redundant=0, random_state=42)
>>> base_clf = GaussianNB()
>>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3)
>>> calibrated_clf.fit(X, y)
CalibratedClassifierCV(...)
>>> len(calibrated_clf.calibrated_classifiers_)
3
>>> calibrated_clf.predict_proba(X)[:5, :]
array([[0.110..., 0.889...],
[0.072..., 0.927...],
[0.928..., 0.071...],
[0.928..., 0.071...],
[0.071..., 0.928...]])
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_classification(n_samples=100, n_features=2,
... n_redundant=0, random_state=42)
>>> X_train, X_calib, y_train, y_calib = train_test_split(
... X, y, random_state=42
... )
>>> base_clf = GaussianNB()
>>> base_clf.fit(X_train, y_train)
GaussianNB()
>>> calibrated_clf = CalibratedClassifierCV(base_clf, cv="prefit")
>>> calibrated_clf.fit(X_calib, y_calib)
CalibratedClassifierCV(...)
>>> len(calibrated_clf.calibrated_classifiers_)
1
>>> calibrated_clf.predict_proba([[-0.5, 0.5]])
array([[0.936..., 0.063...]])
"""
_parameter_constraints: dict = {
"estimator": [
HasMethods(["fit", "predict_proba"]),
HasMethods(["fit", "decision_function"]),
None,
],
"method": [StrOptions({"isotonic", "sigmoid"})],
"cv": ["cv_object", StrOptions({"prefit"})],
"n_jobs": [Integral, None],
"ensemble": ["boolean"],
"base_estimator": [
HasMethods(["fit", "predict_proba"]),
HasMethods(["fit", "decision_function"]),
None,
Hidden(StrOptions({"deprecated"})),
],
}
def __init__(
self,
estimator=None,
*,
method="sigmoid",
cv=None,
n_jobs=None,
ensemble=True,
base_estimator="deprecated",
):
self.estimator = estimator
self.method = method
self.cv = cv
self.n_jobs = n_jobs
self.ensemble = ensemble
self.base_estimator = base_estimator
def _get_estimator(self):
"""Resolve which estimator to return (default is LinearSVC)"""
# TODO(1.4): Remove when base_estimator is removed
if self.base_estimator!= "deprecated":
if self.estimator is not None:
raise ValueError(
"Both `base_estimator` and `estimator` are set. Only set "
"`estimator` since `base_estimator` is deprecated."
)
warnings.warn(
(
"`base_estimator` was renamed to `estimator` in version 1.2 and "
"will be removed in 1.4."
),
FutureWarning,
)
estimator = self.base_estimator
else:
estimator = self.estimator
if estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
estimator = LinearSVC(random_state=0, dual="auto")
if _routing_enabled():
estimator.set_fit_request(sample_weight=True)
return estimator
@_fit_context(
# CalibratedClassifierCV.estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, sample_weight=None, **fit_params):
"""Fit the calibrated model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
check_classification_targets(y)
X, y = indexable(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
estimator = self._get_estimator()
self.calibrated_classifiers_ = []
if self.cv == "prefit":
# `classes_` should be consistent with that of estimator
check_is_fitted(self.estimator, attributes=["classes_"])
self.classes_ = self.estimator.classes_
pred_method, method_name = _get_prediction_method(estimator)
n_classes = len(self.classes_)
predictions = _compute_predictions(pred_method, method_name, X, n_classes)
calibrated_classifier = _fit_calibrator(
estimator,
predictions,
y,
self.classes_,
self.method,
sample_weight,
)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
# Set `classes_` using all `y`
label_encoder_ = LabelEncoder().fit(y)
self.classes_ = label_encoder_.classes_
n_classes = len(self.classes_)
if _routing_enabled():
routed_params = process_routing(
self,
"fit",
sample_weight=sample_weight,
**fit_params,
)
else:
# sample_weight checks
fit_parameters = signature(estimator.fit).parameters
supports_sw = "sample_weight" in fit_parameters
if sample_weight is not None and not supports_sw:
estimator_name = type(estimator).__name__
warnings.warn(
f"Since {estimator_name} does not appear to accept"
" sample_weight, sample weights will only be used for the"
" calibration itself. This can be caused by a limitation of"
" the current scikit-learn API. See the following issue for"
" more details:"
" https://github.com/scikit-learn/scikit-learn/issues/21134."
" Be warned that the result of the calibration is likely to be"
" incorrect."
)
routed_params = Bunch()
routed_params.splitter = Bunch(split={}) # no routing for splitter
routed_params.estimator = Bunch(fit=fit_params)
if sample_weight is not None and supports_sw:
routed_params.estimator.fit["sample_weight"] = sample_weight
# Check that each cross-validation fold can have at least one
# example per class
if isinstance(self.cv, int):
n_folds = self.cv
elif hasattr(self.cv, "n_splits"):
n_folds = self.cv.n_splits
else:
n_folds = None
if n_folds and np.any(
[np.sum(y == class_) < n_folds for class_ in self.classes_]
):
raise ValueError(
f"Requesting {n_folds}-fold "
"cross-validation but provided less than "
f"{n_folds} examples for at least one class."
)
cv = check_cv(self.cv, y, classifier=True)
if self.ensemble:
parallel = Parallel(n_jobs=self.n_jobs)
self.calibrated_classifiers_ = parallel(
delayed(_fit_classifier_calibrator_pair)(
clone(estimator),
X,
y,
train=train,
test=test,
method=self.method,
classes=self.classes_,
sample_weight=sample_weight,
fit_params=routed_params.estimator.fit,
)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
else:
this_estimator = clone(estimator)
_, method_name = _get_prediction_method(this_estimator)
pred_method = partial(
cross_val_predict,
estimator=this_estimator,
X=X,
y=y,
cv=cv,
method=method_name,
n_jobs=self.n_jobs,
params=routed_params.estimator.fit,
)
predictions = _compute_predictions(
pred_method, method_name, X, n_classes
)
this_estimator.fit(X, y, **routed_params.estimator.fit)
# Note: Here we don't pass on fit_params because the supported
# calibrators don't support fit_params anyway
calibrated_classifier = _fit_calibrator(
this_estimator,
predictions,
y,
self.classes_,
self.method,
sample_weight,
)
self.calibrated_classifiers_.append(calibrated_classifier)
first_clf = self.calibrated_classifiers_[0].estimator
if hasattr(first_clf, "n_features_in_"):
self.n_features_in_ = first_clf.n_features_in_
if hasattr(first_clf, "feature_names_in_"):
self.feature_names_in_ = first_clf.feature_names_in_
return self
def predict_proba(self, X):
"""Calibrated probabilities of classification.
This function returns calibrated probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict_proba`.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples.
The predicted class is the class that has the highest probability,
and can thus be different from the prediction of the uncalibrated classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
C : ndarray of shape (n_samples,)
The predicted class.
"""
check_is_fitted(self)
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self.__class__.__name__)
.add_self_request(self)
.add(
estimator=self._get_estimator(),
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
.add(
splitter=self.cv,
method_mapping=MethodMapping().add(callee="split", caller="fit"),
)
)
return router
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"Due to the cross-validation and sample ordering, removing a sample"
" is not strictly equal to putting its weight to zero. Specific unit"
" tests are added for CalibratedClassifierCV specifically."
),
}
}
def _fit_classifier_calibrator_pair(
estimator,
X,
y,
train,
test,
method,
classes,
sample_weight=None,
fit_params=None,
):
"""Fit a classifier/calibration pair on a given train/test split.
Fit the classifier on the train set, compute its predictions on the test
set and use the predictions as input to fit the calibrator along with the
test labels.
Parameters
----------
estimator : estimator instance
Cloned base estimator.
X : array-like, shape (n_samples, n_features)
Sample data.
y : array-like, shape (n_samples,)
Targets.
train : ndarray, shape (n_train_indices,)
Indices of the training subset.
test : ndarray, shape (n_test_indices,)
Indices of the testing subset.
method : {'sigmoid', 'isotonic'}
Method to use for calibration.
classes : ndarray, shape (n_classes,)
The target classes.
sample_weight : array-like, default=None
Sample weights for `X`.
fit_params : dict, default=None
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
calibrated_classifier : _CalibratedClassifier instance
"""
fit_params_train = _check_method_params(X, params=fit_params, indices=train)
X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train)
X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test)
estimator.fit(X_train, y_train, **fit_params_train)
n_classes = len(classes)
pred_method, method_name = _get_prediction_method(estimator)
predictions = _compute_predictions(pred_method, method_name, X_test, n_classes)
sw_test = None if sample_weight is None else _safe_indexing(sample_weight, test)
calibrated_classifier = _fit_calibrator(
estimator, predictions, y_test, classes, method, sample_weight=sw_test
)
return calibrated_classifier
def _get_prediction_method(clf):
"""Return prediction method.
The `decision_function` method of `clf` is returned if it
exists; otherwise the `predict_proba` method is returned.
Parameters
----------
clf : Estimator instance
Fitted classifier to obtain the prediction method from.
Returns
-------
prediction_method : callable
The prediction method.
method_name : str
The name of the prediction method.
"""
if hasattr(clf, "decision_function"):
method = getattr(clf, "decision_function")
return method, "decision_function"
if hasattr(clf, "predict_proba"):
method = getattr(clf, "predict_proba")
return method, "predict_proba"
def _compute_predictions(pred_method, method_name, X, n_classes):
"""Return predictions for `X` and reshape binary outputs to shape
(n_samples, 1).
Parameters
----------
pred_method : callable
Prediction method.
method_name: str
Name of the prediction method
X : array-like or None
Data used to obtain predictions.
n_classes : int
Number of classes present.
Returns
-------
predictions : array-like, shape (X.shape[0], len(clf.classes_))
The predictions. Note if there are 2 classes, array is of shape
(X.shape[0], 1).
"""
predictions = pred_method(X=X)
if method_name == "decision_function":
if predictions.ndim == 1:
predictions = predictions[:, np.newaxis]
elif method_name == "predict_proba":
if n_classes == 2:
predictions = predictions[:, 1:]
else: # pragma: no cover
# this branch should be unreachable.
raise ValueError(f"Invalid prediction method: {method_name}")
return predictions
def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):
"""Fit calibrator(s) and return a `_CalibratedClassifier`
instance.
`n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
However, if `n_classes` equals 2, one calibrator is fitted.
Parameters
----------
clf : estimator instance
Fitted classifier.
predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \
when binary.
Raw predictions returned by the un-calibrated base classifier.
y : array-like, shape (n_samples,)
The targets.
classes : ndarray, shape (n_classes,)
All the prediction classes.
method : {'sigmoid', 'isotonic'}
The method to use for calibration.
sample_weight : ndarray, shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
pipeline : _CalibratedClassifier instance
"""
Y = label_binarize(y, classes=classes)
label_encoder = LabelEncoder().fit(classes)
pos_class_indices = label_encoder.transform(clf.classes_)
calibrators = []
for class_idx, this_pred in zip(pos_class_indices, predictions.T):
if method == "isotonic":
calibrator = IsotonicRegression(out_of_bounds="clip")
else: # "sigmoid"
calibrator = _SigmoidCalibration()
calibrator.fit(this_pred, Y[:, class_idx], sample_weight)
calibrators.append(calibrator)
pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)
return pipeline
class _CalibratedClassifier:
"""Pipeline-like chaining a fitted classifier and its fitted calibrators.
Parameters
----------
estimator : estimator instance
Fitted classifier.
calibrators : list of fitted estimator instances
List of fitted calibrators (either 'IsotonicRegression' or
'_SigmoidCalibration'). The number of calibrators equals the number of
classes. However, if there are 2 classes, the list contains only one
fitted calibrator.
classes : array-like of shape (n_classes,)
All the prediction classes.
method : {'sigmoid', 'isotonic'}, default='sigmoid'
The method to use for calibration. Can be'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
"""
def __init__(self, estimator, calibrators, *, classes, method="sigmoid"):
self.estimator = estimator
self.calibrators = calibrators
self.classes = classes
self.method = method
def predict_proba(self, X):
"""Calculate calibrated probabilities.
Calculates classification calibrated probabilities
for each class, in a one-vs-all manner, for `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The sample data.
Returns
-------
proba : array, shape (n_samples, n_classes)
The predicted probabilities. Can be exact zeros.
"""
n_classes = len(self.classes)
pred_method, method_name = _get_prediction_method(self.estimator)
predictions = _compute_predictions(pred_method, method_name, X, n_classes)
label_encoder = LabelEncoder().fit(self.classes)
pos_class_indices = label_encoder.transform(self.estimator.classes_)
proba = np.zeros((_num_samples(X), n_classes))
for class_idx, this_pred, calibrator in zip(
pos_class_indices, predictions.T, self.calibrators
):
if n_classes == 2:
# When binary, `predictions` consists only of predictions for
# clf.classes_[1] but `pos_class_indices` = 0
class_idx += 1
proba[:, class_idx] = calibrator.predict(this_pred)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1.0 - proba[:, 1]
else:
denominator = np.sum(proba, axis=1)[:, np.newaxis]
# In the edge case where for each class calibrator returns a null
# probability for a given sample, use the uniform distribution
# instead.
uniform_proba = np.full_like(proba, 1 / n_classes)
proba = np.divide(
proba, denominator, out=uniform_proba, where=denominator!= 0
)
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
# The max_abs_prediction_threshold was approximated using
# logit(np.finfo(np.float64).eps) which is about -36
def _sigmoid_calibration(
predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
predictions = column_or_1d(predictions)
y = column_or_1d(y)
F = predictions # F follows Platt's notations
scale_constant = 1.0
max_prediction = np.max(np.abs(F))
# If the predictions have large values we scale them in order to bring
# them within a suitable range. This has no effect on the final
# (prediction) result because linear models like Logistic Regression
# without a penalty are invariant to multiplying the features by a
# constant.
if max_prediction >= max_abs_prediction_threshold:
scale_constant = max_prediction
# We rescale the features in a copy: inplace rescaling could confuse
# the caller and make the code harder to reason about.
F = F / scale_constant
# Bayesian priors (see Platt end of section 2.2):
# It corresponds to the number of samples, taking into account the
# `sample_weight`.
mask_negative_samples = y <= 0
if sample_weight is not None:
prior0 = (sample_weight[mask_negative_samples]).sum()
prior1 = (sample_weight[~mask_negative_samples]).sum()
else:
prior0 = float(np.sum(mask_negative_samples))
prior1 = y.shape[0] - prior0
T = np.zeros_like(y, dtype=np.float64)
T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
T[y <= 0] = 1.0 / (prior0 + 2.0)
bin_loss = HalfBinomialLoss()
def loss_grad(AB):
l, g = bin_loss.loss_gradient(
y_true=T,
raw_prediction=-(AB[0] * F + AB[1]),
sample_weight=sample_weight,
)
loss = l.sum()
grad = np.array([-g @ F, -g.sum()])
return loss, grad
AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
opt_result = minimize(
loss_grad,
AB0,
method="L-BFGS-B",
jac=True,
options={
"gtol": 1e-6,
"ftol": 64 * np.finfo(float).eps,
},
)
AB_ = opt_result.x
# The tuned multiplicative parameter is converted back to the original
# input feature scale. The offset parameter does not need rescaling since
# we did not rescale the outcome variable.
return AB_[0] / scale_constant, AB_[1]
class _SigmoidCalibration(RegressorMixin, BaseEstimator):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,)
Data to predict from.
Returns
-------
T_ : ndarray of shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return expit(-(self.a_ * T + self.b_))
@validate_params(
{
"y_true": ["array-like"],
"y_prob": ["array-like"],
"pos_label": [Real, str, "boolean", None],
"n_bins": [Interval(Integral, 1, None, closed="left")],
"strategy": [StrOptions({"uniform", "quantile"})],
},
prefer_skip_nested_validation=True,
)
def calibration_curve(
y_true,
y_prob,
*,
pos_label=None,
n_bins=5,
strategy="uniform",
):
"""Compute true and predicted probabilities for a calibration curve.
The method assumes the inputs come from a binary classifier, and
discretizes the [0, 1] interval into bins.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True targets.
y_prob : array-like of shape (n_samples,)
Probabilities of the positive class.
pos_label : int, float, bool or str, default=None
The label of the positive class.
.. versionadded:: 1.1
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval. A bigger number
requires more data. Bins with no samples (i.e. without
corresponding values in `y_prob`) will not be returned, thus the
returned arrays may have less than `n_bins` values.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
uniform
The bins have identical widths.
quantile
The bins have the same number of samples and depend on `y_prob`.
Returns
-------
prob_true : ndarray of shape (n_bins,) or smaller
The proportion of samples whose class is the positive class, in each
bin (fraction of positives).
prob_pred : ndarray of shape (n_bins,) or smaller
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
Examples
--------
>>> import numpy as np
>>> from sklearn.calibration import calibration_curve
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
>>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
>>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
>>> prob_true
array([0., 0.5, 1. ])
>>> prob_pred
array([0.2 , 0.525, 0.85 ])
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
check_consistent_length(y_true, y_prob)
pos_label = _check_pos_label_consistency(pos_label, y_true)
if y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1].")
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError(
f"Only binary classification is supported. Provided labels {labels}."
)
y_true = y_true == pos_label
if strategy == "quantile": # Determine bin edges by distribution of data
quantiles = np.linspace(0, 1, n_bins + 1)
bins = np.percentile(y_prob, quantiles * 100)
elif strategy == "uniform":
bins = np.linspace(0.0, 1.0, n_bins + 1)
else:
raise ValueError(
"Invalid entry to'strategy' input. Strategy "
"must be either 'quantile' or 'uniform'."
)
binids = np.searchsorted(bins[1:-1], y_prob)
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total!= 0
prob_true = bin_true[nonzero] / bin_total[nonzero]
prob_pred = bin_sums[nonzero] / bin_total[nonzero]
return prob_true, prob_pred
class CalibrationDisplay(_BinaryClassifierCurveDisplayMixin):
"""Calibration curve (also known as reliability diagram) visualization.
It is recommended to use
:func:`~sklearn.calibration.CalibrationDisplay.from_estimator` or
:func:`~sklearn.calibration.CalibrationDisplay.from_predictions`
to create a `CalibrationDisplay`. All parameters are stored as attributes.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
prob_true : ndarray of shape (n_bins,)
The proportion of samples whose class is the positive class (fraction
of positives), in each bin.
prob_pred : ndarray of shape (n_bins,)
The mean predicted probability in each bin.
y_prob : ndarray of shape (n_samples,)
Probability estimates for the positive class, for each sample.
estimator_name : str, default=None
Name of estimator. If None, the estimator name is not shown.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
By default, `estimators.classes_[1]` is considered as the
positive class.
.. versionadded:: 1.1
Attributes
----------
line_ : matplotlib Artist
Calibration curve.
ax_ : matplotlib Axes
Axes with calibration curve.
figure_ : matplotlib Figure
Figure containing the curve.
See Also
--------
calibration_curve : Compute true and predicted probabilities for a
calibration curve.
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import calibration_curve, CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> y_prob = clf.predict_proba(X_test)[:, 1]
>>> prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
>>> disp = CalibrationDisplay(prob_true, prob_pred, y_prob)
>>> disp.plot()
<...>
"""
def __init__(
self, prob_true, prob_pred, y_prob, *, estimator_name=None, pos_label=None
):
self.prob_true = prob_true
self.prob_pred = prob_pred
self.y_prob = y_prob
self.estimator_name = estimator_name
self.pos_label = pos_label
def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
"""Plot visualization.
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Parameters
----------
ax : Matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
name : str, default=None
Name for labeling curve. If `None`, use `estimator_name` if
not `None`, otherwise no labeling is shown.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`
Object that stores computed values.
"""
self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)
info_pos_label = (
f"(Positive class: {self.pos_label})" if self.pos_label is not None else ""
)
line_kwargs = {"marker": "s", "linestyle": "-"}
if name is not None:
line_kwargs["label"] = name
line_kwargs.update(**kwargs)
ref_line_label = "Perfectly calibrated"
existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]
if ref_line and not existing_ref_line:
self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label)
self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]
# We always have to show the legend for at least the reference line
self.ax_.legend(loc="lower right")
xlabel = f"Mean predicted probability {info_pos_label}"
ylabel = f"Fraction of positives {info_pos_label}"
self.ax_.set(xlabel=xlabel, ylabel=ylabel)
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
n_bins=5,
strategy="uniform",
pos_label=None,
name=None,
ref_line=True,
ax=None,
**kwargs,
):
"""Plot calibration curve using a binary classifier and data.
A calibration curve, also known as a reliability diagram, uses inputs
from a binary classifier and plots the average predicted probability
for each bin against the fraction of positive classes, on the
y-axis.
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier. The classifier must
have a :term:`predict_proba` method.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Binary target values.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval into when
calculating the calibration curve. A bigger number requires more
data.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
- `'uniform'`: The bins have identical widths.
- `'quantile'`: The bins have the same number of samples and depend
on predicted probabilities.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
By default, `estimators.classes_[1]` is considered as the
positive class.
.. versionadded:: 1.1
name : str, default=None
Name for labeling curve. If `None`, the name of the estimator is
used.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`.
Object that stores computed values.
See Also
--------
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
>>> plt.show()
"""
y_prob, pos_label, name = cls._validate_and_get_response_values(
estimator,
X,
y,
response_method="predict_proba",
pos_label=pos_label,
name=name,
)
return cls.from_predictions(
y,
y_prob,
n_bins=n_bins,
strategy=strategy,
pos_label=pos_label,
name=name,
ref_line=ref_line,
ax=ax,
**kwargs,
)
@classmethod
def from_predictions(
cls,
y_true,
y_prob,
*,
n_bins=5,
strategy="uniform",
pos_label=None,
name=None,
ref_line=True,
ax=None,
**kwargs,
):
"""Plot calibration curve using true labels and predicted probabilities.
        A calibration curve, also known as a reliability diagram, uses inputs
from a binary classifier and plots the average predicted probability
for each bin against the fraction of positive classes, on the
y-axis.
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_prob : array-like of shape (n_samples,)
The predicted probabilities of the positive class.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval into when
calculating the calibration curve. A bigger number requires more
data.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
- `'uniform'`: The bins have identical widths.
- `'quantile'`: The bins have the same number of samples and depend
on predicted probabilities.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
By default, `estimators.classes_[1]` is considered as the
positive class.
.. versionadded:: 1.1
name : str, default=None
Name for labeling curve.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`.
Object that stores computed values.
See Also
--------
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> y_prob = clf.predict_proba(X_test)[:, 1]
>>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
>>> plt.show()
"""
pos_label_validated, name = cls._validate_from_predictions_params(
y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name
)
prob_true, prob_pred = calibration_curve(
y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label
)
disp = cls(
prob_true=prob_true,
prob_pred=prob_pred,
y_prob=y_prob,
estimator_name=name,
pos_label=pos_label_validated,
)
return disp.plot(ax=ax, ref_line=ref_line, **kwargs)
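
# Illustrative usage sketch (an addition for clarity, not part of the
# scikit-learn module above; assumes matplotlib is installed). The two
# classmethods build the same display from different inputs: ``from_estimator``
# computes the predicted probabilities itself, while ``from_predictions``
# accepts probabilities computed elsewhere and can draw onto an existing Axes.
from sklearn.calibration import CalibrationDisplay
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0).fit(X_train, y_train)

# Route 1: the display calls predict_proba internally.
disp = CalibrationDisplay.from_estimator(clf, X_test, y_test, n_bins=10)

# Route 2: reuse precomputed probabilities, drawn on the same axes for comparison.
y_prob = clf.predict_proba(X_test)[:, 1]
CalibrationDisplay.from_predictions(
    y_test, y_prob, n_bins=10, name="precomputed", ax=disp.ax_
)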
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin, _fit_context
from .preprocessing import LabelBinarizer, binarize, label_binarize
from .utils._param_validation import Hidden, Interval, StrOptions
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import _check_sample_weight, check_is_fitted, check_non_negative
__all__ = [
"BernoulliNB",
"GaussianNB",
"MultinomialNB",
"ComplementNB",
"CategoricalNB",
]
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_samples, n_classes).
Public methods predict, predict_proba, predict_log_proba, and
predict_joint_log_proba pass the input through _check_X before handing it
over to _joint_log_likelihood. The term "joint log likelihood" is used
        interchangeably with "joint log probability".
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks.
Only used in predict* methods.
"""
def predict_joint_log_proba(self, X):
"""Return joint log probability estimates for the test vector X.
For each row x of X and class y, the joint log probability is given by
``log P(x, y) = log P(y) + log P(x|y),``
where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is
the class-conditional probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Returns the joint log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
return self._joint_log_likelihood(X)
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1,..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB).
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier.
epsilon_ : float
absolute additive value to variances.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class.
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
_parameter_constraints: dict = {
"priors": ["array-like", None],
"var_smoothing": [Interval(Real, 0, None, closed="left")],
}
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns the instance itself.
"""
y = self._validate_data(y=y)
return self._partial_fit(
X, y, np.unique(y), _refit=True, sample_weight=sample_weight
)
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, reset=False)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
if np.isclose(n_new, 0.0):
return mu, var
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
total_var = total_ssd / n_total
return total_mu, total_var
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns the instance itself.
"""
return self._partial_fit(
X, y, classes, _refit=False, sample_weight=sample_weight
)
def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
if _refit:
self.classes_ = None
first_call = _check_partial_fit_first_call(self, classes)
X, y = self._validate_data(X, y, reset=first_call)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if first_call:
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.var_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided prior matches the number of classes
                if len(priors) != n_classes:
raise ValueError("Number of priors must match number of classes.")
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError("The sum of the priors should be 1.")
# Check that the priors are non-negative
if (priors < 0).any():
raise ValueError("Priors must be non-negative.")
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64)
else:
            if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.isin(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError(
"The target label(s) %s in y do not exist in the initial classes %s"
% (unique_y[~unique_y_in_classes], classes)
)
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i
)
self.theta_[i, :] = new_theta
self.var_[i, :] = new_sigma
self.class_count_[i] += N_i
self.var_[:, :] += self.epsilon_
        # Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = -0.5 * np.sum(np.log(2.0 * np.pi * self.var_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
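
# Illustrative sketch (an addition for clarity, not part of the scikit-learn
# module above). Because of the online mean/variance update in
# _update_mean_variance, fitting GaussianNB in several partial_fit chunks
# matches a single fit on the full data up to tiny floating-point differences
# (the per-call epsilon_ also differs slightly between the two routes).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.naive_bayes import GaussianNB

X, y = make_classification(n_samples=300, random_state=0)
classes = np.unique(y)

full = GaussianNB().fit(X, y)

chunked = GaussianNB()
for chunk in np.array_split(np.arange(X.shape[0]), 3):
    chunked.partial_fit(X[chunk], y[chunk], classes=classes)

print("max |theta diff|:", np.max(np.abs(full.theta_ - chunked.theta_)))
print("max |var diff|:  ", np.max(np.abs(full.var_ - chunked.var_)))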
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
_update_feature_log_prob(alpha)
_count(X, Y)
"""
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
"fit_prior": ["boolean"],
"class_prior": ["array-like", None],
"force_alpha": ["boolean", Hidden(StrOptions({"warn"}))],
}
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None, force_alpha="warn"):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.force_alpha = force_alpha
@abstractmethod
def _count(self, X, Y):
"""Update counts that are used to calculate probabilities.
The counts make up a sufficient statistic extracted from the data.
Accordingly, this method is called each time `fit` or `partial_fit`
update the model. `class_count_` and `feature_count_` must be updated
here along with any model specific counts.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
Y : ndarray of shape (n_samples, n_classes)
Binarized class labels.
"""
@abstractmethod
def _update_feature_log_prob(self, alpha):
"""Update feature log probabilities based on counts.
This method is called each time `fit` or `partial_fit` update the
model.
Parameters
----------
alpha : float
smoothing parameter. See :meth:`_check_alpha`.
"""
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, accept_sparse="csr", reset=False)
def _check_X_y(self, X, y, reset=True):
"""Validate X and y in fit methods."""
return self._validate_data(X, y, accept_sparse="csr", reset=reset)
def _update_class_log_prior(self, class_prior=None):
"""Update class log priors.
The class log priors are based on `class_prior`, class count or the
number of classes. This method is called each time `fit` or
`partial_fit` update the model.
"""
n_classes = len(self.classes_)
if class_prior is not None:
            if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
alpha = (
np.asarray(self.alpha) if not isinstance(self.alpha, Real) else self.alpha
)
alpha_min = np.min(alpha)
if isinstance(alpha, np.ndarray):
if not alpha.shape[0] == self.n_features_in_:
raise ValueError(
"When alpha is an array, it should contains `n_features`. "
f"Got {alpha.shape[0]} elements instead of {self.n_features_in_}."
)
# check that all alpha are positive
if alpha_min < 0:
raise ValueError("All values in alpha must be greater than 0.")
alpha_lower_bound = 1e-10
# TODO(1.4): Replace w/ deprecation of self.force_alpha
# See gh #22269
_force_alpha = self.force_alpha
if _force_alpha == "warn" and alpha_min < alpha_lower_bound:
_force_alpha = False
warnings.warn(
(
"The default value for `force_alpha` will change to `True` in 1.4."
" To suppress this warning, manually set the value of"
" `force_alpha`."
),
FutureWarning,
)
if alpha_min < alpha_lower_bound and not _force_alpha:
warnings.warn(
"alpha too small will result in numeric errors, setting alpha ="
f" {alpha_lower_bound:.1e}. Use `force_alpha=True` to keep alpha"
" unchanged."
)
return np.maximum(alpha, alpha_lower_bound)
return alpha
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
first_call = not hasattr(self, "classes_")
X, y = self._check_X_y(X, y, reset=first_call)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_classes = len(classes)
self._init_counters(n_classes, n_features)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
        if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_classes = Y.shape[1]
self._init_counters(n_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_classes, n_features), dtype=np.float64)
def _more_tags(self):
return {"poor_score": True}
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models.
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
GaussianNB : Gaussian Naive Bayes.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB(force_alpha=True)
>>> clf.fit(X, y)
MultinomialNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
def __init__(
self, *, alpha=1.0, force_alpha="warn", fit_prior=True, class_prior=None
):
super().__init__(
alpha=alpha,
fit_prior=fit_prior,
class_prior=class_prior,
force_alpha=force_alpha,
)
def _more_tags(self):
return {"requires_positive_X": True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = np.log(smoothed_fc) - np.log(
smoothed_cc.reshape(-1, 1)
)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_
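
# Illustrative sketch (an addition for clarity, not part of the scikit-learn
# module above). The typical MultinomialNB use case mentioned in the docstring
# is word-count features for text classification, e.g. via CountVectorizer.
# Toy data only, not a meaningful benchmark.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

docs = [
    "free money win prize now",
    "win a free prize today",
    "meeting agenda for project review",
    "please review the attached project notes",
]
labels = ["spam", "spam", "ham", "ham"]

text_clf = make_pipeline(CountVectorizer(), MultinomialNB(force_alpha=True))
text_clf.fit(docs, labels)
print(text_clf.predict(["free prize for the meeting"]))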
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB(force_alpha=True)
>>> clf.fit(X, y)
ComplementNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"norm": ["boolean"],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
fit_prior=True,
class_prior=None,
norm=False,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.norm = norm
def _more_tags(self):
return {"requires_positive_X": True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : The Complement Naive Bayes classifier
described in Rennie et al. (2003).
GaussianNB : Gaussian Naive Bayes (GaussianNB).
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB(force_alpha=True)
>>> clf.fit(X, Y)
BernoulliNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"binarize": [None, Interval(Real, 0, None, closed="left")],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
binarize=0.0,
fit_prior=True,
class_prior=None,
):
super().__init__(
alpha=alpha,
fit_prior=fit_prior,
class_prior=class_prior,
force_alpha=force_alpha,
)
self.binarize = binarize
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y, reset=True):
X, y = super()._check_X_y(X, y, reset=reset)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = np.log(smoothed_fc) - np.log(
smoothed_cc.reshape(-1, 1)
)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_features = self.feature_log_prob_.shape[1]
n_features_X = X.shape[1]
        if n_features_X != n_features:
raise ValueError(
"Expected input with %d features, got %d instead"
% (n_features, n_features_X)
)
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
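
# Illustrative sketch (an addition for clarity, not part of the scikit-learn
# module above). BernoulliNB models feature presence/absence: with the default
# binarize=0.0, any positive count is mapped to 1 before fitting, so count data
# and the corresponding 0/1 indicator data should give the same model.
import numpy as np
from sklearn.naive_bayes import BernoulliNB

rng = np.random.RandomState(0)
X_counts = rng.randint(5, size=(6, 20))
y = np.array([1, 1, 1, 2, 2, 2])

on_counts = BernoulliNB(force_alpha=True).fit(X_counts, y)
on_binary = BernoulliNB(force_alpha=True, binarize=None).fit(
    (X_counts > 0).astype(float), y
)
print(np.allclose(on_counts.feature_log_prob_, on_binary.feature_log_prob_))  # True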
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features.
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
min_categories : int or array-like of shape (n_features,), default=None
Minimum number of categories per feature.
- integer: Sets the minimum number of categories per feature to
`n_categories` for each features.
- array-like: shape (n_features,) where `n_categories[i]` holds the
minimum number of categories for the ith column of the input.
- None (default): Determines the number of categories automatically
from the training data.
.. versionadded:: 0.24
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_categories_ : ndarray of shape (n_features,), dtype=np.int64
Number of categories for each feature. This value is
inferred from the data or set by the minimum number of categories.
.. versionadded:: 0.24
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
ComplementNB : Complement Naive Bayes classifier.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB(force_alpha=True)
>>> clf.fit(X, y)
CategoricalNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"min_categories": [
None,
"array-like",
Interval(Integral, 1, None, closed="left"),
],
"alpha": [Interval(Real, 0, None, closed="left")],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
fit_prior=True,
class_prior=None,
min_categories=None,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.min_categories = min_categories
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0,..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0,..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
return super().partial_fit(X, y, classes, sample_weight=sample_weight)
def _more_tags(self):
return {"requires_positive_X": True}
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = self._validate_data(
X, dtype="int", accept_sparse=False, force_all_finite=True, reset=False
)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y, reset=True):
X, y = self._validate_data(
X, y, dtype="int", accept_sparse=False, force_all_finite=True, reset=reset
)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_classes, 0)) for _ in range(n_features)]
@staticmethod
def _validate_n_categories(X, min_categories):
        # rely on max for n_categories; categories are encoded between 0...n-1
n_categories_X = X.max(axis=0) + 1
min_categories_ = np.array(min_categories)
if min_categories is not None:
if not np.issubdtype(min_categories_.dtype, np.signedinteger):
raise ValueError(
"'min_categories' should have integral type. Got "
f"{min_categories_.dtype} instead."
)
n_categories_ = np.maximum(n_categories_X, min_categories_, dtype=np.int64)
            if n_categories_.shape != n_categories_X.shape:
raise ValueError(
f"'min_categories' should have shape ({X.shape[1]},"
") when an array-like is provided. Got"
f" {min_categories_.shape} instead."
)
return n_categories_
else:
return n_categories_X
def _count(self, X, Y):
def _update_cat_count_dims(cat_count, highest_feature):
diff = highest_feature + 1 - cat_count.shape[1]
if diff > 0:
# we append a column full of zeros for each new category
return np.pad(cat_count, [(0, 0), (0, diff)], "constant")
return cat_count
def _update_cat_count(X_feature, Y, cat_count, n_classes):
for j in range(n_classes):
mask = Y[:, j].astype(bool)
if Y.dtype.type == np.int64:
weights = None
else:
weights = Y[mask, j]
counts = np.bincount(X_feature[mask], weights=weights)
indices = np.nonzero(counts)[0]
cat_count[j, indices] += counts[indices]
self.class_count_ += Y.sum(axis=0)
self.n_categories_ = self._validate_n_categories(X, self.min_categories)
for i in range(self.n_features_in_):
X_feature = X[:, i]
self.category_count_[i] = _update_cat_count_dims(
self.category_count_[i], self.n_categories_[i] - 1
)
_update_cat_count(
X_feature, Y, self.category_count_[i], self.class_count_.shape[0]
)
def _update_feature_log_prob(self, alpha):
feature_log_prob = []
for i in range(self.n_features_in_):
smoothed_cat_count = self.category_count_[i] + alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count) - np.log(smoothed_class_count.reshape(-1, 1))
)
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
self._check_n_features(X, reset=False)
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_in_):
indices = X[:, i]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll |
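
# Illustrative sketch (an addition for clarity, not part of the scikit-learn
# module above). CategoricalNB expects each feature encoded as integers
# 0 .. n-1, which OrdinalEncoder provides; toy string-valued features:
from sklearn.naive_bayes import CategoricalNB
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder

X = [["red", "small"], ["red", "large"], ["blue", "small"], ["blue", "large"]]
y = [0, 0, 1, 1]

clf = make_pipeline(OrdinalEncoder(), CategoricalNB(force_alpha=True))
clf.fit(X, y)
print(clf.predict([["blue", "small"]]))  # [1]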
scikit-learn__scikit-learn | compose.rst | Tutorial | Generate tutorial about pipelines | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/compose.rst | [
"scikit-learn__scikit-learn/sklearn/pipeline.py"
] | Pipelines and composite estimators
Transformers are usually combined with classifiers, regressors or other
estimators to build a composite estimator. The most common tool is a
Pipeline. Pipeline is often used in combination with
FeatureUnion, which concatenates the output of transformers into a
composite feature space. TransformedTargetRegressor deals with
transforming the target (i.e. log-transform y). In contrast, Pipelines
only transform the observed data (X).
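As a minimal sketch of target transformation (assuming a noiseless
exponential target, not an excerpt from the examples below), a
TransformedTargetRegressor fits its regressor on the log-transformed y
and maps predictions back automatically:
>>> import numpy as np
>>> from sklearn.compose import TransformedTargetRegressor
>>> from sklearn.linear_model import LinearRegression
>>> X = np.arange(1, 11).reshape(-1, 1)
>>> y = np.exp(0.3 * X.ravel())
>>> reg = TransformedTargetRegressor(regressor=LinearRegression(),
...                                  func=np.log, inverse_func=np.exp)
>>> reg.fit(X, y)
TransformedTargetRegressor(...)
>>> reg.predict(X[:2]).round(2)
array([1.35, 1.82])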
Pipeline: chaining estimators
Pipeline can be used to chain multiple estimators into one. This is
useful as there is often a fixed sequence of steps in processing the
data, for example feature selection, normalization and classification.
Pipeline serves multiple purposes here:
Convenience and encapsulation
You only have to call fit and predict once on your data to fit a
whole sequence of estimators.
Joint parameter selection
You can grid search over parameters of all estimators
in the pipeline at once.
Safety
Pipelines help avoid leaking statistics from your test data into the
trained model in cross-validation, by ensuring that the same samples
are used to train the transformers and predictors.
All estimators in a pipeline, except the last one, must be transformers
(i.e. must have a transform method). The last estimator may be any type
(transformer, classifier, etc.).
Note
Calling fit on the pipeline is the same as calling fit on each estimator
in turn, transforming the input and passing it on to the next step. The
pipeline has all the methods that the last estimator in the pipeline
has, i.e. if the last estimator is a classifier, the Pipeline can be
used as a classifier. If the last estimator is a transformer, again, so
is the pipeline.
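As a rough check of that equivalence (a minimal sketch using
StandardScaler and LogisticRegression on the iris data), fitting a
two-step pipeline gives the same model as applying the transformer by
hand and fitting the final estimator on the transformed data:
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> X, y = load_iris(return_X_y=True)
>>> pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
>>> scaler = StandardScaler().fit(X)
>>> clf = LogisticRegression().fit(scaler.transform(X), y)
>>> np.allclose(pipe.predict_proba(X), clf.predict_proba(scaler.transform(X)))
True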
Usage
Construction
The Pipeline is built using a list of (key, value) pairs, where the key
is a string containing the name you want to give this step and value is
an estimator object:
>>> from sklearn.pipeline import Pipeline
>>> from sklearn.svm import SVC
>>> from sklearn.decomposition import PCA
>>> estimators = [('reduce_dim', PCA()), ('clf', SVC())]
>>> pipe = Pipeline(estimators)
>>> pipe
Pipeline(steps=[('reduce_dim', PCA()), ('clf', SVC())])
The utility function make_pipeline is a shorthand for constructing
pipelines; it takes a variable number of estimators and returns a
pipeline, filling in the names automatically:
>>> from sklearn.pipeline import make_pipeline
>>> make_pipeline(PCA(), SVC())
Pipeline(steps=[('pca', PCA()), ('svc', SVC())])
Accessing steps
The estimators of a pipeline are stored as a list in the steps
attribute, but can be accessed by index or name by indexing (with [idx])
the Pipeline:
>>> pipe.steps[0]
('reduce_dim', PCA())
>>> pipe[0]
PCA()
>>> pipe['reduce_dim']
PCA()
Pipeline's named_steps attribute allows accessing steps by name with tab
completion in interactive environments:
>>> pipe.named_steps.reduce_dim is pipe['reduce_dim']
True
A sub-pipeline can also be extracted using the slicing notation commonly
used for Python Sequences such as lists or strings (although only a step
of 1 is permitted). This is convenient for performing only some of the
transformations (or their inverse):
>>> pipe[:1]
Pipeline(steps=[('reduce_dim', PCA())])
>>> pipe[-1:]
Pipeline(steps=[('clf', SVC())])
Nested parameters
Parameters of the estimators in the pipeline can be accessed using the
<estimator>__<parameter> syntax:
>>> pipe.set_params(clf__C=10)
Pipeline(steps=[('reduce_dim', PCA()), ('clf', SVC(C=10))])
This is particularly important for doing grid searches:
>>> from sklearn.model_selection import GridSearchCV
>>> param_grid = dict(reduce_dim__n_components=[2, 5, 10],
... clf__C=[0.1, 10, 100])
>>> grid_search = GridSearchCV(pipe, param_grid=param_grid)
Individual steps may also be replaced as parameters, and non-final steps
may be ignored by setting them to 'passthrough':
>>> from sklearn.linear_model import LogisticRegression
>>> param_grid = dict(reduce_dim=['passthrough', PCA(5), PCA(10)],
... clf=[SVC(), LogisticRegression()],
... clf__C=[0.1, 10, 100])
>>> grid_search = GridSearchCV(pipe, param_grid=param_grid)
The estimators of the pipeline can be retrieved by index:
>>> pipe[0]
PCA()
or by name:
>>> pipe['reduce_dim']
PCA()
To enable model inspection, ~sklearn.pipeline.Pipeline has a
get_feature_names_out() method, just like all transformers. You can use
pipeline slicing to get the feature names going into each step:
>>> from sklearn.datasets import load_iris
>>> from sklearn.feature_selection import SelectKBest
>>> iris = load_iris()
>>> pipe = Pipeline(steps=[
... ('select', SelectKBest(k=2)),
... ('clf', LogisticRegression())])
>>> pipe.fit(iris.data, iris.target)
Pipeline(steps=[('select', SelectKBest(...)), ('clf', LogisticRegression(...))])
>>> pipe[:-1].get_feature_names_out()
array(['x2', 'x3'], ...)
You can also provide custom feature names for the input data using
get_feature_names_out:
>>> pipe[:-1].get_feature_names_out(iris.feature_names)
array(['petal length (cm)', 'petal width (cm)'], ...)
Examples:
- sphx_glr_auto_examples_feature_selection_plot_feature_selection_pipeline.py
- sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py
- sphx_glr_auto_examples_compose_plot_digits_pipe.py
- sphx_glr_auto_examples_miscellaneous_plot_kernel_approximation.py
- sphx_glr_auto_examples_svm_plot_svm_anova.py
- sphx_glr_auto_examples_compose_plot_compare_reduction.py
- sphx_glr_auto_examples_miscellaneous_plot_pipeline_display.py
See Also:
- composite_grid_search
Caching transformers: avoid repeated computation
Fitting transformers may be computationally expensive. With its memory
parameter set, Pipeline will cache each transformer after calling fit.
This avoids re-fitting the transformers within a
pipeline if the parameters and input data are identical. A typical
example is the case of a grid search in which the transformers can be
fitted only once and reused for each configuration. The last step will
never be cached, even if it is a transformer.
The parameter memory is needed in order to cache the transformers.
memory can be either a string giving the directory in which to cache
the transformers or a joblib.Memory object:
>>> from tempfile import mkdtemp
>>> from shutil import rmtree
>>> from sklearn.decomposition import PCA
>>> from sklearn.svm import SVC
>>> from sklearn.pipeline import Pipeline
>>> estimators = [('reduce_dim', PCA()), ('clf', SVC())]
>>> cachedir = mkdtemp()
>>> pipe = Pipeline(estimators, memory=cachedir)
>>> pipe
Pipeline(memory=...,
steps=[('reduce_dim', PCA()), ('clf', SVC())])
>>> # Clear the cache directory when you don't need it anymore
>>> rmtree(cachedir)
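Alternatively, memory may be a joblib.Memory object; the following sketch (an addition here, equivalent to the string form above) constructs one explicitly:
>>> from joblib import Memory
>>> from tempfile import mkdtemp
>>> from shutil import rmtree
>>> cachedir = mkdtemp()
>>> memory = Memory(location=cachedir, verbose=0)
>>> pipe = Pipeline([('reduce_dim', PCA()), ('clf', SVC())], memory=memory)
>>> # Clear the cache directory when you don't need it anymore
>>> rmtree(cachedir)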
Warning: Side effect of caching transformers
With caching disabled, the original transformer instances passed to a
Pipeline can be inspected directly:
>>> from sklearn.datasets import load_digits
>>> X_digits, y_digits = load_digits(return_X_y=True)
>>> pca1 = PCA()
>>> svm1 = SVC()
>>> pipe = Pipeline([('reduce_dim', pca1), ('clf', svm1)])
>>> pipe.fit(X_digits, y_digits)
Pipeline(steps=[('reduce_dim', PCA()), ('clf', SVC())])
>>> # The pca instance can be inspected directly
>>> print(pca1.components_)
[[-1.77484909e-19 ... 4.07058917e-18]]
Enabling caching triggers a clone of the transformers before fitting.
Therefore, the transformer instance given to the pipeline cannot be
inspected directly. In the following example, accessing the
~sklearn.decomposition.PCA instance pca2 will raise an AttributeError
since pca2 will be an unfitted transformer. Instead, use the attribute
named_steps to inspect estimators within the pipeline:
>>> cachedir = mkdtemp()
>>> pca2 = PCA()
>>> svm2 = SVC()
>>> cached_pipe = Pipeline([('reduce_dim', pca2), ('clf', svm2)],
... memory=cachedir)
>>> cached_pipe.fit(X_digits, y_digits)
Pipeline(memory=...,
steps=[('reduce_dim', PCA()), ('clf', SVC())])
>>> print(cached_pipe.named_steps['reduce_dim'].components_)
[[-1.77484909e-19 ... 4.07058917e-18]]
>>> # Remove the cache directory
>>> rmtree(cachedir)
Transforming target in regression
~sklearn.compose.TransformedTargetRegressor transforms the targets y
before fitting a regression model. The predictions are mapped back to
the original space via an inverse transform. It takes as an argument the
regressor that will be used for prediction, and the transformer that
will be applied to the target variable:
>>> import numpy as np
>>> from sklearn.datasets import fetch_california_housing
>>> from sklearn.compose import TransformedTargetRegressor
>>> from sklearn.preprocessing import QuantileTransformer
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.model_selection import train_test_split
>>> X, y = fetch_california_housing(return_X_y=True)
>>> X, y = X[:2000, :], y[:2000] # select a subset of data
>>> transformer = QuantileTransformer(output_distribution='normal')
>>> regressor = LinearRegression()
>>> regr = TransformedTargetRegressor(regressor=regressor,
... transformer=transformer)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
>>> regr.fit(X_train, y_train)
TransformedTargetRegressor(...)
>>> print('R2 score: {0:.2f}'.format(regr.score(X_test, y_test)))
R2 score: 0.61
>>> raw_target_regr = LinearRegression().fit(X_train, y_train)
>>> print('R2 score: {0:.2f}'.format(raw_target_regr.score(X_test, y_test)))
R2 score: 0.59
For simple transformations, instead of a Transformer object, a pair of
functions can be passed, defining the transformation and its inverse
mapping:
>>> def func(x):
... return np.log(x)
>>> def inverse_func(x):
... return np.exp(x)
Subsequently, the object is created as:
>>> regr = TransformedTargetRegressor(regressor=regressor,
... func=func,
... inverse_func=inverse_func)
>>> regr.fit(X_train, y_train)
TransformedTargetRegressor(...)
>>> print('R2 score: {0:.2f}'.format(regr.score(X_test, y_test)))
R2 score: 0.51
By default, the provided functions are checked at each fit to be the
inverse of each other. However, it is possible to bypass this checking
by setting check_inverse to False:
>>> def inverse_func(x):
... return x
>>> regr = TransformedTargetRegressor(regressor=regressor,
... func=func,
... inverse_func=inverse_func,
... check_inverse=False)
>>> regr.fit(X_train, y_train)
TransformedTargetRegressor(...)
>>> print('R2 score: {0:.2f}'.format(regr.score(X_test, y_test)))
R2 score: -1.57
Note
The transformation can be specified by setting either transformer or the
pair of functions func and inverse_func. However, setting both options
will raise an error.
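For instance (a sketch added here; it reuses the regressor, transformer and functions defined above, and the exact error message may vary between versions):
>>> regr = TransformedTargetRegressor(regressor=regressor,
...                                   transformer=transformer,
...                                   func=func,
...                                   inverse_func=inverse_func)
>>> regr.fit(X_train, y_train)  # doctest: +SKIP
Traceback (most recent call last):
    ...
ValueError: 'transformer' and functions 'func'/'inverse_func' cannot both be set.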
FeatureUnion: composite feature spaces
FeatureUnion combines several transformer objects into a new transformer
that combines their output. A FeatureUnion takes a list of transformer
objects. During fitting, each of these is fit to the data independently.
The transformers are applied in parallel, and the feature matrices they
output are concatenated side-by-side into a larger matrix.
When you want to apply different transformations to each field of the
data, see the related class ~sklearn.compose.ColumnTransformer (see
user guide <column_transformer>).
FeatureUnion serves the same purposes as Pipeline: convenience and joint
parameter estimation and validation.
FeatureUnion and Pipeline can be combined to create complex models; a
brief sketch follows the usage example below.
(A FeatureUnion has no way of checking whether two transformers might
produce identical features. It only produces a union when the feature
sets are disjoint, and making sure they are is the caller's
responsibility.)
Usage
A FeatureUnion is built using a list of (key, value) pairs, where the
key is the name you want to give to a given transformation (an arbitrary
string; it only serves as an identifier) and value is an estimator
object:
>>> from sklearn.pipeline import FeatureUnion
>>> from sklearn.decomposition import PCA
>>> from sklearn.decomposition import KernelPCA
>>> estimators = [('linear_pca', PCA()), ('kernel_pca', KernelPCA())]
>>> combined = FeatureUnion(estimators)
>>> combined
FeatureUnion(transformer_list=[('linear_pca', PCA()),
('kernel_pca', KernelPCA())])
Like pipelines, feature unions have a shorthand constructor called
make_union that does not require explicit naming of the components.
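A brief sketch of the shorthand (added here for symmetry with make_pipeline; the generated names are simply the lower-cased class names):
>>> from sklearn.pipeline import make_union
>>> from sklearn.decomposition import PCA, KernelPCA
>>> union = make_union(PCA(), KernelPCA())
>>> [name for name, _ in union.transformer_list]
['pca', 'kernelpca']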
Like Pipeline, individual steps may be replaced using set_params, and
ignored by setting to 'drop':
>>> combined.set_params(kernel_pca='drop')
FeatureUnion(transformer_list=[('linear_pca', PCA()),
('kernel_pca', 'drop')])
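As mentioned earlier, FeatureUnion and Pipeline compose naturally; here is a minimal sketch (an addition, reusing the combined union from above, with an SVC chosen purely for illustration):
>>> from sklearn.pipeline import Pipeline
>>> from sklearn.svm import SVC
>>> model = Pipeline([('features', combined), ('clf', SVC())])
>>> [name for name, _ in model.steps]
['features', 'clf']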
ColumnTransformer for heterogeneous data
Many datasets contain features of different types, say text, floats, and
dates, where each type of feature requires separate preprocessing or
feature extraction steps. Often it is easiest to preprocess data before
applying scikit-learn methods, for example using pandas. Processing your
data before passing it to scikit-learn might be problematic for one of
the following reasons:
1. Incorporating statistics from test data into the preprocessors makes
cross-validation scores unreliable (known as data leakage), for
example in the case of scalers or imputing missing values.
2. You may want to include the parameters of the preprocessors in a
parameter search <grid_search>.
The ~sklearn.compose.ColumnTransformer helps perform different
transformations for different columns of the data, within a
~sklearn.pipeline.Pipeline that is safe from data leakage and that can
be parametrized. ~sklearn.compose.ColumnTransformer works on arrays,
sparse matrices, and pandas DataFrames.
To each column, a different transformation can be applied, such as
preprocessing or a specific feature extraction method:
>>> import pandas as pd
>>> X = pd.DataFrame(
... {'city': ['London', 'London', 'Paris', 'Sallisaw'],
... 'title': ["His Last Bow", "How Watson Learned the Trick",
... "A Moveable Feast", "The Grapes of Wrath"],
... 'expert_rating': [5, 3, 4, 5],
... 'user_rating': [4, 5, 4, 3]})
For this data, we might want to encode the 'city' column as a
categorical variable using ~sklearn.preprocessing.OneHotEncoder but
apply a ~sklearn.feature_extraction.text.CountVectorizer to the 'title'
column. As we might use multiple feature extraction methods on the same
column, we give each transformer a unique name, say 'city_category' and
'title_bow'. By default, the remaining rating columns are ignored
(remainder='drop'):
>>> from sklearn.compose import ColumnTransformer
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from sklearn.preprocessing import OneHotEncoder
>>> column_trans = ColumnTransformer(
... [('categories', OneHotEncoder(dtype='int'), ['city']),
... ('title_bow', CountVectorizer(), 'title')],
... remainder='drop', verbose_feature_names_out=False)
>>> column_trans.fit(X)
ColumnTransformer(transformers=[('categories', OneHotEncoder(dtype='int'),
['city']),
('title_bow', CountVectorizer(), 'title')],
verbose_feature_names_out=False)
>>> column_trans.get_feature_names_out()
array(['city_London', 'city_Paris', 'city_Sallisaw', 'bow', 'feast',
'grapes', 'his', 'how', 'last', 'learned', 'moveable', 'of', 'the',
'trick', 'watson', 'wrath'], ...)
>>> column_trans.transform(X).toarray()
array([[1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1]]...)
In the above example, the
~sklearn.feature_extraction.text.CountVectorizer expects a 1D array as
input and therefore the columns were specified as a string ('title').
However, ~sklearn.preprocessing.OneHotEncoder, like most other
transformers, expects 2D data; therefore, in that case, you need to
specify the column as a list of strings (['city']).
Apart from a scalar or a single item list, the column selection can be
specified as a list of multiple items, an integer array, a slice, a
boolean mask, or with a ~sklearn.compose.make_column_selector. The
~sklearn.compose.make_column_selector is used to select columns based on
data type or column name:
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.compose import make_column_selector
>>> ct = ColumnTransformer([
... ('scale', StandardScaler(),
... make_column_selector(dtype_include=np.number)),
... ('onehot',
... OneHotEncoder(),
... make_column_selector(pattern='city', dtype_include=object))])
>>> ct.fit_transform(X)
array([[ 0.904..., 0. , 1. , 0. , 0. ],
[-1.507..., 1.414..., 1. , 0. , 0. ],
[-0.301..., 0. , 0. , 1. , 0. ],
[ 0.904..., -1.414..., 0. , 0. , 1. ]])
Strings can reference columns only if the input is a DataFrame; integers
are always interpreted as positional columns.
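As a small added sketch of positional selection (reusing X, ColumnTransformer and StandardScaler from the examples above; column index 2 is 'expert_rating'):
>>> ct = ColumnTransformer([('scale', StandardScaler(), [2])])
>>> ct.fit_transform(X)
array([[ 0.904...],
       [-1.507...],
       [-0.301...],
       [ 0.904...]])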
We can keep the remaining rating columns by setting
remainder='passthrough'. The values are appended to the end of the
transformation:
>>> column_trans = ColumnTransformer(
... [('city_category', OneHotEncoder(dtype='int'),['city']),
... ('title_bow', CountVectorizer(), 'title')],
... remainder='passthrough')
>>> column_trans.fit_transform(X)
array([[1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 4],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 3, 5],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 4, 4],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 5, 3]]...)
The remainder parameter can be set to an estimator to transform the
remaining rating columns. The transformed values are appended to the end
of the transformation:
>>> from sklearn.preprocessing import MinMaxScaler
>>> column_trans = ColumnTransformer(
... [('city_category', OneHotEncoder(), ['city']),
... ('title_bow', CountVectorizer(), 'title')],
... remainder=MinMaxScaler())
>>> column_trans.fit_transform(X)[:, -2:]
array([[1. , 0.5],
[0. , 1. ],
[0.5, 0.5],
[1. , 0. ]])
The ~sklearn.compose.make_column_transformer function is available to
more easily create a ~sklearn.compose.ColumnTransformer object.
Specifically, the names will be given automatically. The equivalent for
the above example would be:
>>> from sklearn.compose import make_column_transformer
>>> column_trans = make_column_transformer(
... (OneHotEncoder(), ['city']),
... (CountVectorizer(), 'title'),
... remainder=MinMaxScaler())
>>> column_trans
ColumnTransformer(remainder=MinMaxScaler(),
transformers=[('onehotencoder', OneHotEncoder(), ['city']),
('countvectorizer', CountVectorizer(),
'title')])
If ~sklearn.compose.ColumnTransformer is fitted with a dataframe and the
dataframe only has string column names, then transforming a dataframe
will use the column names to select the columns:
>>> ct = ColumnTransformer(
... [("scale", StandardScaler(), ["expert_rating"])]).fit(X)
>>> X_new = pd.DataFrame({"expert_rating": [5, 6, 1],
... "ignored_new_col": [1.2, 0.3, -0.1]})
>>> ct.transform(X_new)
array([[ 0.9...],
[ 2.1...],
[-3.9...]])
Visualizing Composite Estimators
Estimators are displayed with an HTML representation when shown in a
Jupyter notebook. This is useful to diagnose or visualize a Pipeline
with many estimators. This visualization is activated by default:
>>> column_trans # doctest: +SKIP
It can be deactivated by setting the display option in
~sklearn.set_config to 'text':
>>> from sklearn import set_config
>>> set_config(display='text') # doctest: +SKIP
>>> # displays text representation in a jupyter context
>>> column_trans # doctest: +SKIP
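The rich display can be restored afterwards (a short added note; 'diagram' is the default display value):
>>> set_config(display='diagram')  # doctest: +SKIP
>>> # subsequent estimators render as diagrams again in a notebook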
An example of the HTML output can be seen in the HTML representation of
Pipeline section of
sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py.
As an alternative, the HTML can be written to a file using
~sklearn.utils.estimator_html_repr:
>>> from sklearn.utils import estimator_html_repr
>>> with open('my_estimator.html', 'w') as f: # doctest: +SKIP
... f.write(estimator_html_repr(clf))
| """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# License: BSD
from collections import defaultdict
from itertools import islice
import numpy as np
from scipy import sparse
from .base import TransformerMixin, _fit_context, clone
from .exceptions import NotFittedError
from .preprocessing import FunctionTransformer
from .utils import Bunch, _print_elapsed_time, check_pandas_support
from .utils._estimator_html_repr import _VisualBlock
from .utils._metadata_requests import METHODS
from .utils._param_validation import HasMethods, Hidden
from .utils._set_output import _get_output_config, _safe_set_output
from .utils._tags import _safe_tags
from .utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from .utils.metaestimators import _BaseComposition, available_if
from .utils.parallel import Parallel, delayed
from .utils.validation import check_is_fitted, check_memory
__all__ = ["Pipeline", "FeatureUnion", "make_pipeline", "make_union"]
def _final_estimator_has(attr):
"""Check that final_estimator has `attr`.
Used together with `available_if` in `Pipeline`."""
def check(self):
# raise original `AttributeError` if `attr` does not exist
getattr(self._final_estimator, attr)
return True
return check
class Pipeline(_BaseComposition):
"""
Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement `fit` and `transform` methods.
The final estimator only needs to implement `fit`.
The transformers in the pipeline can be cached using ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters. For this, it
enables setting parameters of the various steps using their names and the
parameter name separated by a `'__'`, as in the example below. A step's
estimator may be replaced entirely by setting the parameter with its name
to another estimator, or a transformer removed by setting it to
`'passthrough'` or `None`.
For an example use case of `Pipeline` combined with
:class:`~sklearn.model_selection.GridSearchCV`, refer to
:ref:`sphx_glr_auto_examples_compose_plot_compare_reduction.py`. The
example :ref:`sphx_glr_auto_examples_compose_plot_digits_pipe.py` shows how
to grid search on a pipeline using `'__'` as a separator in the parameter names.
Read more in the :ref:`User Guide <pipeline>`.
.. versionadded:: 0.5
Parameters
----------
steps : list of tuple
List of (name, transform) tuples (implementing `fit`/`transform`) that
are chained in sequential order. The last transform must be an
estimator.
memory : str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. The last step
will never be cached, even if it is a transformer. By default, no
caching is performed. If a string is given, it is the path to the
caching directory. Enabling caching triggers a clone of the transformers
before fitting. Therefore, the transformer instance given to the
pipeline cannot be inspected directly. Use the attribute ``named_steps``
or ``steps`` to inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Attributes
----------
named_steps : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
classes_ : ndarray of shape (n_classes,)
The classes labels. Only exist if the last step of the pipeline is a
classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying first estimator in `steps` exposes such an attribute
when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
make_pipeline : Convenience function for simplified pipeline construction.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.pipeline import Pipeline
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
>>> # The pipeline can be used as any other estimator
>>> # and avoids leaking the test set into the train set
>>> pipe.fit(X_train, y_train).score(X_test, y_test)
0.88
>>> # An estimator's parameter can be set using '__' syntax
>>> pipe.set_params(svc__C=10).fit(X_train, y_train).score(X_test, y_test)
0.76
"""
# BaseEstimator interface
_required_parameters = ["steps"]
_parameter_constraints: dict = {
"steps": [list, Hidden(tuple)],
"memory": [None, str, HasMethods(["cache"])],
"verbose": ["boolean"],
}
def __init__(self, steps, *, memory=None, verbose=False):
self.steps = steps
self.memory = memory
self.verbose = verbose
def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `steps`.
Parameters
----------
transform : {"default", "pandas"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
Returns
-------
self : estimator instance
Estimator instance.
"""
for _, _, step in self._iter():
_safe_set_output(step, transform=transform)
return self
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `steps` of the `Pipeline`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("steps", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that
you can directly set the parameters of the estimators contained in
`steps`.
Parameters
----------
**kwargs : dict
Parameters of this estimator or parameters of estimators contained
in `steps`. Parameters of the steps may be set using its name and
the parameter name separated by a '__'.
Returns
-------
self : object
Pipeline class instance.
"""
self._set_params("steps", **kwargs)
return self
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == "passthrough":
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
raise TypeError(
"All intermediate steps should be "
"transformers and implement fit and transform "
"or be the string 'passthrough' "
"'%s' (type %s) doesn't" % (t, type(t))
)
# We allow last estimator to be None as an identity transformation
if (
estimator is not None
            and estimator != "passthrough"
and not hasattr(estimator, "fit")
):
raise TypeError(
"Last step of Pipeline should implement fit "
"or be the string 'passthrough'. "
"'%s' (type %s) doesn't" % (estimator, type(estimator))
)
def _iter(self, with_final=True, filter_passthrough=True):
"""
Generate (idx, (name, trans)) tuples from self.steps
When filter_passthrough is True, 'passthrough' and None transformers
are filtered out.
"""
stop = len(self.steps)
if not with_final:
stop -= 1
for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)):
if not filter_passthrough:
yield idx, name, trans
            elif trans is not None and trans != "passthrough":
yield idx, name, trans
def __len__(self):
"""
Returns the length of the Pipeline
"""
return len(self.steps)
def __getitem__(self, ind):
"""Returns a sub-pipeline or a single estimator in the pipeline
Indexing with an integer will return an estimator; using a slice
returns another Pipeline instance which copies a slice of this
Pipeline. This copy is shallow: modifying (or fitting) estimators in
the sub-pipeline will affect the larger pipeline and vice-versa.
However, replacing a value in `step` will not affect a copy.
"""
if isinstance(ind, slice):
if ind.step not in (1, None):
raise ValueError("Pipeline slicing only supports a step of 1")
return self.__class__(
self.steps[ind], memory=self.memory, verbose=self.verbose
)
try:
name, est = self.steps[ind]
except TypeError:
# Not an int, try get step by name
return self.named_steps[ind]
return est
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
@property
def named_steps(self):
"""Access the steps by name.
Read-only attribute to access any step by given name.
Keys are steps names and values are the steps objects."""
# Use Bunch object to improve autocomplete
return Bunch(**dict(self.steps))
@property
def _final_estimator(self):
try:
estimator = self.steps[-1][1]
return "passthrough" if estimator is None else estimator
except (ValueError, AttributeError, TypeError):
# This condition happens when a call to a method is first calling
# `_available_if` and `fit` did not validate `steps` yet. We
# return `None` and an `InvalidParameterError` will be raised
# right after.
return None
def _log_message(self, step_idx):
if not self.verbose:
return None
name, _ = self.steps[step_idx]
return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name)
def _check_method_params(self, method, props, **kwargs):
if _routing_enabled():
routed_params = process_routing(self, method, **props, **kwargs)
return routed_params
else:
fit_params_steps = Bunch(
**{
name: Bunch(**{method: {} for method in METHODS})
for name, step in self.steps
if step is not None
}
)
for pname, pval in props.items():
if "__" not in pname:
raise ValueError(
"Pipeline.fit does not accept the {} parameter. "
"You can pass parameters to specific steps of your "
"pipeline using the stepname__parameter format, e.g. "
"`Pipeline.fit(X, y, logisticregression__sample_weight"
"=sample_weight)`.".format(pname)
)
step, param = pname.split("__", 1)
fit_params_steps[step]["fit"][param] = pval
# without metadata routing, fit_transform and fit_predict
# get all the same params and pass it to the last fit.
fit_params_steps[step]["fit_transform"][param] = pval
fit_params_steps[step]["fit_predict"][param] = pval
return fit_params_steps
# Estimator interface
def _fit(self, X, y=None, routed_params=None):
# shallow copy of steps - this should really be steps_
self.steps = list(self.steps)
self._validate_steps()
# Setup the memory
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for step_idx, name, transformer in self._iter(
with_final=False, filter_passthrough=False
):
if transformer is None or transformer == "passthrough":
with _print_elapsed_time("Pipeline", self._log_message(step_idx)):
continue
if hasattr(memory, "location") and memory.location is None:
# we do not clone when caching is disabled to
# preserve backward compatibility
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
# Fit or load from cache the current transformer
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer,
X,
y,
None,
message_clsname="Pipeline",
message=self._log_message(step_idx),
params=routed_params[name],
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
return X
@_fit_context(
# estimators in Pipeline.steps are not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None, **params):
"""Fit the model.
Fit all the transformers one after the other and transform the
data. Finally, fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default):
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
- If `enable_metadata_routing=True`:
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Pipeline with fitted steps.
"""
routed_params = self._check_method_params(method="fit", props=params)
Xt = self._fit(X, y, routed_params)
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
            if self._final_estimator != "passthrough":
last_step_params = routed_params[self.steps[-1][0]]
self._final_estimator.fit(Xt, y, **last_step_params["fit"])
return self
def _can_fit_transform(self):
return (
self._final_estimator == "passthrough"
or hasattr(self._final_estimator, "transform")
or hasattr(self._final_estimator, "fit_transform")
)
@available_if(_can_fit_transform)
@_fit_context(
# estimators in Pipeline.steps are not validated yet
prefer_skip_nested_validation=False
)
def fit_transform(self, X, y=None, **params):
"""Fit the model and transform with the final estimator.
Fits all the transformers one after the other and transform the
data. Then uses `fit_transform` on transformed data with the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default):
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
- If `enable_metadata_routing=True`:
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed samples.
"""
routed_params = self._check_method_params(method="fit_transform", props=params)
Xt = self._fit(X, y, routed_params)
last_step = self._final_estimator
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
last_step_params = routed_params[self.steps[-1][0]]
if hasattr(last_step, "fit_transform"):
return last_step.fit_transform(
Xt, y, **last_step_params["fit_transform"]
)
else:
return last_step.fit(Xt, y, **last_step_params["fit"]).transform(
Xt, **last_step_params["transform"]
)
@available_if(_final_estimator_has("predict"))
def predict(self, X, **params):
"""Transform the data, and apply `predict` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls `predict`
method. Only valid if the final estimator implements `predict`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default):
Parameters to the ``predict`` called at the end of all
transformations in the pipeline.
- If `enable_metadata_routing=True`:
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Note that while this may be used to return uncertainties from some
models with ``return_std`` or ``return_cov``, uncertainties that are
generated by the transformations in the pipeline are not propagated
to the final estimator.
Returns
-------
y_pred : ndarray
Result of calling `predict` on the final estimator.
"""
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict(Xt, **params)
# metadata routing enabled
routed_params = process_routing(self, "predict", **params)
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict)
@available_if(_final_estimator_has("fit_predict"))
@_fit_context(
# estimators in Pipeline.steps are not validated yet
prefer_skip_nested_validation=False
)
def fit_predict(self, X, y=None, **params):
"""Transform the data, and apply `fit_predict` with the final estimator.
Call `fit_transform` of each transformer in the pipeline. The
transformed data are finally passed to the final estimator that calls
`fit_predict` method. Only valid if the final estimator implements
`fit_predict`.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default):
Parameters to the ``predict`` called at the end of all
transformations in the pipeline.
- If `enable_metadata_routing=True`:
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Note that while this may be used to return uncertainties from some
models with ``return_std`` or ``return_cov``, uncertainties that are
generated by the transformations in the pipeline are not propagated
to the final estimator.
Returns
-------
y_pred : ndarray
Result of calling `fit_predict` on the final estimator.
"""
routed_params = self._check_method_params(method="fit_predict", props=params)
Xt = self._fit(X, y, routed_params)
params_last_step = routed_params[self.steps[-1][0]]
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(
Xt, y, **params_last_step.get("fit_predict", {})
)
return y_pred
@available_if(_final_estimator_has("predict_proba"))
def predict_proba(self, X, **params):
"""Transform the data, and apply `predict_proba` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`predict_proba` method. Only valid if the final estimator implements
`predict_proba`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default):
Parameters to the `predict_proba` called at the end of all
transformations in the pipeline.
- If `enable_metadata_routing=True`:
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y_proba : ndarray of shape (n_samples, n_classes)
Result of calling `predict_proba` on the final estimator.
"""
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict_proba(Xt, **params)
# metadata routing enabled
routed_params = process_routing(self, "predict_proba", **params)
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].predict_proba(
Xt, **routed_params[self.steps[-1][0]].predict_proba
)
@available_if(_final_estimator_has("decision_function"))
def decision_function(self, X, **params):
"""Transform the data, and apply `decision_function` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`decision_function` method. Only valid if the final estimator
implements `decision_function`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of string -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y_score : ndarray of shape (n_samples, n_classes)
Result of calling `decision_function` on the final estimator.
"""
_raise_for_params(params, self, "decision_function")
# not branching here since params is only available if
# enable_metadata_routing=True
routed_params = process_routing(self, "decision_function", **params)
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(
Xt, **routed_params.get(name, {}).get("transform", {})
)
return self.steps[-1][1].decision_function(
Xt, **routed_params.get(self.steps[-1][0], {}).get("decision_function", {})
)
@available_if(_final_estimator_has("score_samples"))
def score_samples(self, X):
"""Transform the data, and apply `score_samples` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`score_samples` method. Only valid if the final estimator implements
`score_samples`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : ndarray of shape (n_samples,)
Result of calling `score_samples` on the final estimator.
"""
Xt = X
for _, _, transformer in self._iter(with_final=False):
Xt = transformer.transform(Xt)
return self.steps[-1][1].score_samples(Xt)
@available_if(_final_estimator_has("predict_log_proba"))
def predict_log_proba(self, X, **params):
"""Transform the data, and apply `predict_log_proba` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`predict_log_proba` method. Only valid if the final estimator
implements `predict_log_proba`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default):
Parameters to the `predict_log_proba` called at the end of all
transformations in the pipeline.
- If `enable_metadata_routing=True`:
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y_log_proba : ndarray of shape (n_samples, n_classes)
Result of calling `predict_log_proba` on the final estimator.
"""
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict_log_proba(Xt, **params)
# metadata routing enabled
routed_params = process_routing(self, "predict_log_proba", **params)
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].predict_log_proba(
Xt, **routed_params[self.steps[-1][0]].predict_log_proba
)
def _can_transform(self):
return self._final_estimator == "passthrough" or hasattr(
self._final_estimator, "transform"
)
@available_if(_can_transform)
def transform(self, X, **params):
"""Transform the data, and apply `transform` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`transform` method. Only valid if the final estimator
implements `transform`.
This also works where final estimator is `None` in which case all prior
transformations are applied.
Parameters
----------
X : iterable
Data to transform. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed data.
"""
_raise_for_params(params, self, "transform")
# not branching here since params is only available if
# enable_metadata_routing=True
routed_params = process_routing(self, "transform", **params)
Xt = X
for _, name, transform in self._iter():
Xt = transform.transform(Xt, **routed_params[name].transform)
return Xt
def _can_inverse_transform(self):
return all(hasattr(t, "inverse_transform") for _, _, t in self._iter())
@available_if(_can_inverse_transform)
def inverse_transform(self, Xt, **params):
"""Apply `inverse_transform` for each step in a reverse order.
All estimators in the pipeline must support `inverse_transform`.
Parameters
----------
Xt : array-like of shape (n_samples, n_transformed_features)
Data samples, where ``n_samples`` is the number of samples and
``n_features`` is the number of features. Must fulfill
input requirements of last step of pipeline's
``inverse_transform`` method.
**params : dict of str -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Inverse transformed data, that is, data in the original feature
space.
"""
_raise_for_params(params, self, "inverse_transform")
# we don't have to branch here, since params is only non-empty if
# enable_metadata_routing=True.
routed_params = process_routing(self, "inverse_transform", **params)
reverse_iter = reversed(list(self._iter()))
for _, name, transform in reverse_iter:
Xt = transform.inverse_transform(
Xt, **routed_params[name].inverse_transform
)
return Xt
@available_if(_final_estimator_has("score"))
def score(self, X, y=None, sample_weight=None, **params):
"""Transform the data, and apply `score` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`score` method. Only valid if the final estimator implements `score`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
**params : dict of str -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : float
Result of calling `score` on the final estimator.
"""
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
score_params = {}
if sample_weight is not None:
score_params["sample_weight"] = sample_weight
return self.steps[-1][1].score(Xt, y, **score_params)
# metadata routing is enabled.
routed_params = process_routing(
self, "score", sample_weight=sample_weight, **params
)
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].score(Xt, y, **routed_params[self.steps[-1][0]].score)
@property
def classes_(self):
"""The classes labels. Only exist if the last step is a classifier."""
return self.steps[-1][1].classes_
def _more_tags(self):
tags = {
"_xfail_checks": {
"check_dont_overwrite_parameters": (
"Pipeline changes the `steps` parameter, which it shouldn't."
"Therefore this test is x-fail until we fix this."
),
"check_estimators_overwrite_params": (
"Pipeline changes the `steps` parameter, which it shouldn't."
"Therefore this test is x-fail until we fix this."
),
}
}
try:
tags["pairwise"] = _safe_tags(self.steps[0][1], "pairwise")
except (ValueError, AttributeError, TypeError):
# This happens when the `steps` is not a list of (name, estimator)
# tuples and `fit` is not called yet to validate the steps.
pass
try:
tags["multioutput"] = _safe_tags(self.steps[-1][1], "multioutput")
except (ValueError, AttributeError, TypeError):
# This happens when the `steps` is not a list of (name, estimator)
# tuples and `fit` is not called yet to validate the steps.
pass
return tags
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Transform input features using the pipeline.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
feature_names_out = input_features
for _, name, transform in self._iter():
if not hasattr(transform, "get_feature_names_out"):
raise AttributeError(
"Estimator {} does not provide get_feature_names_out. "
"Did you mean to call pipeline[:-1].get_feature_names_out"
"()?".format(name)
)
feature_names_out = transform.get_feature_names_out(feature_names_out)
return feature_names_out
@property
def n_features_in_(self):
"""Number of features seen during first step `fit` method."""
# delegate to first step (which will call _check_is_fitted)
return self.steps[0][1].n_features_in_
@property
def feature_names_in_(self):
"""Names of features seen during first step `fit` method."""
# delegate to first step (which will call _check_is_fitted)
return self.steps[0][1].feature_names_in_
def __sklearn_is_fitted__(self):
"""Indicate whether pipeline has been fit."""
try:
# check if the last step of the pipeline is fitted
# we only check the last step since if the last step is fit, it
# means the previous steps should also be fit. This is faster than
# checking if every step of the pipeline is fit.
check_is_fitted(self.steps[-1][1])
return True
except NotFittedError:
return False
def _sk_visual_block_(self):
_, estimators = zip(*self.steps)
def _get_name(name, est):
if est is None or est == "passthrough":
return f"{name}: passthrough"
# Is an estimator
return f"{name}: {est.__class__.__name__}"
names = [_get_name(name, est) for name, est in self.steps]
name_details = [str(est) for est in estimators]
return _VisualBlock(
"serial",
estimators,
names=names,
name_details=name_details,
dash_wrapped=False,
)
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
# first we add all steps except the last one
for _, name, trans in self._iter(with_final=False, filter_passthrough=True):
method_mapping = MethodMapping()
# fit, fit_predict, and fit_transform call fit_transform if it
# exists, or else fit and transform
if hasattr(trans, "fit_transform"):
(
method_mapping.add(caller="fit", callee="fit_transform")
.add(caller="fit_transform", callee="fit_transform")
.add(caller="fit_predict", callee="fit_transform")
)
else:
(
method_mapping.add(caller="fit", callee="fit")
.add(caller="fit", callee="transform")
.add(caller="fit_transform", callee="fit")
.add(caller="fit_transform", callee="transform")
.add(caller="fit_predict", callee="fit")
.add(caller="fit_predict", callee="transform")
)
(
method_mapping.add(caller="predict", callee="transform")
.add(caller="predict", callee="transform")
.add(caller="predict_proba", callee="transform")
.add(caller="decision_function", callee="transform")
.add(caller="predict_log_proba", callee="transform")
.add(caller="transform", callee="transform")
.add(caller="inverse_transform", callee="inverse_transform")
.add(caller="score", callee="transform")
)
router.add(method_mapping=method_mapping, **{name: trans})
final_name, final_est = self.steps[-1]
if final_est is None or final_est == "passthrough":
return router
# then we add the last step
method_mapping = MethodMapping()
if hasattr(final_est, "fit_transform"):
method_mapping.add(caller="fit_transform", callee="fit_transform")
else:
method_mapping.add(caller="fit", callee="fit").add(
caller="fit", callee="transform"
)
(
method_mapping.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict")
.add(caller="fit_predict", callee="fit_predict")
.add(caller="predict_proba", callee="predict_proba")
.add(caller="decision_function", callee="decision_function")
.add(caller="predict_log_proba", callee="predict_log_proba")
.add(caller="transform", callee="transform")
.add(caller="inverse_transform", callee="inverse_transform")
.add(caller="score", callee="score")
)
router.add(method_mapping=method_mapping, **{final_name: final_est})
return router
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [
estimator if isinstance(estimator, str) else type(estimator).__name__.lower()
for estimator in estimators
]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(namecount.items()):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps, memory=None, verbose=False):
"""Construct a :class:`Pipeline` from the given estimators.
This is a shorthand for the :class:`Pipeline` constructor; it does not
require, and does not permit, naming the estimators. Instead, their names
will be set to the lowercase of their types automatically.
Parameters
----------
*steps : list of Estimator objects
List of the scikit-learn estimators that are chained together.
memory : str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. The last step
will never be cached, even if it is a transformer. By default, no
caching is performed. If a string is given, it is the path to the
caching directory. Enabling caching triggers a clone of the transformers
before fitting. Therefore, the transformer instance given to the
pipeline cannot be inspected directly. Use the attribute ``named_steps``
or ``steps`` to inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Returns
-------
p : Pipeline
Returns a scikit-learn :class:`Pipeline` object.
See Also
--------
Pipeline : Class for creating a pipeline of transforms with a final
estimator.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
"""
return Pipeline(_name_estimators(steps), memory=memory, verbose=verbose)
def _transform_one(transformer, X, y, weight, params):
"""Call transform and apply weight to output.
Parameters
----------
transformer : estimator
Estimator to be used for transformation.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data to be transformed.
y : ndarray of shape (n_samples,)
Ignored.
weight : float
Weight to be applied to the output of the transformation.
params : dict
Parameters to be passed to the transformer's ``transform`` method.
This should be of the form ``process_routing()["step_name"]``.
"""
res = transformer.transform(X, **params.transform)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(
transformer, X, y, weight, message_clsname="", message=None, params=None
):
"""
Fits ``transformer`` to ``X`` and ``y``. The transformed result is returned
with the fitted transformer. If ``weight`` is not ``None``, the result will
be multiplied by ``weight``.
``params`` needs to be of the form ``process_routing()["step_name"]``.
"""
params = params or {}
with _print_elapsed_time(message_clsname, message):
if hasattr(transformer, "fit_transform"):
res = transformer.fit_transform(X, y, **params.get("fit_transform", {}))
else:
res = transformer.fit(X, y, **params.get("fit", {})).transform(
X, **params.get("transform", {})
)
if weight is None:
return res, transformer
return res * weight, transformer
def _fit_one(transformer, X, y, weight, message_clsname="", message=None, params=None):
"""
Fits ``transformer`` to ``X`` and ``y``.
"""
with _print_elapsed_time(message_clsname, message):
return transformer.fit(X, y, **params["fit"])
class FeatureUnion(TransformerMixin, _BaseComposition):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters of the transformers may be set using its name and the parameter
name separated by a '__'. A transformer may be replaced entirely by
setting the parameter with its name to another transformer, removed by
setting to 'drop' or disabled by setting to 'passthrough' (features are
passed without transformation).
Read more in the :ref:`User Guide <feature_union>`.
.. versionadded:: 0.13
Parameters
----------
transformer_list : list of (str, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer. The transformer can
be 'drop' for it to be ignored or can be 'passthrough' for features to
be passed unchanged.
.. versionadded:: 1.1
Added the option `"passthrough"`.
.. versionchanged:: 0.22
Deprecated `None` as a transformer in favor of 'drop'.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
transformer_weights : dict, default=None
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
Raises ValueError if key not present in ``transformer_list``.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Attributes
----------
named_transformers : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
Read-only attribute to access any transformer parameter by user
given name. Keys are transformer names and values are
transformer parameters.
.. versionadded:: 1.2
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying first transformer in `transformer_list` exposes such an
attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when
`X` has feature names that are all strings.
.. versionadded:: 1.3
See Also
--------
make_union : Convenience function for simplified feature union
construction.
Examples
--------
>>> from sklearn.pipeline import FeatureUnion
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> union = FeatureUnion([("pca", PCA(n_components=1)),
... ("svd", TruncatedSVD(n_components=2))])
>>> X = [[0., 1., 3], [2., 2., 5]]
>>> union.fit_transform(X)
array([[ 1.5 , 3.0..., 0.8...],
[-1.5 , 5.7..., -0.4...]])
>>> # An estimator's parameter can be set using '__' syntax
>>> union.set_params(pca__n_components=1).fit_transform(X)
array([[ 1.5 , 3.0...],
[-1.5 , 5.7...]])
For a more detailed example of usage, see
:ref:`sphx_glr_auto_examples_compose_plot_feature_union.py`.
"""
_required_parameters = ["transformer_list"]
def __init__(
self, transformer_list, *, n_jobs=None, transformer_weights=None, verbose=False
):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self.verbose = verbose
def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
`set_output` will set the output of all estimators in `transformer_list`.
Parameters
----------
transform : {"default", "pandas"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `None`: Transform configuration is unchanged
Returns
-------
self : estimator instance
Estimator instance.
"""
super().set_output(transform=transform)
for _, step, _ in self._iter():
_safe_set_output(step, transform=transform)
return self
@property
def named_transformers(self):
# Use Bunch object to improve autocomplete
return Bunch(**dict(self.transformer_list))
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `transformer_list` of the
`FeatureUnion`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("transformer_list", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that
you can directly set the parameters of the estimators contained in
`transformer_list`.
Parameters
----------
**kwargs : dict
Parameters of this estimator or parameters of estimators contained
in `transform_list`. Parameters of the transformers may be set
using its name and the parameter name separated by a '__'.
Returns
-------
self : object
FeatureUnion class instance.
"""
self._set_params("transformer_list", **kwargs)
return self
def _validate_transformers(self):
names, transformers = zip(*self.transformer_list)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t in ("drop", "passthrough"):
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
raise TypeError(
"All estimators should implement fit and "
"transform. '%s' (type %s) doesn't" % (t, type(t))
)
def _validate_transformer_weights(self):
if not self.transformer_weights:
return
transformer_names = set(name for name, _ in self.transformer_list)
for name in self.transformer_weights:
if name not in transformer_names:
raise ValueError(
f'Attempting to weight transformer "{name}", '
"but it is not present in transformer_list."
)
def _iter(self):
"""
Generate (name, trans, weight) tuples excluding None and
'drop' transformers.
"""
get_weight = (self.transformer_weights or {}).get
for name, trans in self.transformer_list:
if trans == "drop":
continue
if trans == "passthrough":
trans = FunctionTransformer(feature_names_out="one-to-one")
yield (name, trans, get_weight(name))
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
feature_names = []
for name, trans, _ in self._iter():
if not hasattr(trans, "get_feature_names_out"):
raise AttributeError(
"Transformer %s (type %s) does not provide get_feature_names_out."
% (str(name), type(trans).__name__)
)
feature_names.extend(
[f"{name}__{f}" for f in trans.get_feature_names_out(input_features)]
)
return np.asarray(feature_names, dtype=object)
def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
Returns
-------
self : object
FeatureUnion class instance.
"""
transformers = self._parallel_func(X, y, fit_params, _fit_one)
if not transformers:
# All transformers are None
return self
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
results = self._parallel_func(X, y, fit_params, _fit_transform_one)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
return self._hstack(Xs)
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return "(step %d of %d) Processing %s" % (idx, total, name)
def _parallel_func(self, X, y, fit_params, func):
"""Runs func in parallel on X and y"""
self.transformer_list = list(self.transformer_list)
self._validate_transformers()
self._validate_transformer_weights()
transformers = list(self._iter())
params = Bunch(fit=fit_params, fit_transform=fit_params)
return Parallel(n_jobs=self.n_jobs)(
delayed(func)(
transformer,
X,
y,
weight,
message_clsname="FeatureUnion",
message=self._log_message(name, idx, len(transformers)),
params=params,
)
for idx, (name, transformer, weight) in enumerate(transformers, 1)
)
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
# TODO(SLEP6): accept **params here in `transform` and route it to the
# underlying estimators.
params = Bunch(transform={})
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight, params)
for name, trans, weight in self._iter()
)
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return self._hstack(Xs)
def _hstack(self, Xs):
config = _get_output_config("transform", self)
if config["dense"] == "pandas" and all(hasattr(X, "iloc") for X in Xs):
pd = check_pandas_support("transform")
return pd.concat(Xs, axis=1)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def _update_transformer_list(self, transformers):
transformers = iter(transformers)
self.transformer_list[:] = [
(name, old if old == "drop" else next(transformers))
for name, old in self.transformer_list
]
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
# X is passed to all transformers so we just delegate to the first one
return self.transformer_list[0][1].n_features_in_
@property
def feature_names_in_(self):
"""Names of features seen during :term:`fit`."""
# X is passed to all transformers -- delegate to the first one
return self.transformer_list[0][1].feature_names_in_
def __sklearn_is_fitted__(self):
# Delegate whether feature union was fitted
for _, transformer, _ in self._iter():
check_is_fitted(transformer)
return True
def _sk_visual_block_(self):
names, transformers = zip(*self.transformer_list)
return _VisualBlock("parallel", transformers, names=names)
def __getitem__(self, name):
"""Return transformer with name."""
if not isinstance(name, str):
raise KeyError("Only string keys are supported")
return self.named_transformers[name]
def make_union(*transformers, n_jobs=None, verbose=False):
"""Construct a :class:`FeatureUnion` from the given transformers.
This is a shorthand for the :class:`FeatureUnion` constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting.
Parameters
----------
*transformers : list of estimators
One or more estimators.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
f : FeatureUnion
A :class:`FeatureUnion` object for concatenating the results of multiple
transformer objects.
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose) |
|
scikit-learn__scikit-learn | feature_extraction.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/feature_extraction.rst | [
"scikit-learn__scikit-learn/sklearn/feature_extraction/text.py",
"scikit-learn__scikit-learn/sklearn/feature_extraction/image.py"
] | scikit-learn__scikit-learn/sklearn/feature_extraction | Feature extraction
The sklearn.feature_extraction module can be used to extract features in
a format supported by machine learning algorithms from datasets
consisting of formats such as text and image.
Note
Feature extraction is very different from feature_selection: the former
consists in transforming arbitrary data, such as text or images, into
numerical features usable for machine learning. The latter is a machine
learning technique applied on these features.
Loading features from dicts
The class DictVectorizer can be used to convert feature arrays
represented as lists of standard Python dict objects to the NumPy/SciPy
representation used by scikit-learn estimators.
While not particularly fast to process, Python's dict has the advantages
of being convenient to use, being sparse (absent features need not be
stored) and storing feature names in addition to values.
DictVectorizer implements what is called one-of-K or "one-hot" coding
for categorical (aka nominal, discrete) features. Categorical features
are "attribute-value" pairs where the value is restricted to a list of
discrete possibilities without ordering (e.g. topic identifiers, types
of objects, tags, names...).
In the following, "city" is a categorical attribute while "temperature"
is a traditional numerical feature:
>>> measurements = [
... {'city': 'Dubai', 'temperature': 33.},
... {'city': 'London', 'temperature': 12.},
... {'city': 'San Francisco', 'temperature': 18.},
... ]
>>> from sklearn.feature_extraction import DictVectorizer
>>> vec = DictVectorizer()
>>> vec.fit_transform(measurements).toarray()
array([[ 1., 0., 0., 33.],
[ 0., 1., 0., 12.],
[ 0., 0., 1., 18.]])
>>> vec.get_feature_names_out()
array(['city=Dubai', 'city=London', 'city=San Francisco', 'temperature'], ...)
DictVectorizer accepts multiple string values for one feature, like,
e.g., multiple categories for a movie.
Assume a database classifies each movie using some categories (not
mandatory) and its year of release.
>>> movie_entry = [{'category': ['thriller', 'drama'], 'year': 2003},
...                {'category': ['animation', 'family'], 'year': 2011},
...                {'year': 1974}]
>>> vec.fit_transform(movie_entry).toarray()
array([[0.000e+00, 1.000e+00, 0.000e+00, 1.000e+00, 2.003e+03],
       [1.000e+00, 0.000e+00, 1.000e+00, 0.000e+00, 2.011e+03],
       [0.000e+00, 0.000e+00, 0.000e+00, 0.000e+00, 1.974e+03]])
>>> vec.get_feature_names_out()
array(['category=animation', 'category=drama', 'category=family',
       'category=thriller', 'year'], ...)
>>> vec.transform({'category': ['thriller'],
...                'unseen_feature': '3'}).toarray()
array([[0., 0., 0., 1., 0.]])
DictVectorizer is also a useful representation transformation for
training sequence classifiers in Natural Language Processing models that
typically work by extracting feature windows around a particular word of
interest.
For example, suppose that we have a first algorithm that extracts Part
of Speech (PoS) tags that we want to use as complementary tags for
training a sequence classifier (e.g. a chunker). The following dict
could be such a window of features extracted around the word 'sat' in
the sentence 'The cat sat on the mat.':
>>> pos_window = [
... {
... 'word-2': 'the',
... 'pos-2': 'DT',
... 'word-1': 'cat',
... 'pos-1': 'NN',
... 'word+1': 'on',
... 'pos+1': 'PP',
... },
... # in a real application one would extract many such dictionaries
... ]
This description can be vectorized into a sparse two-dimensional matrix
suitable for feeding into a classifier (maybe after being piped into a
~text.TfidfTransformer for normalization):
>>> vec = DictVectorizer()
>>> pos_vectorized = vec.fit_transform(pos_window)
>>> pos_vectorized
<1x6 sparse matrix of type '<... 'numpy.float64'>'
with 6 stored elements in Compressed Sparse ... format>
>>> pos_vectorized.toarray()
array([[1., 1., 1., 1., 1., 1.]])
>>> vec.get_feature_names_out()
array(['pos+1=PP', 'pos-1=NN', 'pos-2=DT', 'word+1=on', 'word-1=cat',
'word-2=the'], ...)
As you can imagine, extracting such a context around each individual
word of a corpus of documents produces a very wide matrix (many
one-hot features), most of whose values are zero most of the time. To
make the resulting data structure fit in memory, the DictVectorizer
class uses a scipy.sparse matrix by default instead of a
numpy.ndarray.
Feature hashing
The class FeatureHasher is a high-speed, low-memory vectorizer that uses
a technique known as feature hashing, or the "hashing trick". Instead of
building a hash table of the features encountered in training, as the
vectorizers do, instances of FeatureHasher apply a hash function to the
features to determine their column index in sample matrices directly.
The result is increased speed and reduced memory usage, at the expense
of inspectability; the hasher does not remember what the input features
looked like and has no inverse_transform method.
Since the hash function might cause collisions between (unrelated)
features, a signed hash function is used and the sign of the hash value
determines the sign of the value stored in the output matrix for a
feature. This way, collisions are likely to cancel out rather than
accumulate error, and the expected mean of any output feature's value is
zero. This mechanism is enabled by default with alternate_sign=True and
is particularly useful for small hash table sizes (n_features < 10000).
For large hash table sizes, it can be disabled, to allow the output to
be passed to estimators like ~sklearn.naive_bayes.MultinomialNB or
~sklearn.feature_selection.chi2 feature selectors that expect
non-negative inputs.
FeatureHasher accepts either mappings (like Python's dict and its
variants in the collections module), (feature, value) pairs, or strings,
depending on the constructor parameter input_type. Mapping are treated
as lists of (feature, value) pairs, while single strings have an
implicit value of 1, so ['feat1', 'feat2', 'feat3'] is interpreted as
[('feat1', 1), ('feat2', 1), ('feat3', 1)]. If a single feature occurs
multiple times in a sample, the associated values will be summed (so
('feat', 2) and ('feat', 3.5) become ('feat', 5.5)). The output from
FeatureHasher is always a scipy.sparse matrix in the CSR format.
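A minimal sketch of the three accepted input types (n_features=8 is chosen here only to keep the example small):
from sklearn.feature_extraction import FeatureHasher
# mappings: input_type='dict' is the default
h_dict = FeatureHasher(n_features=8)
X1 = h_dict.transform([{'feat': 2, 'other': 1}])
# (feature, value) pairs: repeated features are summed, so ('feat', 2) and
# ('feat', 3.5) contribute a total of 5.5 to one column (the sign depends on the hash)
h_pair = FeatureHasher(n_features=8, input_type='pair')
X2 = h_pair.transform([[('feat', 2), ('feat', 3.5)]])
# plain strings: each string has an implicit value of 1
h_str = FeatureHasher(n_features=8, input_type='string')
X3 = h_str.transform([['feat1', 'feat2', 'feat3']])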
Feature hashing can be employed in document classification, but unlike
~text.CountVectorizer, FeatureHasher does not do word splitting or any
other preprocessing except Unicode-to-UTF-8 encoding; see
hashing_vectorizer, below, for a combined tokenizer/hasher.
As an example, consider a word-level natural language processing task
that needs features extracted from (token, part_of_speech) pairs. One
could use a Python generator function to extract features:
def token_features(token, part_of_speech):
if token.isdigit():
yield "numeric"
else:
yield "token={}".format(token.lower())
yield "token,pos={},{}".format(token, part_of_speech)
if token[0].isupper():
yield "uppercase_initial"
if token.isupper():
yield "all_uppercase"
yield "pos={}".format(part_of_speech)
Then, the raw_X to be fed to FeatureHasher.transform can be constructed
using:
raw_X = (token_features(tok, pos_tagger(tok)) for tok in corpus)
and fed to a hasher with:
hasher = FeatureHasher(input_type='string')
X = hasher.transform(raw_X)
to get a scipy.sparse matrix X.
Note the use of a generator comprehension, which introduces laziness
into the feature extraction: tokens are only processed on demand from
the hasher.
Implementation details
FeatureHasher uses the signed 32-bit variant of MurmurHash3. As a result
(and because of limitations in scipy.sparse), the maximum number of
features supported is currently 2³¹ − 1.
The original formulation of the hashing trick by Weinberger et al. used
two separate hash functions h and ξ to determine the column index and
sign of a feature, respectively. The present implementation works under
the assumption that the sign bit of MurmurHash3 is independent of its
other bits.
Since a simple modulo is used to transform the hash function to a column
index, it is advisable to use a power of two as the n_features
parameter; otherwise the features will not be mapped evenly to the
columns.
References:
- Kilian Weinberger, Anirban Dasgupta, John Langford, Alex Smola and
Josh Attenberg (2009). Feature hashing for large scale multitask
learning. Proc. ICML.
- MurmurHash3.
Text feature extraction
The Bag of Words representation
Text Analysis is a major application field for machine learning
algorithms. However, the raw data, a sequence of symbols, cannot be fed
directly to the algorithms themselves, as most of them expect numerical
feature vectors with a fixed size rather than raw text documents of
variable length.
In order to address this, scikit-learn provides utilities for the most
common ways to extract numerical features from text content, namely:
- tokenizing strings and giving an integer id for each possible token,
for instance by using white-spaces and punctuation as token
separators.
- counting the occurrences of tokens in each document.
- normalizing and weighting with diminishing importance tokens that
occur in the majority of samples / documents.
In this scheme, features and samples are defined as follows:
- each individual token occurrence frequency (normalized or not) is
treated as a feature.
- the vector of all the token frequencies for a given document is
considered a multivariate sample.
A corpus of documents can thus be represented by a matrix with one row
per document and one column per token (e.g. word) occurring in the
corpus.
We call vectorization the general process of turning a collection of
text documents into numerical feature vectors. This specific strategy
(tokenization, counting and normalization) is called the Bag of Words or
"Bag of n-grams" representation. Documents are described by word
occurrences while completely ignoring the relative position information
of the words in the document.
Sparsity
As most documents will typically use a very small subset of the words
used in the corpus, the resulting matrix will have many feature values
that are zeros (typically more than 99% of them).
For instance a collection of 10,000 short text documents (such as
emails) will use a vocabulary with a size in the order of 100,000 unique
words in total while each document will use 100 to 1000 unique words
individually.
In order to be able to store such a matrix in memory but also to speed
up algebraic operations matrix / vector, implementations will typically
use a sparse representation such as the implementations available in the
scipy.sparse package.
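A quick way to check this in practice is to compare the number of stored (non-zero) values with the full size of the matrix; the helper below is a small sketch that works for any scipy.sparse matrix returned by the vectorizers discussed next:
def density(X):
    # fraction of explicitly stored (non-zero) entries in a scipy.sparse matrix
    return X.nnz / (X.shape[0] * X.shape[1])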
Common Vectorizer usage
CountVectorizer implements both tokenization and occurrence counting in
a single class:
>>> from sklearn.feature_extraction.text import CountVectorizer
This model has many parameters, however the default values are quite
reasonable (please see the reference documentation
<text_feature_extraction_ref> for the details):
>>> vectorizer = CountVectorizer()
>>> vectorizer
CountVectorizer()
Let's use it to tokenize and count the word occurrences of a
minimalistic corpus of text documents:
>>> corpus = [
... 'This is the first document.',
... 'This is the second second document.',
... 'And the third one.',
... 'Is this the first document?',
... ]
>>> X = vectorizer.fit_transform(corpus)
>>> X
<4x9 sparse matrix of type '<... 'numpy.int64'>'
with 19 stored elements in Compressed Sparse ... format>
The default configuration tokenizes the string by extracting words of at
least 2 letters. The specific function that does this step can be
requested explicitly:
>>> analyze = vectorizer.build_analyzer()
>>> analyze("This is a text document to analyze.") == (
... ['this', 'is', 'text', 'document', 'to', 'analyze'])
True
Each term found by the analyzer during the fit is assigned a unique
integer index corresponding to a column in the resulting matrix. This
interpretation of the columns can be retrieved as follows:
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the',
'third', 'this'], ...)
>>> X.toarray()
array([[0, 1, 1, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 2, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 0, 1, 0, 1]]...)
The converse mapping from feature name to column index is stored in the
vocabulary_ attribute of the vectorizer:
>>> vectorizer.vocabulary_.get('document')
1
Hence words that were not seen in the training corpus will be completely
ignored in future calls to the transform method:
>>> vectorizer.transform(['Something completely new.']).toarray()
array([[0, 0, 0, 0, 0, 0, 0, 0, 0]]...)
Note that in the previous corpus, the first and the last documents have
exactly the same words hence are encoded in equal vectors. In particular
we lose the information that the last document is an interrogative form.
To preserve some of the local ordering information we can extract
2-grams of words in addition to the 1-grams (individual words):
>>> bigram_vectorizer = CountVectorizer(ngram_range=(1, 2),
... token_pattern=r'\b\w+\b', min_df=1)
>>> analyze = bigram_vectorizer.build_analyzer()
>>> analyze('Bi-grams are cool!') == (
... ['bi', 'grams', 'are', 'cool', 'bi grams', 'grams are', 'are cool'])
True
The vocabulary extracted by this vectorizer is hence much bigger and can
now resolve ambiguities encoded in local positioning patterns:
>>> X_2 = bigram_vectorizer.fit_transform(corpus).toarray()
>>> X_2
array([[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 0, 0, 2, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1]]...)
In particular the interrogative form "Is this" is only present in the
last document:
>>> feature_index = bigram_vectorizer.vocabulary_.get('is this')
>>> X_2[:, feature_index]
array([0, 0, 0, 1]...)
Using stop words
Stop words are words like "and", "the", "him", which are presumed to be
uninformative in representing the content of a text, and which may be
removed to avoid them being construed as signal for prediction.
Sometimes, however, similar words are useful for prediction, such as in
classifying writing style or personality.
There are several known issues in our provided 'english' stop word list.
It does not aim to be a general, 'one-size-fits-all' solution as some
tasks may require a more custom solution. See [NQY18] for more details.
Please take care in choosing a stop word list. Popular stop word lists
may include words that are highly informative to some tasks, such as
computer.
You should also make sure that the stop word list has had the same
preprocessing and tokenization applied as the one used in the
vectorizer. The word we've is split into we and ve by CountVectorizer's
default tokenizer, so if we've is in stop_words, but ve is not, ve will
be retained from we've in transformed text. Our vectorizers will try to
identify and warn about some kinds of inconsistencies.
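As a small sketch, the built-in list is selected with the string 'english', while a custom list can be passed directly (and should be preprocessed and tokenized consistently with the documents, as discussed above):
from sklearn.feature_extraction.text import CountVectorizer
# built-in English stop word list
vec_builtin = CountVectorizer(stop_words='english')
# custom stop word list
vec_custom = CountVectorizer(stop_words=['and', 'the', 'him'])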
References
- [NQY18] J. Nothman, H. Qin and R. Yurchak (2018). "Stop Word Lists in Free Open-source Software Packages". In Proc. Workshop for NLP Open Source Software.
Tf–idf term weighting
In a large text corpus, some words will be very present (e.g. "the",
"a", "is" in English) hence carrying very little meaningful information
about the actual contents of the document. If we were to feed the direct
count data directly to a classifier those very frequent terms would
shadow the frequencies of rarer yet more interesting terms.
In order to re-weight the count features into floating point values
suitable for usage by a classifier it is very common to use the tf–idf
transform.
Tf means term-frequency while tf–idf means term-frequency times inverse
document-frequency: tf-idf(t,d) = tf(t,d) × idf(t).
Using the TfidfTransformer's default settings,
TfidfTransformer(norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False)
the term frequency, the number of times a term occurs in a given
document, is multiplied with idf component, which is computed as
$\text{idf}(t) = \log{\frac{1 + n}{1+\text{df}(t)}} + 1$,
where n is the total number of documents in the document set, and df(t)
is the number of documents in the document set that contain term t. The
resulting tf-idf vectors are then normalized by the Euclidean norm:
$v_{norm} = \frac{v}{||v||_2} = \frac{v}{\sqrt{v{_1}^2 +
v{_2}^2 + \dots + v{_n}^2}}$.
This was originally a term weighting scheme developed for information
retrieval (as a ranking function for search engines results) that has
also found good use in document classification and clustering.
The following sections contain further explanations and examples that
illustrate how the tf-idfs are computed exactly and how the tf-idfs
computed in scikit-learn's TfidfTransformer and TfidfVectorizer differ
slightly from the standard textbook notation that defines the idf as
$\text{idf}(t) = \log{\frac{n}{1+\text{df}(t)}}.$
In the TfidfTransformer and TfidfVectorizer with smooth_idf=False, the
"1" count is added to the idf instead of the idf's denominator:
$\text{idf}(t) = \log{\frac{n}{\text{df}(t)}} + 1$
This normalization is implemented by the TfidfTransformer class:
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> transformer = TfidfTransformer(smooth_idf=False)
>>> transformer
TfidfTransformer(smooth_idf=False)
Again please see the reference documentation
<text_feature_extraction_ref> for the details on all the parameters.
Let's take an example with the following counts. The first term is
present 100% of the time, hence not very interesting. The two other
features are present in less than 50% of the documents, hence probably
more representative of the content of the documents:
>>> counts = [[3, 0, 1],
... [2, 0, 0],
... [3, 0, 0],
... [4, 0, 0],
... [3, 2, 0],
... [3, 0, 2]]
...
>>> tfidf = transformer.fit_transform(counts)
>>> tfidf
<6x3 sparse matrix of type '<... 'numpy.float64'>'
with 9 stored elements in Compressed Sparse ... format>
>>> tfidf.toarray()
array([[0.81940995, 0. , 0.57320793],
[1. , 0. , 0. ],
[1. , 0. , 0. ],
[1. , 0. , 0. ],
[0.47330339, 0.88089948, 0. ],
[0.58149261, 0. , 0.81355169]])
Each row is normalized to have unit Euclidean norm:
$v_{norm} = \frac{v}{||v||_2} = \frac{v}{\sqrt{v{_1}^2 +
v{_2}^2 + \dots + v{_n}^2}}$
For example, we can compute the tf-idf of the first term in the first
document in the counts array as follows:
n = 6
df(t)_(term1) = 6
$\text{idf}(t)_{\text{term1}} =
\log \frac{n}{\text{df}(t)} + 1 = \log(1)+1 = 1$
tf-idf_(term1) = tf × idf = 3 × 1 = 3
Now, if we repeat this computation for the remaining 2 terms in the
document, we get
tf-idf_(term2) = 0 × (log(6/1)+1) = 0
tf-idf_(term3) = 1 × (log(6/2)+1) ≈ 2.0986
and the vector of raw tf-idfs:
tf-idf_(raw) = [3,0,2.0986].
Then, applying the Euclidean (L2) norm, we obtain the following tf-idfs
for document 1:
$\frac{[3, 0, 2.0986]}{\sqrt{\big(3^2 + 0^2 + 2.0986^2\big)}}
= [ 0.819, 0, 0.573].$
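The same numbers can be reproduced with a few lines of plain NumPy; this is only a sketch of the smooth_idf=False computation described above, not the library internals:
import numpy as np
tf = np.array([3., 0., 1.])   # counts for document 1
df = np.array([6., 1., 2.])   # document frequencies over the 6 documents
idf = np.log(6 / df) + 1
tfidf_raw = tf * idf                            # approximately [3., 0., 2.0986]
tfidf = tfidf_raw / np.linalg.norm(tfidf_raw)   # approximately [0.819, 0., 0.573]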
Furthermore, the default parameter smooth_idf=True adds "1" to the
numerator and denominator as if an extra document was seen containing
every term in the collection exactly once, which prevents zero
divisions:
$\text{idf}(t) = \log{\frac{1 + n}{1+\text{df}(t)}} + 1$
Using this modification, the tf-idf of the third term in document 1
changes to 1.8473:
tf-idf_(term3) = 1 × (log(7/3) + 1) ≈ 1.8473
And the L2-normalized tf-idf changes to
$\frac{[3, 0, 1.8473]}{\sqrt{\big(3^2 + 0^2 + 1.8473^2\big)}}
= [0.8515, 0, 0.5243]$:
>>> transformer = TfidfTransformer()
>>> transformer.fit_transform(counts).toarray()
array([[0.85151335, 0. , 0.52433293],
[1. , 0. , 0. ],
[1. , 0. , 0. ],
[1. , 0. , 0. ],
[0.55422893, 0.83236428, 0. ],
[0.63035731, 0. , 0.77630514]])
The weights of each feature computed by the fit method call are stored
in a model attribute:
>>> transformer.idf_
array([1. ..., 2.25..., 1.84...])
As tf–idf is very often used for text features, there is also another
class called TfidfVectorizer that combines all the options of
CountVectorizer and TfidfTransformer in a single model:
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> vectorizer = TfidfVectorizer()
>>> vectorizer.fit_transform(corpus)
<4x9 sparse matrix of type '<... 'numpy.float64'>'
with 19 stored elements in Compressed Sparse ... format>
While the tf–idf normalization is often very useful, there might be
cases where the binary occurrence markers might offer better features.
This can be achieved by using the binary parameter of CountVectorizer.
In particular, some estimators such as bernoulli_naive_bayes explicitly
model discrete boolean random variables. Also, very short texts are
likely to have noisy tf–idf values while the binary occurrence info is
more stable.
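A minimal sketch of such a binary encoding:
from sklearn.feature_extraction.text import CountVectorizer
# 0/1 occurrence indicators instead of raw term counts
binary_vectorizer = CountVectorizer(binary=True)
X_binary = binary_vectorizer.fit_transform(corpus)   # reusing the toy corpus from above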
As usual the best way to adjust the feature extraction parameters is to
use a cross-validated grid search, for instance by pipelining the
feature extractor with a classifier:
- sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py
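A minimal sketch of such a pipelined grid search, assuming docs is a list of raw text documents and y the corresponding labels:
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
pipe = Pipeline([('tfidf', TfidfVectorizer()), ('clf', SGDClassifier())])
param_grid = {
    'tfidf__ngram_range': [(1, 1), (1, 2)],
    'tfidf__use_idf': [True, False],
}
search = GridSearchCV(pipe, param_grid, cv=5)
# search.fit(docs, y)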
Decoding text files
Text is made of characters, but files are made of bytes. These bytes
represent characters according to some encoding. To work with text files
in Python, their bytes must be decoded to a character set called
Unicode. Common encodings are ASCII, Latin-1 (Western Europe), KOI8-R
(Russian) and the universal encodings UTF-8 and UTF-16. Many others
exist.
Note
An encoding can also be called a 'character set', but this term is less
accurate: several encodings can exist for a single character set.
The text feature extractors in scikit-learn know how to decode text
files, but only if you tell them what encoding the files are in. The
CountVectorizer takes an encoding parameter for this purpose. For modern
text files, the correct encoding is probably UTF-8, which is therefore
the default (encoding="utf-8").
If the text you are loading is not actually encoded with UTF-8, however,
you will get a UnicodeDecodeError. The vectorizers can be told to be
silent about decoding errors by setting the decode_error parameter to
either "ignore" or "replace". See the documentation for the Python
function bytes.decode for more details (type help(bytes.decode) at the
Python prompt).
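As a small sketch, both strategies look like this (koi8-r below stands in for whatever encoding the files are known to use):
from sklearn.feature_extraction.text import CountVectorizer
# the files are known to use a specific encoding
vec_known = CountVectorizer(encoding='koi8-r')
# the encoding is unknown or inconsistent: replace undecodable bytes
# instead of raising UnicodeDecodeError
vec_lenient = CountVectorizer(decode_error='replace')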
If you are having trouble decoding text, here are some things to try:
- Find out what the actual encoding of the text is. The file might
come with a header or README that tells you the encoding, or there
might be some standard encoding you can assume based on where the
text comes from.
- You may be able to find out what kind of encoding it is in general
using the UNIX command file. The Python chardet module comes with a
script called chardetect.py that will guess the specific encoding,
though you cannot rely on its guess being correct.
- You could try UTF-8 and disregard the errors. You can decode byte
strings with bytes.decode(errors='replace') to replace all decoding
errors with a meaningless character, or set decode_error='replace'
in the vectorizer. This may damage the usefulness of your features.
- Real text may come from a variety of sources that may have used
different encodings, or even be sloppily decoded in a different
encoding than the one it was encoded with. This is common in text
retrieved from the Web. The Python package ftfy can automatically
sort out some classes of decoding errors, so you could try decoding
the unknown text as latin-1 and then using ftfy to fix errors.
- If the text is in a mish-mash of encodings that is simply too hard
to sort out (which is the case for the 20 Newsgroups dataset), you
can fall back on a simple single-byte encoding such as latin-1. Some
text may display incorrectly, but at least the same sequence of
bytes will always represent the same feature.
For example, the following snippet uses chardet (not shipped with
scikit-learn, must be installed separately) to figure out the encoding
of three texts. It then vectorizes the texts and prints the learned
vocabulary. The output is not shown here.
>>> import chardet    # doctest: +SKIP
>>> text1 = b"Sei mir gegr\xc3\xbc\xc3\x9ft mein Sauerkraut"
>>> text2 = b"holdselig sind deine Ger\xfcche"
>>> text3 = b"\xff\xfeA\x00u\x00f\x00 \x00F\x00l\x00\xfc\x00g\x00e\x00l\x00n\x00 \x00d\x00e\x00s\x00 \x00G\x00e\x00s\x00a\x00n\x00g\x00e\x00s\x00,\x00 \x00H\x00e\x00r\x00z\x00l\x00i\x00e\x00b\x00c\x00h\x00e\x00n\x00,\x00 \x00t\x00r\x00a\x00g\x00 \x00i\x00c\x00h\x00 \x00d\x00i\x00c\x00h\x00 \x00f\x00o\x00r\x00t\x00"
>>> decoded = [x.decode(chardet.detect(x)['encoding'])
...            for x in (text1, text2, text3)]        # doctest: +SKIP
>>> v = CountVectorizer().fit(decoded).vocabulary_    # doctest: +SKIP
>>> for term in v: print(v)                           # doctest: +SKIP
(Depending on the version of chardet, it might get the first one wrong.)
For an introduction to Unicode and character encodings in general, see
Joel Spolsky's Absolute Minimum Every Software Developer Must Know About
Unicode.
Applications and examples
The bag of words representation is quite simplistic but surprisingly
useful in practice.
In particular in a supervised setting it can be successfully combined
with fast and scalable linear models to train document classifiers, for
instance:
- sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py
In an unsupervised setting it can be used to group similar documents
together by applying clustering algorithms such as k_means:
- sphx_glr_auto_examples_text_plot_document_clustering.py
Finally it is possible to discover the main topics of a corpus by
relaxing the hard assignment constraint of clustering, for instance by
using NMF:
- sphx_glr_auto_examples_applications_plot_topics_extraction_with_nmf_lda.py
Limitations of the Bag of Words representation
A collection of unigrams (what bag of words is) cannot capture phrases
and multi-word expressions, effectively disregarding any word order
dependence. Additionally, the bag of words model doesn't account for
potential misspellings or word derivations.
N-grams to the rescue! Instead of building a simple collection of
unigrams (n=1), one might prefer a collection of bigrams (n=2), where
occurrences of pairs of consecutive words are counted.
One might alternatively consider a collection of character n-grams, a
representation resilient against misspellings and derivations.
For example, let's say we're dealing with a corpus of two documents:
['words', 'wprds']. The second document contains a misspelling of the
word 'words'. A simple bag of words representation would consider these
two as very distinct documents, differing in both of the two possible
features. A character 2-gram representation, however, would find the
documents matching in 4 out of 8 features, which may help the preferred
classifier decide better:
>>> ngram_vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(2, 2))
>>> counts = ngram_vectorizer.fit_transform(['words', 'wprds'])
>>> ngram_vectorizer.get_feature_names_out()
array([' w', 'ds', 'or', 'pr', 'rd', 's ', 'wo', 'wp'], ...)
>>> counts.toarray().astype(int)
array([[1, 1, 1, 0, 1, 1, 1, 0],
[1, 1, 0, 1, 1, 1, 0, 1]])
In the above example, char_wb analyzer is used, which creates n-grams
only from characters inside word boundaries (padded with space on each
side). The char analyzer, alternatively, creates n-grams that span
across words:
>>> ngram_vectorizer = CountVectorizer(analyzer='char_wb', ngram_range=(5, 5))
>>> ngram_vectorizer.fit_transform(['jumpy fox'])
<1x4 sparse matrix of type '<... 'numpy.int64'>'
with 4 stored elements in Compressed Sparse ... format>
>>> ngram_vectorizer.get_feature_names_out()
array([' fox ', ' jump', 'jumpy', 'umpy '], ...)
>>> ngram_vectorizer = CountVectorizer(analyzer='char', ngram_range=(5, 5))
>>> ngram_vectorizer.fit_transform(['jumpy fox'])
<1x5 sparse matrix of type '<... 'numpy.int64'>'
with 5 stored elements in Compressed Sparse ... format>
>>> ngram_vectorizer.get_feature_names_out()
array(['jumpy', 'mpy f', 'py fo', 'umpy ', 'y fox'], ...)
The word boundaries-aware variant char_wb is especially interesting for
languages that use white-spaces for word separation as it generates
significantly less noisy features than the raw char variant in that
case. For such languages it can increase both the predictive accuracy
and convergence speed of classifiers trained using such features while
retaining the robustness with regards to misspellings and word
derivations.
While some local positioning information can be preserved by extracting
n-grams instead of individual words, bag of words and bag of n-grams
destroy most of the inner structure of the document and hence most of
the meaning carried by that internal structure.
In order to address the wider task of Natural Language Understanding,
the local structure of sentences and paragraphs should thus be taken
into account. Many such models will thus be casted as "Structured
output" problems which are currently outside of the scope of
scikit-learn.
Vectorizing a large text corpus with the hashing trick
The above vectorization scheme is simple but the fact that it holds an
in-memory mapping from the string tokens to the integer feature indices
(the vocabulary_ attribute) causes several problems when dealing with
large datasets:
- the larger the corpus, the larger the vocabulary will grow and hence
the memory use too,
- fitting requires the allocation of intermediate data structures of
size proportional to that of the original dataset.
- building the word-mapping requires a full pass over the dataset
hence it is not possible to fit text classifiers in a strictly
online manner.
- pickling and un-pickling vectorizers with a large vocabulary_ can be
very slow (typically much slower than pickling / un-pickling flat
data structures such as a NumPy array of the same size),
- it is not easily possible to split the vectorization work into
concurrent sub tasks as the vocabulary_ attribute would have to be a
shared state with a fine grained synchronization barrier: the
mapping from token string to feature index is dependent on ordering
of the first occurrence of each token hence would have to be shared,
potentially harming the concurrent workers' performance to the point
of making them slower than the sequential variant.
It is possible to overcome those limitations by combining the "hashing
trick" (Feature_hashing) implemented by the
~sklearn.feature_extraction.FeatureHasher class and the text
preprocessing and tokenization features of the CountVectorizer.
This combination is implemented in HashingVectorizer, a transformer
class that is mostly API compatible with CountVectorizer.
HashingVectorizer is stateless, meaning that you don't have to call fit
on it:
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> hv = HashingVectorizer(n_features=10)
>>> hv.transform(corpus)
<4x10 sparse matrix of type '<... 'numpy.float64'>'
with 16 stored elements in Compressed Sparse ... format>
You can see that 16 non-zero feature tokens were extracted in the vector
output: this is less than the 19 non-zeros extracted previously by the
CountVectorizer on the same toy corpus. The discrepancy comes from hash
function collisions because of the low value of the n_features
parameter.
In a real world setting, the n_features parameter can be left to its
default value of 2 ** 20 (roughly one million possible features). If
memory or downstream models size is an issue selecting a lower value
such as 2 ** 18 might help without introducing too many additional
collisions on typical text classification tasks.
Note that the dimensionality does not affect the CPU training time of
algorithms which operate on CSR matrices (LinearSVC(dual=True),
Perceptron, SGDClassifier, PassiveAggressive) but it does for algorithms
that work with CSC matrices (LinearSVC(dual=False), Lasso(), etc.).
Let's try again with the default setting:
>>> hv = HashingVectorizer()
>>> hv.transform(corpus)
<4x1048576 sparse matrix of type '<... 'numpy.float64'>'
with 19 stored elements in Compressed Sparse ... format>
We no longer get the collisions, but this comes at the expense of a much
larger dimensionality of the output space. Of course, other terms than
the 19 used here might still collide with each other.
The HashingVectorizer also comes with the following limitations:
- it is not possible to invert the model (no inverse_transform
method), nor to access the original string representation of the
features, because of the one-way nature of the hash function that
performs the mapping.
- it does not provide IDF weighting as that would introduce
statefulness in the model. A TfidfTransformer can be appended to it
in a pipeline if required.
Performing out-of-core scaling with HashingVectorizer
An interesting development of using a HashingVectorizer is the ability
to perform out-of-core scaling. This means that we can learn from data
that does not fit into the computer's main memory.
A strategy to implement out-of-core scaling is to stream data to the
estimator in mini-batches. Each mini-batch is vectorized using
HashingVectorizer so as to guarantee that the input space of the
estimator has always the same dimensionality. The amount of memory used
at any time is thus bounded by the size of a mini-batch. Although there
is no limit to the amount of data that can be ingested using such an
approach, from a practical point of view the learning time is often
limited by the CPU time one wants to spend on the task.
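The sketch below illustrates the idea; iter_minibatches is a hypothetical user-provided generator yielding (texts, labels) pairs, and all_classes must list every possible label up front:
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
vectorizer = HashingVectorizer()   # stateless: no fit and no vocabulary_ to grow
clf = SGDClassifier()
for texts, labels in iter_minibatches():
    X = vectorizer.transform(texts)
    clf.partial_fit(X, labels, classes=all_classes)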
For a full-fledged example of out-of-core scaling in a text
classification task see
sphx_glr_auto_examples_applications_plot_out_of_core_classification.py.
Customizing the vectorizer classes
It is possible to customize the behavior by passing a callable to the
vectorizer constructor:
>>> def my_tokenizer(s):
... return s.split()
...
>>> vectorizer = CountVectorizer(tokenizer=my_tokenizer)
>>> vectorizer.build_analyzer()(u"Some... punctuation!") == (
... ['some...', 'punctuation!'])
True
In particular we name:
- preprocessor: a callable that takes an entire document as input
(as a single string), and returns a possibly transformed version
of the document, still as an entire string. This can be used to
remove HTML tags, lowercase the entire document, etc.
- tokenizer: a callable that takes the output from the preprocessor
and splits it into tokens, then returns a list of these.
- analyzer: a callable that replaces the preprocessor and tokenizer.
The default analyzers all call the preprocessor and tokenizer, but
custom analyzers will skip this. N-gram extraction and stop word
filtering take place at the analyzer level, so a custom analyzer
may have to reproduce these steps.
(Lucene users might recognize these names, but be aware that
scikit-learn concepts may not map one-to-one onto Lucene concepts.)
To make the preprocessor, tokenizer and analyzers aware of the model
parameters it is possible to derive from the class and override the
build_preprocessor, build_tokenizer and build_analyzer factory methods
instead of passing custom functions.
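For instance, a subclass can reuse the default preprocessor and add an extra cleanup step; this is only a sketch of the pattern:
from sklearn.feature_extraction.text import CountVectorizer
class UnderscoreAwareVectorizer(CountVectorizer):
    def build_preprocessor(self):
        preprocess = super().build_preprocessor()
        # keep the default lowercasing / accent handling, then split snake_case tokens
        return lambda doc: preprocess(doc).replace('_', ' ')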
Some tips and tricks:
- If documents are pre-tokenized by an external package, then store
them in files (or strings) with the tokens separated by whitespace
and pass analyzer=str.split
- Fancy token-level analysis such as stemming, lemmatizing, compound
splitting, filtering based on part-of-speech, etc. are not
included in the scikit-learn codebase, but can be added by
customizing either the tokenizer or the analyzer. Here's a
CountVectorizer with a tokenizer and lemmatizer using NLTK:
>>> from nltk import word_tokenize # doctest: +SKIP
>>> from nltk.stem import WordNetLemmatizer # doctest: +SKIP
>>> class LemmaTokenizer:
... def __init__(self):
... self.wnl = WordNetLemmatizer()
... def __call__(self, doc):
... return [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
...
>>> vect = CountVectorizer(tokenizer=LemmaTokenizer()) # doctest: +SKIP
(Note that this will not filter out punctuation.)
The following example will, for instance, transform some British
spelling to American spelling:
>>> import re
>>> def to_british(tokens):
... for t in tokens:
... t = re.sub(r"(...)our$", r"\1or", t)
... t = re.sub(r"([bt])re$", r"\1er", t)
... t = re.sub(r"([iy])s(e$|ing|ation)", r"\1z\2", t)
... t = re.sub(r"ogue$", "og", t)
... yield t
...
>>> class CustomVectorizer(CountVectorizer):
... def build_tokenizer(self):
... tokenize = super().build_tokenizer()
... return lambda doc: list(to_british(tokenize(doc)))
...
>>> print(CustomVectorizer().build_analyzer()(u"color colour"))
[...'color', ...'color']
The same approach can be applied for other styles of preprocessing;
examples include stemming, lemmatization, or normalizing numerical
tokens, with the latter illustrated in:
- sphx_glr_auto_examples_bicluster_plot_bicluster_newsgroups.py
Customizing the vectorizer can also be useful when handling Asian
languages that do not use an explicit word separator such as whitespace.
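One possible approach is to pass a language-specific tokenizer as the tokenizer callable; the sketch below assumes the third-party jieba package for Chinese text is installed:
import jieba
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer(tokenizer=lambda doc: list(jieba.cut(doc)))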
Image feature extraction
Patch extraction
The extract_patches_2d function extracts patches from an image stored as
a two-dimensional array, or three-dimensional with color information
along the third axis. For rebuilding an image from all its patches, use
reconstruct_from_patches_2d. For example let us generate a 4x4 pixel
picture with 3 color channels (e.g. in RGB format):
>>> import numpy as np
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(4 * 4 * 3).reshape((4, 4, 3))
>>> one_image[:, :, 0] # R channel of a fake RGB picture
array([[ 0, 3, 6, 9],
[12, 15, 18, 21],
[24, 27, 30, 33],
[36, 39, 42, 45]])
>>> patches = image.extract_patches_2d(one_image, (2, 2), max_patches=2,
... random_state=0)
>>> patches.shape
(2, 2, 2, 3)
>>> patches[:, :, :, 0]
array([[[ 0, 3],
[12, 15]],
<BLANKLINE>
[[15, 18],
[27, 30]]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> patches.shape
(9, 2, 2, 3)
>>> patches[4, :, :, 0]
array([[15, 18],
[27, 30]])
Let us now try to reconstruct the original image from the patches by
averaging on overlapping areas:
>>> reconstructed = image.reconstruct_from_patches_2d(patches, (4, 4, 3))
>>> np.testing.assert_array_equal(one_image, reconstructed)
The PatchExtractor class works in the same way as extract_patches_2d,
only it supports multiple images as input. It is implemented as a
scikit-learn transformer, so it can be used in pipelines. See:
>>> five_images = np.arange(5 * 4 * 4 * 3).reshape(5, 4, 4, 3)
>>> patches = image.PatchExtractor(patch_size=(2, 2)).transform(five_images)
>>> patches.shape
(45, 2, 2, 3)
Connectivity graph of an image
Several estimators in scikit-learn can use connectivity information
between features or samples. For instance, Ward clustering
(hierarchical_clustering) can cluster together only neighboring pixels
of an image, thus forming contiguous patches.
For this purpose, the estimators use a 'connectivity' matrix, giving
which samples are connected.
The function img_to_graph returns such a matrix from a 2D or 3D image.
Similarly, grid_to_graph builds a connectivity matrix for images given
the shape of these images.
These matrices can be used to impose connectivity in estimators that use
connectivity information, such as Ward clustering
(hierarchical_clustering), but also to build precomputed kernels, or
similarity matrices.
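A small sketch of both helpers on a toy 4x4 image:
import numpy as np
from sklearn.feature_extraction.image import grid_to_graph, img_to_graph
# connectivity between neighboring pixels of a 4x4 grid: a sparse (16, 16) matrix
connectivity = grid_to_graph(n_x=4, n_y=4)
# img_to_graph additionally uses gradient values of the image as edge weights
img = np.arange(16, dtype=float).reshape(4, 4)
graph = img_to_graph(img)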
| # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
import array
import re
import unicodedata
import warnings
from collections import defaultdict
from collections.abc import Mapping
from functools import partial
from numbers import Integral
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context
from ..exceptions import NotFittedError
from ..preprocessing import normalize
from ..utils import _IS_32BIT
from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
from ..utils.validation import FLOAT_DTYPES, check_array, check_is_fitted
from ._hash import FeatureHasher
from ._stop_words import ENGLISH_STOP_WORDS
__all__ = [
"HashingVectorizer",
"CountVectorizer",
"ENGLISH_STOP_WORDS",
"TfidfTransformer",
"TfidfVectorizer",
"strip_accents_ascii",
"strip_accents_unicode",
"strip_tags",
]
def _preprocess(doc, accent_function=None, lower=False):
"""Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart.
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
strip_accents_ascii : Remove accentuated char for any unicode symbol that
has a direct ASCII equivalent.
"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing.
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
strip_accents_unicode : Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function.
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixin:
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols.
The decoding strategy depends on the vectorizer parameters.
Parameters
----------
doc : bytes or str
The string to decode.
Returns
-------
doc: str
A string of unicode symbols.
"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens.
Returns
-------
tokenizer: callable
A function to split a string into a sequence of tokens.
"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError(
"More than 1 capturing group in token pattern. Only a single "
"group should be captured."
)
return token_pattern.findall
def get_stop_words(self):
"""Build or fetch the effective stop words list.
Returns
-------
stop_words: list or None
A list of stop words.
"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent
Returns
-------
is_consistent : True if stop words are consistent with the preprocessor
and tokenizer, False if they are not, None if the check
was previously performed, "error" if it could not be
performed (e.g. because of the use of a custom
preprocessor / tokenizer)
"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
# Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words."
% sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
def build_analyzer(self):
"""Return a callable to process input data.
The callable handles preprocessing, tokenization, and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(vocabulary.values())
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in range(len(vocabulary)):
if i not in indices:
msg = "Vocabulary of size %d doesn't contain index %d." % (
len(vocabulary),
i,
)
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, "vocabulary_"):
self._validate_vocabulary()
if not self.fixed_vocabulary_:
raise NotFittedError("Vocabulary not fitted or provided")
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
def _validate_ngram_range(self):
"""Check validity of ngram_range parameter"""
min_n, max_m = self.ngram_range
if min_n > max_m:
raise ValueError(
"Invalid value for ngram_range=%s "
"lower boundary larger than the upper boundary."
% str(self.ngram_range)
)
def _warn_for_unused_params(self):
if self.tokenizer is not None and self.token_pattern is not None:
warnings.warn(
"The parameter 'token_pattern' will not be used"
" since 'tokenizer' is not None'"
)
if self.preprocessor is not None and callable(self.analyzer):
warnings.warn(
"The parameter 'preprocessor' will not be used"
" since 'analyzer' is callable'"
)
if (
self.ngram_range != (1, 1)
and self.ngram_range is not None
and callable(self.analyzer)
):
warnings.warn(
"The parameter 'ngram_range' will not be used"
" since 'analyzer' is callable'"
)
if self.analyzer != "word" or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn(
"The parameter'stop_words' will not be used"
" since 'analyzer'!= 'word'"
)
if (
self.token_pattern is not None
and self.token_pattern != r"(?u)\b\w\w+\b"
):
warnings.warn(
"The parameter 'token_pattern' will not be used"
" since 'analyzer'!= 'word'"
)
if self.tokenizer is not None:
warnings.warn(
"The parameter 'tokenizer' will not be used"
" since 'analyzer'!= 'word'"
)
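# Illustrative sketch (editor's addition): `build_analyzer` above wires the
# decode / preprocess / tokenize / n-gram steps into a single callable. A
# minimal demonstration through CountVectorizer, which inherits this mixin;
# no fitting is needed just to build the analyzer.
def _example_build_analyzer():
    from sklearn.feature_extraction.text import CountVectorizer

    # Word analyzer producing unigrams and bigrams.
    word_analyzer = CountVectorizer(ngram_range=(1, 2)).build_analyzer()
    assert word_analyzer("red apple") == ["red", "apple", "red apple"]

    # Character analyzer restricted to word boundaries: the word is padded
    # with a space on both sides before 3-grams are extracted.
    char_analyzer = CountVectorizer(
        analyzer="char_wb", ngram_range=(3, 3)
    ).build_analyzer()
    assert char_analyzer("abc") == [" ab", "abc", "bc "]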
class HashingVectorizer(
TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None
):
r"""Convert a collection of text documents to a matrix of token occurrences.
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory.
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters.
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any character.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
.. versionadded:: 0.19
dtype : type, default=np.float64
Type of the matrix returned by fit_transform() or transform().
See Also
--------
CountVectorizer : Convert a collection of text documents to a matrix of
token counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix of
TF-IDF features.
Notes
-----
This estimator is :term:`stateless` and does not need to be fitted.
However, we recommend calling :meth:`fit_transform` instead of
:meth:`transform`, as parameter validation is only performed in
:meth:`fit`.
Examples
--------
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = HashingVectorizer(n_features=2**4)
>>> X = vectorizer.fit_transform(corpus)
>>> print(X.shape)
(4, 16)
"""
_parameter_constraints: dict = {
"input": [StrOptions({"filename", "file", "content"})],
"encoding": [str],
"decode_error": [StrOptions({"strict", "ignore", "replace"})],
"strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
"lowercase": ["boolean"],
"preprocessor": [callable, None],
"tokenizer": [callable, None],
"stop_words": [StrOptions({"english"}), list, None],
"token_pattern": [str, None],
"ngram_range": [tuple],
"analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
"n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")],
"binary": ["boolean"],
"norm": [StrOptions({"l1", "l2"}), None],
"alternate_sign": ["boolean"],
"dtype": "no_validation", # delegate to numpy
}
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
analyzer="word",
n_features=(2**20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=np.float64,
):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y=None):
"""Only validates estimator's parameters.
This method allows to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Only validates estimator's parameters.
This method allows to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
# triggers a parameter validation
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._warn_for_unused_params()
self._validate_ngram_range()
self._get_hasher().fit(X, y=y)
return self
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_ngram_range()
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
def fit_transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def _get_hasher(self):
return FeatureHasher(
n_features=self.n_features,
input_type="string",
dtype=self.dtype,
alternate_sign=self.alternate_sign,
)
def _more_tags(self):
return {"X_types": ["string"]}
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.issparse(X) and X.format == "csr":
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(X.indptr)
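# Illustrative sketch (editor's addition): `_document_frequency` is the private
# helper behind min_df / max_df pruning and the idf computation; for each
# column it counts how many rows have a non-zero entry. On a tiny hand-built
# CSR matrix:
def _example_document_frequency():
    import numpy as np
    import scipy.sparse as sp

    # 3 documents x 2 terms; term 0 occurs in two documents, term 1 in one.
    counts = sp.csr_matrix(np.array([[1, 0], [0, 3], [1, 0]]))
    assert _document_frequency(counts).tolist() == [2, 1]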
class CountVectorizer(_VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token counts.
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (strip_accents and lowercase) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. In this case, setting `max_df`
to a higher value, such as in the range (0.7, 1.0), can automatically detect
and filter stop words based on intra corpus document frequency of terms.
token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
word n-grams or char n-grams to be extracted. All values of n such
that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
Only applies if ``analyzer`` is not callable.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``filename`` or ``file``, the data is
first read from the file and then passed to the given callable
analyzer.
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only considers the top
`max_features` ordered by term frequency across the corpus.
Otherwise, all features are used.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : dtype, default=np.int64
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
HashingVectorizer : Convert a collection of text documents to a
matrix of token occurrences.
TfidfVectorizer : Convert a collection of raw documents to a matrix
of TF-IDF features.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'],...)
>>> print(X.toarray())
[[0 1 1 1 0 0 1 0 1]
[0 2 0 1 0 1 1 0 1]
[1 0 0 1 1 0 1 1 1]
[0 1 1 1 0 0 1 0 1]]
>>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2))
>>> X2 = vectorizer2.fit_transform(corpus)
>>> vectorizer2.get_feature_names_out()
array(['and this', 'document is', 'first document', 'is the', 'is this',
'second document', 'the first', 'the second', 'the third', 'third one',
'this document', 'this is', 'this the'],...)
>>> print(X2.toarray())
[[0 0 1 1 0 0 1 0 0 0 0 1 0]
[0 1 0 1 0 1 0 1 0 0 1 0 0]
[1 0 0 1 0 0 0 0 1 1 0 1 0]
[0 0 1 0 1 0 1 0 0 0 0 0 1]]
"""
_parameter_constraints: dict = {
"input": [StrOptions({"filename", "file", "content"})],
"encoding": [str],
"decode_error": [StrOptions({"strict", "ignore", "replace"})],
"strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
"lowercase": ["boolean"],
"preprocessor": [callable, None],
"tokenizer": [callable, None],
"stop_words": [StrOptions({"english"}), list, None],
"token_pattern": [str, None],
"ngram_range": [tuple],
"analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
"max_df": [
Interval(RealNotInt, 0, 1, closed="both"),
Interval(Integral, 1, None, closed="left"),
],
"min_df": [
Interval(RealNotInt, 0, 1, closed="both"),
Interval(Integral, 1, None, closed="left"),
],
"max_features": [Interval(Integral, 1, None, closed="left"), None],
"vocabulary": [Mapping, HasMethods("__iter__"), None],
"binary": ["boolean"],
"dtype": "no_validation", # delegate to numpy
}
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
analyzer="word",
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.int64,
):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
self.max_features = max_features
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(vocabulary.items())
map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode="clip")
return X
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or fewer
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
tfs = np.asarray(X.sum(axis=0)).ravel()
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(vocabulary.items()):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError(
"After pruning, no terms remain. Try a lower min_df or a higher max_df."
)
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = []
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError(
"empty vocabulary; perhaps the documents only contain stop words"
)
if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1
if _IS_32BIT:
raise ValueError(
(
"sparse CSR array has {} non-zero "
"elements and requires 64 bit indexing, "
"which is unsupported with 32 bit Python."
).format(indptr[-1])
)
indices_dtype = np.int64
else:
indices_dtype = np.int32
j_indices = np.asarray(j_indices, dtype=indices_dtype)
indptr = np.asarray(indptr, dtype=indices_dtype)
values = np.frombuffer(values, dtype=np.intc)
X = sp.csr_matrix(
(values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype,
)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
self : object
Fitted vectorizer.
"""
self.fit_transform(raw_documents)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : array of shape (n_samples, n_features)
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_ngram_range()
self._warn_for_unused_params()
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
if self.fixed_vocabulary_ and self.lowercase:
for term in self.vocabulary:
if any(map(str.isupper, term)):
warnings.warn(
"Upper case characters found in"
" vocabulary while 'lowercase'"
" is True. These entries will not"
" be matched with any documents"
)
break
vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
n_doc = X.shape[0]
max_doc_count = max_df if isinstance(max_df, Integral) else max_df * n_doc
min_doc_count = min_df if isinstance(min_df, Integral) else min_df * n_doc
if max_doc_count < min_doc_count:
raise ValueError("max_df corresponds to < documents than min_df")
if max_features is not None:
X = self._sort_features(X, vocabulary)
X, self.stop_words_ = self._limit_features(
X, vocabulary, max_doc_count, min_doc_count, max_features
)
if max_features is None:
X = self._sort_features(X, vocabulary)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_inv : list of arrays of shape (n_samples,)
List of arrays of terms.
"""
self._check_vocabulary()
# We need CSR format for fast row manipulations.
X = check_array(X, accept_sparse="csr")
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
if sp.issparse(X):
return [
inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)
]
else:
return [
inverse_vocabulary[np.flatnonzero(X[i, :])].ravel()
for i in range(n_samples)
]
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
self._check_vocabulary()
return np.asarray(
[t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))],
dtype=object,
)
def _more_tags(self):
return {"X_types": ["string"]}
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(
OneToOneFeatureMixin, TransformerMixin, BaseEstimator, auto_wrap_output_keys=None
):
"""Transform a count matrix to a normalized tf or tf-idf representation.
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf for a term t of a document d
in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is
computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where
n is the total number of documents in the document set and df(t) is the
document frequency of t; the document frequency is the number of documents
in the document set that contain the term t. The effect of adding "1" to
the idf in the equation above is that terms with zero idf, i.e., terms
that occur in all documents in a training set, will not be entirely
ignored.
(Note that the idf formula above differs from the standard textbook
notation that defines the idf as
idf(t) = log [ n / (df(t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : {'l1', 'l2'} or None, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`~sklearn.preprocessing.normalize`.
- None: No normalization.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array of shape (n_features)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 1.0
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix of
TF-IDF features.
HashingVectorizer : Convert a collection of text documents to a matrix
of token occurrences.
References
----------
.. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.
.. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from sklearn.pipeline import Pipeline
>>> corpus = ['this is the first document',
... 'this document is the second document',
... 'and this is the third one',
... 'is this the first document']
>>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the',
... 'and', 'one']
>>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)),
... ('tfid', TfidfTransformer())]).fit(corpus)
>>> pipe['count'].transform(corpus).toarray()
array([[1, 1, 1, 1, 0, 1, 0, 0],
[1, 2, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 0, 0]])
>>> pipe['tfid'].idf_
array([1. , 1.22314355, 1.51082562, 1. , 1.91629073,
1. , 1.91629073, 1.91629073])
>>> pipe.transform(corpus).shape
(4, 8)
"""
_parameter_constraints: dict = {
"norm": [StrOptions({"l1", "l2"}), None],
"use_idf": ["boolean"],
"smooth_idf": ["boolean"],
"sublinear_tf": ["boolean"],
}
def __init__(self, *, norm="l2", use_idf=True, smooth_idf=True, sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn the idf vector (global term weights).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A matrix of term/token counts.
y : None
This parameter is not needed to compute tf-idf.
Returns
-------
self : object
Fitted transformer.
"""
# large sparse data is not supported for 32bit platforms because
# _document_frequency uses np.bincount which works on arrays of
# dtype NPY_INTP which is int32 for 32bit platforms. See #20923
X = self._validate_data(
X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT
)
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
df = df.astype(dtype, copy=False)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(n_samples / df) + 1
self._idf_diag = sp.diags(
idf,
offsets=0,
shape=(n_features, n_features),
format="csr",
dtype=dtype,
)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
X = self._validate_data(
X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy, reset=False
)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=np.float64)
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
# idf_ being a property, the automatic attributes detection
# does not work as usual and we need to specify the attribute
# name:
check_is_fitted(self, attributes=["idf_"], msg="idf vector is not fitted")
# *= doesn't work
X = X * self._idf_diag
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
@idf_.setter
def idf_(self, value):
value = np.asarray(value, dtype=np.float64)
n_features = value.shape[0]
self._idf_diag = sp.spdiags(
value, diags=0, m=n_features, n=n_features, format="csr"
)
def _more_tags(self):
return {"X_types": ["2darray", "sparse"]}
class TfidfVectorizer(CountVectorizer):
r"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to :class:`CountVectorizer` followed by
:class:`TfidfTransformer`.
For an example of usage, see
:ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
stop_words : {'english'}, list, default=None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. In this case, setting `max_df`
to a higher value, such as in the range (0.7, 1.0), can automatically detect
and filter stop words based on intra corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
max_df : float or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float in range [0.0, 1.0], the parameter represents a proportion of
documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float in range of [0.0, 1.0], the parameter represents a proportion
of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only considers the top
`max_features` ordered by term frequency across the corpus.
Otherwise, all features are used.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : bool, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set `binary` to True, `use_idf` to False and
`norm` to None to get 0/1 outputs).
dtype : dtype, default=float64
Type of the matrix returned by fit_transform() or transform().
norm : {'l1', 'l2'} or None, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`~sklearn.preprocessing.normalize`.
- None: No normalization.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
idf_ : array of shape (n_features,)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfTransformer : Performs the TF-IDF transformation from a provided
matrix of counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'],...)
>>> print(X.shape)
(4, 9)
"""
_parameter_constraints: dict = {**CountVectorizer._parameter_constraints}
_parameter_constraints.update(
{
"norm": [StrOptions({"l1", "l2"}), None],
"use_idf": ["boolean"],
"smooth_idf": ["boolean"],
"sublinear_tf": ["boolean"],
}
)
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
analyzer="word",
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.float64,
norm="l2",
use_idf=True,
smooth_idf=True,
sublinear_tf=False,
):
super().__init__(
input=input,
encoding=encoding,
decode_error=decode_error,
strip_accents=strip_accents,
lowercase=lowercase,
preprocessor=preprocessor,
tokenizer=tokenizer,
analyzer=analyzer,
stop_words=stop_words,
token_pattern=token_pattern,
ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
max_features=max_features,
vocabulary=vocabulary,
binary=binary,
dtype=dtype,
)
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
if not hasattr(self, "_tfidf"):
raise NotFittedError(
f"{self.__class__.__name__} is not fitted yet. Call 'fit' with "
"appropriate arguments before using this attribute."
)
return self._tfidf.idf_
@idf_.setter
def idf_(self, value):
if not self.use_idf:
raise ValueError("`idf_` cannot be set when `user_idf=False`.")
if not hasattr(self, "_tfidf"):
# We should support transferring `idf_` from another `TfidfTransformer`
# and therefore, we need to create the transformer instance if it does not
# exist yet.
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
self._validate_vocabulary()
if hasattr(self, "vocabulary_"):
if len(self.vocabulary_) != len(value):
raise ValueError(
"idf length = %d must be equal to vocabulary size = %d"
% (len(value), len(self.vocabulary_))
)
self._tfidf.idf_ = value
def _check_params(self):
if self.dtype not in FLOAT_DTYPES:
warnings.warn(
"Only {} 'dtype' should be used. {} 'dtype' will "
"be converted to np.float64.".format(FLOAT_DTYPES, self.dtype),
UserWarning,
)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._check_params()
self._warn_for_unused_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
self._check_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted")
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
def _more_tags(self):
return {"X_types": ["string"], "_skip_test": True}
"""
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
from numbers import Integral, Number, Real
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy import sparse
from..base import BaseEstimator, TransformerMixin, _fit_context
from..utils import check_array, check_random_state
from..utils._param_validation import Hidden, Interval, RealNotInt, validate_params
__all__ = [
"PatchExtractor",
"extract_patches_2d",
"grid_to_graph",
"img_to_graph",
"reconstruct_from_patches_2d",
]
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x : int
The size of the grid in the x direction.
n_y : int
The size of the grid in the y direction.
n_z : integer, default=1
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
_, n_y, n_z = img.shape
gradient = np.abs(
img[
edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z,
]
- img[
edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z,
]
)
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(
n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None
):
"""Auxiliary function for img_to_graph and grid_to_graph"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None: # To not overwrite input dtype
if img is None:
dtype = int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(dtype=bool, copy=False)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix(
(
np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))),
),
(n_voxels, n_voxels),
dtype=dtype,
)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
@validate_params(
{
"img": ["array-like"],
"mask": [None, np.ndarray],
"return_as": [type],
"dtype": "no_validation", # validation delegated to numpy
},
prefer_skip_nested_validation=True,
)
def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections.
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : array-like of shape (height, width) or (height, width, channel)
2D or 3D image.
mask : ndarray of shape (height, width) or \
(height, width, channel), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=None
The data of the returned sparse matrix. By default it is the
dtype of img.
Returns
-------
graph : ndarray or a sparse matrix class
The computed adjacency matrix.
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
@validate_params(
{
"n_x": [Interval(Integral, left=1, right=None, closed="left")],
"n_y": [Interval(Integral, left=1, right=None, closed="left")],
"n_z": [Interval(Integral, left=1, right=None, closed="left")],
"mask": [None, np.ndarray],
"return_as": [type],
"dtype": "no_validation", # validation delegated to numpy
},
prefer_skip_nested_validation=True,
)
def grid_to_graph(
n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
):
"""Graph of the pixel-to-pixel connections.
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis.
n_y : int
Dimension in y axis.
n_z : int, default=1
Dimension in z axis.
mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=int
The data of the returned sparse matrix. By default it is int.
Returns
-------
graph : np.ndarray or a sparse matrix class
The computed adjacency matrix.
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None, all possible patches are extracted.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, (Integral)) and max_patches < all_patches:
return max_patches
elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
return all_patches
elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : int or tuple of length arr.ndim, default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = (
(np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
@validate_params(
{
"image": [np.ndarray],
"patch_size": [tuple, list],
"max_patches": [
Interval(RealNotInt, 0, 1, closed="neither"),
Interval(Integral, 1, None, closed="left"),
None,
],
"random_state": ["random_state"],
},
prefer_skip_nested_validation=True,
)
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches.
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or \
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None it corresponds to the total number
of patches that can be extracted.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError(
"Height of the patch should be less than the height of the image."
)
if p_w > i_w:
raise ValueError(
"Width of the patch should be less than the width of the image."
)
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = _extract_patches(
image, patch_shape=(p_h, p_w, n_colors), extraction_step=1
)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
@validate_params(
{"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]},
prefer_skip_nested_validation=True,
)
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : ndarray of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of int (image_height, image_width) or \
(image_height, image_width, n_channels)
The size of the image that will be reconstructed.
Returns
-------
image : ndarray of shape image_size
The reconstructed image.
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i : i + p_h, j : j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(TransformerMixin, BaseEstimator):
"""Extracts patches from a collection of images.
Read more in the :ref:`User Guide <image_feature_extraction>`.
.. versionadded:: 0.9
Parameters
----------
patch_size : tuple of int (patch_height, patch_width), default=None
The dimensions of one patch. If set to None, the patch size will be
automatically set to `(img_height // 10, img_width // 10)`, where
`img_height` and `img_width` are the dimensions of the input images.
max_patches : int or float, default=None
The maximum number of patches per image to extract. If `max_patches` is
a float in (0, 1), it is taken to mean a proportion of the total number
of patches. If set to None, extract all possible patches.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches is not None`. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
See Also
--------
reconstruct_from_patches_2d : Reconstruct image from all of its patches.
Notes
-----
This estimator is stateless and does not need to be fitted. However, we
recommend to call :meth:`fit_transform` instead of :meth:`transform`, as
parameter validation is only performed in :meth:`fit`.
Examples
--------
>>> from sklearn.datasets import load_sample_images
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the second image in this dataset:
>>> X = load_sample_images().images[1]
>>> X = X[None,...]
>>> print(f"Image shape: {X.shape}")
Image shape: (1, 427, 640, 3)
>>> pe = image.PatchExtractor(patch_size=(10, 10))
>>> pe_trans = pe.transform(X)
>>> print(f"Patches shape: {pe_trans.shape}")
Patches shape: (263758, 10, 10, 3)
>>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:])
>>> print(f"Reconstructed shape: {X_reconstructed.shape}")
Reconstructed shape: (427, 640, 3)
"""
_parameter_constraints: dict = {
"patch_size": [tuple, None],
"max_patches": [
None,
Interval(RealNotInt, 0, 1, closed="neither"),
Interval(Integral, 1, None, closed="left"),
],
"random_state": ["random_state"],
}
def __init__(self, *, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Only validate the parameters of the estimator.
This method allows to: (i) validate the parameters of the estimator and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or \
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
return self
def transform(self, X):
"""Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or \
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
X = self._validate_data(
X=X,
ensure_2d=False,
allow_nd=True,
ensure_min_samples=1,
ensure_min_features=1,
reset=False,
)
random_state = check_random_state(self.random_state)
n_imgs, img_height, img_width = X.shape[:3]
if self.patch_size is None:
patch_size = img_height // 10, img_width // 10
else:
if len(self.patch_size)!= 2:
raise ValueError(
"patch_size must be a tuple of two integers. Got"
f" {self.patch_size} instead."
)
patch_size = self.patch_size
n_imgs, img_height, img_width = X.shape[:3]
X = np.reshape(X, (n_imgs, img_height, img_width, -1))
n_channels = X.shape[-1]
# compute the dimensions of the patches array
patch_height, patch_width = patch_size
n_patches = _compute_n_patches(
img_height, img_width, patch_height, patch_width, self.max_patches
)
patches_shape = (n_imgs * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
image,
patch_size,
max_patches=self.max_patches,
random_state=random_state,
)
return patches
def _more_tags(self):
return {"X_types": ["3darray"], "stateless": True} |
scikit-learn__scikit-learn | gaussian_process.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/gaussian_process.rst | [
"scikit-learn__scikit-learn/sklearn/gaussian_process/kernels.py"
] | scikit-learn__scikit-learn/sklearn/gaussian_process | Gaussian Processes
Gaussian Processes (GP) are a generic supervised learning method
designed to solve regression and probabilistic classification problems.
The advantages of Gaussian processes are:
- The prediction interpolates the observations (at least for regular
kernels).
- The prediction is probabilistic (Gaussian) so that one can compute
empirical confidence intervals and decide based on those if one
should refit (online fitting, adaptive fitting) the prediction in
some region of interest.
- Versatile: different kernels
<gp_kernels> can be specified. Common kernels are provided, but it
is also possible to specify custom kernels.
The disadvantages of Gaussian processes include:
- They are not sparse, i.e., they use the whole samples/features
information to perform the prediction.
- They lose efficiency in high dimensional spaces -- namely when the
number of features exceeds a few dozen.
Gaussian Process Regression (GPR)
The GaussianProcessRegressor implements Gaussian processes (GP) for
regression purposes. For this, the prior of the GP needs to be
specified. The prior mean is assumed to be constant and zero (for
normalize_y=False) or the training data's mean (for normalize_y=True).
The prior's covariance is specified by passing a kernel <gp_kernels>
object. The hyperparameters of the kernel are optimized during fitting
of GaussianProcessRegressor by maximizing the log-marginal-likelihood
(LML) based on the passed optimizer. As the LML may have multiple local
optima, the optimizer can be started repeatedly by specifying
n_restarts_optimizer. The first run is always conducted starting from
the initial hyperparameter values of the kernel; subsequent runs are
conducted from hyperparameter values that have been chosen randomly from
the range of allowed values. If the initial hyperparameters should be
kept fixed, None can be passed as optimizer.
The noise level in the targets can be specified by passing it via the
parameter alpha, either globally as a scalar or per datapoint. Note that
a moderate noise level can also be helpful for dealing with numeric
issues during fitting as it is effectively implemented as Tikhonov
regularization, i.e., by adding it to the diagonal of the kernel matrix.
An alternative to specifying the noise level explicitly is to include a
WhiteKernel component into the kernel, which can estimate the global
noise level from the data (see example below).
The implementation is based on Algorithm 2.1 of [RW2006]. In addition to
the API of standard scikit-learn estimators, GaussianProcessRegressor:
- allows prediction without prior fitting (based on the GP prior)
- provides an additional method sample_y(X), which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
- exposes a method log_marginal_likelihood(theta), which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
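To make these points concrete, here is a minimal sketch (the toy data and initial kernel values are illustrative assumptions, not taken from the scikit-learn examples) that fits a GaussianProcessRegressor with a sum-kernel including a WhiteKernel and then queries the fitted model:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Illustrative 1D toy data: a noisy sine curve.
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 30)[:, np.newaxis]
y = np.sin(3 * X).ravel() + rng.normal(0, 0.3, X.shape[0])

# Sum-kernel: the RBF part models the signal, the WhiteKernel part estimates
# the global noise level from the data.
kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=5).fit(X, y)

X_test = np.linspace(0, 5, 100)[:, np.newaxis]
y_mean, y_std = gpr.predict(X_test, return_std=True)  # probabilistic prediction
y_samples = gpr.sample_y(X_test, n_samples=3)          # draws from the posterior
lml = gpr.log_marginal_likelihood(gpr.kernel_.theta)   # LML at the fitted theta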
GPR examples
GPR with noise-level estimation
This example illustrates that GPR with a sum-kernel including a
WhiteKernel can estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML.
The first corresponds to a model with a high noise level and a large
length scale, which explains all variations in the data by noise.
The second one has a smaller noise level and shorter length scale, which
explains most of the variation by the noise-free functional
relationship. The second model has a higher likelihood; however,
depending on the initial value for the hyperparameters, the
gradient-based optimization might also converge to the high-noise
solution. It is thus important to repeat the optimization several times
for different initializations.
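Continuing the sketch above (the gpr variable is assumed to be the regressor fitted with the RBF-plus-WhiteKernel sum-kernel), the solution the optimizer converged to can be inspected directly on the fitted kernel:

# The right operand (k2) of the fitted sum-kernel is the WhiteKernel, so its
# noise_level is the estimated noise variance.
print(gpr.kernel_)
print(gpr.kernel_.k2.noise_level)
print(gpr.log_marginal_likelihood(gpr.kernel_.theta))

# Because the LML surface has two local maxima here, setting
# n_restarts_optimizer > 0 repeats the optimization from random
# initializations and helps avoid the high-noise solution.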
Comparison of GPR and Kernel Ridge Regression
Both kernel ridge regression (KRR) and GPR learn a target function by
employing internally the "kernel trick". KRR learns a linear function in
the space induced by the respective kernel which corresponds to a
non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with ridge
regularization. GPR uses the kernel to define the covariance of a prior
distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a
(Gaussian) posterior distribution over target functions is defined,
whose mean is used for prediction.
A major difference is that GPR can choose the kernel's hyperparameters
based on gradient-ascent on the marginal likelihood function while KRR
needs to perform a grid search on a cross-validated loss function
(mean-squared error loss). A further difference is that GPR learns a
generative, probabilistic model of the target function and can thus
provide meaningful confidence intervals and posterior samples along with
the predictions while KRR only provides predictions.
The following figure illustrates both methods on an artificial dataset,
which consists of a sinusoidal target function and strong noise. The
figure compares the learned model of KRR and GPR based on a
ExpSineSquared kernel, which is suited for learning periodic functions.
The kernel's hyperparameters control the smoothness (length_scale) and
periodicity of the kernel (periodicity). Moreover, the noise level of
the data is learned explicitly by GPR by an additional WhiteKernel
component in the kernel and by the regularization parameter alpha of
KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR provides reasonable confidence bounds on the prediction
which are not available for KRR. A major difference between the two
methods is the time required for fitting and predicting: while fitting
KRR is fast in principle, the grid-search for hyperparameter
optimization scales exponentially with the number of hyperparameters
("curse of dimensionality"). The gradient-based optimization of the
parameters in GPR does not suffer from this exponential scaling and is
thus considerably faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however,
generating the variance of the predictive distribution of GPR takes
considerably longer than just predicting the mean.
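The following sketch mirrors this comparison (the toy data, grids and initial kernel values are illustrative assumptions): KRR selects its hyperparameters by grid search on a cross-validated loss, while GPR tunes the same kind of periodic kernel, plus a WhiteKernel for the noise, by gradient ascent on the marginal likelihood.

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ExpSineSquared, WhiteKernel

# Illustrative noisy sinusoidal data.
rng = np.random.RandomState(0)
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel() + 0.5 * rng.randn(X.shape[0])

# KRR: kernel hyperparameters and regularization chosen by grid search.
krr = GridSearchCV(
    KernelRidge(kernel=ExpSineSquared()),
    param_grid={
        "alpha": [1e0, 1e-1, 1e-2, 1e-3],
        "kernel__length_scale": np.logspace(-2, 2, 5),
        "kernel__periodicity": np.logspace(0, 1, 5),
    },
).fit(X, y)

# GPR: the noise level is learned via the WhiteKernel component.
gpr = GaussianProcessRegressor(
    kernel=1.0 * ExpSineSquared() + WhiteKernel(noise_level=1e-1)
).fit(X, y)
y_mean, y_std = gpr.predict(X, return_std=True)  # GPR also returns uncertainty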
GPR on Mauna Loa CO2 data
This example is based on Section 5.4.3 of [RW2006]. It illustrates an
example of complex kernel engineering and hyperparameter optimization
using gradient ascent on the log-marginal-likelihood. The data consists
of the monthly average atmospheric CO2 concentrations (in parts per
million by volume (ppmv)) collected at the Mauna Loa Observatory in
Hawaii, between 1958 and 1997. The objective is to model the CO2
concentration as a function of the time t.
The kernel is composed of several terms that are responsible for
explaining different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF
kernel. The RBF kernel with a large length-scale enforces this
component to be smooth; it is not enforced that the trend is rising
which leaves this choice to the GP. The specific length-scale and
the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The
length-scale of this periodic component, controlling its smoothness,
is a free parameter. In order to allow decaying away from exact
periodicity, the product with an RBF kernel is taken. The
length-scale of this RBF component controls the decay time and is a
further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha
parameter, which determines the diffuseness of the length-scales,
are to be determined. According to [RW2006], these irregularities
can better be explained by a RationalQuadratic than an RBF kernel
component, probably because it can accommodate several
length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which
shall explain the correlated noise components such as local weather
phenomena, and a WhiteKernel contribution for the white noise. The
relative amplitudes and the RBF's length scale are further free
parameters.
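A sketch of how such a composite kernel can be assembled before fitting (the initial hyperparameter values below are rough, illustrative guesses, not the fitted values quoted next):

from sklearn.gaussian_process.kernels import (
    RBF, ExpSineSquared, RationalQuadratic, WhiteKernel,
)

# Long-term smooth rising trend.
k1 = 50.0**2 * RBF(length_scale=50.0)
# Seasonal component: an exactly periodic kernel damped by an RBF so the
# seasonality may decay away from exact periodicity.
k2 = 2.0**2 * RBF(length_scale=100.0) * ExpSineSquared(
    length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed"
)
# Medium-term irregularities.
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
# Correlated short-range noise plus white observation noise.
k4 = 0.1**2 * RBF(length_scale=0.1) + WhiteKernel(noise_level=0.1**2)

co2_kernel = k1 + k2 + k3 + k4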
Maximizing the log-marginal-likelihood after subtracting the target's
mean yields the following kernel with an LML of -83.214:
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term
rising trend (length-scale 41.8 years). The periodic component has an
amplitude of 3.27ppm, a decay time of 180 years and a length-scale of
1.44. The long decay time indicates a seasonal component that is locally very
close to periodic. The correlated noise has an amplitude of 0.197ppm with a
length scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus,
the overall noise level is very small, indicating that the data can be very
well explained by the model. The figure also shows that the model makes very
confident predictions until around 2015.
Gaussian Process Classification (GPC)
The GaussianProcessClassifier implements Gaussian processes (GP) for
classification purposes, more specifically for probabilistic
classification, where test predictions take the form of class
probabilities. GaussianProcessClassifier places a GP prior on a latent
function f, which is then squashed through a link function to obtain the
probabilistic classification. The latent function f is a so-called
nuisance function, whose values are not observed and are not relevant by
themselves. Its purpose is to allow a convenient formulation of the
model, and f is removed (integrated out) during prediction.
GaussianProcessClassifier implements the logistic link function, for
which the integral cannot be computed analytically but is easily
approximated in the binary case.
In contrast to the regression setting, the posterior of the latent
function f is not Gaussian even for a GP prior since a Gaussian
likelihood is inappropriate for discrete class labels. Rather, a
non-Gaussian likelihood corresponding to the logistic link function
(logit) is used. GaussianProcessClassifier approximates the non-Gaussian
posterior with a Gaussian based on the Laplace approximation. More
details can be found in Chapter 3 of [RW2006].
The GP prior mean is assumed to be zero. The prior's covariance is
specified by passing a kernel <gp_kernels> object. The hyperparameters
of the kernel are optimized during fitting of GaussianProcessClassifier
by maximizing the log-marginal-likelihood (LML) based on the passed
optimizer. As the LML may have multiple local optima, the optimizer can
be started repeatedly by specifying n_restarts_optimizer. The first run
is always conducted starting from the initial hyperparameter values of
the kernel; subsequent runs are conducted from hyperparameter values
that have been chosen randomly from the range of allowed values. If the
initial hyperparameters should be kept fixed, None can be passed as
optimizer.
GaussianProcessClassifier supports multi-class classification by
performing either one-versus-rest or one-versus-one based training and
prediction. In one-versus-rest, one binary Gaussian process classifier
is fitted for each class, which is trained to separate this class from
the rest. In "one_vs_one", one binary Gaussian process classifier is
fitted for each pair of classes, which is trained to separate these two
classes. The predictions of these binary predictors are combined into
multi-class predictions. See the section on
multi-class classification <multiclass> for more details.
In the case of Gaussian process classification, "one_vs_one" might be
computationally cheaper since it has to solve many problems involving
only a subset of the whole training set rather than fewer problems on
the whole dataset. Since Gaussian process classification scales
cubically with the size of the dataset, this might be considerably
faster. However, note that "one_vs_one" does not support predicting
probability estimates but only plain predictions. Moreover, note that
GaussianProcessClassifier does not (yet) implement a true multi-class
Laplace approximation internally, but as discussed above is based on
solving several binary classification tasks internally, which are
combined using one-versus-rest or one-versus-one.
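A minimal sketch of binary and multi-class usage (the iris data and kernel values are illustrative; multi_class is the GaussianProcessClassifier parameter selecting between the two strategies):

from sklearn.datasets import load_iris
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X, y = load_iris(return_X_y=True)

# One binary GPC per class (default): probability estimates are available.
gpc_ovr = GaussianProcessClassifier(kernel=1.0 * RBF(1.0),
                                    multi_class="one_vs_rest").fit(X, y)
proba = gpc_ovr.predict_proba(X[:3])

# One binary GPC per pair of classes: often cheaper, but predict_proba is
# not supported, only plain class predictions.
gpc_ovo = GaussianProcessClassifier(kernel=1.0 * RBF(1.0),
                                    multi_class="one_vs_one").fit(X, y)
pred = gpc_ovo.predict(X[:3])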
GPC examples
Probabilistic predictions with GPC
This example illustrates the predicted probability of GPC for an RBF
kernel with different choices of the hyperparameters. The first figure
shows the predicted probability of GPC with arbitrarily chosen
hyperparameters and with the hyperparameters corresponding to the
maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably
larger LML, they perform slightly worse according to the log-loss on
test data. The figure shows that this is because they exhibit a steep
change of the class probabilities at the class boundaries (which is
good) but have predicted probabilities close to 0.5 far away from the
class boundaries (which is bad). This undesirable effect is caused by the
Laplace approximation used internally by GPC.
The second figure shows the log-marginal-likelihood for different
choices of the kernel's hyperparameters, highlighting the two choices of
the hyperparameters used in the first figure by black dots.
Illustration of GPC on the XOR dataset
This example illustrates GPC on XOR data. Compared are a stationary,
isotropic kernel (RBF) and a non-stationary kernel (DotProduct). On this
particular dataset, the DotProduct kernel obtains considerably better
results because the class-boundaries are linear and coincide with the
coordinate axes. In practice, however, stationary kernels such as RBF
often obtain better results.
Gaussian process classification (GPC) on iris dataset
This example illustrates the predicted probability of GPC for an
isotropic and anisotropic RBF kernel on a two-dimensional version for
the iris-dataset. This illustrates the applicability of GPC to
non-binary classification. The anisotropic RBF kernel obtains slightly
higher log-marginal-likelihood by assigning different length-scales to
the two feature dimensions.
Kernels for Gaussian Processes
Kernels (also called "covariance functions" in the context of GPs) are a
crucial ingredient of GPs which determine the shape of prior and
posterior of the GP. They encode the assumptions on the function being
learned by defining the "similarity" of two datapoints combined with the
assumption that similar datapoints should have similar target values.
Two categories of kernels can be distinguished: stationary kernels
depend only on the distance of two datapoints and not on their absolute
values, $k(x_i, x_j) = k(d(x_i, x_j))$, and are thus invariant to
translations in the input space, while non-stationary kernels depend
also on the specific values of the datapoints. Stationary kernels can
further be subdivided into isotropic and anisotropic kernels, where
isotropic kernels are also invariant to rotations in the input space.
For more details, we refer to Chapter 4 of [RW2006]. For guidance on how
to best combine different kernels, we refer to [Duv2014].
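The distinction can also be queried directly on kernel objects; a small illustration, assuming the standard kernels from sklearn.gaussian_process.kernels:

from sklearn.gaussian_process.kernels import RBF, DotProduct

RBF(length_scale=1.0).is_stationary()    # True: depends only on d(x_i, x_j)
DotProduct(sigma_0=1.0).is_stationary()  # False: depends on the points themselves

# An anisotropic RBF uses one length-scale per feature dimension and is
# therefore no longer invariant to rotations of the input space.
anisotropic_rbf = RBF(length_scale=[1.0, 10.0])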
Gaussian Process Kernel API
The main usage of a Kernel is to compute the GP's covariance between
datapoints. For this, the method __call__ of the kernel can be called.
This method can either be used to compute the "auto-covariance" of all
pairs of datapoints in a 2d array X, or the "cross-covariance" of all
combinations of datapoints of a 2d array X with datapoints in a 2d array
Y. The following identity holds true for all kernels k (except for the
WhiteKernel): k(X) == k(X, Y=X)
If only the diagonal of the auto-covariance is being used, the method
diag() of a kernel can be called, which is more computationally
efficient than the equivalent call to __call__:
np.diag(k(X, X)) == k.diag(X)
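A small sketch of the three call patterns (the toy arrays are assumptions):

import numpy as np
from sklearn.gaussian_process.kernels import RBF

k = RBF(length_scale=1.0)
X = np.array([[0.0], [1.0], [2.0]])
Y = np.array([[0.5], [1.5]])

K_auto = k(X)       # (3, 3) auto-covariance of X with itself
K_cross = k(X, Y)   # (3, 2) cross-covariance between X and Y
diag = k.diag(X)    # (3,) same values as np.diag(k(X)), but cheaper to compute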
Kernels are parameterized by a vector θ of hyperparameters. These
hyperparameters can for instance control length-scales or periodicity of
a kernel (see below). All kernels support computing analytic gradients
of the kernel's auto-covariance with respect to log(θ) via setting
eval_gradient=True in the __call__ method. That is, a
(len(X), len(X), len(theta)) array is returned where the entry [i, j, l]
contains $\frac{\partial k_\theta(x_i, x_j)}{\partial log(\theta_l)}$.
This gradient is used by the Gaussian process (both regressor and
classifier) in computing the gradient of the log-marginal-likelihood,
which in turn is used to determine the value of θ, which maximizes the
log-marginal-likelihood, via gradient ascent. For each hyperparameter,
the initial value and the bounds need to be specified when creating an
instance of the kernel. The current value of θ can be retrieved and set via
the property theta of the kernel object. Moreover, the bounds of the
hyperparameters can be accessed by the property bounds of the kernel.
Note that both properties (theta and bounds) return log-transformed
values of the internally used values since those are typically more
amenable to gradient-based optimization. The specification of each
hyperparameter is stored in the form of an instance of Hyperparameter in
the respective kernel. Note that a kernel using a hyperparameter with
name "x" must have the attributes self.x and self.x_bounds.
The abstract base class for all kernels is Kernel. Kernel implements a
similar interface as ~sklearn.base.BaseEstimator, providing the methods
get_params(), set_params(), and clone(). This allows setting kernel
values also via meta-estimators such as ~sklearn.pipeline.Pipeline or
~sklearn.model_selection.GridSearchCV. Note that due to the nested
structure of kernels (by applying kernel operators, see below), the
names of kernel parameters might become relatively complicated. In
general, for a binary kernel operator, parameters of the left operand
are prefixed with k1__ and parameters of the right operand with k2__. An
additional convenience method is clone_with_theta(theta), which returns
a cloned version of the kernel but with the hyperparameters set to
theta. An illustrative example:
>>> from sklearn.gaussian_process.kernels import ConstantKernel, RBF
>>> kernel = ConstantKernel(constant_value=1.0, constant_value_bounds=(0.0, 10.0)) * RBF(length_scale=0.5, length_scale_bounds=(0.0, 10.0)) + RBF(length_scale=2.0, length_scale_bounds=(0.0, 10.0))
>>> for hyperparameter in kernel.hyperparameters: print(hyperparameter)
Hyperparameter(name='k1__k1__constant_value', value_type='numeric', bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)
Hyperparameter(name='k1__k2__length_scale', value_type='numeric', bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)
Hyperparameter(name='k2__length_scale', value_type='numeric', bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)
>>> params = kernel.get_params()
>>> for key in sorted(params): print("%s : %s" % (key, params[key]))
k1 : 1**2 * RBF(length_scale=0.5)
k1__k1 : 1**2
k1__k1__constant_value : 1.0
k1__k1__constant_value_bounds : (0.0, 10.0)
k1__k2 : RBF(length_scale=0.5)
k1__k2__length_scale : 0.5
k1__k2__length_scale_bounds : (0.0, 10.0)
k2 : RBF(length_scale=2)
k2__length_scale : 2.0
k2__length_scale_bounds : (0.0, 10.0)
>>> print(kernel.theta)  # Note: log-transformed
[ 0.         -0.69314718  0.69314718]
>>> print(kernel.bounds)  # Note: log-transformed
[[       -inf  2.30258509]
 [       -inf  2.30258509]
 [       -inf  2.30258509]]
All Gaussian process kernels are interoperable with
sklearn.metrics.pairwise and vice versa: instances of subclasses of
Kernel can be passed as metric to pairwise_kernels from
sklearn.metrics.pairwise. Moreover, kernel functions from pairwise can
be used as GP kernels by using the wrapper class PairwiseKernel. The
only caveat is that the gradient of the hyperparameters is not analytic
but numeric and all those kernels support only isotropic distances. The
parameter gamma is considered to be a hyperparameter and may be
optimized. The other kernel parameters are set directly at
initialization and are kept fixed.
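A sketch of the interoperability in both directions (the data and the chosen metrics are illustrative):

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, PairwiseKernel

X = np.random.RandomState(0).rand(5, 2)

# Direction 1: a GP kernel instance used as the metric of pairwise_kernels.
K = pairwise_kernels(X, metric=RBF(length_scale=1.0))

# Direction 2: a pairwise kernel function ("laplacian") wrapped as a GP kernel.
# Only gamma is exposed as a hyperparameter (numeric gradient); the remaining
# kernel parameters are fixed at construction time.
gp_kernel = PairwiseKernel(gamma=1.0, metric="laplacian")
gpc = GaussianProcessClassifier(kernel=gp_kernel)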
Basic kernels
The ConstantKernel kernel can be used as part of a Product kernel where
it scales the magnitude of the other factor (kernel) or as part of a Sum
kernel, where it modifies the mean of the Gaussian process. It depends
on a parameter constant_value. It is defined as:
$k(x_i, x_j) = constant\_value \quad \forall \; x_1, x_2$
The main use-case of the WhiteKernel kernel is as part of a sum-kernel
where it explains the noise-component of the signal. Tuning its
parameter noise_level corresponds to estimating the noise-level. It is
defined as:
$k(x_i, x_j) = noise\_level \text{ if } x_i == x_j \text{ else } 0$
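A short sketch combining the two kernels just described (the numeric values are illustrative):

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF, WhiteKernel

# ConstantKernel scales the RBF factor (product); WhiteKernel adds a noise
# term whose noise_level is estimated from the data (sum). Writing
# 2.0 * RBF(length_scale=1.0) builds the same product implicitly.
kernel = ConstantKernel(constant_value=2.0) * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.5)
gpr = GaussianProcessRegressor(kernel=kernel)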
| """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower und upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
import math
import warnings
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from inspect import signature
import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform
from scipy.special import gamma, kv
from ..base import clone
from ..exceptions import ConvergenceWarning
from ..metrics.pairwise import pairwise_kernels
from ..utils.validation import _num_samples
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1]!= length_scale.shape[0]:
raise ValueError(
"Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])
)
return length_scale
class Hyperparameter(
namedtuple(
"Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed")
)
):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : str
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : str
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default=None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
Examples
--------
>>> from sklearn.gaussian_process.kernels import ConstantKernel
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import Hyperparameter
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ConstantKernel(constant_value=1.0,
... constant_value_bounds=(0.0, 10.0))
We can access each hyperparameter:
>>> for hyperparameter in kernel.hyperparameters:
... print(hyperparameter)
Hyperparameter(name='constant_value', value_type='numeric',
bounds=array([[ 0., 10.]]), n_elements=1, fixed=False)
>>> params = kernel.get_params()
>>> for key in sorted(params): print(f"{key} : {params[key]}")
constant_value : 1.0
constant_value_bounds : (0.0, 10.0)
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, str) or bounds!= "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0]!= n_elements:
raise ValueError(
"Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0])
)
if fixed is None:
fixed = isinstance(bounds, str) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed
)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (
self.name == other.name
and self.value_type == other.value_type
and np.all(self.bounds == other.bounds)
and self.n_elements == other.n_elements
and self.fixed == other.fixed
)
class Kernel(metaclass=ABCMeta):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if parameter.kind!= parameter.VAR_KEYWORD and parameter.name!= "self":
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs)!= 0:
raise RuntimeError(
"scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention." % (cls,)
)
for arg in args:
params[arg] = getattr(self, arg)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split("__", 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError(
"Invalid parameter %s for kernel %s. "
"Check the list of available parameters "
"with `kernel.get_params().keys()`." % (name, self)
)
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError(
"Invalid parameter %s for kernel %s. "
"Check the list of available parameters "
"with `kernel.get_params().keys()`."
% (key, self.__class__.__name__)
)
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : ndarray of shape (n_dims,)
The hyperparameters
"""
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = [
getattr(self, attr)
for attr in dir(self)
if attr.startswith("hyperparameter_")
]
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i : i + hyperparameter.n_elements]
)
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i!= len(theta):
raise ValueError(
"theta has not the correct number of entries."
" Should be %d; given are %d" % (i, len(theta))
)
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = [
hyperparameter.bounds
for hyperparameter in self.hyperparameters
if not hyperparameter.fixed
]
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self)!= type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None)!= params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta))
)
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples,)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary."""
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on fixed-length feature
vectors or generic objects. Defaults to True for backward
compatibility."""
return True
def _check_bounds_params(self):
"""Called after fitting to warn if bounds may have been too tight."""
list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
idx = 0
for hyp in self.hyperparameters:
if hyp.fixed:
continue
for dim in range(hyp.n_elements):
if list_close[idx, 0]:
warnings.warn(
"The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified lower "
"bound %s. Decreasing the bound and"
" calling fit again may find a "
"better value." % (dim, hyp.name, hyp.bounds[dim][0]),
ConvergenceWarning,
)
elif list_close[idx, 1]:
warnings.warn(
"The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified upper "
"bound %s. Increasing the bound and"
" calling fit again may find a "
"better value." % (dim, hyp.name, hyp.bounds[dim][1]),
ConvergenceWarning,
)
idx += 1
class NormalizedKernelMixin:
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin:
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return True
class GenericKernelMixin:
"""Mixin for kernels which operate on generic objects such as variable-
length sequences, trees, and graphs.
.. versionadded:: 0.22
"""
@property
def requires_vector_input(self):
"""Whether the kernel works only on fixed-length feature vectors."""
return False
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
Parameters
----------
kernels : list of Kernels
The other kernels
Examples
--------
>>> from sklearn.gaussian_process.kernels import WhiteKernel
>>> from sklearn.gaussian_process.kernels import RBF
>>> from sklearn.gaussian_process.kernels import CompoundKernel
>>> kernel = CompoundKernel(
... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)])
>>> print(kernel.bounds)
[[-11.51292546 11.51292546]
[-11.51292546 11.51292546]]
>>> print(kernel.n_dims)
2
>>> print(kernel.theta)
[1.09861229 0.69314718]
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims : (i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernel
stacked along an additional axis.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of the
kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : ndarray of shape \
(n_samples_X, n_samples_X, n_dims, n_kernels), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])
def __eq__(self, b):
if type(self)!= type(b) or len(self.kernels)!= len(b.kernels):
return False
return np.all(
[self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))]
)
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return np.all([kernel.is_stationary() for kernel in self.kernels])
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on discrete structures."""
return np.any([kernel.requires_vector_input for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to `np.diag(self(X))`; however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(("k1__" + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(("k2__" + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = [
Hyperparameter(
"k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements,
)
for hyperparameter in self.k1.hyperparameters
]
for hyperparameter in self.k2.hyperparameters:
r.append(
Hyperparameter(
"k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements,
)
)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) or (
self.k1 == b.k2 and self.k2 == b.k1
)
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return self.k1.is_stationary() and self.k2.is_stationary()
@property
def requires_vector_input(self):
"""Returns whether the kernel is stationary."""
return self.k1.requires_vector_input or self.k2.requires_vector_input
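# A brief sketch of the "k1__" / "k2__" prefixing that kernel operators use to
# expose nested hyperparameters; the particular kernels chosen are arbitrary.
def _sketch_kernel_operator_params():
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel

    kernel = ConstantKernel(2.0) * RBF(length_scale=1.0)  # a Product operator
    params = kernel.get_params(deep=True)
    # Sub-kernel parameters are reachable under the k1__ / k2__ prefixes.
    assert "k1__constant_value" in params
    assert "k2__length_scale" in params
    # The flattened theta is [log(constant_value), log(length_scale)].
    return kernel.theta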
class Sum(KernelOperator):
"""The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2`
and combines them via
.. math::
k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y)
Note that the `__add__` magic method is overridden, so
`Sum(RBF(), RBF())` is equivalent to using the + operator
with `RBF() + RBF()`.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel
The first base-kernel of the sum-kernel
k2 : Kernel
The second base-kernel of the sum-kernel
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Sum(ConstantKernel(2), RBF())
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
1.0
>>> kernel
1.41**2 + RBF(length_scale=1)
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to `np.diag(self(X))`; however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2`
and combines them via
.. math::
k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y)
Note that the `__mul__` magic method is overridden, so
`Product(RBF(), RBF())` is equivalent to using the * operator
with `RBF() * RBF()`.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel
The first base-kernel of the product-kernel
k2 : Kernel
The second base-kernel of the product-kernel
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import (RBF, Product,
... ConstantKernel)
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Product(ConstantKernel(2), RBF())
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
1.0
>>> kernel
1.41**2 * RBF(length_scale=1)
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack(
(K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])
)
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""The Exponentiation kernel takes one base kernel and a scalar parameter
:math:`p` and combines them via
.. math::
k_{exp}(X, Y) = k(X, Y) ^p
Note that the `__pow__` magic method is overridden, so
`Exponentiation(RBF(), 2)` is equivalent to using the ** operator
with `RBF() ** 2`.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel
The base kernel
exponent : float
The exponent for the base kernel
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import (RationalQuadratic,
... Exponentiation)
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = Exponentiation(RationalQuadratic(), exponent=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.419...
>>> gpr.predict(X[:1,:], return_std=True)
(array([635.5...]), array([0.559...]))
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(("kernel__" + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(
Hyperparameter(
"kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements,
)
)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return self.kernel == b.kernel and self.exponent == b.exponent
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K**self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K**self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return self.kernel.is_stationary()
@property
def requires_vector_input(self):
"""Returns whether the kernel is defined on discrete structures."""
return self.kernel.requires_vector_input
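# A short sketch, with arbitrary inputs, of the ** overload: exponentiating a
# kernel raises its covariance values element-wise to the given power.
def _sketch_exponentiation():
    import numpy as np
    from sklearn.gaussian_process.kernels import RBF

    base = RBF(length_scale=1.0)
    squared = base**2  # equivalent to Exponentiation(base, 2)
    X = np.array([[0.0], [0.5], [2.0]])
    assert np.allclose(squared(X), base(X) ** 2)
    return squared(X)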
class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
.. math::
k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2
Adding a constant kernel is equivalent to adding a constant::
kernel = RBF() + ConstantKernel(constant_value=2)
is the same as::
kernel = RBF() + 2
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default=1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on `constant_value`.
If set to "fixed", `constant_value` cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = RBF() + ConstantKernel(constant_value=2)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3696...
>>> gpr.predict(X[:1,:], return_std=True)
(array([606.1...]), array([0.24...]))
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter("constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.full(
(_num_samples(X), _num_samples(Y)),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
)
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (
K,
np.full(
(_num_samples(X), _num_samples(X), 1),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
),
)
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(
_num_samples(X),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
)
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise of the signal as independently and identically
normally-distributed. The parameter noise_level equals the variance of this
noise.
.. math::
k(x_1, x_2) = noise\\_level \\text{ if } x_1 == x_2 \\text{ else } 0
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default=1.0
Parameter controlling the noise level (variance)
noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'noise_level'.
If set to "fixed", 'noise_level' cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel(noise_level=0.5)
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1... ]), array([316.6..., 316.6...]))
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter("noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(_num_samples(X))
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (
K,
self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis],
)
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
else:
return np.zeros((_num_samples(X), _num_samples(Y)))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(
_num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype
)
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(
self.__class__.__name__, self.noise_level
)
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length scale
parameter :math:`l>0`, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
.. math::
k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right)
where :math:`l` is the length scale of the kernel and
:math:`d(\\cdot,\\cdot)` is the Euclidean distance.
For advice on how to set the length scale parameter, see e.g. [1]_.
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
Advice on Covariance functions".
<https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
.. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RBF
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * RBF(1.0)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.8354..., 0.03228..., 0.1322...],
[0.7906..., 0.0652..., 0.1441...]])
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter(
"length_scale",
"numeric",
self.length_scale_bounds,
len(self.length_scale),
)
return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
length_scale**2
)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
)
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0]
)
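# A minimal sketch contrasting the isotropic and anisotropic RBF variants on
# made-up 2-D inputs; the length-scale values are arbitrary.
def _sketch_rbf_anisotropic():
    import numpy as np
    from sklearn.gaussian_process.kernels import RBF

    X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    iso = RBF(length_scale=1.0)            # one shared length-scale
    aniso = RBF(length_scale=[1.0, 10.0])  # one length-scale per feature
    # The anisotropic kernel decays much more slowly along the second feature.
    assert aniso(X)[0, 2] > iso(X)[0, 2]
    # theta holds one log(length_scale) entry per feature in the anisotropic case.
    assert aniso.theta.shape == (2,)
    return iso(X), aniso(X)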
class Matern(RBF):
"""Matern kernel.
The class of Matern kernels is a generalization of the :class:`RBF`.
It has an additional parameter :math:`\\nu` which controls the
smoothness of the resulting function. The smaller :math:`\\nu`,
the less smooth the approximated function is.
As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to
the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel
becomes identical to the absolute exponential kernel.
Important intermediate values are
:math:`\\nu=1.5` (once differentiable functions)
and :math:`\\nu=2.5` (twice differentiable functions).
The kernel is given by:
.. math::
k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg(
\\frac{\\sqrt{2\\nu}}{l} d(x_i, x_j )
\\Bigg)^\\nu K_\\nu\\Bigg(
\\frac{\\sqrt{2\\nu}}{l} d(x_i, x_j )\\Bigg)
where :math:`d(\\cdot,\\cdot)` is the Euclidean distance,
:math:`K_{\\nu}(\\cdot)` is a modified Bessel function and
:math:`\\Gamma(\\cdot)` is the gamma function.
See [1]_, Chapter 4, Section 4.2, for details regarding the different
variants of the Matern kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float or ndarray of shape (n_features,), default=1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
nu : float, default=1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(appr. 10 times higher) since they require to evaluate the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import Matern
>>> X, y = load_iris(return_X_y=True)
>>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9866...
>>> gpc.predict_proba(X[:2,:])
array([[0.8513..., 0.0368..., 0.1117...],
[0.8086..., 0.0693..., 0.1220...]])
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5):
super().__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="euclidean")
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1.0 + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1.0 + K + K**2 / 3.0) * np.exp(-K)
elif self.nu == np.inf:
K = np.exp(-(dists**2) / 2.0)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = math.sqrt(2 * self.nu) * K
K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
K *= tmp**self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
length_scale**2
)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
divide_result = np.zeros_like(D)
np.divide(
D,
denominator,
out=divide_result,
where=denominator != 0,
)
K_gradient = K[..., np.newaxis] * divide_result
elif self.nu == 1.5:
K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
elif self.nu == np.inf:
K_gradient = D * K[..., np.newaxis]
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu,
)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu
)
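# A numerical check, on arbitrary points, that Matern with nu=inf matches the
# RBF kernel and nu=0.5 matches the absolute exponential kernel, as documented.
def _sketch_matern_limits():
    import numpy as np
    from scipy.spatial.distance import cdist
    from sklearn.gaussian_process.kernels import RBF, Matern

    X = np.random.RandomState(1).normal(size=(5, 3))
    assert np.allclose(Matern(nu=np.inf)(X), RBF()(X))
    assert np.allclose(Matern(nu=0.5)(X), np.exp(-cdist(X, X)))
    return Matern(nu=1.5)(X)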
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length scales. It is
parameterized by a length scale parameter :math:`l>0` and a scale
mixture parameter :math:`\\alpha>0`. Only the isotropic variant
where length_scale :math:`l` is a scalar is supported at the moment.
The kernel is given by:
.. math::
k(x_i, x_j) = \\left(
1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha l^2}\\right)^{-\\alpha}
where :math:`\\alpha` is the scale mixture parameter, :math:`l` is
the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the
Euclidean distance.
For advice on how to set the parameters, see e.g. [1]_.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default=1.0
The length scale of the kernel.
alpha : float > 0, default=1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'alpha'.
If set to "fixed", 'alpha' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `David Duvenaud (2014). "The Kernel Cookbook:
Advice on Covariance functions".
<https://www.cs.toronto.edu/~duvenaud/cookbook/>`_
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import RationalQuadratic
>>> X, y = load_iris(return_X_y=True)
>>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5)
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733...
>>> gpc.predict_proba(X[:2,:])
array([[0.8881..., 0.0566..., 0.05518...],
[0.8678..., 0.0707..., 0.0614...]])
"""
def __init__(
self,
length_scale=1.0,
alpha=1.0,
length_scale_bounds=(1e-5, 1e5),
alpha_bounds=(1e-5, 1e5),
):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if len(np.atleast_1d(self.length_scale)) > 1:
raise AttributeError(
"RationalQuadratic kernel only supports isotropic version, "
"please use a single scalar for length_scale"
)
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric="sqeuclidean"))
tmp = dists / (2 * self.alpha * self.length_scale**2)
base = 1 + tmp
K = base**-self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric="sqeuclidean")
K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = dists * K / (self.length_scale**2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = K * (
-self.alpha * np.log(base)
+ dists / (2 * self.length_scale**2 * base)
)
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale
)
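# A brief sketch, on made-up data, of the scale-mixture interpretation: for a
# very large alpha the RationalQuadratic kernel approaches the RBF kernel.
def _sketch_rational_quadratic_limit():
    import numpy as np
    from sklearn.gaussian_process.kernels import RBF, RationalQuadratic

    X = np.linspace(0, 3, 6).reshape(-1, 1)
    rq = RationalQuadratic(length_scale=1.0, alpha=1e5)
    assert np.allclose(rq(X), RBF(length_scale=1.0)(X), atol=1e-4)
    return rq(X)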
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
r"""Exp-Sine-Squared kernel (aka periodic kernel).
The ExpSineSquared kernel allows one to model functions which repeat
themselves exactly. It is parameterized by a length scale
parameter :math:`l>0` and a periodicity parameter :math:`p>0`.
Only the isotropic variant where :math:`l` is a scalar is
supported at the moment. The kernel is given by:
.. math::
k(x_i, x_j) = \text{exp}\left(-
\frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right)
where :math:`l` is the length scale of the kernel, :math:`p` the
periodicity of the kernel and :math:`d(\\cdot,\\cdot)` is the
Euclidean distance.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default=1.0
The length scale of the kernel.
periodicity : float > 0, default=1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'length_scale'.
If set to "fixed", 'length_scale' cannot be changed during
hyperparameter tuning.
periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'periodicity'.
If set to "fixed", 'periodicity' cannot be changed during
hyperparameter tuning.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import ExpSineSquared
>>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0)
>>> kernel = ExpSineSquared(length_scale=1, periodicity=1)
>>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.0144...
>>> gpr.predict(X[:2,:], return_std=True)
(array([425.6..., 457.5...]), array([0.3894..., 0.3467...]))
"""
def __init__(
self,
length_scale=1.0,
periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5),
):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
"""Returns the length scale"""
return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter("periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric="euclidean"))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric="euclidean")
K = np.exp(
-2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = (
4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
)
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity
)
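# A small sketch, with an arbitrary periodicity, showing that ExpSineSquared
# assigns maximal similarity to points separated by a whole period.
def _sketch_exp_sine_squared_periodicity():
    import numpy as np
    from sklearn.gaussian_process.kernels import ExpSineSquared

    kernel = ExpSineSquared(length_scale=1.0, periodicity=2.0)
    K = kernel(np.array([[0.0], [2.0], [1.0]]))
    # x=0 and x=2 are one full period apart, so k(0, 2) == 1.
    assert np.isclose(K[0, 1], 1.0)
    # Half a period apart gives exp(-2 / length_scale**2).
    assert np.isclose(K[0, 2], np.exp(-2.0))
    return K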
class DotProduct(Kernel):
r"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting :math:`N(0, 1)` priors on the coefficients
of :math:`x_d (d = 1,..., D)` and a prior of :math:`N(0, \sigma_0^2)`
on the bias. The DotProduct kernel is invariant to a rotation of
the coordinates about the origin, but not translations.
It is parameterized by a parameter sigma_0 :math:`\sigma`
which controls the inhomogeneity of the kernel. For :math:`\sigma_0^2 = 0`,
the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
.. math::
k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
See [1]_, Chapter 4, Section 4.2, for further details regarding the
DotProduct kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default=1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'sigma_0'.
If set to "fixed", 'sigma_0' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0**2
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0**2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0**2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y).
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X).
"""
return np.einsum("ij,ij->i", X, X) + self.sigma_0**2
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0)
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
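# A short sketch of the finite-difference idea used above: nudge theta (the
# log-parameters), re-evaluate the kernel, and compare with the analytic
# gradient from eval_gradient=True. Step size and data are arbitrary choices.
def _sketch_numeric_vs_analytic_gradient():
    import numpy as np
    from sklearn.gaussian_process.kernels import RBF

    kernel = RBF(length_scale=1.5)
    X = np.random.RandomState(2).normal(size=(4, 2))
    K, K_grad = kernel(X, eval_gradient=True)
    eps = 1e-7
    K_plus = kernel.clone_with_theta(kernel.theta + eps)(X)
    assert np.allclose((K_plus - K) / eps, K_grad[:, :, 0], atol=1e-5)
    return K_grad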
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
.. versionadded:: 0.18
Parameters
----------
gamma : float, default=1.0
Parameter gamma of the pairwise kernel specified by metric. It should
be positive.
gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'gamma'.
If set to "fixed", 'gamma' cannot be changed during
hyperparameter tuning.
metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \
"rbf", "laplacian", "sigmoid", "cosine"} or callable, \
default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default=None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.gaussian_process import GaussianProcessClassifier
>>> from sklearn.gaussian_process.kernels import PairwiseKernel
>>> X, y = load_iris(return_X_y=True)
>>> kernel = PairwiseKernel(metric='rbf')
>>> gpc = GaussianProcessClassifier(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpc.score(X, y)
0.9733...
>>> gpc.predict_proba(X[:2,:])
array([[0.8880..., 0.05663..., 0.05532...],
[0.8676..., 0.07073..., 0.06165...]])
"""
def __init__(
self,
gamma=1.0,
gamma_bounds=(1e-5, 1e5),
metric="linear",
pairwise_kernels_kwargs=None,
):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(
X,
Y,
metric=self.metric,
gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs,
)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X,
Y,
metric=self.metric,
gamma=np.exp(gamma),
filter_params=True,
**pairwise_kernels_kwargs,
)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to the slow way of computing the diagonal
return np.apply_along_axis(self, 1, X).ravel()
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric
) |
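# A compact sketch showing that PairwiseKernel(metric="rbf") agrees with the
# corresponding pairwise_kernels call; the gamma value here is arbitrary.
def _sketch_pairwise_kernel_wrapper():
    import numpy as np
    from sklearn.gaussian_process.kernels import PairwiseKernel
    from sklearn.metrics.pairwise import pairwise_kernels

    X = np.random.RandomState(3).normal(size=(5, 2))
    kernel = PairwiseKernel(gamma=0.5, metric="rbf")
    assert np.allclose(kernel(X), pairwise_kernels(X, metric="rbf", gamma=0.5))
    return kernel(X)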
scikit-learn__scikit-learn | isotonic.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/isotonic.rst | [
"scikit-learn__scikit-learn/sklearn/isotonic.py"
] | Isotonic regression
The class IsotonicRegression fits a non-decreasing real function to
1-dimensional data. It solves the following problem:
.. math::
   \min \sum_i w_i (y_i - \hat{y}_i)^2

subject to :math:`\hat{y}_i \le \hat{y}_j` whenever :math:`X_i \le X_j`,
where the weights :math:`w_i` are strictly positive, and both X and y are
arbitrary real quantities.
The increasing parameter changes the constraint to :math:`\hat{y}_i \ge \hat{y}_j`
whenever :math:`X_i \le X_j`. Setting it to 'auto' will automatically choose
the constraint based on Spearman's rank correlation coefficient.
IsotonicRegression produces a series of predictions :math:`\hat{y}_i` for the
training data which are the closest to the targets y in terms of mean
squared error. These predictions are interpolated when predicting on
unseen data. The predictions of IsotonicRegression thus form a function
that is piecewise linear. | # Authors: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Nelle Varoquaux <[email protected]>
# License: BSD 3 clause
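# A minimal usage sketch (with made-up data) of the monotone fit described in
# the documentation above: fitted values never decrease with X, and new inputs
# are handled by piecewise-linear interpolation between the fitted thresholds.
def _sketch_isotonic_fit():
    import numpy as np
    from sklearn.isotonic import IsotonicRegression

    X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    y = np.array([1.0, 3.0, 2.0, 6.0, 5.0])
    iso = IsotonicRegression(increasing=True, out_of_bounds="clip")
    y_fit = iso.fit_transform(X, y)
    assert np.all(np.diff(y_fit) >= 0)  # non-decreasing in X
    # Unseen inputs are interpolated; values outside the range are clipped.
    return iso.predict([1.5, 2.5, 10.0])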
import math
import warnings
from numbers import Real
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
from .base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context
from .utils import check_array, check_consistent_length
from .utils._param_validation import Interval, StrOptions, validate_params
from .utils.validation import _check_sample_weight, check_is_fitted
__all__ = ["check_increasing", "isotonic_regression", "IsotonicRegression"]
@validate_params(
{
"x": ["array-like"],
"y": ["array-like"],
},
prefer_skip_nested_validation=True,
)
def check_increasing(x, y):
"""Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
Returns
-------
increasing_bool : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
https://en.wikipedia.org/wiki/Fisher_transformation
"""
# Calculate Spearman rho estimate and set return accordingly.
rho, _ = spearmanr(x, y)
increasing_bool = rho >= 0
# Run Fisher transform to get the rho CI, but handle rho=+/-1
if rho not in [-1.0, 1.0] and len(x) > 3:
F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))
F_se = 1 / math.sqrt(len(x) - 3)
# Use a 95% CI, i.e., +/-1.96 S.E.
# https://en.wikipedia.org/wiki/Fisher_transformation
rho_0 = math.tanh(F - 1.96 * F_se)
rho_1 = math.tanh(F + 1.96 * F_se)
# Warn if the CI spans zero.
if np.sign(rho_0) != np.sign(rho_1):
warnings.warn(
"Confidence interval of the Spearman "
"correlation coefficient spans zero. "
"Determination of ``increasing`` may be "
"suspect."
)
return increasing_bool
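# A quick sketch of the Spearman-based direction check on toy inputs chosen so
# that the monotone trend is unambiguous (no confidence-interval warning).
def _sketch_check_increasing():
    import numpy as np
    from sklearn.isotonic import check_increasing

    x = np.arange(10)
    assert check_increasing(x, x**2)       # increasing relationship
    assert not check_increasing(x, -x)     # decreasing relationship
    return True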
@validate_params(
{
"y": ["array-like"],
"sample_weight": ["array-like", None],
"y_min": [Interval(Real, None, None, closed="both"), None],
"y_max": [Interval(Real, None, None, closed="both"), None],
"increasing": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def isotonic_regression(
y, *, sample_weight=None, y_min=None, y_max=None, increasing=True
):
"""Solve the isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool, default=True
Whether the computed ``y_`` should be increasing (if set to True) or
decreasing (if set to False).
Returns
-------
y_ : ndarray of shape (n_samples,)
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
order = np.s_[:] if increasing else np.s_[::-1]
y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32])
y = np.array(y[order], dtype=y.dtype)
sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)
sample_weight = np.ascontiguousarray(sample_weight[order])
_inplace_contiguous_isotonic_regression(y, sample_weight)
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y, y_min, y_max, y)
return y[order]
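# A tiny sketch of the functional interface on hand-picked numbers: the pooled
# adjacent violators fit of [3, 1, 2] under equal weights is [2, 2, 2].
def _sketch_isotonic_regression_function():
    import numpy as np
    from sklearn.isotonic import isotonic_regression

    assert np.allclose(isotonic_regression([3.0, 1.0, 2.0]), [2.0, 2.0, 2.0])
    # increasing=False fits a non-increasing sequence instead.
    assert np.allclose(
        isotonic_regression([1.0, 2.0, 3.0], increasing=False), [2.0, 2.0, 2.0]
    )
    return True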
class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
"""Isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
.. versionadded:: 0.13
Parameters
----------
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool or 'auto', default=True
Determines whether the predictions should be constrained to increase
or decrease with `X`. 'auto' will decide based on the Spearman
correlation estimate's sign.
out_of_bounds : {'nan', 'clip', 'raise'}, default='nan'
Determines how `X` values outside of the training domain are handled
during prediction.
- 'nan', predictions will be NaN.
- 'clip', predictions will be set to the value corresponding to
the nearest train interval endpoint.
- 'raise', a `ValueError` is raised.
Attributes
----------
X_min_ : float
Minimum value of input array `X_` for left bound.
X_max_ : float
Maximum value of input array `X_` for right bound.
X_thresholds_ : ndarray of shape (n_thresholds,)
Unique ascending `X` values used to interpolate
the y = f(X) monotonic function.
.. versionadded:: 0.24
y_thresholds_ : ndarray of shape (n_thresholds,)
De-duplicated `y` values suitable to interpolate the y = f(X)
monotonic function.
.. versionadded:: 0.24
f_ : function
The stepwise interpolating function that covers the input domain ``X``.
increasing_ : bool
Inferred value for ``increasing``.
See Also
--------
sklearn.linear_model.LinearRegression : Ordinary least squares Linear
Regression.
sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that
is a non-parametric model accepting monotonicity constraints.
isotonic_regression : Function to solve the isotonic regression model.
Notes
-----
Ties are broken using the secondary method from de Leeuw, 1977.
References
----------
Isotonic Median Regression: A Linear Programming Approach
Nilotpal Chakravarti
Mathematics of Operations Research
Vol. 14, No. 2 (May, 1989), pp. 303-308
Isotone Optimization in R : Pool-Adjacent-Violators
Algorithm (PAVA) and Active Set Methods
de Leeuw, Hornik, Mair
Journal of Statistical Software 2009
Correctness of Kruskal's algorithms for monotone regression with ties
de Leeuw, Psychometrica, 1977
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.isotonic import IsotonicRegression
>>> X, y = make_regression(n_samples=10, n_features=1, random_state=41)
>>> iso_reg = IsotonicRegression().fit(X, y)
>>> iso_reg.predict([.1,.2])
array([1.8628..., 3.7256...])
"""
_parameter_constraints: dict = {
"y_min": [Interval(Real, None, None, closed="both"), None],
"y_max": [Interval(Real, None, None, closed="both"), None],
"increasing": ["boolean", StrOptions({"auto"})],
"out_of_bounds": [StrOptions({"nan", "clip", "raise"})],
}
def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"):
self.y_min = y_min
self.y_max = y_max
self.increasing = increasing
self.out_of_bounds = out_of_bounds
def _check_input_data_shape(self, X):
if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)):
msg = (
"Isotonic regression input X should be a 1d array or "
"2d array with 1 feature"
)
raise ValueError(msg)
def _build_f(self, X, y):
"""Build the f_ interp1d function."""
bounds_error = self.out_of_bounds == "raise"
if len(y) == 1:
# single y, constant prediction
self.f_ = lambda x: y.repeat(x.shape)
else:
self.f_ = interpolate.interp1d(
X, y, kind="linear", bounds_error=bounds_error
)
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
"""Build the y_ IsotonicRegression."""
self._check_input_data_shape(X)
X = X.reshape(-1) # use 1d view
# Determine increasing if auto-determination requested
if self.increasing == "auto":
self.increasing_ = check_increasing(X, y)
else:
self.increasing_ = self.increasing
# If sample_weight is passed, remove zero-weight values and clean the
# order
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
mask = sample_weight > 0
X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
order = np.lexsort((y, X))
X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
X = unique_X
y = isotonic_regression(
unique_y,
sample_weight=unique_sample_weight,
y_min=self.y_min,
y_max=self.y_max,
increasing=self.increasing_,
)
# Handle the left and right bounds on X
self.X_min_, self.X_max_ = np.min(X), np.max(X)
if trim_duplicates:
# Remove unnecessary points for faster prediction
keep_data = np.ones((len(y),), dtype=bool)
# Aside from the 1st and last point, remove points whose y values
# are equal to both the point before and the point after it.
keep_data[1:-1] = np.logical_or(
np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:])
)
return X[keep_data], y[keep_data]
else:
# The ability to turn off trim_duplicates is only used to make it
# easier to unit test that removing duplicates in y does not have
# any impact on the resulting interpolation function (besides
# prediction speed).
return X, y
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
check_params = dict(accept_sparse=False, ensure_2d=False)
X = check_array(
X, input_name="X", dtype=[np.float64, np.float32], **check_params
)
y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
check_consistent_length(X, y, sample_weight)
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self.X_thresholds_, self.y_thresholds_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
def _transform(self, T):
"""`_transform` is called by both `transform` and `predict` methods.
Since `transform` is wrapped to output arrays of specific types (e.g.
NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform`
directly.
The above behaviour could be changed in the future, if we decide to output
other type of arrays when calling `predict`.
"""
if hasattr(self, "X_thresholds_"):
dtype = self.X_thresholds_.dtype
else:
dtype = np.float64
T = check_array(T, dtype=dtype, ensure_2d=False)
self._check_input_data_shape(T)
T = T.reshape(-1) # use 1d view
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
res = self.f_(T)
# on scipy 0.17, interp1d up-casts to float64, so we cast back
res = res.astype(T.dtype)
return res
def transform(self, T):
"""Transform new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,) or (n_samples, 1)
Data to transform.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The transformed data.
"""
return self._transform(T)
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,) or (n_samples, 1)
Data to transform.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Transformed data.
"""
return self._transform(T)
# We implement get_feature_names_out here instead of using
# `ClassNamePrefixFeaturesOutMixin`` because `input_features` are ignored.
# `input_features` are ignored because `IsotonicRegression` accepts 1d
# arrays and the semantics of `feature_names_in_` are not clear for 1d arrays.
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Ignored.
Returns
-------
feature_names_out : ndarray of str objects
An ndarray with one string i.e. ["isotonicregression0"].
"""
check_is_fitted(self, "f_")
class_name = self.__class__.__name__.lower()
return np.asarray([f"{class_name}0"], dtype=object)
def __getstate__(self):
"""Pickle-protocol - return state of the estimator."""
state = super().__getstate__()
# remove interpolation method
state.pop("f_", None)
return state
def __setstate__(self, state):
"""Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
"""
super().__setstate__(state)
if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"):
self._build_f(self.X_thresholds_, self.y_thresholds_)
def _more_tags(self):
return {"X_types": ["1darray"]} |
|
scikit-learn__scikit-learn | kernel_approximation.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/kernel_approximation.rst | [
"scikit-learn__scikit-learn/sklearn/kernel_approximation.py"
] | scikit-learn__scikit-learn/sklearn/linear_model | Kernel Approximation
This submodule contains functions that approximate the feature mappings
that correspond to certain kernels, as they are used for example in
support vector machines (see svm). The following feature functions
perform non-linear transformations of the input, which can serve as a
basis for linear classification or other algorithms.
The advantage of using approximate explicit feature maps compared to the
kernel trick, which makes use of feature maps implicitly, is that
explicit mappings can be better suited for online learning and can
significantly reduce the cost of learning with very large datasets.
Standard kernelized SVMs do not scale well to large datasets, but using
an approximate kernel map it is possible to use much more efficient
linear SVMs. In particular, the combination of kernel map approximations
with SGDClassifier can make non-linear learning on large datasets
possible.
Since there has not been much empirical work using approximate
embeddings, it is advisable to compare results against exact kernel
methods when possible.
See also polynomial_regression for an exact polynomial transformation.
Nystroem Method for Kernel Approximation
The Nystroem method, as implemented in Nystroem is a general method for
low-rank approximations of kernels. It achieves this by essentially
subsampling the data on which the kernel is evaluated. By default
Nystroem uses the rbf kernel, but it can use any kernel function or a
precomputed kernel matrix. The number of samples used - which is also
the dimensionality of the features computed - is given by the parameter
n_components.
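As a brief sketch of this workflow (the digits data, the gamma value and the number of components are illustrative choices rather than recommendations), the transformer can simply be placed in front of a linear classifier:

from sklearn.datasets import load_digits
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline

X, y = load_digits(return_X_y=True)
X = X / 16.0  # scale pixel values to [0, 1]

# Approximate the rbf kernel with 300 sampled basis points, then train linearly.
model = make_pipeline(
    Nystroem(kernel="rbf", gamma=0.2, n_components=300, random_state=0),
    SGDClassifier(random_state=0),
)
model.fit(X, y)
print(model.score(X, y))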
Radial Basis Function Kernel
The RBFSampler constructs an approximate mapping for the radial basis
function kernel, also known as Random Kitchen Sinks [RR2007]. This
transformation can be used to explicitly model a kernel map, prior to
applying a linear algorithm, for example a linear SVM:
>>> from sklearn.kernel_approximation import RBFSampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> rbf_feature = RBFSampler(gamma=1, random_state=1)
>>> X_features = rbf_feature.fit_transform(X)
>>> clf = SGDClassifier(max_iter=5)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=5)
>>> clf.score(X_features, y)
1.0
The mapping relies on a Monte Carlo approximation to the kernel values.
The fit function performs the Monte Carlo sampling, whereas the
transform method performs the mapping of the data. Because of the
inherent randomness of the process, results may vary between different
calls to the fit function.
The fit function takes two arguments: n_components, which is the target
dimensionality of the feature transform, and gamma, the parameter of the
RBF-kernel. A higher n_components will result in a better approximation
of the kernel and will yield results more similar to those produced by a
kernel SVM. Note that "fitting" the feature function does not actually
depend on the data given to the fit function. Only the dimensionality of
the data is used. Details on the method can be found in [RR2007].
For a given value of n_components, RBFSampler is often less accurate than
Nystroem. RBFSampler is cheaper to compute, though, which makes the use of
larger feature spaces more efficient.
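One rough way to see this trade-off (a sketch only; the synthetic data, gamma and n_components are arbitrary, and the printed numbers will vary) is to compare the Gram matrices implied by both approximations with the exact RBF kernel matrix:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.kernel_approximation import Nystroem, RBFSampler
from sklearn.metrics.pairwise import rbf_kernel

X, _ = make_classification(n_samples=300, n_features=20, random_state=0)
gamma, n_components = 0.1, 100

K_exact = rbf_kernel(X, gamma=gamma)
Z_nys = Nystroem(gamma=gamma, n_components=n_components, random_state=0).fit_transform(X)
Z_rks = RBFSampler(gamma=gamma, n_components=n_components, random_state=0).fit_transform(X)

# The approximate kernel matrix is the Gram matrix of the mapped features.
for name, Z in [("Nystroem", Z_nys), ("RBFSampler", Z_rks)]:
    rel_err = np.linalg.norm(K_exact - Z @ Z.T) / np.linalg.norm(K_exact)
    print(name, "relative error:", rel_err)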
Additive Chi Squared Kernel
The additive chi squared kernel is a kernel on histograms, often used in
computer vision.
The additive chi squared kernel as used here is given by
$$k(x, y) = \sum_i \frac{2x_iy_i}{x_i+y_i}$$
This is not exactly the same as
sklearn.metrics.pairwise.additive_chi2_kernel. The authors of [VZ2010]
prefer the version above as it is always positive definite. Since the
kernel is additive, it is possible to treat all components x_(i)
separately for embedding. This makes it possible to sample the Fourier
transform in regular intervals, instead of approximating using Monte
Carlo sampling.
The class AdditiveChi2Sampler implements this component wise
deterministic sampling. Each component is sampled n times, yielding
2n + 1 dimensions per input dimension (the multiple of two stems from
the real and complex part of the Fourier transform). In the literature,
n is usually chosen to be 1 or 2, transforming the dataset to size
n_samples * 5 * n_features (in the case of n = 2).
The approximate feature map provided by AdditiveChi2Sampler can be
combined with the approximate feature map provided by RBFSampler to
yield an approximate feature map for the exponentiated chi squared
kernel. See [VZ2010] for details and [VVZ2010] for the combination with
the RBFSampler.
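One possible way to chain the two samplers is a pipeline that feeds the additive chi squared features into RBFSampler and then into a linear classifier; this is a sketch only, and the dataset and parameter values below are illustrative assumptions rather than tuned settings:

from sklearn.datasets import load_digits
from sklearn.kernel_approximation import AdditiveChi2Sampler, RBFSampler
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline

X, y = load_digits(return_X_y=True)  # non-negative features, as required

clf = make_pipeline(
    AdditiveChi2Sampler(sample_steps=2),
    RBFSampler(gamma=0.5, n_components=500, random_state=0),
    SGDClassifier(random_state=0),
)
clf.fit(X, y)
print(clf.score(X, y))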
Skewed Chi Squared Kernel
The skewed chi squared kernel is given by:
$$k(x,y) = \prod_i \frac{2\sqrt{x_i+c}\sqrt{y_i+c}}{x_i + y_i + 2c}$$
It has properties that are similar to the exponentiated chi squared
kernel often used in computer vision, but allows for a simple Monte
Carlo approximation of the feature map.
The usage of the SkewedChi2Sampler is the same as the usage described
above for the RBFSampler. The only difference is in the free parameter,
that is called c. For a motivation for this mapping and the mathematical
details see [LS2010].
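A minimal sketch mirroring the RBFSampler usage above (the toy data and the skewedness value are arbitrary):

from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.linear_model import SGDClassifier

X = [[0, 0], [1, 1], [1, 0], [0, 1]]
y = [0, 0, 1, 1]

# The free parameter c of the kernel is exposed as ``skewedness``.
chi2_feature = SkewedChi2Sampler(skewedness=0.01, n_components=10, random_state=0)
X_features = chi2_feature.fit_transform(X, y)

clf = SGDClassifier(max_iter=10, tol=1e-3).fit(X_features, y)
print(clf.score(X_features, y))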
Polynomial Kernel Approximation via Tensor Sketch
The polynomial kernel <polynomial_kernel> is a popular type of kernel
function given by:
$$k(x, y) = (\gamma x^\top y + c_0)^d$$
where:
- x, y are the input vectors
- d is the kernel degree
Intuitively, the feature space of the polynomial kernel of degree d
consists of all possible degree-d products among input features, which
enables learning algorithms using this kernel to account for
interactions between features.
The TensorSketch [PP2013] method, as implemented in
PolynomialCountSketch, is a scalable, input data independent method for
polynomial kernel approximation. It is based on the concept of Count
sketch [WIKICS] [CCF2002], a dimensionality reduction technique similar
to feature hashing, which instead uses several independent hash
functions. TensorSketch obtains a Count Sketch of the outer product of
two vectors (or a vector with itself), which can be used as an
approximation of the polynomial kernel feature space. In particular,
instead of explicitly computing the outer product, TensorSketch computes
the Count Sketch of the vectors and then uses polynomial multiplication
via the Fast Fourier Transform to compute the Count Sketch of their
outer product.
Conveniently, the training phase of TensorSketch simply consists of
initializing some random variables. It is thus independent of the input
data, i.e. it only depends on the number of input features, but not the
data values. In addition, this method can transform samples in
O(n_samples * (n_features + n_components * log(n_components))) time,
where n_components is the desired output dimension, determined by
n_components.
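A small sketch of this (the synthetic data, degree and n_components are illustrative, and the measured error varies from run to run) compares the Gram matrix of the sketched features with the exact polynomial kernel it approximates:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.metrics.pairwise import polynomial_kernel

X, _ = make_classification(n_samples=200, n_features=30, random_state=0)
gamma, degree, coef0 = 1.0, 2, 0

K_exact = polynomial_kernel(X, gamma=gamma, degree=degree, coef0=coef0)
Z = PolynomialCountSketch(
    gamma=gamma, degree=degree, coef0=coef0, n_components=300, random_state=0
).fit_transform(X)

print(np.linalg.norm(K_exact - Z @ Z.T) / np.linalg.norm(K_exact))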
Mathematical Details
Kernel methods like support vector machines or kernelized PCA rely on a
property of reproducing kernel Hilbert spaces. For any positive definite
kernel function k (a so called Mercer kernel), it is guaranteed that
there exists a mapping ϕ into a Hilbert space ℋ, such that
$$k(x, y) = \langle \phi(x), \phi(y) \rangle$$
where ⟨⋅, ⋅⟩ denotes the inner product in the Hilbert space.
If an algorithm, such as a linear support vector machine or PCA, relies
only on the scalar product of data points x_i, one may use the value
of k(x_i, x_j), which corresponds to applying the algorithm to the
mapped data points ϕ(x_i). The advantage of using k is that the
mapping ϕ never has to be calculated explicitly, allowing for
arbitrarily large (even infinite-dimensional) feature spaces.
One drawback of kernel methods is that it might be necessary to store
many kernel values k(x_i, x_j) during optimization. If a kernelized
classifier is applied to new data y_j, k(x_i, y_j) needs to be
computed to make predictions, possibly for many different x_i in the
training set.
The classes in this submodule make it possible to approximate the embedding ϕ,
thereby working explicitly with the representations ϕ(x_i), which
obviates the need to apply the kernel or store training examples.
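The relation k(x, y) ≈ ⟨ϕ(x), ϕ(y)⟩ can be checked numerically with a tiny sketch (random data; setting n_components equal to the number of samples makes the Nystroem map essentially exact, which is an illustrative extreme rather than a typical setting):

import numpy as np
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.rand(50, 5)

feature_map = Nystroem(gamma=1.0, n_components=50, random_state=0).fit(X)
Z = feature_map.transform(X[:2])

print(rbf_kernel(X[:1], X[1:2], gamma=1.0))  # exact k(x_0, x_1)
print(np.dot(Z[0], Z[1]))                    # approximate <phi(x_0), phi(x_1)>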
| """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms and Count Sketches.
"""
# Author: Andreas Mueller <[email protected]>
# Daniel Lopez-Sanchez (TensorSketch) <[email protected]>
# License: BSD 3 clause
import warnings
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
try:
from scipy.fft import fft, ifft
except ImportError: # scipy < 1.4
from scipy.fftpack import fft, ifft
from .base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from .metrics.pairwise import KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
from .utils import check_random_state, deprecated
from .utils._param_validation import Interval, StrOptions
from .utils.extmath import safe_sparse_dot
from .utils.validation import (
_check_feature_names_in,
check_is_fitted,
check_non_negative,
)
class PolynomialCountSketch(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
"""Polynomial kernel approximation via Tensor Sketch.
Implements Tensor Sketch, which approximates the feature map
of the polynomial kernel::
K(X, Y) = (gamma * <X, Y> + coef0)^degree
by efficiently computing a Count Sketch of the outer product of a
vector with itself using Fast Fourier Transforms (FFT). Read more in the
:ref:`User Guide <polynomial_kernel_approx>`.
.. versionadded:: 0.24
Parameters
----------
gamma : float, default=1.0
Parameter of the polynomial kernel whose feature map
will be approximated.
degree : int, default=2
Degree of the polynomial kernel whose feature map
will be approximated.
coef0 : int, default=0
Constant term of the polynomial kernel whose feature map
will be approximated.
n_components : int, default=100
Dimensionality of the output feature space. Usually, `n_components`
should be greater than the number of features in input samples in
order to achieve good performance. The optimal score / run time
balance is typically achieved around `n_components` = 10 * `n_features`,
but this depends on the specific dataset being used.
random_state : int, RandomState instance, default=None
Determines random number generation for indexHash and bitHash
initialization. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
Attributes
----------
indexHash_ : ndarray of shape (degree, n_features), dtype=int64
Array of indexes in range [0, n_components) used to represent
the 2-wise independent hash functions for Count Sketch computation.
bitHash_ : ndarray of shape (degree, n_features), dtype=float32
Array with random entries in {+1, -1}, used to represent
the 2-wise independent hash functions for Count Sketch computation.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
Examples
--------
>>> from sklearn.kernel_approximation import PolynomialCountSketch
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> ps = PolynomialCountSketch(degree=3, random_state=1)
>>> X_features = ps.fit_transform(X)
>>> clf = SGDClassifier(max_iter=10, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=10)
>>> clf.score(X_features, y)
1.0
"""
_parameter_constraints: dict = {
"gamma": [Interval(Real, 0, None, closed="left")],
"degree": [Interval(Integral, 1, None, closed="left")],
"coef0": [Interval(Real, None, None, closed="neither")],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
}
def __init__(
self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None
):
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.n_components = n_components
self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model with X.
Initializes the internal variables. The method needs no information
about the distribution of data, so we only care about n_features in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csc")
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
if self.coef0 != 0:
n_features += 1
self.indexHash_ = random_state.randint(
0, high=self.n_components, size=(self.degree, n_features)
)
self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
self._n_features_out = self.n_components
return self
def transform(self, X):
"""Generate the feature map approximation for X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The approximate feature map of X.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csc", reset=False)
X_gamma = np.sqrt(self.gamma) * X
if sp.issparse(X_gamma) and self.coef0 != 0:
X_gamma = sp.hstack(
[X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))],
format="csc",
)
elif not sp.issparse(X_gamma) and self.coef0 != 0:
X_gamma = np.hstack(
[X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))]
)
if X_gamma.shape[1] != self.indexHash_.shape[1]:
raise ValueError(
"Number of features of test samples does not"
" match that of training samples."
)
count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components))
if sp.issparse(X_gamma):
for j in range(X_gamma.shape[1]):
for d in range(self.degree):
iHashIndex = self.indexHash_[d, j]
iHashBit = self.bitHash_[d, j]
count_sketches[:, d, iHashIndex] += (
(iHashBit * X_gamma[:, j]).toarray().ravel()
)
else:
for j in range(X_gamma.shape[1]):
for d in range(self.degree):
iHashIndex = self.indexHash_[d, j]
iHashBit = self.bitHash_[d, j]
count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j]
# For each sample, compute a count sketch of phi(x) using the polynomial
# multiplication (via FFT) of p count sketches of x.
count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True)
count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1)
data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True))
return data_sketch
class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Approximate a RBF kernel feature map using random Fourier features.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma :'scale' or float, default=1.0
Parameter of RBF kernel: exp(-gamma * x^2).
If ``gamma='scale'`` is passed then it uses
1 / (n_features * X.var()) as value of gamma.
.. versionadded:: 1.2
The option `"scale"` was added in 1.2.
n_components : int, default=100
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
weights and random offset when fitting the training data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32}
Random offset used to compute the projection in the `n_components`
dimensions of the feature space.
random_weights_ : ndarray of shape (n_features, n_components),\
dtype={np.float64, np.float32}
Random projection directions drawn from the Fourier transform
of the RBF kernel.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
SkewedChi2Sampler : Approximate feature map for
"skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
Examples
--------
>>> from sklearn.kernel_approximation import RBFSampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> rbf_feature = RBFSampler(gamma=1, random_state=1)
>>> X_features = rbf_feature.fit_transform(X)
>>> clf = SGDClassifier(max_iter=5, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=5)
>>> clf.score(X_features, y)
1.0
"""
_parameter_constraints: dict = {
"gamma": [
StrOptions({"scale"}),
Interval(Real, 0.0, None, closed="left"),
],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
}
def __init__(self, *, gamma=1.0, n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csr")
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
sparse = sp.issparse(X)
if self.gamma == "scale":
# var = E[X^2] - E[X]^2 if sparse
X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0
else:
self._gamma = self.gamma
self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(
size=(n_features, self.n_components)
)
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
if X.dtype == np.float32:
# Setting the data type of the fitted attribute will ensure the
# output data type during `transform`.
self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
self._n_features_out = self.n_components
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The approximate feature map of X.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= (2.0 / self.n_components) ** 0.5
return projection
def _more_tags(self):
return {"preserves_dtype": [np.float64, np.float32]}
class SkewedChi2Sampler(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
"""Approximate feature map for "skewed chi-squared" kernel.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float, default=1.0
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int, default=100
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
weights and random offset when fitting the training data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
random_weights_ : ndarray of shape (n_features, n_components)
Weight array, sampled from a secant hyperbolic distribution, which will
be used to linearly transform the log of the data.
random_offset_ : ndarray of shape (n_features, n_components)
Bias term, which will be added to the data. It is uniformly distributed
between 0 and 2*pi.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
Nystroem : Approximate a kernel map using a subset of the training data.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
Examples
--------
>>> from sklearn.kernel_approximation import SkewedChi2Sampler
>>> from sklearn.linear_model import SGDClassifier
>>> X = [[0, 0], [1, 1], [1, 0], [0, 1]]
>>> y = [0, 0, 1, 1]
>>> chi2_feature = SkewedChi2Sampler(skewedness=.01,
... n_components=10,
... random_state=0)
>>> X_features = chi2_feature.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=10, tol=1e-3)
>>> clf.fit(X_features, y)
SGDClassifier(max_iter=10)
>>> clf.score(X_features, y)
1.0
"""
_parameter_constraints: dict = {
"skewedness": [Interval(Real, None, None, closed="neither")],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
}
def __init__(self, *, skewedness=1.0, n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
if X.dtype == np.float32:
# Setting the data type of the fitted attribute will ensure the
# output data type during `transform`.
self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
self._n_features_out = self.n_components
return self
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
The approximate feature map of X.
"""
check_is_fitted(self)
X = self._validate_data(
X, copy=True, dtype=[np.float64, np.float32], reset=False
)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
return projection
def _more_tags(self):
return {"preserves_dtype": [np.float64, np.float32]}
class AdditiveChi2Sampler(TransformerMixin, BaseEstimator):
"""Approximate feature map for additive chi2 kernel.
Approximates the feature map by sampling the Fourier transform of the
kernel characteristic at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, default=2
Gives the number of (complex) sampling points.
sample_interval : float, default=None
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Attributes
----------
sample_interval_ : float
Stored sampling interval. Specified as a parameter if `sample_steps`
not in {1,2,3}.
.. deprecated:: 1.3
`sample_interval_` serves internal purposes only and will be removed in 1.5.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
This estimator is stateless and does not need to be fitted. However, we
recommend to call :meth:`fit_transform` instead of :meth:`transform`, as
parameter validation is only performed in :meth:`fit`.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.kernel_approximation import AdditiveChi2Sampler
>>> X, y = load_digits(return_X_y=True)
>>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
>>> X_transformed = chi2sampler.fit_transform(X, y)
>>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3)
>>> clf.fit(X_transformed, y)
SGDClassifier(max_iter=5, random_state=0)
>>> clf.score(X_transformed, y)
0.9499...
"""
_parameter_constraints: dict = {
"sample_steps": [Interval(Integral, 1, None, closed="left")],
"sample_interval": [Interval(Real, 0, None, closed="left"), None],
}
def __init__(self, *, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Only validates estimator's parameters.
This method allows to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the transformer.
"""
X = self._validate_data(X, accept_sparse="csr")
check_non_negative(X, "X in AdditiveChi2Sampler.fit")
# TODO(1.5): remove the setting of _sample_interval from fit
if self.sample_interval is None:
# See figure 2 c) of "Efficient additive kernels via explicit feature maps"
# <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
# A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
# 2011
if self.sample_steps == 1:
self._sample_interval = 0.8
elif self.sample_steps == 2:
self._sample_interval = 0.5
elif self.sample_steps == 3:
self._sample_interval = 0.4
else:
raise ValueError(
"If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval"
)
else:
self._sample_interval = self.sample_interval
return self
# TODO(1.5): remove
@deprecated( # type: ignore
"The ``sample_interval_`` attribute was deprecated in version 1.3 and "
"will be removed 1.5."
)
@property
def sample_interval_(self):
return self._sample_interval
def transform(self, X):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : {ndarray, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
X = self._validate_data(X, accept_sparse="csr", reset=False)
check_non_negative(X, "X in AdditiveChi2Sampler.transform")
sparse = sp.issparse(X)
if hasattr(self, "_sample_interval"):
# TODO(1.5): remove this branch
sample_interval = self._sample_interval
else:
if self.sample_interval is None:
# See figure 2 c) of "Efficient additive kernels via explicit feature maps" # noqa
# <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
# A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, # noqa
# 2011
if self.sample_steps == 1:
sample_interval = 0.8
elif self.sample_steps == 2:
sample_interval = 0.5
elif self.sample_steps == 3:
sample_interval = 0.4
else:
raise ValueError(
"If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval"
)
else:
sample_interval = self.sample_interval
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X, self.sample_steps, sample_interval)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in :meth:`fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(
self, input_features, generate_names=True
)
est_name = self.__class__.__name__.lower()
names_list = [f"{est_name}_{name}_sqrt" for name in input_features]
for j in range(1, self.sample_steps):
cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
names_list.extend(cos_names + sin_names)
return np.asarray(names_list, dtype=object)
@staticmethod
def _transform_dense(X, sample_steps, sample_interval):
non_zero = X != 0.0
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * sample_interval)
X_new = [X_step]
log_step_nz = sample_interval * np.log(X_nz)
step_nz = 2 * X_nz * sample_interval
for j in range(1, sample_steps):
factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
@staticmethod
def _transform_sparse(X, sample_steps, sample_interval):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * sample_interval)
X_step = sp.csr_matrix(
(data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
)
X_new = [X_step]
log_step_nz = sample_interval * np.log(X.data)
step_nz = 2 * X.data * sample_interval
for j in range(1, sample_steps):
factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix(
(data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix(
(data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False
)
X_new.append(X_step)
return sp.hstack(X_new)
def _more_tags(self):
return {"stateless": True, "requires_positive_X": True}
class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
.. versionadded:: 0.13
Parameters
----------
kernel : str or callable, default='rbf'
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as `kernel_params`, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
coef0 : float, default=None
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
degree : float, default=None
Degree of the polynomial kernel. Ignored by other kernels.
kernel_params : dict, default=None
Additional parameters (keyword arguments) for kernel function passed
as callable object.
n_components : int, default=100
Number of features to construct.
How many data points will be used to construct the mapping.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the uniform sampling without
replacement of `n_components` of the training data to construct the
basis kernel.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the kernel matrix into `n_jobs` even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.24
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : ndarray of shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : ndarray of shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel.
PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch.
RBFSampler : Approximate a RBF kernel feature map using random Fourier
features.
SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
Examples
--------
>>> from sklearn import datasets, svm
>>> from sklearn.kernel_approximation import Nystroem
>>> X, y = datasets.load_digits(n_class=9, return_X_y=True)
>>> data = X / 16.
>>> clf = svm.LinearSVC(dual="auto")
>>> feature_map_nystroem = Nystroem(gamma=.2,
... random_state=1,
... n_components=300)
>>> data_transformed = feature_map_nystroem.fit_transform(data)
>>> clf.fit(data_transformed, y)
LinearSVC(dual='auto')
>>> clf.score(data_transformed, y)
0.9987...
"""
_parameter_constraints: dict = {
"kernel": [
StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
callable,
],
"gamma": [Interval(Real, 0, None, closed="left"), None],
"coef0": [Interval(Real, None, None, closed="neither"), None],
"degree": [Interval(Real, 1, None, closed="left"), None],
"kernel_params": [dict, None],
"n_components": [Interval(Integral, 1, None, closed="left")],
"random_state": ["random_state"],
"n_jobs": [Integral, None],
}
def __init__(
self,
kernel="rbf",
*,
gamma=None,
coef0=None,
degree=None,
kernel_params=None,
n_components=100,
random_state=None,
n_jobs=None,
):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
self.n_jobs = n_jobs
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, accept_sparse="csr")
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn(
"n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel."
)
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(
basis,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**self._get_kernel_params(),
)
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = basis_inds
self._n_features_out = n_components
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(
X,
self.components_,
metric=self.kernel,
filter_params=True,
n_jobs=self.n_jobs,
**kernel_params,
)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel) and self.kernel != "precomputed":
for param in KERNEL_PARAMS[self.kernel]:
if getattr(self, param) is not None:
params[param] = getattr(self, param)
else:
if (
self.gamma is not None
or self.coef0 is not None
or self.degree is not None
):
raise ValueError(
"Don't pass gamma, coef0 or degree to "
"Nystroem if using a callable "
"or precomputed kernel"
)
return params
def _more_tags(self):
return {
"_xfail_checks": {
"check_transformer_preserve_dtypes": (
"dtypes are preserved but not at a close enough precision"
)
},
"preserves_dtype": [np.float64, np.float32],
} |
scikit-learn__scikit-learn | kernel_ridge.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/kernel_ridge.rst | [
"scikit-learn__scikit-learn/sklearn/kernel_ridge.py"
] | Kernel ridge regression
Kernel ridge regression (KRR) [M2012] combines ridge_regression (linear
least squares with l2-norm regularization) with the kernel trick. It
thus learns a linear function in the space induced by the respective
kernel and the data. For non-linear kernels, this corresponds to a
non-linear function in the original space.
The form of the model learned by KernelRidge is identical to support
vector regression (~sklearn.svm.SVR). However, different loss functions
are used: KRR uses squared error loss while support vector regression
uses ϵ-insensitive loss, both combined with l2 regularization. In
contrast to ~sklearn.svm.SVR, fitting KernelRidge can be done in
closed-form and is typically faster for medium-sized datasets. On the
other hand, the learned model is non-sparse and thus slower at prediction
time than ~sklearn.svm.SVR, which learns a sparse model for ϵ > 0.
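A condensed sketch of this comparison (the noisy sinusoid mirrors the dataset described below, but the hyperparameter values are illustrative rather than the grid-searched ones from the figures):

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = 5 * rng.rand(100, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))  # strong noise on every fifth point

krr = KernelRidge(kernel="rbf", alpha=1.0, gamma=0.1).fit(X, y)
svr = SVR(kernel="rbf", C=1.0, gamma=0.1, epsilon=0.1).fit(X, y)

X_plot = np.linspace(0, 5, 5)[:, None]
print(krr.predict(X_plot))
print(svr.predict(X_plot))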
The following figure compares KernelRidge and ~sklearn.svm.SVR on an
artificial dataset, which consists of a sinusoidal target function and
strong noise added to every fifth datapoint. The learned models of
KernelRidge and ~sklearn.svm.SVR are plotted, where both
complexity/regularization and bandwidth of the RBF kernel have been
optimized using grid-search. The learned functions are very similar;
however, fitting KernelRidge is approximately seven times faster than
fitting ~sklearn.svm.SVR (both with grid-search). However, prediction of
100000 target values is more than three times faster with
~sklearn.svm.SVR since it has learned a sparse model using only
approximately 1/3 of the 100 training datapoints as support vectors.
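A rough way to reproduce this kind of measurement is sketched below; absolute timings depend heavily on hardware, library versions and the (here arbitrary) hyperparameters:

import time

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = 5 * rng.rand(2000, 1)
y = np.sin(X).ravel() + 0.1 * rng.randn(2000)

for name, est in [("KernelRidge", KernelRidge(kernel="rbf", gamma=0.1)),
                  ("SVR", SVR(kernel="rbf", gamma=0.1))]:
    t0 = time.time()
    est.fit(X, y)
    fit_time = time.time() - t0
    t0 = time.time()
    est.predict(X)
    predict_time = time.time() - t0
    print(f"{name}: fit {fit_time:.3f}s, predict {predict_time:.3f}s")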
The next figure compares the time for fitting and prediction of
KernelRidge and ~sklearn.svm.SVR for different sizes of the training
set. Fitting KernelRidge is faster than ~sklearn.svm.SVR for
medium-sized training sets (less than 1000 samples); however, for larger
training sets ~sklearn.svm.SVR scales better. With regard to prediction
time, ~sklearn.svm.SVR is faster than KernelRidge for all sizes of the
training set because of the learned sparse solution. Note that the
degree of sparsity and thus the prediction time depends on the
parameters ϵ and C of the ~sklearn.svm.SVR; ϵ = 0 would correspond to a
dense model. | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from numbers import Integral, Real
import numpy as np
from .base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context
from .linear_model._ridge import _solve_cholesky_kernel
from .metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels
from .utils._param_validation import Interval, StrOptions
from .utils.validation import _check_sample_weight, check_is_fitted
class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : float or array-like of shape (n_targets,), default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number. See :ref:`ridge_regression` for formula.
kernel : str or callable, default="linear"
Kernel mapping used internally. This parameter is directly passed to
:class:`~sklearn.metrics.pairwise.pairwise_kernels`.
If `kernel` is a string, it must be one of the metrics
in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed".
If `kernel` is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if `kernel` is a callable function, it is called on
each pair of instances (rows) and the resulting value recorded. The
callable should take two rows from X as input and return the
corresponding kernel value as a single number. This means that
callables from :mod:`sklearn.metrics.pairwise` are not allowed, as
they operate on matrices, not single samples. Use the string
identifying the kernel instead.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : int, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : dict, default=None
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets)
Representation of weight vector(s) in kernel space
X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data, which is also required for prediction. If
kernel == "precomputed" this is instead the precomputed
training matrix, of shape (n_samples, n_samples).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.gaussian_process.GaussianProcessRegressor : Gaussian
Process regressor providing automatic kernel hyperparameters
tuning and predictions uncertainty.
sklearn.linear_model.Ridge : Linear ridge regression.
sklearn.linear_model.RidgeCV : Ridge regression with built-in
cross-validation.
sklearn.svm.SVR : Support Vector Regression accepting a large variety
of kernels.
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> krr = KernelRidge(alpha=1.0)
>>> krr.fit(X, y)
KernelRidge(alpha=1.0)
"""
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
"kernel": [
StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}),
callable,
],
"gamma": [Interval(Real, 0, None, closed="left"), None],
"degree": [Interval(Integral, 0, None, closed="left")],
"coef0": [Interval(Real, None, None, closed="neither")],
"kernel_params": [dict, None],
}
def __init__(
self,
alpha=1,
*,
kernel="linear",
gamma=None,
degree=3,
coef0=1,
kernel_params=None,
):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params)
def _more_tags(self):
return {"pairwise": self.kernel == "precomputed"}
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Kernel Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. If kernel == "precomputed" this is instead
a precomputed kernel matrix, of shape (n_samples, n_samples).
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : object
Returns the instance itself.
"""
# Convert data
X, y = self._validate_data(
X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = _check_sample_weight(sample_weight, X)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples. If kernel == "precomputed" this is instead a
precomputed kernel matrix, shape = [n_samples,
n_samples_fitted], where n_samples_fitted is the number of
samples used in the fitting for this estimator.
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_targets)
Returns predicted values.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse=("csr", "csc"), reset=False)
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_) |
|
scikit-learn__scikit-learn | metrics.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/metrics.rst | [
"scikit-learn__scikit-learn/sklearn/metrics/pairwise.py"
] | scikit-learn__scikit-learn/sklearn/metrics | Pairwise metrics, Affinities and Kernels
The sklearn.metrics.pairwise submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary
is given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if
objects a and b are considered "more similar" than objects a and c. Two
objects exactly alike would have a distance of zero. One of the most
popular examples is Euclidean distance. To be a 'true' metric, it must
obey the following four conditions:
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
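As a quick numerical spot-check of these conditions (an illustration only, with made-up points; `pairwise_distances` is the utility described further below):
    import numpy as np
    from sklearn.metrics import pairwise_distances

    # Three made-up 2-D points; Euclidean distance is a true metric.
    a, b, c = np.array([[0.0, 0.0]]), np.array([[3.0, 4.0]]), np.array([[6.0, 0.0]])
    d_ab = pairwise_distances(a, b)[0, 0]  # 5.0
    d_ac = pairwise_distances(a, c)[0, 0]  # 6.0
    d_bc = pairwise_distances(b, c)[0, 0]  # 5.0
    assert d_ab >= 0 and pairwise_distances(a, a)[0, 0] == 0  # conditions 1 and 2
    assert d_ab == pairwise_distances(b, a)[0, 0]             # symmetry
    assert d_ac <= d_ab + d_bc                                # triangle inequality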
Kernels are measures of similarity, i.e. s(a, b) > s(a, c) if objects a
and b are considered "more similar" than objects a and c. A kernel must
also be positive semi-definite.
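A kernel matrix built from any of the kernels below is therefore symmetric with non-negative eigenvalues; a minimal sketch checking this numerically (random data, and `rbf_kernel`, described below, chosen arbitrarily):
    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel

    rng = np.random.RandomState(0)
    X = rng.randn(5, 3)
    K = rbf_kernel(X)                     # Gram matrix of a valid kernel
    eigenvalues = np.linalg.eigvalsh(K)   # symmetric eigensolver
    assert (eigenvalues >= -1e-10).all()  # PSD up to floating-point round-off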
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be
the kernel:
1. S = np.exp(-D * gamma), where one heuristic for choosing gamma is
1 / num_features
2. S = 1. / (D / np.max(D))
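For instance, the first conversion can be sketched as follows (the data is made up, and gamma = 1 / num_features is only the heuristic mentioned above, not a tuned value):
    import numpy as np
    from sklearn.metrics import pairwise_distances

    X = np.array([[2.0, 3.0], [3.0, 5.0], [5.0, 8.0]])
    D = pairwise_distances(X)   # Euclidean distances by default
    gamma = 1.0 / X.shape[1]    # heuristic: 1 / num_features
    S = np.exp(-D * gamma)      # similarities in (0, 1], with 1.0 on the diagonal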
The distances between the row vectors of X and the row vectors of Y can
be evaluated using pairwise_distances. If Y is omitted the pairwise
distances of the row vectors of X are calculated. Similarly,
pairwise.pairwise_kernels can be used to calculate the kernel between X
and Y using different kernel functions. See the API reference for more
details.
>>> import numpy as np
>>> from sklearn.metrics import pairwise_distances
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> X = np.array([[2, 3], [3, 5], [5, 8]])
>>> Y = np.array([[1, 0], [2, 1]])
>>> pairwise_distances(X, Y, metric='manhattan')
array([[ 4.,  2.],
       [ 7.,  5.],
       [12., 10.]])
>>> pairwise_distances(X, metric='manhattan')
array([[0., 3., 8.],
       [3., 0., 5.],
       [8., 5., 0.]])
>>> pairwise_kernels(X, Y, metric='linear')
array([[ 2.,  7.],
       [ 3., 11.],
       [ 5., 18.]])
Cosine similarity
cosine_similarity computes the L2-normalized dot product of vectors.
That is, if x and y are row vectors, their cosine similarity k is
defined as:
$$k(x, y) = \frac{x y^\top}{\|x\| \|y\|}$$
This is called cosine similarity, because Euclidean (L2) normalization
projects the vectors onto the unit sphere, and their dot product is then
the cosine of the angle between the points denoted by the vectors.
This kernel is a popular choice for computing the similarity of
documents represented as tf-idf vectors. cosine_similarity accepts
scipy.sparse matrices. (Note that the tf-idf functionality in
sklearn.feature_extraction.text can produce normalized vectors, in which
case cosine_similarity is equivalent to linear_kernel, only slower.)
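A short sketch of that equivalence (the documents are made up; `TfidfVectorizer` L2-normalizes rows by default):
    import numpy as np
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity, linear_kernel

    docs = ["the cat sat", "the cat sat on the mat", "an entirely different topic"]
    tfidf = TfidfVectorizer().fit_transform(docs)  # sparse, L2-normalized rows
    assert np.allclose(cosine_similarity(tfidf), linear_kernel(tfidf))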
Linear kernel
The function linear_kernel computes the linear kernel, that is, a
special case of polynomial_kernel with degree=1 and coef0=0
(homogeneous). If x and y are column vectors, their linear kernel is:
$$k(x, y) = x^\top y$$
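A minimal sketch of that special case (note that `gamma=1` must be passed explicitly, since `polynomial_kernel` otherwise defaults to 1 / n_features; the data is arbitrary):
    import numpy as np
    from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel

    X = np.array([[0.0, 1.0], [1.0, 2.0]])
    assert np.allclose(
        linear_kernel(X),                                    # X @ X.T
        polynomial_kernel(X, degree=1, gamma=1.0, coef0=0),  # same matrix
    )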
Polynomial kernel
The function polynomial_kernel computes the degree-d polynomial kernel
between two vectors. The polynomial kernel represents the similarity
between two vectors. Conceptually, the polynomial kernel considers not
only the similarity between vectors under the same dimension, but also
across dimensions. When used in machine learning algorithms, this makes it
possible to account for feature interactions.
The polynomial kernel is defined as:
$$k(x, y) = (\gamma x^\top y + c_0)^d$$
where:
- x, y are the input vectors
- d is the kernel degree
If c₀ = 0 the kernel is said to be homogeneous.
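A minimal numerical check of this definition (the data is arbitrary; with `gamma=None` the function uses 1 / n_features):
    import numpy as np
    from sklearn.metrics.pairwise import polynomial_kernel

    X = np.array([[0.0, 1.0], [1.0, 1.0]])
    K = polynomial_kernel(X, degree=2, coef0=1)  # gamma defaults to 1 / n_features
    gamma = 1.0 / X.shape[1]
    assert np.isclose(K[0, 1], (gamma * X[0] @ X[1] + 1) ** 2)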
Sigmoid kernel
The function sigmoid_kernel computes the sigmoid kernel between two
vectors. The sigmoid kernel is also known as hyperbolic tangent, or
Multilayer Perceptron (because, in the neural network field, it is often
used as a neuron activation function). It is defined as:
$$k(x, y) = \tanh(\gamma x^\top y + c_0)$$
where:
- x, y are the input vectors
- γ is known as slope
- c₀ is known as intercept
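A minimal sketch checking this definition (the slope and intercept values are arbitrary):
    import numpy as np
    from sklearn.metrics.pairwise import sigmoid_kernel

    X = np.array([[0.0, 1.0], [1.0, 1.0]])
    K = sigmoid_kernel(X, gamma=0.5, coef0=1)  # tanh(0.5 * <x, y> + 1)
    assert np.isclose(K[0, 1], np.tanh(0.5 * X[0] @ X[1] + 1))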
RBF kernel
The function rbf_kernel computes the radial basis function (RBF) kernel
between two vectors. This kernel is defined as:
$$k(x, y) = \exp(-\gamma \|x - y\|^2)$$
where x and y are the input vectors. If γ = σ⁻² the kernel is known as
the Gaussian kernel of variance σ².
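Equivalently, `rbf_kernel` exponentiates the negated squared Euclidean distances; a small sketch with arbitrary data and gamma:
    import numpy as np
    from sklearn.metrics.pairwise import euclidean_distances, rbf_kernel

    X = np.array([[0.0, 0.0], [1.0, 1.0]])
    gamma = 0.5
    assert np.allclose(
        rbf_kernel(X, gamma=gamma),
        np.exp(-gamma * euclidean_distances(X, squared=True)),
    )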
Laplacian kernel
The function laplacian_kernel is a variant on the radial basis function
kernel defined as:
$$k(x, y) = \exp(-\gamma \|x - y\|_1)$$
where x and y are the input vectors and ∥x − y∥₁ is the Manhattan
distance between the input vectors.
It has proven useful in ML applied to noiseless data. See e.g. Machine
learning for quantum mechanics in a nutshell.
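The same kind of numerical check as for the RBF kernel applies, with the Manhattan distance in the exponent (arbitrary data and gamma):
    import numpy as np
    from sklearn.metrics.pairwise import laplacian_kernel, manhattan_distances

    X = np.array([[0.0, 0.0], [1.0, 3.0]])
    gamma = 0.5
    assert np.allclose(
        laplacian_kernel(X, gamma=gamma),
        np.exp(-gamma * manhattan_distances(X)),
    )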
Chi-squared kernel
The chi-squared kernel is a very popular choice for training non-linear
SVMs in computer vision applications. It can be computed using
chi2_kernel and then passed to an ~sklearn.svm.SVC with
kernel="precomputed":
>>> from sklearn.svm import SVC
>>> from sklearn.metrics.pairwise import chi2_kernel
>>> X = [[0, 1], [1, 0], [.2, .8], [.7, .3]]
>>> y = [0, 1, 0, 1]
>>> K = chi2_kernel(X, gamma=.5)
>>> K
array([[1. , 0.36787944, 0.89483932, 0.58364548],
[0.36787944, 1. , 0.51341712, 0.83822343],
[0.89483932, 0.51341712, 1. , 0.7768366 ],
[0.58364548, 0.83822343, 0.7768366 , 1. ]])
>>> svm = SVC(kernel='precomputed').fit(K, y)
>>> svm.predict(K)
array([0, 1, 0, 1])
It can also be directly used as the kernel argument:
>>> svm = SVC(kernel=chi2_kernel).fit(X, y)
>>> svm.predict(X)
array([0, 1, 0, 1])
The chi squared kernel is given by
$$k(x, y) = \exp \left (-\gamma \sum_i \frac{(x[i] - y[i]) ^ 2}{x[i] + y[i]} \right )$$
The data is assumed to be non-negative, and is often normalized to have
an L1-norm of one. The normalization is rationalized with the connection
to the chi squared distance, which is a distance between discrete
probability distributions.
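A minimal sketch of that convention (the histogram values are made up):
    import numpy as np
    from sklearn.preprocessing import normalize
    from sklearn.metrics.pairwise import chi2_kernel

    H = np.array([[3.0, 1.0, 0.0], [1.0, 1.0, 2.0]])  # made-up histograms
    H = normalize(H, norm="l1")                       # rows now sum to one
    K = chi2_kernel(H, gamma=0.5)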
The chi squared kernel is most commonly used on histograms (bags) of
visual words. | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import warnings
from functools import partial
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from scipy.sparse import csr_matrix, issparse
from scipy.spatial import distance
from .. import config_context
from ..exceptions import DataConversionWarning
from ..preprocessing import normalize
from ..utils import (
check_array,
gen_batches,
gen_even_slices,
get_chunk_n_rows,
is_scalar_nan,
)
from ..utils._mask import _get_mask
from ..utils._param_validation import (
Hidden,
Interval,
MissingValues,
Options,
StrOptions,
validate_params,
)
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.fixes import parse_version, sp_base_version
from ..utils.parallel import Parallel, delayed
from ..utils.validation import _num_samples, check_non_negative
from ._pairwise_distances_reduction import ArgKmin
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = float
return X, Y, dtype
def check_pairwise_arrays(
X,
Y,
*,
precomputed=False,
dtype=None,
accept_sparse="csr",
force_all_finite=True,
copy=False,
):
"""Set X and Y appropriately and checks inputs.
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
precomputed : bool, default=False
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : str, type, list of type, default=None
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
accept_sparse : str, bool or list/tuple of str, default='csr'
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
copy : bool, default=False
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
estimator = "check_pairwise_arrays"
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(
X,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
estimator=estimator,
)
else:
X = check_array(
X,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
estimator=estimator,
)
Y = check_array(
Y,
accept_sparse=accept_sparse,
dtype=dtype,
copy=copy,
force_all_finite=force_all_finite,
estimator=estimator,
)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError(
"Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])
)
elif X.shape[1] != Y.shape[1]:
raise ValueError(
"Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (X.shape[1], Y.shape[1])
)
return X, Y
def check_paired_arrays(X, Y):
"""Set X and Y appropriately and checks inputs for paired distances.
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Returns
-------
safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError(
"X and Y should be of same shape. They were respectively %r and %r long."
% (X.shape, Y.shape)
)
return X, Y
# Pairwise distances
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"Y_norm_squared": ["array-like", None],
"squared": ["boolean"],
"X_norm_squared": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def euclidean_distances(
X, Y=None, *, Y_norm_squared=None, squared=False, X_norm_squared=None
):
"""
Compute the distance matrix between each pair from a vector array X and Y.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation,
because this equation potentially suffers from "catastrophic cancellation".
Also, the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
Y_norm_squared : array-like of shape (n_samples_Y,) or (n_samples_Y, 1) \
or (1, n_samples_Y), default=None
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
squared : bool, default=False
Return squared Euclidean distances.
X_norm_squared : array-like of shape (n_samples_X,) or (n_samples_X, 1) \
or (1, n_samples_X), default=None
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
Returns the distances between the row vectors of `X`
and the row vectors of `Y`.
See Also
--------
paired_distances : Distances between pairs of elements of X and Y.
Notes
-----
To achieve a better accuracy, `X_norm_squared` and `Y_norm_squared` may be
unused if they are passed as `np.float32`.
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[0., 1.],
[1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
X_norm_squared = check_array(X_norm_squared, ensure_2d=False)
original_shape = X_norm_squared.shape
if X_norm_squared.shape == (X.shape[0],):
X_norm_squared = X_norm_squared.reshape(-1, 1)
if X_norm_squared.shape == (1, X.shape[0]):
X_norm_squared = X_norm_squared.T
if X_norm_squared.shape != (X.shape[0], 1):
raise ValueError(
f"Incompatible dimensions for X of shape {X.shape} and "
f"X_norm_squared of shape {original_shape}."
)
if Y_norm_squared is not None:
Y_norm_squared = check_array(Y_norm_squared, ensure_2d=False)
original_shape = Y_norm_squared.shape
if Y_norm_squared.shape == (Y.shape[0],):
Y_norm_squared = Y_norm_squared.reshape(1, -1)
if Y_norm_squared.shape == (Y.shape[0], 1):
Y_norm_squared = Y_norm_squared.T
if Y_norm_squared.shape != (1, Y.shape[0]):
raise ValueError(
f"Incompatible dimensions for Y of shape {Y.shape} and "
f"Y_norm_squared of shape {original_shape}."
)
return _euclidean_distances(X, Y, X_norm_squared, Y_norm_squared, squared)
def _euclidean_distances(X, Y, X_norm_squared=None, Y_norm_squared=None, squared=False):
"""Computational part of euclidean_distances
Assumes inputs are already checked.
If norms are passed as float32, they are unused. If arrays are passed as
float32, norms needs to be recomputed on upcast chunks.
TODO: use a float64 accumulator in row_norms to avoid the latter.
"""
if X_norm_squared is not None:
if X_norm_squared.dtype == np.float32:
XX = None
else:
XX = X_norm_squared.reshape(-1, 1)
elif X.dtype == np.float32:
XX = None
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if Y is X:
YY = None if XX is None else XX.T
else:
if Y_norm_squared is not None:
if Y_norm_squared.dtype == np.float32:
YY = None
else:
YY = Y_norm_squared.reshape(1, -1)
elif Y.dtype == np.float32:
YY = None
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X.dtype == np.float32:
# To minimize precision issues with float32, we compute the distance
# matrix on chunks of X and Y upcast to float64
distances = _euclidean_distances_upcast(X, XX, Y, YY)
else:
# if dtype is already float64, no need to chunk and upcast
distances = -2 * safe_sparse_dot(X, Y.T, dense_output=True)
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
if X is Y:
np.fill_diagonal(distances, 0)
return distances if squared else np.sqrt(distances, out=distances)
@validate_params(
{
"X": ["array-like"],
"Y": ["array-like", None],
"squared": ["boolean"],
"missing_values": [MissingValues(numeric_only=True)],
"copy": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def nan_euclidean_distances(
X, Y=None, *, squared=False, missing_values=np.nan, copy=True
):
"""Calculate the euclidean distances in the presence of missing values.
Compute the euclidean distance between each pair of samples in X and Y,
where Y=X is assumed if Y=None. When calculating the distance between a
pair of samples, this formulation ignores feature coordinates with a
missing value in either sample and scales up the weight of the remaining
coordinates:
dist(x,y) = sqrt(weight * sq. distance from present coordinates)
where,
weight = Total # of coordinates / # of present coordinates
For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
is:
.. math::
\\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
If all the coordinates are missing or if there are no common present
coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
.. versionadded:: 0.22
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples_Y, n_features), default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
squared : bool, default=False
Return squared Euclidean distances.
missing_values : np.nan, float or int, default=np.nan
Representation of missing value.
copy : bool, default=True
Make and use a deep copy of X and Y (if Y exists).
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
Returns the distances between the row vectors of `X`
and the row vectors of `Y`.
See Also
--------
paired_distances : Distances between pairs of elements of X and Y.
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
Examples
--------
>>> from sklearn.metrics.pairwise import nan_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> nan_euclidean_distances(X, X) # distance between rows of X
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> nan_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
"""
force_all_finite = "allow-nan" if is_scalar_nan(missing_values) else True
X, Y = check_pairwise_arrays(
X, Y, accept_sparse=False, force_all_finite=force_all_finite, copy=copy
)
# Get missing mask for X
missing_X = _get_mask(X, missing_values)
# Get missing mask for Y
missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
# set missing values to zero
X[missing_X] = 0
Y[missing_Y] = 0
distances = euclidean_distances(X, Y, squared=True)
# Adjust distances for missing values
XX = X * X
YY = Y * Y
distances -= np.dot(XX, missing_Y.T)
distances -= np.dot(missing_X, YY.T)
np.clip(distances, 0, None, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
np.fill_diagonal(distances, 0.0)
present_X = 1 - missing_X
present_Y = present_X if Y is X else ~missing_Y
present_count = np.dot(present_X, present_Y.T)
distances[present_count == 0] = np.nan
# avoid divide by zero
np.maximum(1, present_count, out=present_count)
distances /= present_count
distances *= X.shape[1]
if not squared:
np.sqrt(distances, out=distances)
return distances
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
"""Euclidean distances between X and Y.
Assumes X and Y have float32 dtype.
Assumes XX and YY have float64 dtype or are None.
X and Y are upcast to float64 by chunks, which size is chosen to limit
memory increase by approximately 10% (at least 10MiB).
"""
n_samples_X = X.shape[0]
n_samples_Y = Y.shape[0]
n_features = X.shape[1]
distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
if batch_size is None:
x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
# Allow 10% more memory than X, Y and the distance matrix take (at
# least 10MiB)
maxmem = max(
(
(x_density * n_samples_X + y_density * n_samples_Y) * n_features
+ (x_density * n_samples_X * y_density * n_samples_Y)
)
/ 10,
10 * 2**17,
)
# The increase amount of memory in 8-byte blocks is:
# - x_density * batch_size * n_features (copy of chunk of X)
# - y_density * batch_size * n_features (copy of chunk of Y)
# - batch_size * batch_size (chunk of distance matrix)
# Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
# xd=x_density and yd=y_density
tmp = (x_density + y_density) * n_features
batch_size = (-tmp + np.sqrt(tmp**2 + 4 * maxmem)) / 2
batch_size = max(int(batch_size), 1)
x_batches = gen_batches(n_samples_X, batch_size)
for i, x_slice in enumerate(x_batches):
X_chunk = X[x_slice].astype(np.float64)
if XX is None:
XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
else:
XX_chunk = XX[x_slice]
y_batches = gen_batches(n_samples_Y, batch_size)
for j, y_slice in enumerate(y_batches):
if X is Y and j < i:
# when X is Y the distance matrix is symmetric so we only need
# to compute half of it.
d = distances[y_slice, x_slice].T
else:
Y_chunk = Y[y_slice].astype(np.float64)
if YY is None:
YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
else:
YY_chunk = YY[:, y_slice]
d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
d += XX_chunk
d += YY_chunk
distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
return distances
def _argmin_min_reduce(dist, start):
# `start` is specified in the signature but not used. This is because the higher
# order `pairwise_distances_chunked` function needs reduction functions that are
# passed as argument to have a two arguments signature.
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def _argmin_reduce(dist, start):
# `start` is specified in the signature but not used. This is because the higher
# order `pairwise_distances_chunked` function needs reduction functions that are
# passed as argument to have a two arguments signature.
return dist.argmin(axis=1)
_VALID_METRICS = [
"euclidean",
"l2",
"l1",
"manhattan",
"cityblock",
"braycurtis",
"canberra",
"chebyshev",
"correlation",
"cosine",
"dice",
"hamming",
"jaccard",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
"wminkowski",
"nan_euclidean",
"haversine",
]
if sp_base_version < parse_version("1.11"): # pragma: no cover
# Deprecated in SciPy 1.9 and removed in SciPy 1.11
_VALID_METRICS += ["kulsinski"]
if sp_base_version < parse_version("1.9"):
# Deprecated in SciPy 1.0 and removed in SciPy 1.9
_VALID_METRICS += ["matching"]
_NAN_METRICS = ["nan_euclidean"]
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix"],
"axis": [Options(Integral, {0, 1})],
"metric": [
StrOptions(set(_VALID_METRICS).union(ArgKmin.valid_metrics())),
callable,
],
"metric_kwargs": [dict, None],
},
prefer_skip_nested_validation=False, # metric is not validated yet
)
def pairwise_distances_argmin_min(
X, Y, *, axis=1, metric="euclidean", metric_kwargs=None
):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Array containing points.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Array containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default='euclidean'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
.. note::
`'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead).
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : ndarray
The array of minimum distances. `distances[i]` is the distance between
the i-th row in X and the argmin[i]-th row in Y.
See Also
--------
pairwise_distances : Distances between every pair of samples of X and Y.
pairwise_distances_argmin : Same as `pairwise_distances_argmin_min` but only
returns the argmins.
"""
X, Y = check_pairwise_arrays(X, Y)
if axis == 0:
X, Y = Y, X
if metric_kwargs is None:
metric_kwargs = {}
if ArgKmin.is_usable_for(X, Y, metric):
# This is an adaptor for one "sqeuclidean" specification.
# For this backend, we can directly use "sqeuclidean".
if metric_kwargs.get("squared", False) and metric == "euclidean":
metric = "sqeuclidean"
metric_kwargs = {}
values, indices = ArgKmin.compute(
X=X,
Y=Y,
k=1,
metric=metric,
metric_kwargs=metric_kwargs,
strategy="auto",
return_distance=True,
)
values = values.flatten()
indices = indices.flatten()
else:
# Joblib-based backend, which is used when user-defined callable
# are passed for metric.
# This won't be used in the future once PairwiseDistancesReductions support:
# - DistanceMetrics which work on supposedly binary data
# - CSR-dense and dense-CSR case if 'euclidean' in metric.
# Turn off check for finiteness because this is costly and because arrays
# have already been validated.
with config_context(assume_finite=True):
indices, values = zip(
*pairwise_distances_chunked(
X, Y, reduce_func=_argmin_min_reduce, metric=metric, **metric_kwargs
)
)
indices = np.concatenate(indices)
values = np.concatenate(values)
return indices, values
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix"],
"axis": [Options(Integral, {0, 1})],
"metric": [
StrOptions(set(_VALID_METRICS).union(ArgKmin.valid_metrics())),
callable,
],
"metric_kwargs": [dict, None],
},
prefer_skip_nested_validation=False, # metric is not validated yet
)
def pairwise_distances_argmin(X, Y, *, axis=1, metric="euclidean", metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Array containing points.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features)
Arrays containing points.
axis : int, default=1
Axis along which the argmin and distances are to be computed.
metric : str or callable, default="euclidean"
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
.. note::
`'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead).
metric_kwargs : dict, default=None
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See Also
--------
pairwise_distances : Distances between every pair of samples of X and Y.
pairwise_distances_argmin_min : Same as `pairwise_distances_argmin` but also
returns the distances.
"""
if metric_kwargs is None:
metric_kwargs = {}
X, Y = check_pairwise_arrays(X, Y)
if axis == 0:
X, Y = Y, X
if metric_kwargs is None:
metric_kwargs = {}
if ArgKmin.is_usable_for(X, Y, metric):
# This is an adaptor for one "sqeuclidean" specification.
# For this backend, we can directly use "sqeuclidean".
if metric_kwargs.get("squared", False) and metric == "euclidean":
metric = "sqeuclidean"
metric_kwargs = {}
indices = ArgKmin.compute(
X=X,
Y=Y,
k=1,
metric=metric,
metric_kwargs=metric_kwargs,
strategy="auto",
return_distance=False,
)
indices = indices.flatten()
else:
# Joblib-based backend, which is used when user-defined callable
# are passed for metric.
# This won't be used in the future once PairwiseDistancesReductions support:
# - DistanceMetrics which work on supposedly binary data
# - CSR-dense and dense-CSR case if 'euclidean' in metric.
# Turn off check for finiteness because this is costly and because arrays
# have already been validated.
with config_context(assume_finite=True):
indices = np.concatenate(
list(
# This returns a np.ndarray generator whose arrays we need
# to flatten into one.
pairwise_distances_chunked(
X, Y, reduce_func=_argmin_reduce, metric=metric, **metric_kwargs
)
)
)
return indices
@validate_params(
{"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix", None]},
prefer_skip_nested_validation=True,
)
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x_{lat} - y_{lat}) / 2)
+ \\cos(x_{lat})\\cos(y_{lat})\\
sin^2((x_{lon} - y_{lon}) / 2)}]
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, 2)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, 2), default=None
An optional second feature array. If `None`, uses `Y=X`.
Returns
-------
distance : ndarray of shape (n_samples_X, n_samples_Y)
The distance matrix.
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from ..metrics import DistanceMetric
return DistanceMetric.get_metric("haversine").pairwise(X, Y)
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"sum_over_features": ["boolean", Hidden(StrOptions({"deprecated"}))],
},
prefer_skip_nested_validation=True,
)
def manhattan_distances(X, Y=None, *, sum_over_features="deprecated"):
"""Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
.. deprecated:: 1.2
``sum_over_features`` was deprecated in version 1.2 and will be removed in
1.4.
Returns
-------
D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \
(n_samples_X, n_samples_Y)
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Notes
-----
When X and/or Y are CSR sparse matrices and they are not already
in canonical format, this function modifies them in-place to
make them canonical.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])
array([[0.]])
>>> manhattan_distances([[3]], [[2]])
array([[1.]])
>>> manhattan_distances([[2]], [[3]])
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])
array([[0., 2.],
[4., 4.]])
"""
# TODO(1.4): remove sum_over_features
if sum_over_features != "deprecated":
warnings.warn(
(
"`sum_over_features` is deprecated in version 1.2 and will be"
" removed in version 1.4."
),
FutureWarning,
)
else:
sum_over_features = True
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError(
"sum_over_features=%r not supported for sparse matrices"
% sum_over_features
)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
X.sum_duplicates() # this also sorts indices in-place
Y.sum_duplicates()
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D)
return D
if sum_over_features:
return distance.cdist(X, Y, "cityblock")
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
},
prefer_skip_nested_validation=True,
)
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Matrix `X`.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Matrix `Y`.
Returns
-------
distance matrix : ndarray of shape (n_samples_X, n_samples_Y)
Returns the cosine distance between samples in X and Y.
See Also
--------
cosine_similarity : Compute cosine similarity between samples in X and Y.
scipy.spatial.distance.cosine : Dense matrices only.
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
# Paired distances
@validate_params(
{"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix"]},
prefer_skip_nested_validation=True,
)
def paired_euclidean_distances(X, Y):
"""Compute the paired euclidean distances between X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input array/matrix X.
Y : {array-like, sparse matrix} of shape (n_samples, n_features)
Input array/matrix Y.
Returns
-------
distances : ndarray of shape (n_samples,)
Output array/matrix containing the calculated paired euclidean
distances.
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
@validate_params(
{"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix"]},
prefer_skip_nested_validation=True,
)
def paired_manhattan_distances(X, Y):
"""Compute the paired L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]),...,
(X[n_samples], Y[n_samples]).
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
An array-like where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples, n_features)
An array-like where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 paired distances between the row vectors of `X`
and the row vectors of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
@validate_params(
{"X": ["array-like", "sparse matrix"], "Y": ["array-like", "sparse matrix"]},
prefer_skip_nested_validation=True,
)
def paired_cosine_distances(X, Y):
"""
Compute the paired cosine distances between X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
Returns the distances between the row vectors of `X`
and the row vectors of `Y`, where `distances[i]` is the
distance between `X[i]` and `Y[i]`.
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
"cosine": paired_cosine_distances,
"euclidean": paired_euclidean_distances,
"l2": paired_euclidean_distances,
"l1": paired_manhattan_distances,
"manhattan": paired_manhattan_distances,
"cityblock": paired_manhattan_distances,
}
@validate_params(
{
"X": ["array-like"],
"Y": ["array-like"],
"metric": [StrOptions(set(PAIRED_DISTANCES)), callable],
},
prefer_skip_nested_validation=True,
)
def paired_distances(X, Y, *, metric="euclidean", **kwds):
"""
Compute the paired distances between X and Y.
Compute the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray of shape (n_samples, n_features)
Array 2 for distance computation.
metric : str or callable, default="euclidean"
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from `X` as input and return a value indicating
the distance between them.
**kwds : dict
Unused parameters.
Returns
-------
distances : ndarray of shape (n_samples,)
Returns the distances between the row vectors of `X`
and the row vectors of `Y`.
See Also
--------
sklearn.metrics.pairwise_distances : Computes the distance between every pair of
samples.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([0., 1.])
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
# Kernels
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"dense_output": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The Gram matrix of the linear kernel, i.e. `X @ Y.T`.
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"degree": [Interval(Real, 1, None, closed="left")],
"gamma": [
Interval(Real, 0, None, closed="left"),
None,
Hidden(np.ndarray),
],
"coef0": [Interval(Real, None, None, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y.
K(X, Y) = (gamma <X, Y> + coef0) ^ degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
degree : float, default=3
Kernel degree.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
The polynomial kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"gamma": [
Interval(Real, 0, None, closed="left"),
None,
Hidden(np.ndarray),
],
"coef0": [Interval(Real, None, None, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""Compute the sigmoid kernel between X and Y.
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
Coefficient of the vector inner product. If None, defaults to 1.0 / n_features.
coef0 : float, default=1
Constant offset added to scaled inner product.
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Sigmoid kernel between two arrays.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"gamma": [
Interval(Real, 0, None, closed="left"),
None,
Hidden(np.ndarray),
],
},
prefer_skip_nested_validation=True,
)
def rbf_kernel(X, Y=None, gamma=None):
"""Compute the rbf (gaussian) kernel between X and Y.
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The RBF kernel.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"gamma": [
Interval(Real, 0, None, closed="neither"),
Hidden(np.ndarray),
None,
],
},
prefer_skip_nested_validation=True,
)
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features. Otherwise it should be strictly positive.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The kernel matrix.
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"dense_output": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
Input data.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), \
default=None
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : ndarray of shape (n_samples_X, n_samples_Y)
Returns the cosine similarity between samples in X and Y.
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
@validate_params(
{"X": ["array-like"], "Y": ["array-like", None]},
prefer_skip_nested_validation=True,
)
def additive_chi2_kernel(X, Y=None):
"""Compute the additive chi-squared kernel between observations in X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
A feature array.
Y : array-like of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The kernel matrix.
See Also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
"""
X, Y = check_pairwise_arrays(X, Y, accept_sparse=False)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
@validate_params(
{
"X": ["array-like"],
"Y": ["array-like", None],
"gamma": [Interval(Real, 0, None, closed="neither"), Hidden(np.ndarray)],
},
prefer_skip_nested_validation=True,
)
def chi2_kernel(X, Y=None, gamma=1.0):
"""Compute the exponential chi-squared kernel between X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
A feature array.
Y : array-like of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=1
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The kernel matrix.
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
"cityblock": manhattan_distances,
"cosine": cosine_distances,
"euclidean": euclidean_distances,
"haversine": haversine_distances,
"l2": euclidean_distances,
"l1": manhattan_distances,
"manhattan": manhattan_distances,
"precomputed": None, # HACK: precomputed is always allowed, never called
"nan_euclidean": nan_euclidean_distances,
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'haversine' metrics.pairwise.haversine_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
'nan_euclidean' metrics.pairwise.nan_euclidean_distances
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
Returns
-------
distance_metrics : dict
Returns valid metrics for pairwise_distances.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
"""Write in-place to a slice of a distance matrix."""
dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel."""
if Y is None:
Y = X
X, Y, dtype = _return_float_dtype(X, Y)
if effective_n_jobs(n_jobs) == 1:
return func(X, Y, **kwds)
# enforce a threading backend to prevent data communication overhead
fd = delayed(_dist_wrapper)
ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order="F")
Parallel(backend="threading", n_jobs=n_jobs)(
fd(func, ret, s, X, Y[s], **kwds)
for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs))
)
if (X is Y or Y is None) and func is euclidean_distances:
# zeroing diagonal for euclidean norm.
# TODO: do it also for other norms.
np.fill_diagonal(ret, 0)
return ret
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}."""
X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype="float")
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype="float")
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
def _check_chunk_size(reduced, chunk_size):
"""Checks chunk is a sequence of expected size or a tuple of same."""
if reduced is None:
return
is_tuple = isinstance(reduced, tuple)
if not is_tuple:
reduced = (reduced,)
if any(isinstance(r, tuple) or not hasattr(r, "__iter__") for r in reduced):
raise TypeError(
"reduce_func returned %r. Expected sequence(s) of length %d."
% (reduced if is_tuple else reduced[0], chunk_size)
)
if any(_num_samples(r) != chunk_size for r in reduced):
actual_size = tuple(_num_samples(r) for r in reduced)
raise ValueError(
"reduce_func returned object of length %s. "
"Expected same length as input: %d."
% (actual_size if is_tuple else actual_size[0], chunk_size)
)
def _precompute_metric_params(X, Y, metric=None, **kwds):
"""Precompute data-derived metric parameters if not provided."""
if metric == "seuclidean" and "V" not in kwds:
if X is Y:
V = np.var(X, axis=0, ddof=1)
else:
raise ValueError(
"The 'V' parameter is required for the seuclidean metric "
"when Y is passed."
)
return {"V": V}
if metric == "mahalanobis" and "VI" not in kwds:
if X is Y:
VI = np.linalg.inv(np.cov(X.T)).T
else:
raise ValueError(
"The 'VI' parameter is required for the mahalanobis metric "
"when Y is passed."
)
return {"VI": VI}
return {}
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"reduce_func": [callable, None],
"metric": [StrOptions({"precomputed"}.union(_VALID_METRICS)), callable],
"n_jobs": [Integral, None],
"working_memory": [Interval(Real, 0, None, closed="left"), None],
},
prefer_skip_nested_validation=False, # metric is not validated yet
)
def pairwise_distances_chunked(
X,
Y=None,
*,
reduce_func=None,
metric="euclidean",
n_jobs=None,
working_memory=None,
**kwds,
):
"""Generate a distance matrix chunk by chunk with optional reduction.
In cases where not all of a pairwise distance matrix needs to be
stored at once, this is used to calculate pairwise distances in
``working_memory``-sized chunks. If ``reduce_func`` is given, it is
run on each chunk and its return values are concatenated into lists,
arrays or sparse matrices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape the array should be (n_samples_X, n_samples_X) if
metric='precomputed' and (n_samples_X, n_features) otherwise.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
reduce_func : callable, default=None
The function which is applied on each chunk of the distance matrix,
reducing it to needed values. ``reduce_func(D_chunk, start)``
is called repeatedly, where ``D_chunk`` is a contiguous vertical
slice of the pairwise distance matrix, starting at row ``start``.
It should return one of: None; an array, a list, or a sparse matrix
of length ``D_chunk.shape[0]``; or a tuple of such objects.
Returning None is useful for in-place operations, rather than
reductions.
If None, pairwise_distances_chunked returns a generator of vertical
chunks of the distance matrix.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter,
or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on
each pair of instances (rows) and the resulting value recorded.
The callable should take two arrays from X as input and return a
value indicating the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by
breaking down the pairwise matrix into n_jobs even slices and
computing them in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
working_memory : float, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Yields
------
D_chunk : {ndarray, sparse matrix}
A contiguous slice of distance matrix, optionally processed by
``reduce_func``.
Examples
--------
Without reduce_func:
>>> import numpy as np
>>> from sklearn.metrics import pairwise_distances_chunked
>>> X = np.random.RandomState(0).rand(5, 3)
>>> D_chunk = next(pairwise_distances_chunked(X))
>>> D_chunk
array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],
[0.29..., 0. ..., 0.57..., 0.41..., 0.76...],
[0.41..., 0.57..., 0. ..., 0.44..., 0.90...],
[0.19..., 0.41..., 0.44..., 0. ..., 0.51...],
[0.57..., 0.76..., 0.90..., 0.51..., 0. ...]])
Retrieve all neighbors and average distance within radius r:
>>> r = .2
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r) for d in D_chunk]
... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
... return neigh, avg_dist
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
>>> neigh, avg_dist = next(gen)
>>> neigh
[array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
>>> avg_dist
array([0.039..., 0. , 0. , 0.039..., 0. ])
Where r is defined per sample, we need to make use of ``start``:
>>> r = [.2, .4, .4, .3, .1]
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r[i])
... for i, d in enumerate(D_chunk, start)]
... return neigh
>>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
>>> neigh
[array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
Force row-by-row generation by reducing ``working_memory``:
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
... working_memory=0)
>>> next(gen)
[array([0, 3])]
>>> next(gen)
[array([0, 1])]
"""
n_samples_X = _num_samples(X)
if metric == "precomputed":
slices = (slice(0, n_samples_X),)
else:
if Y is None:
Y = X
# We get as many rows as possible within our working_memory budget to
# store len(Y) distances in each row of output.
#
# Note:
# - this will get at least 1 row, even if 1 row of distances will
# exceed working_memory.
# - this does not account for any temporary memory usage while
# calculating distances (e.g. difference of vectors in manhattan
# distance).
chunk_n_rows = get_chunk_n_rows(
row_bytes=8 * _num_samples(Y),
max_n_rows=n_samples_X,
working_memory=working_memory,
)
slices = gen_batches(n_samples_X, chunk_n_rows)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
for sl in slices:
if sl.start == 0 and sl.stop == n_samples_X:
X_chunk = X # enable optimised paths for X is Y
else:
X_chunk = X[sl]
D_chunk = pairwise_distances(X_chunk, Y, metric=metric, n_jobs=n_jobs, **kwds)
if (X is Y or Y is None) and PAIRWISE_DISTANCE_FUNCTIONS.get(
metric, None
) is euclidean_distances:
# zeroing diagonal, taking care of aliases of "euclidean",
# i.e. "l2"
D_chunk.flat[sl.start :: _num_samples(X) + 1] = 0
if reduce_func is not None:
chunk_size = D_chunk.shape[0]
D_chunk = reduce_func(D_chunk, sl.start)
_check_chunk_size(D_chunk, chunk_size)
yield D_chunk
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"n_jobs": [Integral, None],
"force_all_finite": ["boolean", StrOptions({"allow-nan"})],
},
prefer_skip_nested_validation=True,
)
def pairwise_distances(
X, Y=None, metric="euclidean", *, n_jobs=None, force_all_finite=True, **kwds
):
"""Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
['nan_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
.. note::
`'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.
.. note::
`'matching'` has been removed in SciPy 1.9 (use `'hamming'` instead).
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see :func:`sklearn.metrics.pairwise.distance_metrics`
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See Also
--------
pairwise_distances_chunked : Performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
sklearn.metrics.pairwise.paired_distances : Computes the distances between
corresponding elements of two arrays.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(
X, Y, precomputed=True, force_all_finite=force_all_finite
)
whom = (
"`pairwise_distances`. Precomputed distance "
" need to have non-negative values."
)
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(
_pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds
)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(
X, Y, dtype=dtype, force_all_finite=force_all_finite
)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric, **kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
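# Illustrative sketch (hypothetical helper, not part of the original module):
# a built-in string metric and an equivalent Python callable produce the same
# matrix, although the callable path loops in Python and is much slower.
def _pairwise_distances_usage_sketch():
    X = np.array([[0.0, 1.0], [1.0, 1.0], [3.0, 4.0]])
    D_string = pairwise_distances(X, metric="manhattan")
    D_callable = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D_string, D_callable)
    return D_string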
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
"dice",
"jaccard",
"rogerstanimoto",
"russellrao",
"sokalmichener",
"sokalsneath",
"yule",
]
if sp_base_version < parse_version("1.11"):
# Deprecated in SciPy 1.9 and removed in SciPy 1.11
PAIRWISE_BOOLEAN_FUNCTIONS += ["kulsinski"]
if sp_base_version < parse_version("1.9"):
# Deprecated in SciPy 1.0 and removed in SciPy 1.9
PAIRWISE_BOOLEAN_FUNCTIONS += ["matching"]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
"additive_chi2": additive_chi2_kernel,
"chi2": chi2_kernel,
"linear": linear_kernel,
"polynomial": polynomial_kernel,
"poly": polynomial_kernel,
"rbf": rbf_kernel,
"laplacian": laplacian_kernel,
"sigmoid": sigmoid_kernel,
"cosine": cosine_similarity,
}
def kernel_metrics():
"""Valid metrics for pairwise_kernels.
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
Returns
-------
kernel_metrics : dict
Returns valid metrics for pairwise_kernels.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"Y": ["array-like", "sparse matrix", None],
"metric": [
StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS) | {"precomputed"}),
callable,
],
"filter_params": ["boolean"],
"n_jobs": [Integral, None],
},
prefer_skip_nested_validation=True,
)
def pairwise_kernels(
X, Y=None, metric="linear", *, filter_params=False, n_jobs=None, **kwds
):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
A second feature array only if X has shape (n_samples_X, n_features).
metric : str or callable, default="linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in ``pairwise.PAIRWISE_KERNEL_FUNCTIONS``.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : bool, default=False
Whether to filter invalid parameters or not.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_X) or (n_samples_X, n_samples_Y)
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds) |
scikit-learn__scikit-learn | naive_bayes.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/naive_bayes.rst | [
"scikit-learn__scikit-learn/sklearn/naive_bayes.py"
] | Naive Bayes
Naive Bayes methods are a set of supervised learning algorithms based on
applying Bayes' theorem with the "naive" assumption of conditional
independence between every pair of features given the value of the class
variable. Bayes' theorem states the following relationship, given class
variable $y$ and dependent feature vector $x_1$ through $x_n$:
$$P(y \mid x_1, \dots, x_n) = \frac{P(y) P(x_1, \dots, x_n \mid y)}
{P(x_1, \dots, x_n)}$$
Using the naive conditional independence assumption that
$$P(x_i \mid y, x_1, \dots, x_{i-1}, x_{i+1}, \dots, x_n) = P(x_i \mid y),$$
for all i, this relationship is simplified to
$$P(y \mid x_1, \dots, x_n) = \frac{P(y) \prod_{i=1}^{n} P(x_i \mid y)}
{P(x_1, \dots, x_n)}$$
Since $P(x_1, \dots, x_n)$ is constant given the input, we can use the
following classification rule:
$$P(y \mid x_1, \dots, x_n) \propto P(y) \prod_{i=1}^{n} P(x_i \mid y)$$
⇓
$$\hat{y} = \arg\max_y P(y) \prod_{i=1}^{n} P(x_i \mid y),$$
and we can use Maximum A Posteriori (MAP) estimation to estimate $P(y)$
and $P(x_i \mid y)$; the former is then the relative frequency of class $y$ in
the training set.
The different naive Bayes classifiers differ mainly by the assumptions
they make regarding the distribution of $P(x_i \mid y)$.
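As a minimal numerical sketch of the MAP rule above (the priors and
conditional probabilities below are invented for illustration only), the
predicted class is simply the one maximising the prior times the product of
per-feature likelihoods:

import numpy as np

# Hypothetical values: 2 classes, 3 binary features, observation x = (1, 0, 1).
prior = np.array([0.6, 0.4])                   # P(y)
p_feature = np.array([[0.8, 0.3, 0.5],         # P(x_i = 1 | y) for each class
                      [0.2, 0.6, 0.9]])
x = np.array([1, 0, 1])
likelihood = np.where(x == 1, p_feature, 1 - p_feature).prod(axis=1)
y_hat = np.argmax(prior * likelihood)          # MAP decision, here class 0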
In spite of their apparently over-simplified assumptions, naive Bayes
classifiers have worked quite well in many real-world situations,
famously document classification and spam filtering. They require a
small amount of training data to estimate the necessary parameters. (For
theoretical reasons why naive Bayes works well, and on which types of
data it does, see the references below.)
Naive Bayes learners and classifiers can be extremely fast compared to
more sophisticated methods. The decoupling of the class conditional
feature distributions means that each distribution can be independently
estimated as a one dimensional distribution. This in turn helps to
alleviate problems stemming from the curse of dimensionality.
On the flip side, although naive Bayes is known as a decent classifier,
it is known to be a bad estimator, so the probability outputs from
predict_proba are not to be taken too seriously.
Gaussian Naive Bayes
GaussianNB implements the Gaussian Naive Bayes algorithm for
classification. The likelihood of the features is assumed to be
Gaussian:
$$P(x_i \mid y) = \frac{1}{\sqrt{2\pi\sigma^2_y}} \exp\left(-\frac{(x_i - \mu_y)^2}{2\sigma^2_y}\right)$$
The parameters $\sigma_y$ and $\mu_y$ are estimated using maximum likelihood.
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.naive_bayes import GaussianNB
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
>>> gnb = GaussianNB()
>>> y_pred = gnb.fit(X_train, y_train).predict(X_test)
>>> print("Number of mislabeled points out of a total %d points : %d"
...       % (X_test.shape[0], (y_test != y_pred).sum()))
Number of mislabeled points out of a total 75 points : 4
Multinomial Naive Bayes
MultinomialNB implements the naive Bayes algorithm for multinomially
distributed data, and is one of the two classic naive Bayes variants
used in text classification (where the data are typically represented as
word vector counts, although tf-idf vectors are also known to work well
in practice). The distribution is parametrized by vectors
θ_(y) = (θ_(y1),…,θ_(yn)) for each class y, where n is the number of
features (in text classification, the size of the vocabulary) and θ_(yi)
is the probability P(x_(i)∣y) of feature i appearing in a sample
belonging to class y.
The parameters $\theta_y$ are estimated by a smoothed version of maximum
likelihood, i.e. relative frequency counting:
$$\hat{\theta}_{yi} = \frac{ N_{yi} + \alpha}{N_y + \alpha n}$$
where $N_{yi} = \sum_{x \in T} x_i$ is the number of times feature $i$ appears
in a sample of class $y$ in the training set $T$, and
$N_{y} = \sum_{i=1}^{n} N_{yi}$ is the total count of all features for
class $y$.
The smoothing prior $\alpha \ge 0$ accounts for features not present in the
learning samples and prevents zero probabilities in further
computations. Setting $\alpha = 1$ is called Laplace smoothing, while $\alpha < 1$ is
called Lidstone smoothing.
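As a small sketch of this estimate (the counts below are invented), fitting
MultinomialNB with alpha=1 reproduces the smoothed relative frequencies
computed by hand:

import numpy as np
from sklearn.naive_bayes import MultinomialNB

# Invented term-count matrix: 4 documents, 3 vocabulary terms, 2 classes.
X = np.array([[2, 1, 0],
              [3, 0, 0],
              [0, 1, 3],
              [0, 2, 2]])
y = np.array([0, 0, 1, 1])
clf = MultinomialNB(alpha=1.0).fit(X, y)

N_y0 = X[y == 0].sum(axis=0)                   # per-feature counts for class 0
theta_y0 = (N_y0 + 1.0) / (N_y0.sum() + 1.0 * X.shape[1])
# np.exp(clf.feature_log_prob_[0]) equals theta_y0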
Complement Naive Bayes
ComplementNB implements the complement naive Bayes (CNB) algorithm. CNB
is an adaptation of the standard multinomial naive Bayes (MNB) algorithm
that is particularly suited for imbalanced data sets. Specifically, CNB
uses statistics from the complement of each class to compute the model's
weights. The inventors of CNB show empirically that the parameter
estimates for CNB are more stable than those for MNB. Further, CNB
regularly outperforms MNB (often by a considerable margin) on text
classification tasks. The procedure for calculating the weights is as
follows:
$$\hat{\theta}_{ci} = \frac{\alpha_i + \sum_{j:y_j \neq c} d_{ij}}
{\alpha + \sum_{j:y_j \neq c} \sum_{k} d_{kj}}$$
$$w_{ci} = \log \hat{\theta}_{ci}$$
$$w_{ci} = \frac{w_{ci}}{\sum_{j} |w_{cj}|}$$
where the summations are over all documents $j$ not in class $c$, $d_{ij}$ is
either the count or tf-idf value of term $i$ in document $j$, $\alpha_i$ is a
smoothing hyperparameter like that found in MNB, and $\alpha = \sum_{i} \alpha_i$. The
second normalization addresses the tendency for longer documents to
dominate parameter estimates in MNB. The classification rule is:
$$\hat{c} = \arg\min_c \sum_{i} t_i w_{ci}$$
i.e., a document is assigned to the class that is the poorest complement
match.
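A minimal usage sketch on invented, imbalanced count data (the numbers carry
no particular meaning); it only illustrates that ComplementNB is used as a
drop-in replacement for MultinomialNB:

import numpy as np
from sklearn.naive_bayes import ComplementNB, MultinomialNB

rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 6))               # invented count features
y = np.array([0] * 17 + [1] * 3)               # heavily imbalanced classes
cnb = ComplementNB().fit(X, y)
mnb = MultinomialNB().fit(X, y)
# cnb derives its weights from complement counts, as in the formulas above,
# which tends to be more robust than mnb when one class dominates.
print(cnb.predict(X[:2]), mnb.predict(X[:2]))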
Bernoulli Naive Bayes
BernoulliNB implements the naive Bayes training and classification
algorithms for data that is distributed according to multivariate
Bernoulli distributions; i.e., there may be multiple features but each
one is assumed to be a binary-valued (Bernoulli, boolean) variable.
Therefore, this class requires samples to be represented as
binary-valued feature vectors; if handed any other kind of data, a
BernoulliNB instance may binarize its input (depending on the binarize
parameter).
The decision rule for Bernoulli naive Bayes is based on
$$P(x_i \mid y) = P(x_i = 1 \mid y) x_i + (1 - P(x_i = 1 \mid y)) (1 - x_i)$$
which differs from multinomial NB's rule in that it explicitly penalizes
the non-occurrence of a feature i that is an indicator for class y,
where the multinomial variant would simply ignore a non-occurring
feature.
In the case of text classification, word occurrence vectors (rather than
word count vectors) may be used to train and use this classifier.
BernoulliNB might perform better on some datasets, especially those with
shorter documents. It is advisable to evaluate both models, if time
permits.
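A small sketch (with invented counts) showing how the binarize parameter turns
count features into the 0/1 occurrence indicators the Bernoulli model expects:

import numpy as np
from sklearn.naive_bayes import BernoulliNB

X = np.array([[3, 0, 1],                       # invented word counts
              [0, 2, 0],
              [1, 0, 4],
              [0, 1, 0]])
y = np.array([1, 0, 1, 0])
clf = BernoulliNB(binarize=0.5).fit(X, y)      # counts above 0.5 become 1
print(clf.predict(np.array([[2, 0, 1]])))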
References:
- C.D. Manning, P. Raghavan and H. Schütze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
- A. McCallum and K. Nigam (1998). A comparison of event models for
Naive Bayes text classification. Proc. AAAI/ICML-98 Workshop on
Learning for Text Categorization, pp. 41-48.
- V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam
filtering with Naive Bayes -- Which Naive Bayes? 3rd Conf. on Email
and Anti-Spam (CEAS).
Categorical Naive Bayes
CategoricalNB implements the categorical naive Bayes algorithm for
categorically distributed data. It assumes that each feature, which is
described by the index i, has its own categorical distribution.
For each feature $i$ in the training set $X$, CategoricalNB estimates a
categorical distribution conditioned on the class $y$. The index set of
the samples is defined as $J = \{1, \dots, m\}$, with $m$ as the number
of samples.
The probability of category t in feature i given class c is estimated
as:
$$P(x_i = t \mid y = c \: ;\, \alpha) = \frac{ N_{tic} + \alpha}{N_{c} +
\alpha n_i},$$
where $N_{tic} = |\{j \in J \mid x_{ij} = t, y_j = c\}|$ is the number of times category
$t$ appears in the samples $x_i$, which belong to class $c$,
$N_{c} = |\{j \in J \mid y_j = c\}|$ is the number of samples with class $c$, $\alpha$ is a
smoothing parameter and $n_i$ is the number of available categories of
feature $i$.
CategoricalNB assumes that the sample matrix $X$ is encoded (for instance
with the help of sklearn.preprocessing.OrdinalEncoder) such that all
categories for each feature $i$ are represented with numbers
$0, \dots, n_i - 1$ where $n_i$ is the number of available categories of
feature $i$.
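A minimal sketch of that encoding step (the feature values are invented):
OrdinalEncoder maps each category to an integer in 0, ..., n_i - 1, after
which CategoricalNB can be fitted directly:

import numpy as np
from sklearn.naive_bayes import CategoricalNB
from sklearn.preprocessing import OrdinalEncoder

X_raw = np.array([["red", "small"],            # invented categorical features
                  ["blue", "large"],
                  ["red", "large"],
                  ["green", "small"]])
y = np.array([0, 1, 1, 0])
enc = OrdinalEncoder()
X = enc.fit_transform(X_raw).astype(int)       # categories become 0, ..., n_i - 1
clf = CategoricalNB(alpha=1.0).fit(X, y)
print(clf.predict(enc.transform([["red", "large"]]).astype(int)))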
Out-of-core naive Bayes model fitting
Naive Bayes models can be used to tackle large scale classification
problems for which the full training set might not fit in memory. To
handle this case, MultinomialNB, BernoulliNB, and GaussianNB expose a
partial_fit method that can be used incrementally as done with other
classifiers, as demonstrated in the example
sphx_glr_auto_examples_applications_plot_out_of_core_classification.py.
All naive Bayes classifiers support sample weighting.
Contrary to the fit method, the first call to partial_fit needs to be
passed the list of all the expected class labels.
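A minimal sketch of the incremental pattern (here the chunks are simply slices
of an in-memory array; in a real out-of-core setting each chunk would be read
from disk or a stream):

import numpy as np
from sklearn.naive_bayes import MultinomialNB

rng = np.random.RandomState(0)
X = rng.randint(5, size=(300, 10))             # stand-in for streamed count data
y = rng.randint(2, size=300)
classes = np.unique(y)                         # must be complete at the first call
clf = MultinomialNB()
for start in range(0, X.shape[0], 100):
    sl = slice(start, start + 100)
    clf.partial_fit(X[sl], y[sl], classes=classes)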
For an overview of available strategies in scikit-learn, see also the
out-of-core learning documentation.
Note
The partial_fit method call of naive Bayes models introduces some
computational overhead. It is recommended to use data chunk sizes that
are as large as possible, that is as the available RAM allows.
| """
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin, _fit_context
from .preprocessing import LabelBinarizer, binarize, label_binarize
from .utils._param_validation import Hidden, Interval, StrOptions
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import _check_sample_weight, check_is_fitted, check_non_negative
__all__ = [
"BernoulliNB",
"GaussianNB",
"MultinomialNB",
"ComplementNB",
"CategoricalNB",
]
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_samples, n_classes).
Public methods predict, predict_proba, predict_log_proba, and
predict_joint_log_proba pass the input through _check_X before handing it
over to _joint_log_likelihood. The term "joint log likelihood" is used
interchangeably with "joint log probability".
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks.
Only used in predict* methods.
"""
def predict_joint_log_proba(self, X):
"""Return joint log probability estimates for the test vector X.
For each row x of X and class y, the joint log probability is given by
``log P(x, y) = log P(y) + log P(x|y),``
where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is
the class-conditional probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Returns the joint log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
return self._joint_log_likelihood(X)
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1,..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB).
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier.
epsilon_ : float
absolute additive value to variances.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class.
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
_parameter_constraints: dict = {
"priors": ["array-like", None],
"var_smoothing": [Interval(Real, 0, None, closed="left")],
}
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns the instance itself.
"""
y = self._validate_data(y=y)
return self._partial_fit(
X, y, np.unique(y), _refit=True, sample_weight=sample_weight
)
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, reset=False)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
if np.isclose(n_new, 0.0):
return mu, var
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2
total_var = total_ssd / n_total
return total_mu, total_var
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns the instance itself.
"""
return self._partial_fit(
X, y, classes, _refit=False, sample_weight=sample_weight
)
def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
if _refit:
self.classes_ = None
first_call = _check_partial_fit_first_call(self, classes)
X, y = self._validate_data(X, y, reset=first_call)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if first_call:
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.var_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided prior matches the number of classes
if len(priors) != n_classes:
raise ValueError("Number of priors must match number of classes.")
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError("The sum of the priors should be 1.")
# Check that the priors are non-negative
if (priors < 0).any():
raise ValueError("Priors must be non-negative.")
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.isin(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError(
"The target label(s) %s in y do not exist in the initial classes %s"
% (unique_y[~unique_y_in_classes], classes)
)
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i
)
self.theta_[i, :] = new_theta
self.var_[i, :] = new_sigma
self.class_count_[i] += N_i
self.var_[:, :] += self.epsilon_
# Update only if no priors are provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = -0.5 * np.sum(np.log(2.0 * np.pi * self.var_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
_update_feature_log_prob(alpha)
_count(X, Y)
"""
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left"), "array-like"],
"fit_prior": ["boolean"],
"class_prior": ["array-like", None],
"force_alpha": ["boolean", Hidden(StrOptions({"warn"}))],
}
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None, force_alpha="warn"):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.force_alpha = force_alpha
@abstractmethod
def _count(self, X, Y):
"""Update counts that are used to calculate probabilities.
The counts make up a sufficient statistic extracted from the data.
Accordingly, this method is called each time `fit` or `partial_fit`
update the model. `class_count_` and `feature_count_` must be updated
here along with any model specific counts.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
Y : ndarray of shape (n_samples, n_classes)
Binarized class labels.
"""
@abstractmethod
def _update_feature_log_prob(self, alpha):
"""Update feature log probabilities based on counts.
This method is called each time `fit` or `partial_fit` update the
model.
Parameters
----------
alpha : float
smoothing parameter. See :meth:`_check_alpha`.
"""
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, accept_sparse="csr", reset=False)
def _check_X_y(self, X, y, reset=True):
"""Validate X and y in fit methods."""
return self._validate_data(X, y, accept_sparse="csr", reset=reset)
def _update_class_log_prior(self, class_prior=None):
"""Update class log priors.
The class log priors are based on `class_prior`, class count or the
number of classes. This method is called each time `fit` or
`partial_fit` update the model.
"""
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
alpha = (
np.asarray(self.alpha) if not isinstance(self.alpha, Real) else self.alpha
)
alpha_min = np.min(alpha)
if isinstance(alpha, np.ndarray):
if not alpha.shape[0] == self.n_features_in_:
raise ValueError(
"When alpha is an array, it should contains `n_features`. "
f"Got {alpha.shape[0]} elements instead of {self.n_features_in_}."
)
# check that all alpha are positive
if alpha_min < 0:
raise ValueError("All values in alpha must be greater than 0.")
alpha_lower_bound = 1e-10
# TODO(1.4): Replace w/ deprecation of self.force_alpha
# See gh #22269
_force_alpha = self.force_alpha
if _force_alpha == "warn" and alpha_min < alpha_lower_bound:
_force_alpha = False
warnings.warn(
(
"The default value for `force_alpha` will change to `True` in 1.4."
" To suppress this warning, manually set the value of"
" `force_alpha`."
),
FutureWarning,
)
if alpha_min < alpha_lower_bound and not _force_alpha:
warnings.warn(
"alpha too small will result in numeric errors, setting alpha ="
f" {alpha_lower_bound:.1e}. Use `force_alpha=True` to keep alpha"
" unchanged."
)
return np.maximum(alpha, alpha_lower_bound)
return alpha
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
first_call = not hasattr(self, "classes_")
X, y = self._check_X_y(X, y, reset=first_call)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_classes = len(classes)
self._init_counters(n_classes, n_features)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_classes = Y.shape[1]
self._init_counters(n_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_classes, n_features), dtype=np.float64)
def _more_tags(self):
return {"poor_score": True}
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models.
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : Complement Naive Bayes classifier.
GaussianNB : Gaussian Naive Bayes.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB(force_alpha=True)
>>> clf.fit(X, y)
MultinomialNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
def __init__(
self, *, alpha=1.0, force_alpha="warn", fit_prior=True, class_prior=None
):
super().__init__(
alpha=alpha,
fit_prior=fit_prior,
class_prior=class_prior,
force_alpha=force_alpha,
)
def _more_tags(self):
return {"requires_positive_X": True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = np.log(smoothed_fc) - np.log(
smoothed_cc.reshape(-1, 1)
)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
CategoricalNB : Naive Bayes classifier for categorical features.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB(force_alpha=True)
>>> clf.fit(X, y)
ComplementNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"norm": ["boolean"],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
fit_prior=True,
class_prior=None,
norm=False,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.norm = norm
def _more_tags(self):
return {"requires_positive_X": True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float or array-like of shape (n_features,), default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
CategoricalNB : Naive Bayes classifier for categorical features.
ComplementNB : The Complement Naive Bayes classifier
described in Rennie et al. (2003).
GaussianNB : Gaussian Naive Bayes (GaussianNB).
MultinomialNB : Naive Bayes classifier for multinomial models.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB(force_alpha=True)
>>> clf.fit(X, Y)
BernoulliNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"binarize": [None, Interval(Real, 0, None, closed="left")],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
binarize=0.0,
fit_prior=True,
class_prior=None,
):
super().__init__(
alpha=alpha,
fit_prior=fit_prior,
class_prior=class_prior,
force_alpha=force_alpha,
)
self.binarize = binarize
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y, reset=True):
X, y = super()._check_X_y(X, y, reset=reset)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = np.log(smoothed_fc) - np.log(
smoothed_cc.reshape(-1, 1)
)
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_features = self.feature_log_prob_.shape[1]
n_features_X = X.shape[1]
if n_features_X != n_features:
raise ValueError(
"Expected input with %d features, got %d instead"
% (n_features, n_features_X)
)
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features.
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(set alpha=0 and force_alpha=True, for no smoothing).
force_alpha : bool, default=False
If False and alpha is less than 1e-10, it will set alpha to
1e-10. If True, alpha will remain unchanged. This may cause
numerical errors if alpha is too close to 0.
.. versionadded:: 1.2
.. deprecated:: 1.2
The default value of `force_alpha` will change to `True` in v1.4.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified, the priors are not
adjusted according to the data.
min_categories : int or array-like of shape (n_features,), default=None
Minimum number of categories per feature.
- integer: Sets the minimum number of categories per feature to
`n_categories` for each features.
- array-like: shape (n_features,) where `n_categories[i]` holds the
minimum number of categories for the ith column of the input.
- None (default): Determines the number of categories automatically
from the training data.
.. versionadded:: 0.24
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_categories_ : ndarray of shape (n_features,), dtype=np.int64
Number of categories for each feature. This value is
inferred from the data or set by the minimum number of categories.
.. versionadded:: 0.24
See Also
--------
BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models.
ComplementNB : Complement Naive Bayes classifier.
GaussianNB : Gaussian Naive Bayes.
MultinomialNB : Naive Bayes classifier for multinomial models.
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB(force_alpha=True)
>>> clf.fit(X, y)
CategoricalNB(force_alpha=True)
>>> print(clf.predict(X[2:3]))
[3]
"""
_parameter_constraints: dict = {
**_BaseDiscreteNB._parameter_constraints,
"min_categories": [
None,
"array-like",
Interval(Integral, 1, None, closed="left"),
],
"alpha": [Interval(Real, 0, None, closed="left")],
}
def __init__(
self,
*,
alpha=1.0,
force_alpha="warn",
fit_prior=True,
class_prior=None,
min_categories=None,
):
super().__init__(
alpha=alpha,
force_alpha=force_alpha,
fit_prior=fit_prior,
class_prior=class_prior,
)
self.min_categories = min_categories
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0,..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0,..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns the instance itself.
"""
return super().partial_fit(X, y, classes, sample_weight=sample_weight)
def _more_tags(self):
return {"requires_positive_X": True}
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = self._validate_data(
X, dtype="int", accept_sparse=False, force_all_finite=True, reset=False
)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y, reset=True):
X, y = self._validate_data(
X, y, dtype="int", accept_sparse=False, force_all_finite=True, reset=reset
)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_classes, 0)) for _ in range(n_features)]
@staticmethod
def _validate_n_categories(X, min_categories):
# rely on max for n_categories categories are encoded between 0...n-1
n_categories_X = X.max(axis=0) + 1
min_categories_ = np.array(min_categories)
if min_categories is not None:
if not np.issubdtype(min_categories_.dtype, np.signedinteger):
raise ValueError(
"'min_categories' should have integral type. Got "
f"{min_categories_.dtype} instead."
)
n_categories_ = np.maximum(n_categories_X, min_categories_, dtype=np.int64)
if n_categories_.shape != n_categories_X.shape:
raise ValueError(
f"'min_categories' should have shape ({X.shape[1]},"
") when an array-like is provided. Got"
f" {min_categories_.shape} instead."
)
return n_categories_
else:
return n_categories_X
def _count(self, X, Y):
def _update_cat_count_dims(cat_count, highest_feature):
diff = highest_feature + 1 - cat_count.shape[1]
if diff > 0:
# we append a column full of zeros for each new category
return np.pad(cat_count, [(0, 0), (0, diff)], "constant")
return cat_count
def _update_cat_count(X_feature, Y, cat_count, n_classes):
for j in range(n_classes):
mask = Y[:, j].astype(bool)
if Y.dtype.type == np.int64:
weights = None
else:
weights = Y[mask, j]
counts = np.bincount(X_feature[mask], weights=weights)
indices = np.nonzero(counts)[0]
cat_count[j, indices] += counts[indices]
self.class_count_ += Y.sum(axis=0)
self.n_categories_ = self._validate_n_categories(X, self.min_categories)
for i in range(self.n_features_in_):
X_feature = X[:, i]
self.category_count_[i] = _update_cat_count_dims(
self.category_count_[i], self.n_categories_[i] - 1
)
_update_cat_count(
X_feature, Y, self.category_count_[i], self.class_count_.shape[0]
)
def _update_feature_log_prob(self, alpha):
feature_log_prob = []
for i in range(self.n_features_in_):
smoothed_cat_count = self.category_count_[i] + alpha
smoothed_class_count = smoothed_cat_count.sum(axis=1)
feature_log_prob.append(
np.log(smoothed_cat_count) - np.log(smoothed_class_count.reshape(-1, 1))
)
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
self._check_n_features(X, reset=False)
jll = np.zeros((X.shape[0], self.class_count_.shape[0]))
for i in range(self.n_features_in_):
indices = X[:, i]
jll += self.feature_log_prob_[i][:, indices].T
total_ll = jll + self.class_log_prior_
return total_ll |
|
scikit-learn__scikit-learn | random_projection.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/modules/random_projection.rst | [
"scikit-learn__scikit-learn/sklearn/random_projection.py"
] | Random Projection
The sklearn.random_projection module implements a simple and
computationally efficient way to reduce the dimensionality of the data
by trading a controlled amount of accuracy (as additional variance) for
faster processing times and smaller model sizes. This module implements
two types of unstructured random matrix:
Gaussian random matrix <gaussian_random_matrix> and
sparse random matrix <sparse_random_matrix>.
The dimensions and distribution of random projection matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset. Thus random projection is a suitable
approximation technique for distance-based methods.
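As an informal illustration of this property, the following sketch (an
addition to this guide, using the GaussianRandomProjection transformer
described below; the sizes and seeds are arbitrary) compares pairwise
Euclidean distances before and after projection:
>>> import numpy as np
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> from sklearn.random_projection import GaussianRandomProjection
>>> rng = np.random.RandomState(0)
>>> X = rng.rand(50, 10000)
>>> pairs = np.triu_indices(50, k=1)
>>> d_before = euclidean_distances(X)[pairs]
>>> transformer = GaussianRandomProjection(n_components=1000, random_state=0)
>>> d_after = euclidean_distances(transformer.fit_transform(X))[pairs]
>>> # distances are preserved up to a small relative distortion
>>> bool(np.all((d_after / d_before > 0.8) & (d_after / d_before < 1.2)))
True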
The Johnson-Lindenstrauss lemma
The main theoretical result behind the efficiency of random projection
is the Johnson-Lindenstrauss lemma (quoting Wikipedia):
In mathematics, the Johnson-Lindenstrauss lemma is a result concerning
low-distortion embeddings of points from high-dimensional into
low-dimensional Euclidean space. The lemma states that a small set of
points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points
are nearly preserved. The map used for the embedding is at least
Lipschitz, and can even be taken to be an orthogonal projection.
Knowing only the number of samples, the johnson_lindenstrauss_min_dim
function conservatively estimates the minimal size of the random
subspace needed to guarantee a bounded distortion introduced by the
random projection:
>>> from sklearn.random_projection import johnson_lindenstrauss_min_dim
>>> johnson_lindenstrauss_min_dim(n_samples=1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(n_samples=1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim(n_samples=[1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
Gaussian random projection
The GaussianRandomProjection reduces the dimensionality by projecting
the original input space on a randomly generated matrix where components
are drawn from the following distribution
$N(0, \frac{1}{n_{components}})$.
Here is a small excerpt which illustrates how to use the Gaussian random
projection transformer:
>>> import numpy as np
>>> from sklearn import random_projection
>>> X = np.random.rand(100, 10000)
>>> transformer = random_projection.GaussianRandomProjection()
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(100, 3947)
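As a quick, informal check on the excerpt above, the fitted matrix has
the expected number of rows and its entries have a standard deviation
close to $1 / \sqrt{n_{\text{components}}}$:
>>> int(transformer.n_components_)
3947
>>> float(transformer.components_.std())  # close to 1 / sqrt(3947)
0.0159...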
Sparse random projection
The SparseRandomProjection reduces the dimensionality by projecting the
original input space using a sparse random matrix.
Sparse random matrices are an alternative to the dense Gaussian random
projection matrix; they guarantee similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we define s = 1 / density, the elements of the random matrix are
drawn from
$$\begin{aligned}
\left\{
\begin{array}{c c l}
-\sqrt{\frac{s}{n_{\text{components}}}} & & 1 / 2s\\
0 &\text{with probability} & 1 - 1 / s \\
+\sqrt{\frac{s}{n_{\text{components}}}} & & 1 / 2s\\
\end{array}
\right.
\end{aligned}$$
where $n_{\text{components}}$ is the size of the projected subspace. By default
the density of non-zero elements is set to the minimum density as
recommended by Ping Li et al.: $1 / \sqrt{n_{\text{features}}}$.
Here is a small excerpt which illustrates how to use the sparse random
projection transformer:
>>> import numpy as np
>>> from sklearn import random_projection
>>> X = np.random.rand(100, 10000)
>>> transformer = random_projection.SparseRandomProjection()
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(100, 3947)
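As a quick, informal check on the excerpt above, the fitted projection
matrix is stored as a scipy sparse matrix and its density matches the
default $1 / \sqrt{n_{\text{features}}} = 1 / \sqrt{10000} = 0.01$:
>>> from scipy.sparse import issparse
>>> issparse(transformer.components_)
True
>>> float(transformer.density_)
0.01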
Inverse Transform
The random projection transformers have a compute_inverse_components
parameter. When set to True, after creating the random components_
matrix during fitting, the transformer computes the pseudo-inverse of
this matrix and stores it as inverse_components_. The
inverse_components_ matrix has shape $n_{\text{features}} \times n_{\text{components}}$, and
it is always a dense matrix, regardless of whether the components matrix
is sparse or dense. So depending on the number of features and
components, it may use a lot of memory.
When the inverse_transform method is called, it computes the product of
the input X and the transpose of the inverse components. If the inverse
components have been computed during fit, they are reused at each call
to inverse_transform. Otherwise they are recomputed each time, which can
be costly. The result is always dense, even if X is sparse.
Here is a small code example which illustrates how to use the inverse
transform feature:
>>> import numpy as np
>>> from sklearn.random_projection import SparseRandomProjection
>>> X = np.random.rand(100, 10000)
>>> transformer = SparseRandomProjection(
... compute_inverse_components=True
... )
...
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(100, 3947)
>>> X_new_inversed = transformer.inverse_transform(X_new)
>>> X_new_inversed.shape
(100, 10000)
>>> X_new_again = transformer.transform(X_new_inversed)
>>> np.allclose(X_new, X_new_again)
True
| """Random Projection transformers.
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <[email protected]>,
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from .base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from .exceptions import DataDimensionalityWarning
from .utils import check_random_state
from .utils._param_validation import Interval, StrOptions, validate_params
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, check_is_fitted
__all__ = [
"SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim",
]
@validate_params(
{
"n_samples": ["array-like", Interval(Real, 1, None, closed="left")],
"eps": ["array-like", Interval(Real, 0, 1, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):
"""Find a'safe' number of components to randomly project to.
The distortion introduced by a random projection `p` only changes the
distance between two points by a factor (1 +- eps) in a euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape (n_samples,
n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix of shape (n_components, n_features) (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or array-like of int
Number of samples that should be an integer greater than 0. If an array
is given, it will compute a safe number of components array-wise.
eps : float or array-like of shape (n_components,), dtype=float, \
default=0.1
Maximum distortion rate in the range (0, 1) as defined by the
Johnson-Lindenstrauss lemma. If an array is given, it will compute a
safe number of components array-wise.
Returns
-------
n_components : int or ndarray of int
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
References
----------
.. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] `Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
<https://citeseerx.ist.psu.edu/doc_view/pid/95cd464d27c25c9c8690b378b894d337cdf021f9>`_
Examples
--------
>>> from sklearn.random_projection import johnson_lindenstrauss_min_dim
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError("The JL bound is defined for eps in ]0, 1[, got %r" % eps)
if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples
)
denominator = (eps**2 / 2) - (eps**3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int64)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == "auto":
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r" % density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation."""
if n_components <= 0:
raise ValueError(
"n_components must be strictly positive, got %d" % n_components
)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" % n_features)
def _gaussian_random_matrix(n_components, n_features, random_state=None):
"""Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : ndarray of shape (n_components, n_features)
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(
loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features)
)
return components
def _sparse_random_matrix(n_components, n_features, density="auto", random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection.
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float or 'auto', default='auto'
Ratio of non-zero component in the random projection matrix in the
range `(0, 1]`
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the matrix
at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
components : {ndarray, sparse matrix} of shape (n_components, n_features)
The generated Gaussian random matrix. Sparse matrix will be of CSR
format.
See Also
--------
SparseRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://cgi.di.uoa.gr/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for _ in range(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(
n_features, n_nonzero_i, random_state=rng
)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix(
(data, indices, indptr), shape=(n_components, n_features)
)
return np.sqrt(1 / density) / np.sqrt(n_components) * components
class BaseRandomProjection(
TransformerMixin, BaseEstimator, ClassNamePrefixFeaturesOutMixin, metaclass=ABCMeta
):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
_parameter_constraints: dict = {
"n_components": [
Interval(Integral, 1, None, closed="left"),
StrOptions({"auto"}),
],
"eps": [Interval(Real, 0, None, closed="neither")],
"compute_inverse_components": ["boolean"],
"random_state": ["random_state"],
}
@abstractmethod
def __init__(
self,
n_components="auto",
*,
eps=0.1,
compute_inverse_components=False,
random_state=None,
):
self.n_components = n_components
self.eps = eps
self.compute_inverse_components = compute_inverse_components
self.random_state = random_state
@abstractmethod
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : {ndarray, sparse matrix} of shape (n_components, n_features)
The generated random matrix. Sparse matrix will be of CSR format.
"""
def _compute_inverse_components(self):
"""Compute the pseudo-inverse of the (densified) components."""
components = self.components_
if sp.issparse(components):
components = components.toarray()
return linalg.pinv(components, check_finite=False)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Generate a sparse random projection matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
aforementioned papers.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
BaseRandomProjection class instance.
"""
X = self._validate_data(
X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32]
)
n_samples, n_features = X.shape
if self.n_components == "auto":
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps
)
if self.n_components_ <= 0:
raise ValueError(
"eps=%f and n_samples=%d lead to a target dimension of "
"%d which is invalid" % (self.eps, n_samples, self.n_components_)
)
elif self.n_components_ > n_features:
raise ValueError(
"eps=%f and n_samples=%d lead to a target dimension of "
"%d which is larger than the original space with "
"n_features=%d"
% (self.eps, n_samples, self.n_components_, n_features)
)
else:
if self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning,
)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(
self.n_components_, n_features
).astype(X.dtype, copy=False)
if self.compute_inverse_components:
self.inverse_components_ = self._compute_inverse_components()
# Required by ClassNamePrefixFeaturesOutMixin.get_feature_names_out.
self._n_features_out = self.n_components
return self
def inverse_transform(self, X):
"""Project data back to its original space.
Returns an array X_original whose transform would be X. Note that even
if X is sparse, X_original is dense: this may use a lot of RAM.
If `compute_inverse_components` is False, the inverse of the components is
computed during each call to `inverse_transform` which can be costly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_components)
Data to be transformed back.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data.
"""
check_is_fitted(self)
X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=("csr", "csc"))
if self.compute_inverse_components:
return X @ self.inverse_components_.T
inverse_components = self._compute_inverse_components()
return X @ inverse_components.T
def _more_tags(self):
return {
"preserves_dtype": [np.float64, np.float32],
}
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection.
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
.. versionadded:: 0.13
Parameters
----------
n_components : int or 'auto', default='auto'
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that the Johnson-Lindenstrauss lemma can yield
very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
eps : float, default=0.1
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when `n_components` is set to
'auto'. The value should be strictly positive.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
compute_inverse_components : bool, default=False
Learn the inverse transform by computing the pseudo-inverse of the
components during fit. Note that computing the pseudo-inverse does not
scale well to large matrices.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the
projection matrix at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : ndarray of shape (n_components, n_features)
Random matrix used for the projection.
inverse_components_ : ndarray of shape (n_features, n_components)
Pseudo-inverse of the components, only computed if
`compute_inverse_components` is True.
.. versionadded:: 1.1
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SparseRandomProjection : Reduce dimensionality through sparse
random projection.
Examples
--------
>>> import numpy as np
>>> from sklearn.random_projection import GaussianRandomProjection
>>> rng = np.random.RandomState(42)
>>> X = rng.rand(25, 3000)
>>> transformer = GaussianRandomProjection(random_state=rng)
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(25, 2759)
"""
def __init__(
self,
n_components="auto",
*,
eps=0.1,
compute_inverse_components=False,
random_state=None,
):
super().__init__(
n_components=n_components,
eps=eps,
compute_inverse_components=compute_inverse_components,
random_state=random_state,
)
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : ndarray of shape (n_components, n_features)
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return _gaussian_random_matrix(
n_components, n_features, random_state=random_state
)
def transform(self, X):
"""Project the data by using matrix product with the random matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input data to project into a smaller dimensional space.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Projected array.
"""
check_is_fitted(self)
X = self._validate_data(
X, accept_sparse=["csr", "csc"], reset=False, dtype=[np.float64, np.float32]
)
return X @ self.components_.T
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection.
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
.. versionadded:: 0.13
Parameters
----------
n_components : int or 'auto', default='auto'
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that the Johnson-Lindenstrauss lemma can yield
very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
density : float or 'auto', default='auto'
Ratio in the range (0, 1] of non-zero component in the random
projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : float, default=0.1
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'. This value should be strictly positive.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : bool, default=False
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
compute_inverse_components : bool, default=False
Learn the inverse transform by computing the pseudo-inverse of the
components during fit. Note that the pseudo-inverse is always a dense
array, even if the training data was sparse. This means that it might be
necessary to call `inverse_transform` on a small batch of samples at a
time to avoid exhausting the available memory on the host. Moreover,
computing the pseudo-inverse does not scale well to large matrices.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the
projection matrix at fit time.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : sparse matrix of shape (n_components, n_features)
Random matrix used for the projection. Sparse matrix will be of CSR
format.
inverse_components_ : ndarray of shape (n_features, n_components)
Pseudo-inverse of the components, only computed if
`compute_inverse_components` is True.
.. versionadded:: 1.1
density_ : float in range 0.0 - 1.0
Concrete density computed from when density = "auto".
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
GaussianRandomProjection : Reduce dimensionality through Gaussian
random projection.
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
https://cgi.di.uoa.gr/~optas/papers/jl.pdf
Examples
--------
>>> import numpy as np
>>> from sklearn.random_projection import SparseRandomProjection
>>> rng = np.random.RandomState(42)
>>> X = rng.rand(25, 3000)
>>> transformer = SparseRandomProjection(random_state=rng)
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(25, 2759)
>>> # very few components are non-zero
>>> np.mean(transformer.components_ != 0)
0.0182...
"""
_parameter_constraints: dict = {
**BaseRandomProjection._parameter_constraints,
"density": [Interval(Real, 0.0, 1.0, closed="right"), StrOptions({"auto"})],
"dense_output": ["boolean"],
}
def __init__(
self,
n_components="auto",
*,
density="auto",
eps=0.1,
dense_output=False,
compute_inverse_components=False,
random_state=None,
):
super().__init__(
n_components=n_components,
eps=eps,
compute_inverse_components=compute_inverse_components,
random_state=random_state,
)
self.dense_output = dense_output
self.density = density
def _make_random_matrix(self, n_components, n_features):
"""Generate the random projection matrix
Parameters
----------
n_components : int
Dimensionality of the target projection space.
n_features : int
Dimensionality of the original source space.
Returns
-------
components : sparse matrix of shape (n_components, n_features)
The generated random matrix in CSR format.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return _sparse_random_matrix(
n_components, n_features, density=self.density_, random_state=random_state
)
def transform(self, X):
"""Project the data by using matrix product with the random matrix.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input data to project into a smaller dimensional space.
Returns
-------
X_new : {ndarray, sparse matrix} of shape (n_samples, n_components)
Projected array. It is a sparse matrix only when the input is sparse and
`dense_output = False`.
"""
check_is_fitted(self)
X = self._validate_data(
X, accept_sparse=["csr", "csc"], reset=False, dtype=[np.float64, np.float32]
)
return safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output) |
|
scikit-learn__scikit-learn | working_with_text_data.rst | Tutorial | Generate tutorial about work with text data | BSD 3-Clause New or Revised License | scikit-learn__scikit-learn/doc/tutorial/text_analytics/working_with_text_data.rst | [
"scikit-learn__scikit-learn/sklearn/feature_extraction/text.py"
] | Working With Text Data
The goal of this guide is to explore some of the main scikit-learn tools
on a single practical task: analyzing a collection of text documents
(newsgroups posts) on twenty different topics.
In this section we will see how to:
- load the file contents and the categories
- extract feature vectors suitable for machine learning
- train a linear model to perform categorization
- use a grid search strategy to find a good configuration of both
the feature extraction components and the classifier
Tutorial setup
To get started with this tutorial, you must first install scikit-learn
and all of its required dependencies.
Please refer to the
installation instructions <installation-instructions> page for more
information and for system-specific instructions.
The source of this tutorial can be found within your scikit-learn
folder:
scikit-learn/doc/tutorial/text_analytics/
The source can also be found on Github.
The tutorial folder should contain the following sub-folders:
- *.rst files - the source of the tutorial document written with
sphinx
- data - folder to put the datasets used during the tutorial
- skeletons - sample incomplete scripts for the exercises
- solutions - solutions of the exercises
You can already copy the skeletons into a new folder somewhere on your
hard-drive named sklearn_tut_workspace, where you will edit your own
files for the exercises while keeping the original skeletons intact:
bash $
cp -r skeletons work_directory/sklearn_tut_workspace
Machine learning algorithms need data. Go to each $TUTORIAL_HOME/data
sub-folder and run the fetch_data.py script from there (after having
read them first).
For instance:
bash $
cd $TUTORIAL_HOME/data/languages
$ less fetch_data.py
$ python fetch_data.py
Loading the 20 newsgroups dataset
The dataset is called "Twenty Newsgroups". Here is the official
description, quoted from the website:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of our knowledge, it was originally collected
by Ken Lang, probably for his paper "Newsweeder: Learning to filter
netnews," though he does not explicitly mention this collection. The
20 newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
In the following we will use the built-in dataset loader for 20
newsgroups from scikit-learn. Alternatively, it is possible to download
the dataset manually from the website and use the
sklearn.datasets.load_files function by pointing it to the
20news-bydate-train sub-folder of the uncompressed archive folder.
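For reference, such a manual load might look like the sketch below; the
folder path is hypothetical and must point to your extracted archive,
load_files infers the category names from the sub-folder names, and
encoding='latin-1' is a reasonable choice for this corpus:
from sklearn.datasets import load_files
# Hypothetical path: point this at the extracted 20news-bydate-train folder.
twenty_train = load_files('20news-bydate-train', encoding='latin-1',
                          shuffle=True, random_state=42)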
In order to get faster execution times for this first example, we will
work on a partial dataset with only 4 categories out of the 20 available
in the dataset:
>>> categories = ['alt.atheism', 'soc.religion.christian',
... 'comp.graphics', 'sci.med']
We can now load the list of files matching those categories as follows:
>>> from sklearn.datasets import fetch_20newsgroups
>>> twenty_train = fetch_20newsgroups(subset='train',
... categories=categories, shuffle=True, random_state=42)
The returned dataset is a scikit-learn "bunch": a simple holder object
with fields that can be accessed either as Python dict keys or as object
attributes for convenience. For instance, target_names holds the list
of the requested category names:
>>> twenty_train.target_names
['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']
The files themselves are loaded in memory in the data attribute. For
reference the filenames are also available:
>>> len(twenty_train.data)
2257
>>> len(twenty_train.filenames)
2257
Let's print the first lines of the first loaded file:
>>> print("\n".join(twenty_train.data[0].split("\n")[:3]))
From: [email protected] (Michael Collier)
Subject: Converting images to HP LaserJet III?
Nntp-Posting-Host: hampton
>>> print(twenty_train.target_names[twenty_train.target[0]])
comp.graphics
Supervised learning algorithms will require a category label for each
document in the training set. In this case the category is the name of
the newsgroup which also happens to be the name of the folder holding
the individual documents.
For speed and space efficiency reasons, scikit-learn loads the target
attribute as an array of integers that corresponds to the index of the
category name in the target_names list. The category integer id of each
sample is stored in the target attribute:
>>> twenty_train.target[:10]
array([1, 1, 3, 3, 3, 3, 3, 2, 2, 2])
It is possible to get back the category names as follows:
>>> for t in twenty_train.target[:10]:
... print(twenty_train.target_names[t])
...
comp.graphics
comp.graphics
soc.religion.christian
soc.religion.christian
soc.religion.christian
soc.religion.christian
soc.religion.christian
sci.med
sci.med
sci.med
You might have noticed that the samples were shuffled randomly when we
called fetch_20newsgroups(..., shuffle=True, random_state=42): this is
useful if you wish to select only a subset of samples to quickly train a
model and get a first idea of the results before re-training on the
complete dataset later.
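For example, a quick sanity-check model could be trained on only the
first few hundred shuffled posts (an illustrative slice, not part of the
original recipe):
>>> sample_data = twenty_train.data[:500]
>>> sample_target = twenty_train.target[:500]
>>> len(sample_data)
500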
Extracting features from text files
In order to perform machine learning on text documents, we first need to
turn the text content into numerical feature vectors.
sklearn.feature_extraction.text
Bags of words
The most intuitive way to do so is to use a bags of words
representation:
1. Assign a fixed integer id to each word occurring in any document
of the training set (for instance by building a dictionary from
words to integer indices).
2. For each document #i, count the number of occurrences of each word
w and store it in X[i, j] as the value of feature #j where j is
the index of word w in the dictionary.
The bags of words representation implies that n_features is the number
of distinct words in the corpus: this number is typically larger than
100,000.
If n_samples == 10000, storing X as a NumPy array of type float32 would
require 10000 x 100000 x 4 bytes = 4GB in RAM which is barely manageable
on today's computers.
Fortunately, most values in X will be zeros since for a given document
fewer than a few thousand distinct words will be used. For this reason we
say that bags of words are typically high-dimensional sparse datasets.
We can save a lot of memory by only storing the non-zero parts of the
feature vectors in memory.
scipy.sparse matrices are data structures that do exactly this, and
scikit-learn has built-in support for these structures.
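As a rough, toy illustration of the memory savings (not specific to text
data; the sizes below are arbitrary):
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> dense = np.zeros((1000, 1000), dtype=np.float32)
>>> dense[0, :10] = 1.0
>>> dense.nbytes  # 4 MB for the dense array
4000000
>>> csr_matrix(dense).data.nbytes  # only the 10 stored values (plus small index arrays)
40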
Tokenizing text with scikit-learn
Text preprocessing, tokenizing and filtering of stopwords are all
included in CountVectorizer, which builds a dictionary of features and
transforms documents to feature vectors:
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> count_vect = CountVectorizer()
>>> X_train_counts = count_vect.fit_transform(twenty_train.data)
>>> X_train_counts.shape
(2257, 35788)
CountVectorizer supports counts of N-grams of words or consecutive
characters. Once fitted, the vectorizer has built a dictionary of
feature indices:
>>> count_vect.vocabulary_.get(u'algorithm')
4690
The index value of a word is simply its position in the learned
vocabulary (features are sorted alphabetically); it is not related to
the word's frequency in the training corpus.
The method count_vect.fit_transform performs two actions: it learns
the vocabulary and transforms the documents into count vectors. It's
possible to separate these steps by calling
count_vect.fit(twenty_train.data) followed by
X_train_counts = count_vect.transform(twenty_train.data), but doing so
would tokenize and vectorize each text file twice.
From occurrences to frequencies
Occurrence count is a good start but there is an issue: longer documents
will have higher average count values than shorter documents, even
though they might talk about the same topics.
To avoid these potential discrepancies it suffices to divide the number
of occurrences of each word in a document by the total number of words
in the document: these new features are called tf for Term Frequencies.
Another refinement on top of tf is to downscale weights for words that
occur in many documents in the corpus and are therefore less informative
than those that occur only in a smaller portion of the corpus.
This downscaling is called tf–idf for "Term Frequency times Inverse
Document Frequency".
Both tf and tf–idf can be computed as follows using TfidfTransformer:
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
>>> X_train_tf = tf_transformer.transform(X_train_counts)
>>> X_train_tf.shape
(2257, 35788)
In the example code above, we first use the fit(..) method to fit the
estimator to the data, and then the transform(..) method to transform
the count matrix to a tf (or tf-idf) representation. These two steps can
be combined to achieve the same end result faster by skipping redundant
processing, using the fit_transform(..) method as shown below (and as
mentioned in the note in the previous section):
>>> tfidf_transformer = TfidfTransformer()
>>> X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
>>> X_train_tfidf.shape
(2257, 35788)
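To make the transformation more concrete, here is a minimal sketch of what
TfidfTransformer's default settings (smooth_idf=True, norm='l2') compute,
using plain NumPy on a made-up count matrix; it is an illustration, not a
reimplementation:

import numpy as np

# Toy count matrix: 3 documents x 4 terms (illustrative numbers only).
counts = np.array([[3, 0, 1, 0],
                   [2, 0, 0, 1],
                   [0, 1, 0, 0]], dtype=float)
n_samples = counts.shape[0]

df = (counts > 0).sum(axis=0)                 # document frequency of each term
idf = np.log((1 + n_samples) / (1 + df)) + 1  # smoothed idf
tfidf = counts * idf
tfidf /= np.linalg.norm(tfidf, axis=1, keepdims=True)  # l2-normalize each row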
Training a classifier
Now that we have our features, we can train a classifier to try to
predict the category of a post. Let's start with a
naïve Bayes <naive_bayes> classifier, which provides a nice baseline for
this task. scikit-learn includes several variants of this classifier,
and the one most suitable for word counts is the multinomial variant:
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB().fit(X_train_tfidf, twenty_train.target)
To try to predict the outcome on a new document we need to extract the
features using almost the same feature extracting chain as before. The
difference is that we call transform instead of fit_transform on the
transformers, since they have already been fit to the training set:
>>> docs_new = ['God is love', 'OpenGL on the GPU is fast']
>>> X_new_counts = count_vect.transform(docs_new)
>>> X_new_tfidf = tfidf_transformer.transform(X_new_counts)
>>> predicted = clf.predict(X_new_tfidf)
>>> for doc, category in zip(docs_new, predicted):
... print('%r => %s' % (doc, twenty_train.target_names[category]))
...
'God is love' => soc.religion.christian
'OpenGL on the GPU is fast' => comp.graphics
Building a pipeline
In order to make the vectorizer => transformer => classifier easier to
work with, scikit-learn provides a ~sklearn.pipeline.Pipeline class that
behaves like a compound classifier:
>>> from sklearn.pipeline import Pipeline
>>> text_clf = Pipeline([
... ('vect', CountVectorizer()),
... ('tfidf', TfidfTransformer()),
... ('clf', MultinomialNB()),
... ])
The names vect, tfidf and clf (classifier) are arbitrary. We will use
them to perform grid search for suitable hyperparameters below. We can
now train the model with a single command:
>>> text_clf.fit(twenty_train.data, twenty_train.target)
Pipeline(...)
Evaluation of the performance on the test set
Evaluating the predictive accuracy of the model is equally easy:
>>> import numpy as np
>>> twenty_test = fetch_20newsgroups(subset='test',
... categories=categories, shuffle=True, random_state=42)
>>> docs_test = twenty_test.data
>>> predicted = text_clf.predict(docs_test)
>>> np.mean(predicted == twenty_test.target)
0.8348...
We achieved 83.5% accuracy. Let's see if we can do better with a linear
support vector machine (SVM) <svm>, which is widely regarded as one of
the best text classification algorithms (although it's also a bit slower
than naïve Bayes). We can change the learner by simply plugging a
different classifier object into our pipeline:
>>> from sklearn.linear_model import SGDClassifier
>>> text_clf = Pipeline([
... ('vect', CountVectorizer()),
... ('tfidf', TfidfTransformer()),
... ('clf', SGDClassifier(loss='hinge', penalty='l2',
... alpha=1e-3, random_state=42,
... max_iter=5, tol=None)),
... ])
>>> text_clf.fit(twenty_train.data, twenty_train.target)
Pipeline(...)
>>> predicted = text_clf.predict(docs_test)
>>> np.mean(predicted == twenty_test.target)
0.9101...
We achieved 91.0% accuracy using the SVM. scikit-learn provides further
utilities for more detailed performance analysis of the results:
>>> from sklearn import metrics
>>> print(metrics.classification_report(twenty_test.target, predicted,
... target_names=twenty_test.target_names))
precision recall f1-score support
<BLANKLINE>
alt.atheism 0.95 0.80 0.87 319
comp.graphics 0.87 0.98 0.92 389
sci.med 0.94 0.89 0.91 396
soc.religion.christian 0.90 0.95 0.93 398
<BLANKLINE>
accuracy 0.91 1502
macro avg 0.91 0.91 0.91 1502
weighted avg 0.91 0.91 0.91 1502
<BLANKLINE>
>>> metrics.confusion_matrix(twenty_test.target, predicted)
array([[256, 11, 16, 36],
[ 4, 380, 3, 2],
[ 5, 35, 353, 3],
[ 5, 11, 4, 378]])
As expected the confusion matrix shows that posts from the newsgroups on
atheism and Christianity are more often confused for one another than
with computer graphics.
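If matplotlib is available, the same matrix can also be rendered graphically
with ConfusionMatrixDisplay (present in recent scikit-learn versions); a short
sketch:

import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay

ConfusionMatrixDisplay.from_predictions(
    twenty_test.target, predicted, display_labels=twenty_test.target_names
)
plt.show()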
SGD stands for Stochastic Gradient Descent. This is a simple
optimization algorithm that is known to scale well when the dataset
has many samples.
By setting loss="hinge" and penalty="l2" we are configuring the
classifier model to tune its parameters for the linear Support Vector
Machine cost function.
Alternatively we could have used sklearn.svm.LinearSVC (Linear Support
Vector Machine Classifier) that provides an alternative optimizer for
the same cost function based on the liblinear C++ library.
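As a sketch of that alternative, the same pipeline could be rebuilt with
LinearSVC in place of SGDClassifier (default, untuned parameters shown here
for illustration):

from sklearn.svm import LinearSVC

text_clf_svc = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', LinearSVC()),
])
text_clf_svc.fit(twenty_train.data, twenty_train.target)
print(np.mean(text_clf_svc.predict(docs_test) == twenty_test.target))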
Parameter tuning using grid search
We've already encountered some parameters such as use_idf in the
TfidfTransformer. Classifiers tend to have many parameters as well;
e.g., MultinomialNB includes a smoothing parameter alpha and
SGDClassifier has a penalty parameter alpha and configurable loss and
penalty terms in the objective function (see the module documentation,
or use the Python help function to get a description of these).
Instead of tweaking the parameters of the various components of the
chain, it is possible to run an exhaustive search of the best parameters
on a grid of possible values. Here we try the classifier on either
unigrams alone or unigrams plus bigrams, with or without idf, and with a
regularization strength alpha of either 0.01 or 0.001 for the linear SVM:
>>> from sklearn.model_selection import GridSearchCV
>>> parameters = {
... 'vect__ngram_range': [(1, 1), (1, 2)],
... 'tfidf__use_idf': (True, False),
... 'clf__alpha': (1e-2, 1e-3),
... }
Obviously, such an exhaustive search can be expensive. If we have
multiple CPU cores at our disposal, we can tell the grid searcher to try
these eight parameter combinations in parallel with the n_jobs
parameter. If we give this parameter a value of -1, grid search will
detect how many cores are installed and use them all:
>>> gs_clf = GridSearchCV(text_clf, parameters, cv=5, n_jobs=-1)
The grid search instance behaves like a normal scikit-learn model. Let's
perform the search on a smaller subset of the training data to speed up
the computation:
>>> gs_clf = gs_clf.fit(twenty_train.data[:400], twenty_train.target[:400])
The result of calling fit on a GridSearchCV object is a classifier that
we can use to predict:
>>> twenty_train.target_names[gs_clf.predict(['God is love'])[0]]
'soc.religion.christian'
The object's best_score_ and best_params_ attributes store the best mean
score and the parameter settings corresponding to that score:
>>> gs_clf.best_score_
0.9...
>>> for param_name in sorted(parameters.keys()):
... print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
...
clf__alpha: 0.001
tfidf__use_idf: True
vect__ngram_range: (1, 1)
A more detailed summary of the search is available at
gs_clf.cv_results_.
The cv_results_ attribute can easily be imported into pandas as a
DataFrame for further inspection.
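For example, assuming pandas is installed, that inspection could look roughly
like this:

import pandas as pd

results = pd.DataFrame(gs_clf.cv_results_)
print(results[['rank_test_score', 'mean_test_score', 'std_test_score', 'params']]
      .sort_values('rank_test_score'))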
A GridSearchCV object also stores the best classifier that it trained
as its best_estimator_ attribute. In this case, that isn't much use as
we trained on a small, 400-document subset of our full training set.
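If a fully trained model is wanted, one option (sketched below with
illustrative variable names) is to rerun the same grid search on the complete
training set and then use its best_estimator_, which GridSearchCV refits on
the whole training data by default:

gs_clf_full = GridSearchCV(text_clf, parameters, cv=5, n_jobs=-1)
gs_clf_full.fit(twenty_train.data, twenty_train.target)

best_model = gs_clf_full.best_estimator_
print(np.mean(best_model.predict(docs_test) == twenty_test.target))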
Exercises
To do the exercises, copy the content of the 'skeletons' folder as a new
folder named 'workspace':
$ cp -r skeletons workspace
You can then edit the content of the workspace without fear of losing
the original exercise instructions.
Then start an IPython shell and run the work-in-progress script with:
In [1]: %run workspace/exercise_XX_script.py arg1 arg2 arg3
If an exception is triggered, use %debug to start a post-mortem ipdb
session.
Refine the implementation and iterate until the exercise is solved.
For each exercise, the skeleton file provides all the necessary import
statements, boilerplate code to load the data and sample code to
evaluate the predictive accuracy of the model.
Exercise 1: Language identification
- Write a text classification pipeline using a custom preprocessor and
TfidfVectorizer set up to use character based n-grams, using data
from Wikipedia articles as the training set.
- Evaluate the performance on some held out test set.
ipython command line:
%run workspace/exercise_01_language_train_model.py data/languages/paragraphs/
Exercise 2: Sentiment Analysis on movie reviews
- Write a text classification pipeline to classify movie reviews as
either positive or negative.
- Find a good set of parameters using grid search.
- Evaluate the performance on a held out test set.
ipython command line:
%run workspace/exercise_02_sentiment.py data/movie_reviews/txt_sentoken/
Exercise 3: CLI text classification utility
Using the results of the previous exercises and the pickle module of
the standard library, write a command line utility that detects the
language of some text provided on stdin and estimates the polarity
(positive or negative) if the text is written in English.
Bonus point if the utility is able to give a confidence level for its
predictions.
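One possible shape for such a utility is sketched below; the file names, the
saved pipelines, the 'en' language label and the use of predict_proba are all
assumptions made for this example, not part of the exercise skeletons:

# cli_classify.py -- illustrative sketch only.
import pickle
import sys

# Assumes two fitted pipelines were saved earlier with pickle.dump(...).
with open('language_clf.pkl', 'rb') as f:
    language_clf = pickle.load(f)
with open('sentiment_clf.pkl', 'rb') as f:
    sentiment_clf = pickle.load(f)

text = sys.stdin.read()
language = language_clf.predict([text])[0]
print('language:', language)

# Assumes the language labels are ISO codes and the sentiment pipeline
# ends with an estimator that exposes predict_proba.
if language == 'en':
    proba = sentiment_clf.predict_proba([text])[0]
    polarity = sentiment_clf.classes_[proba.argmax()]
    print('polarity: %s (confidence %.2f)' % (polarity, proba.max()))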
Where to from here
Here are a few suggestions to help further your scikit-learn intuition
upon the completion of this tutorial:
- Try playing around with the analyzer and token normalisation under
CountVectorizer.
- If you don't have labels, try using
Clustering <sphx_glr_auto_examples_text_plot_document_clustering.py>
on your problem.
- If you have multiple labels per document, e.g. categories, have a
  look at the Multiclass and multilabel section <multiclass>.
- Try using Truncated SVD <LSA> for latent semantic analysis.
- Have a look at using Out-of-core Classification
<sphx_glr_auto_examples_applications_plot_out_of_core_classification.py>
to learn from data that would not fit into the computer main memory.
- Have a look at the Hashing Vectorizer <hashing_vectorizer> as a
memory efficient alternative to CountVectorizer.
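As a pointer for that last item, here is a sketch of a pipeline using
HashingVectorizer instead of CountVectorizer (the n_features value is an
arbitrary choice for the example; alternate_sign=False keeps the hashed
counts non-negative so that TfidfTransformer can be applied on top):

from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline

hashing_clf = Pipeline([
    ('vect', HashingVectorizer(n_features=2**18, alternate_sign=False)),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, random_state=42)),
])
hashing_clf.fit(twenty_train.data, twenty_train.target)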
| # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
import array
import re
import unicodedata
import warnings
from collections import defaultdict
from collections.abc import Mapping
from functools import partial
from numbers import Integral
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, OneToOneFeatureMixin, TransformerMixin, _fit_context
from ..exceptions import NotFittedError
from ..preprocessing import normalize
from ..utils import _IS_32BIT
from ..utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
from ..utils.validation import FLOAT_DTYPES, check_array, check_is_fitted
from ._hash import FeatureHasher
from ._stop_words import ENGLISH_STOP_WORDS
__all__ = [
"HashingVectorizer",
"CountVectorizer",
"ENGLISH_STOP_WORDS",
"TfidfTransformer",
"TfidfVectorizer",
"strip_accents_ascii",
"strip_accents_unicode",
"strip_tags",
]
def _preprocess(doc, accent_function=None, lower=False):
"""Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart.
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
strip_accents_ascii : Remove accentuated char for any unicode symbol that
has a direct ASCII equivalent.
"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing.
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
strip_accents_unicode : Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function.
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixin:
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols.
The decoding strategy depends on the vectorizer parameters.
Parameters
----------
doc : bytes or str
The string to decode.
Returns
-------
doc: str
A string of unicode symbols.
"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens.
Returns
-------
tokenizer: callable
A function to split a string into a sequence of tokens.
"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError(
"More than 1 capturing group in token pattern. Only a single "
"group should be captured."
)
return token_pattern.findall
def get_stop_words(self):
"""Build or fetch the effective stop words list.
Returns
-------
stop_words: list or None
A list of stop words.
"""
return _check_stop_list(self.stop_words)
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent
Returns
-------
is_consistent : True if stop words are consistent with the preprocessor
and tokenizer, False if they are not, None if the check
was previously performed, "error" if it could not be
performed (e.g. because of the use of a custom
preprocessor / tokenizer)
"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
# Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words."
% sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
def build_analyzer(self):
"""Return a callable to process input data.
The callable handles preprocessing, tokenization, and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(vocabulary.values())
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in range(len(vocabulary)):
if i not in indices:
msg = "Vocabulary of size %d doesn't contain index %d." % (
len(vocabulary),
i,
)
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, "vocabulary_"):
self._validate_vocabulary()
if not self.fixed_vocabulary_:
raise NotFittedError("Vocabulary not fitted or provided")
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
def _validate_ngram_range(self):
"""Check validity of ngram_range parameter"""
min_n, max_m = self.ngram_range
if min_n > max_m:
raise ValueError(
"Invalid value for ngram_range=%s "
"lower boundary larger than the upper boundary."
% str(self.ngram_range)
)
def _warn_for_unused_params(self):
if self.tokenizer is not None and self.token_pattern is not None:
warnings.warn(
"The parameter 'token_pattern' will not be used"
" since 'tokenizer' is not None'"
)
if self.preprocessor is not None and callable(self.analyzer):
warnings.warn(
"The parameter 'preprocessor' will not be used"
" since 'analyzer' is callable'"
)
if (
self.ngram_range != (1, 1)
and self.ngram_range is not None
and callable(self.analyzer)
):
warnings.warn(
"The parameter 'ngram_range' will not be used"
" since 'analyzer' is callable'"
)
if self.analyzer != "word" or callable(self.analyzer):
if self.stop_words is not None:
warnings.warn(
"The parameter 'stop_words' will not be used"
" since 'analyzer' != 'word'"
)
if (
self.token_pattern is not None
and self.token_pattern != r"(?u)\b\w\w+\b"
):
warnings.warn(
"The parameter 'token_pattern' will not be used"
" since 'analyzer' != 'word'"
)
if self.tokenizer is not None:
warnings.warn(
"The parameter 'tokenizer' will not be used"
" since 'analyzer' != 'word'"
)
class HashingVectorizer(
TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None
):
r"""Convert a collection of text documents to a matrix of token occurrences.
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory.
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters.
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any character.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
n_features : int, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
norm : {'l1', 'l2'}, default='l2'
Norm used to normalize term vectors. None for no normalization.
alternate_sign : bool, default=True
When True, an alternating sign is added to the features as to
approximately conserve the inner product in the hashed space even for
small n_features. This approach is similar to sparse random projection.
.. versionadded:: 0.19
dtype : type, default=np.float64
Type of the matrix returned by fit_transform() or transform().
See Also
--------
CountVectorizer : Convert a collection of text documents to a matrix of
token counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix of
TF-IDF features.
Notes
-----
This estimator is :term:`stateless` and does not need to be fitted.
However, we recommend to call :meth:`fit_transform` instead of
:meth:`transform`, as parameter validation is only performed in
:meth:`fit`.
Examples
--------
>>> from sklearn.feature_extraction.text import HashingVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = HashingVectorizer(n_features=2**4)
>>> X = vectorizer.fit_transform(corpus)
>>> print(X.shape)
(4, 16)
"""
_parameter_constraints: dict = {
"input": [StrOptions({"filename", "file", "content"})],
"encoding": [str],
"decode_error": [StrOptions({"strict", "ignore", "replace"})],
"strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
"lowercase": ["boolean"],
"preprocessor": [callable, None],
"tokenizer": [callable, None],
"stop_words": [StrOptions({"english"}), list, None],
"token_pattern": [str, None],
"ngram_range": [tuple],
"analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
"n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")],
"binary": ["boolean"],
"norm": [StrOptions({"l1", "l2"}), None],
"alternate_sign": ["boolean"],
"dtype": "no_validation", # delegate to numpy
}
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
analyzer="word",
n_features=(2**20),
binary=False,
norm="l2",
alternate_sign=True,
dtype=np.float64,
):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.alternate_sign = alternate_sign
self.dtype = dtype
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y=None):
"""Only validates estimator's parameters.
This method allows to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Only validates estimator's parameters.
This method allows to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
# triggers a parameter validation
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._warn_for_unused_params()
self._validate_ngram_range()
self._get_hasher().fit(X, y=y)
return self
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_ngram_range()
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
def fit_transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
return self.fit(X, y).transform(X)
def _get_hasher(self):
return FeatureHasher(
n_features=self.n_features,
input_type="string",
dtype=self.dtype,
alternate_sign=self.alternate_sign,
)
def _more_tags(self):
return {"X_types": ["string"]}
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.issparse(X) and X.format == "csr":
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(X.indptr)
class CountVectorizer(_VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token counts.
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (strip_accents and lowercase) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
stop_words : {'english'}, list, default=None
If 'english', a built-in stop word list for English is used.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. In this case, setting `max_df`
to a higher value, such as in the range (0.7, 1.0), can automatically detect
and filter stop words based on intra corpus document frequency of terms.
token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
word n-grams or char n-grams to be extracted. All values of n such
that min_n <= n <= max_n will be used. For example an
``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means
unigrams and bigrams, and ``(2, 2)`` means only bigrams.
Only applies if ``analyzer`` is not callable.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word n-gram or character
n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``filename`` or ``file``, the data is
first read from the file and then passed to the given callable
analyzer.
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
`max_features` ordered by term frequency across the corpus.
Otherwise, all features are used.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : bool, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : dtype, default=np.int64
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
HashingVectorizer : Convert a collection of text documents to a
matrix of token counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix
of TF-IDF features.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'],...)
>>> print(X.toarray())
[[0 1 1 1 0 0 1 0 1]
[0 2 0 1 0 1 1 0 1]
[1 0 0 1 1 0 1 1 1]
[0 1 1 1 0 0 1 0 1]]
>>> vectorizer2 = CountVectorizer(analyzer='word', ngram_range=(2, 2))
>>> X2 = vectorizer2.fit_transform(corpus)
>>> vectorizer2.get_feature_names_out()
array(['and this', 'document is', 'first document', 'is the', 'is this',
'second document', 'the first', 'the second', 'the third', 'third one',
'this document', 'this is', 'this the'],...)
>>> print(X2.toarray())
[[0 0 1 1 0 0 1 0 0 0 0 1 0]
[0 1 0 1 0 1 0 1 0 0 1 0 0]
[1 0 0 1 0 0 0 0 1 1 0 1 0]
[0 0 1 0 1 0 1 0 0 0 0 0 1]]
"""
_parameter_constraints: dict = {
"input": [StrOptions({"filename", "file", "content"})],
"encoding": [str],
"decode_error": [StrOptions({"strict", "ignore", "replace"})],
"strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
"lowercase": ["boolean"],
"preprocessor": [callable, None],
"tokenizer": [callable, None],
"stop_words": [StrOptions({"english"}), list, None],
"token_pattern": [str, None],
"ngram_range": [tuple],
"analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
"max_df": [
Interval(RealNotInt, 0, 1, closed="both"),
Interval(Integral, 1, None, closed="left"),
],
"min_df": [
Interval(RealNotInt, 0, 1, closed="both"),
Interval(Integral, 1, None, closed="left"),
],
"max_features": [Interval(Integral, 1, None, closed="left"), None],
"vocabulary": [Mapping, HasMethods("__iter__"), None],
"binary": ["boolean"],
"dtype": "no_validation", # delegate to numpy
}
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
analyzer="word",
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.int64,
):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
self.max_features = max_features
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(vocabulary.items())
map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode="clip")
return X
def _limit_features(self, X, vocabulary, high=None, low=None, limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
tfs = np.asarray(X.sum(axis=0)).ravel()
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(vocabulary.items()):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError(
"After pruning, no terms remain. Try a lower min_df or a higher max_df."
)
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = []
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError(
"empty vocabulary; perhaps the documents only contain stop words"
)
if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1
if _IS_32BIT:
raise ValueError(
(
"sparse CSR array has {} non-zero "
"elements and requires 64 bit indexing, "
"which is unsupported with 32 bit Python."
).format(indptr[-1])
)
indices_dtype = np.int64
else:
indices_dtype = np.int32
j_indices = np.asarray(j_indices, dtype=indices_dtype)
indptr = np.asarray(indptr, dtype=indices_dtype)
values = np.frombuffer(values, dtype=np.intc)
X = sp.csr_matrix(
(values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype,
)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
self : object
Fitted vectorizer.
"""
self.fit_transform(raw_documents)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : array of shape (n_samples, n_features)
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_ngram_range()
self._warn_for_unused_params()
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
if self.fixed_vocabulary_ and self.lowercase:
for term in self.vocabulary:
if any(map(str.isupper, term)):
warnings.warn(
"Upper case characters found in"
" vocabulary while 'lowercase'"
" is True. These entries will not"
" be matched with any documents"
)
break
vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
n_doc = X.shape[0]
max_doc_count = max_df if isinstance(max_df, Integral) else max_df * n_doc
min_doc_count = min_df if isinstance(min_df, Integral) else min_df * n_doc
if max_doc_count < min_doc_count:
raise ValueError("max_df corresponds to < documents than min_df")
if max_features is not None:
X = self._sort_features(X, vocabulary)
X, self.stop_words_ = self._limit_features(
X, vocabulary, max_doc_count, min_doc_count, max_features
)
if max_features is None:
X = self._sort_features(X, vocabulary)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_inv : list of arrays of shape (n_samples,)
List of arrays of terms.
"""
self._check_vocabulary()
# We need CSR format for fast row manipulations.
X = check_array(X, accept_sparse="csr")
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
if sp.issparse(X):
return [
inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)
]
else:
return [
inverse_vocabulary[np.flatnonzero(X[i, :])].ravel()
for i in range(n_samples)
]
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
self._check_vocabulary()
return np.asarray(
[t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))],
dtype=object,
)
def _more_tags(self):
return {"X_types": ["string"]}
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(
OneToOneFeatureMixin, TransformerMixin, BaseEstimator, auto_wrap_output_keys=None
):
"""Transform a count matrix to a normalized tf or tf-idf representation.
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf for a term t of a document d
in a document set is tf-idf(t, d) = tf(t, d) * idf(t), and the idf is
computed as idf(t) = log [ n / df(t) ] + 1 (if ``smooth_idf=False``), where
n is the total number of documents in the document set and df(t) is the
document frequency of t; the document frequency is the number of documents
in the document set that contain the term t. The effect of adding "1" to
the idf in the equation above is that terms with zero idf, i.e., terms
that occur in all documents in a training set, will not be entirely
ignored.
(Note that the idf formula above differs from the standard textbook
notation that defines the idf as
idf(t) = log [ n / (df(t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(t) = log [ (1 + n) / (1 + df(t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : {'l1', 'l2'} or None, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`~sklearn.preprocessing.normalize`.
- None: No normalization.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array of shape (n_features)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 1.0
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfVectorizer : Convert a collection of raw documents to a matrix of
TF-IDF features.
HashingVectorizer : Convert a collection of text documents to a matrix
of token occurrences.
References
----------
.. [Yates2011] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.
.. [MRS2008] C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from sklearn.pipeline import Pipeline
>>> corpus = ['this is the first document',
... 'this document is the second document',
... 'and this is the third one',
... 'is this the first document']
>>> vocabulary = ['this', 'document', 'first', 'is', 'second', 'the',
... 'and', 'one']
>>> pipe = Pipeline([('count', CountVectorizer(vocabulary=vocabulary)),
... ('tfid', TfidfTransformer())]).fit(corpus)
>>> pipe['count'].transform(corpus).toarray()
array([[1, 1, 1, 1, 0, 1, 0, 0],
[1, 2, 0, 1, 1, 1, 0, 0],
[1, 0, 0, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 0, 1, 0, 0]])
>>> pipe['tfid'].idf_
array([1. , 1.22314355, 1.51082562, 1. , 1.91629073,
1. , 1.91629073, 1.91629073])
>>> pipe.transform(corpus).shape
(4, 8)
"""
_parameter_constraints: dict = {
"norm": [StrOptions({"l1", "l2"}), None],
"use_idf": ["boolean"],
"smooth_idf": ["boolean"],
"sublinear_tf": ["boolean"],
}
def __init__(self, *, norm="l2", use_idf=True, smooth_idf=True, sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn the idf vector (global term weights).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A matrix of term/token counts.
y : None
This parameter is not needed to compute tf-idf.
Returns
-------
self : object
Fitted transformer.
"""
# large sparse data is not supported for 32bit platforms because
# _document_frequency uses np.bincount which works on arrays of
# dtype NPY_INTP which is int32 for 32bit platforms. See #20923
X = self._validate_data(
X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT
)
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
df = df.astype(dtype, copy=False)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(n_samples / df) + 1
self._idf_diag = sp.diags(
idf,
offsets=0,
shape=(n_features, n_features),
format="csr",
dtype=dtype,
)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
X = self._validate_data(
X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy, reset=False
)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=np.float64)
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
# idf_ being a property, the automatic attributes detection
# does not work as usual and we need to specify the attribute
# name:
check_is_fitted(self, attributes=["idf_"], msg="idf vector is not fitted")
# *= doesn't work
X = X * self._idf_diag
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
# if _idf_diag is not set, this will raise an attribute error,
# which means hasattr(self, "idf_") is False
return np.ravel(self._idf_diag.sum(axis=0))
@idf_.setter
def idf_(self, value):
value = np.asarray(value, dtype=np.float64)
n_features = value.shape[0]
self._idf_diag = sp.spdiags(
value, diags=0, m=n_features, n=n_features, format="csr"
)
def _more_tags(self):
return {"X_types": ["2darray", "sparse"]}
class TfidfVectorizer(CountVectorizer):
r"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to :class:`CountVectorizer` followed by
:class:`TfidfTransformer`.
For an example of usage, see
:ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
- If `'file'`, the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
- If `'content'`, the input is expected to be a sequence of items that
can be of type string or byte.
encoding : str, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode'} or callable, default=None
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) means no character normalization is performed.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : bool, default=True
Convert all characters to lowercase before tokenizing.
preprocessor : callable, default=None
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
Only applies if ``analyzer`` is not callable.
tokenizer : callable, default=None
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
is first read from the file and then passed to the given callable
analyzer.
stop_words : {'english'}, list, default=None
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. In this case, setting `max_df`
to a higher value, such as in the range (0.7, 1.0), can automatically detect
and filter stop words based on intra corpus document frequency of terms.
token_pattern : str, default=r"(?u)\\b\\w\\w+\\b"
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
If there is a capturing group in token_pattern then the
captured group content, not the entire match, becomes the token.
At most one capturing group is permitted.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
only bigrams.
Only applies if ``analyzer`` is not callable.
max_df : float or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float in range [0.0, 1.0], the parameter represents a proportion of
documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float in range of [0.0, 1.0], the parameter represents a proportion
of documents, integer absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int, default=None
If not None, build a vocabulary that only consider the top
`max_features` ordered by term frequency across the corpus.
Otherwise, all features are used.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, default=None
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : bool, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set `binary` to True, `use_idf` to False and
`norm` to None to get 0/1 outputs).
dtype : dtype, default=float64
Type of the matrix returned by fit_transform() or transform().
norm : {'l1', 'l2'} or None, default='l2'
Each output row will have unit norm, either:
- 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
- 'l1': Sum of absolute values of vector elements is 1.
See :func:`~sklearn.preprocessing.normalize`.
- None: No normalization.
use_idf : bool, default=True
Enable inverse-document-frequency reweighting. If False, idf(t) = 1.
smooth_idf : bool, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : bool, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
fixed_vocabulary_ : bool
True if a fixed vocabulary of term to indices mapping
is provided by the user.
idf_ : array of shape (n_features,)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See Also
--------
CountVectorizer : Transforms text into a sparse matrix of n-gram counts.
TfidfTransformer : Performs the TF-IDF transformation from a provided
matrix of counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
Examples
--------
>>> from sklearn.feature_extraction.text import TfidfVectorizer
>>> corpus = [
... 'This is the first document.',
... 'This document is the second document.',
... 'And this is the third one.',
... 'Is this the first document?',
... ]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> vectorizer.get_feature_names_out()
array(['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third',
'this'],...)
>>> print(X.shape)
(4, 9)
"""
_parameter_constraints: dict = {**CountVectorizer._parameter_constraints}
_parameter_constraints.update(
{
"norm": [StrOptions({"l1", "l2"}), None],
"use_idf": ["boolean"],
"smooth_idf": ["boolean"],
"sublinear_tf": ["boolean"],
}
)
def __init__(
self,
*,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
analyzer="word",
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1),
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.float64,
norm="l2",
use_idf=True,
smooth_idf=True,
sublinear_tf=False,
):
super().__init__(
input=input,
encoding=encoding,
decode_error=decode_error,
strip_accents=strip_accents,
lowercase=lowercase,
preprocessor=preprocessor,
tokenizer=tokenizer,
analyzer=analyzer,
stop_words=stop_words,
token_pattern=token_pattern,
ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
max_features=max_features,
vocabulary=vocabulary,
binary=binary,
dtype=dtype,
)
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
if not hasattr(self, "_tfidf"):
raise NotFittedError(
f"{self.__class__.__name__} is not fitted yet. Call 'fit' with "
"appropriate arguments before using this attribute."
)
return self._tfidf.idf_
@idf_.setter
def idf_(self, value):
if not self.use_idf:
raise ValueError("`idf_` cannot be set when `use_idf=False`.")
if not hasattr(self, "_tfidf"):
# We should support transferring `idf_` from another `TfidfTransformer`
# and therefore, we need to create the transformer instance if it does not
# exist yet.
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
self._validate_vocabulary()
if hasattr(self, "vocabulary_"):
if hasattr(self, "vocabulary_"):
if len(self.vocabulary_) != len(value):
raise ValueError(
"idf length = %d must be equal to vocabulary size = %d"
% (len(value), len(self.vocabulary_))
)
self._tfidf.idf_ = value
def _check_params(self):
if self.dtype not in FLOAT_DTYPES:
warnings.warn(
"Only {} 'dtype' should be used. {} 'dtype' will "
"be converted to np.float64.".format(FLOAT_DTYPES, self.dtype),
UserWarning,
)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._check_params()
self._warn_for_unused_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
self._check_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted")
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
def _more_tags(self):
return {"X_types": ["string"], "_skip_test": True} |
|
pytorch__vision | feature_extraction.rst | Module doc / Tutorial | Generate documentation and example for this module | BSD 3-Clause New or Revised License | pytorch__vision/docs/source/feature_extraction.rst | [
"pytorch__vision/torchvision/models/feature_extraction.py"
] | Feature extraction for model inspection
The torchvision.models.feature_extraction package contains feature
extraction utilities that let us tap into our models to access
intermediate transformations of our inputs. This could be useful for a
variety of applications in computer vision. Just a few examples are:
- Visualizing feature maps.
- Extracting features to compute image descriptors for tasks like
facial recognition, copy-detection, or image retrieval.
- Passing selected features to downstream sub-networks for end-to-end
training with a specific task in mind. For example, passing a
hierarchy of features to a Feature Pyramid Network with object
detection heads.
Torchvision provides create_feature_extractor for this purpose. It works
by following roughly these steps:
1. Symbolically tracing the model to get a graphical representation of
how it transforms the input, step by step.
2. Setting the user-selected graph nodes as outputs.
3. Removing all redundant nodes (anything downstream of the output
nodes).
4. Generating python code from the resulting graph and bundling that
into a PyTorch module together with the graph itself.
The torch.fx documentation provides a more general and detailed
explanation of the above procedure and the inner workings of the
symbolic tracing.
About Node Names
In order to specify which nodes should be output nodes for extracted
features, one should be familiar with the node naming convention used
here (which differs slightly from that used in torch.fx). A node name is
specified as a . separated path walking the module hierarchy from top
level module down to leaf operation or leaf module. For instance
"layer4.2.relu" in ResNet-50 represents the output of the ReLU of the
2nd block of the 4th layer of the ResNet module. Here are some finer
points to keep in mind:
- When specifying node names for create_feature_extractor, you may
provide a truncated version of a node name as a shortcut. To see how
this works, try creating a ResNet-50 model and printing the node
names with
train_nodes, _ = get_graph_node_names(model); print(train_nodes), and
observe that the last node pertaining to layer4 is
"layer4.2.relu_2". One may specify "layer4.2.relu_2" as the return
node, or just "layer4" as this, by convention, refers to the last
node (in order of execution) of layer4.
- If a certain module or operation is repeated more than once, node
names get an additional _{int} postfix to disambiguate. For
instance, maybe the addition (+) operation is used three times in
the same forward method. Then there would be "path.to.module.add",
"path.to.module.add_1", "path.to.module.add_2". The counter is
maintained within the scope of the direct parent. So in ResNet-50
there is a "layer4.1.add" and a "layer4.2.add". Because the addition
operations reside in different blocks, there is no need for a
postfix to disambiguate. (A short sketch of this counter behavior
follows below.)
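To make the _{int} postfix concrete, here is a minimal sketch (the toy
module below is purely illustrative and not part of torchvision): tracing a
forward method that applies torch.relu twice in the same scope yields the
node names "relu" and "relu_1".
import torch
from torchvision.models.feature_extraction import get_graph_node_names

class TwoReLUs(torch.nn.Module):
    def forward(self, x):
        # torch.relu is used twice within the same (top-level) scope, so the
        # second occurrence is disambiguated with a _1 postfix
        return torch.relu(torch.relu(x))

train_nodes, _ = get_graph_node_names(TwoReLUs())
print(train_nodes)  # roughly: ['x', 'relu', 'relu_1', 'output']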
An Example
Here is an example of how we might extract features for MaskRCNN:
import torch
from torchvision.models import resnet50
from torchvision.models.feature_extraction import get_graph_node_names
from torchvision.models.feature_extraction import create_feature_extractor
from torchvision.models.detection.mask_rcnn import MaskRCNN
from torchvision.models.detection.backbone_utils import LastLevelMaxPool
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork
# To assist you in designing the feature extractor you may want to print out
# the available nodes for resnet50.
m = resnet50()
train_nodes, eval_nodes = get_graph_node_names(resnet50())
# The lists returned, are the names of all the graph nodes (in order of
# execution) for the input model traced in train mode and in eval mode
# respectively. You'll find that `train_nodes` and `eval_nodes` are the same
# for this example. But if the model contains control flow that's dependent
# on the training mode, they may be different.
# To specify the nodes you want to extract, you could select the final node
# that appears in each of the main layers:
return_nodes = {
# node_name: user-specified key for output dict
'layer1.2.relu_2': 'layer1',
'layer2.3.relu_2': 'layer2',
'layer3.5.relu_2': 'layer3',
'layer4.2.relu_2': 'layer4',
}
# But `create_feature_extractor` can also accept truncated node specifications
# like "layer1", as it will just pick the last node that's a descendant
# of the specification. (Tip: be careful with this, especially when a layer
# has multiple outputs. It's not always guaranteed that the last operation
# performed is the one that corresponds to the output you desire. You should
# consult the source code for the input model to confirm.)
return_nodes = {
'layer1': 'layer1',
'layer2': 'layer2',
'layer3': 'layer3',
'layer4': 'layer4',
}
# Now you can build the feature extractor. This returns a module whose forward
# method returns a dictionary like:
# {
# 'layer1': output of layer 1,
# 'layer2': output of layer 2,
# 'layer3': output of layer 3,
# 'layer4': output of layer 4,
# }
create_feature_extractor(m, return_nodes=return_nodes)
# Let's put all that together to wrap resnet50 with MaskRCNN
# MaskRCNN requires a backbone with an attached FPN
class Resnet50WithFPN(torch.nn.Module):
def __init__(self):
super(Resnet50WithFPN, self).__init__()
# Get a resnet50 backbone
m = resnet50()
# Extract 4 main layers (note: MaskRCNN needs this particular name
# mapping for return nodes)
self.body = create_feature_extractor(
m, return_nodes={f'layer{k}': str(v)
for v, k in enumerate([1, 2, 3, 4])})
# Dry run to get number of channels for FPN
inp = torch.randn(2, 3, 224, 224)
with torch.no_grad():
out = self.body(inp)
in_channels_list = [o.shape[1] for o in out.values()]
# Build FPN
self.out_channels = 256
self.fpn = FeaturePyramidNetwork(
in_channels_list, out_channels=self.out_channels,
extra_blocks=LastLevelMaxPool())
def forward(self, x):
x = self.body(x)
x = self.fpn(x)
return x
# Now we can build our model!
model = MaskRCNN(Resnet50WithFPN(), num_classes=91).eval()
| import inspect
import math
import re
import warnings
from collections import OrderedDict
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torchvision
from torch import fx, nn
from torch.fx.graph_module import _copy_attr
__all__ = ["create_feature_extractor", "get_graph_node_names"]
class LeafModuleAwareTracer(fx.Tracer):
"""
An fx.Tracer that allows the user to specify a set of leaf modules, i.e.
modules that are not to be traced through. The resulting graph ends up
having single nodes referencing calls to the leaf modules' forward methods.
"""
def __init__(self, *args, **kwargs):
self.leaf_modules = {}
if "leaf_modules" in kwargs:
leaf_modules = kwargs.pop("leaf_modules")
self.leaf_modules = leaf_modules
super().__init__(*args, **kwargs)
def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
if isinstance(m, tuple(self.leaf_modules)):
return True
return super().is_leaf_module(m, module_qualname)
class NodePathTracer(LeafModuleAwareTracer):
"""
NodePathTracer is an FX tracer that, for each operation, also records the
name of the Node from which the operation originated. A node name here is
a `.` separated path walking the hierarchy from top level module down to
leaf operation or leaf module. The name of the top level module is not
included as part of the node name. For example, if we trace a module whose
forward method applies a ReLU module, the name for that node will simply
be 'relu'.
Some notes on the specifics:
- Nodes are recorded to `self.node_to_qualname` which is a dictionary
mapping a given Node object to its node name.
- Nodes are recorded in the order which they are executed during
tracing.
- When a duplicate node name is encountered, a suffix of the form
_{int} is added. The counter starts from 1.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Track the qualified name of the Node being traced
self.current_module_qualname = ""
# A map from FX Node to the qualified name
# NOTE: This is loosely like the "qualified name" mentioned in the
# torch.fx docs https://pytorch.org/docs/stable/fx.html but adapted
# for the purposes of the torchvision feature extractor
self.node_to_qualname = OrderedDict()
def call_module(self, m: torch.nn.Module, forward: Callable, args, kwargs):
"""
Override of `fx.Tracer.call_module`
This override:
1) Stores away the qualified name of the caller for restoration later
2) Adds the qualified name of the caller to
`current_module_qualname` for retrieval by `create_proxy`
3) Once a leaf module is reached, calls `create_proxy`
4) Restores the caller's qualified name into current_module_qualname
"""
old_qualname = self.current_module_qualname
try:
module_qualname = self.path_of_module(m)
self.current_module_qualname = module_qualname
if not self.is_leaf_module(m, module_qualname):
out = forward(*args, **kwargs)
return out
return self.create_proxy("call_module", module_qualname, args, kwargs)
finally:
self.current_module_qualname = old_qualname
def create_proxy(
self, kind: str, target: fx.node.Target, args, kwargs, name=None, type_expr=None, *_
) -> fx.proxy.Proxy:
"""
Override of `Tracer.create_proxy`. This override intercepts the recording
of every operation and stores away the current traced module's qualified
name in `node_to_qualname`
"""
proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr)
self.node_to_qualname[proxy.node] = self._get_node_qualname(self.current_module_qualname, proxy.node)
return proxy
def _get_node_qualname(self, module_qualname: str, node: fx.node.Node) -> str:
node_qualname = module_qualname
if node.op != "call_module":
# In this case module_qualname from torch.fx doesn't go all the
# way to the leaf function/op, so we need to append it
if len(node_qualname) > 0:
# Only append '.' if we are deeper than the top level module
node_qualname += "."
node_qualname += str(node)
# Now we need to add an _{index} postfix on any repeated node names
# For modules we do this from scratch
# But for anything else, torch.fx already has a globally scoped
# _{index} postfix. But we want it locally (relative to direct parent)
# scoped. So first we need to undo the torch.fx postfix
if re.match(r".+_[0-9]+$", node_qualname) is not None:
node_qualname = node_qualname.rsplit("_", 1)[0]
#... and now we add on our own postfix
for existing_qualname in reversed(self.node_to_qualname.values()):
# Check to see if existing_qualname is of the form
# {node_qualname} or {node_qualname}_{int}
if re.match(rf"{node_qualname}(_[0-9]+)?$", existing_qualname) is not None:
postfix = existing_qualname.replace(node_qualname, "")
if len(postfix):
# existing_qualname is of the form {node_qualname}_{int}
next_index = int(postfix[1:]) + 1
else:
# existing_qualname is of the form {node_qualname}
next_index = 1
node_qualname += f"_{next_index}"
break
return node_qualname
def _is_subseq(x, y):
"""Check if y is a subsequence of x
https://stackoverflow.com/a/24017747/4391249
"""
iter_x = iter(x)
return all(any(x_item == y_item for x_item in iter_x) for y_item in y)
def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathTracer):
"""
Utility function for warning the user if there are differences between
the train graph nodes and the eval graph nodes.
"""
train_nodes = list(train_tracer.node_to_qualname.values())
eval_nodes = list(eval_tracer.node_to_qualname.values())
if len(train_nodes) == len(eval_nodes) and all(t == e for t, e in zip(train_nodes, eval_nodes)):
return
suggestion_msg = (
"When choosing nodes for feature extraction, you may need to specify "
"output nodes for train and eval mode separately."
)
if _is_subseq(train_nodes, eval_nodes):
msg = (
"NOTE: The nodes obtained by tracing the model in eval mode "
"are a subsequence of those obtained in train mode. "
)
elif _is_subseq(eval_nodes, train_nodes):
msg = (
"NOTE: The nodes obtained by tracing the model in train mode "
"are a subsequence of those obtained in eval mode. "
)
else:
msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
warnings.warn(msg + suggestion_msg)
def _get_leaf_modules_for_ops() -> List[type]:
members = inspect.getmembers(torchvision.ops)
result = []
for _, obj in members:
if inspect.isclass(obj) and issubclass(obj, torch.nn.Module):
result.append(obj)
return result
def _set_default_tracer_kwargs(original_tr_kwargs: Optional[Dict[str, Any]]) -> Dict[str, Any]:
default_autowrap_modules = (math, torchvision.ops)
default_leaf_modules = _get_leaf_modules_for_ops()
result_tracer_kwargs = {} if original_tr_kwargs is None else original_tr_kwargs
result_tracer_kwargs["autowrap_modules"] = (
tuple(set(result_tracer_kwargs["autowrap_modules"] + default_autowrap_modules))
if "autowrap_modules" in result_tracer_kwargs
else default_autowrap_modules
)
result_tracer_kwargs["leaf_modules"] = (
list(set(result_tracer_kwargs["leaf_modules"] + default_leaf_modules))
if "leaf_modules" in result_tracer_kwargs
else default_leaf_modules
)
return result_tracer_kwargs
def get_graph_node_names(
model: nn.Module,
tracer_kwargs: Optional[Dict[str, Any]] = None,
suppress_diff_warning: bool = False,
) -> Tuple[List[str], List[str]]:
"""
Dev utility to return node names in order of execution. See note on node
names under :func:`create_feature_extractor`. Useful for seeing which node
names are available for feature extraction. There are two reasons that
node names can't easily be read directly from the code for a model:
1. Not all submodules are traced through. Modules from ``torch.nn`` all
fall within this category.
2. Nodes representing the repeated application of the same operation
or leaf module get a ``_{counter}`` postfix.
The model is traced twice: once in train mode, and once in eval mode. Both
sets of node names are returned.
For more details on the node naming conventions used here, please see the
:ref:`relevant subheading <about-node-names>` in the
`documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.
Args:
model (nn.Module): model for which we'd like to print node names
tracer_kwargs (dict, optional): a dictionary of keyword arguments for
``NodePathTracer`` (they are eventually passed onto
`torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
By default, it will be set to wrap and make leaf nodes all torchvision ops:
{"autowrap_modules": (math, torchvision.ops,), "leaf_modules": _get_leaf_modules_for_ops(),}
WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
provided dictionary.
suppress_diff_warning (bool, optional): whether to suppress a warning
when there are discrepancies between the train and eval version of
the graph. Defaults to False.
Returns:
tuple(list, list): a list of node names from tracing the model in
train mode, and another from tracing the model in eval mode.
Examples::
>>> model = torchvision.models.resnet18()
>>> train_nodes, eval_nodes = get_graph_node_names(model)
"""
tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
is_training = model.training
train_tracer = NodePathTracer(**tracer_kwargs)
train_tracer.trace(model.train())
eval_tracer = NodePathTracer(**tracer_kwargs)
eval_tracer.trace(model.eval())
train_nodes = list(train_tracer.node_to_qualname.values())
eval_nodes = list(eval_tracer.node_to_qualname.values())
if not suppress_diff_warning:
_warn_graph_differences(train_tracer, eval_tracer)
# Restore training state
model.train(is_training)
return train_nodes, eval_nodes
class DualGraphModule(fx.GraphModule):
"""
A derivative of `fx.GraphModule`. Differs in the following ways:
- Requires a train and eval version of the underlying graph
- Copies submodules according to the nodes of both train and eval graphs.
- Calling train(mode) switches between train graph and eval graph.
"""
def __init__(
self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule"
):
"""
Args:
root (nn.Module): module from which the copied module hierarchy is
built
train_graph (fx.Graph): the graph that should be used in train mode
eval_graph (fx.Graph): the graph that should be used in eval mode
"""
super(fx.GraphModule, self).__init__()
self.__class__.__name__ = class_name
self.train_graph = train_graph
self.eval_graph = eval_graph
# Copy all get_attr and call_module ops (indicated by BOTH train and
# eval graphs)
for node in chain(iter(train_graph.nodes), iter(eval_graph.nodes)):
if node.op in ["get_attr", "call_module"]:
if not isinstance(node.target, str):
raise TypeError(f"node.target should be of type str instead of {type(node.target)}")
_copy_attr(root, self, node.target)
# train mode by default
self.train()
self.graph = train_graph
# (borrowed from fx.GraphModule):
# Store the Tracer class responsible for creating a Graph separately as part of the
# GraphModule state, except when the Tracer is defined in a local namespace.
# Locally defined Tracers are not pickleable. This is needed because torch.package will
# serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
# to re-create the Graph during deserialization.
if self.eval_graph._tracer_cls != self.train_graph._tracer_cls:
raise TypeError(
f"Train mode and eval mode should use the same tracer class. Instead got {self.eval_graph._tracer_cls} for eval vs {self.train_graph._tracer_cls} for train"
)
self._tracer_cls = None
if self.graph._tracer_cls and "<locals>" not in self.graph._tracer_cls.__qualname__:
self._tracer_cls = self.graph._tracer_cls
def train(self, mode=True):
"""
Swap out the graph depending on the selected training mode.
NOTE this should be safe when calling model.eval() because that just
calls this with mode == False.
"""
# NOTE: Only set self.graph if the current graph is not the desired
# one. This saves us from recompiling the graph where not necessary.
if mode and not self.training:
self.graph = self.train_graph
elif not mode and self.training:
self.graph = self.eval_graph
return super().train(mode=mode)
def create_feature_extractor(
model: nn.Module,
return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
tracer_kwargs: Optional[Dict[str, Any]] = None,
suppress_diff_warning: bool = False,
) -> fx.GraphModule:
"""
Creates a new graph module that returns intermediate nodes from a given
model as dictionary with user specified keys as strings, and the requested
outputs as values. This is achieved by re-writing the computation graph of
the model via FX to return the desired nodes as outputs. All unused nodes
are removed, together with their corresponding parameters.
Desired output nodes must be specified as a ``.`` separated
path walking the module hierarchy from top level module down to leaf
operation or leaf module. For more details on the node naming conventions
used here, please see the :ref:`relevant subheading <about-node-names>`
in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.
Not all models will be FX traceable, although with some massaging they can
be made to cooperate. Here's a (not exhaustive) list of tips:
- If you don't need to trace through a particular, problematic
sub-module, turn it into a "leaf module" by passing a list of
``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
It will not be traced through, but rather, the resulting graph will
hold a reference to that module's forward method.
- Likewise, you may turn functions into leaf functions by passing a
list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
example below).
- Some inbuilt Python functions can be problematic. For instance,
``int`` will raise an error during tracing. You may wrap them in your
own function and then pass that in ``autowrap_functions`` as one of
the ``tracer_kwargs``.
For further information on FX see the
`torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.
Args:
model (nn.Module): model on which we will extract the features
return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
containing the names (or partial names - see note above)
of the nodes for which the activations will be returned. If it is
a ``Dict``, the keys are the node names, and the values
are the user-specified keys for the graph module's returned
dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
node specification strings directly to output names. In the case
that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
this should not be specified.
train_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``eval_return_nodes`` must also be specified,
and ``return_nodes`` should not be specified.
eval_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``train_return_nodes`` must also be specified,
and `return_nodes` should not be specified.
tracer_kwargs (dict, optional): a dictionary of keyword arguments for
``NodePathTracer`` (which passes them onto its parent class
`torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
By default, it will be set to wrap and make leaf nodes all torchvision ops:
{"autowrap_modules": (math, torchvision.ops,), "leaf_modules": _get_leaf_modules_for_ops(),}
WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
provided dictionary.
suppress_diff_warning (bool, optional): whether to suppress a warning
when there are discrepancies between the train and eval version of
the graph. Defaults to False.
Examples::
>>> # Feature extraction with resnet
>>> model = torchvision.models.resnet18()
>>> # extract layer1 and layer3, giving as names `feat1` and feat2`
>>> model = create_feature_extractor(
>>> model, {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = model(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
>>> # Specifying leaf modules and leaf functions
>>> def leaf_function(x):
>>> # This would raise a TypeError if traced through
>>> return int(x)
>>>
>>> class LeafModule(torch.nn.Module):
>>> def forward(self, x):
>>> # This would raise a TypeError if traced through
>>> int(x.shape[0])
>>> return torch.nn.functional.relu(x + 4)
>>>
>>> class MyModule(torch.nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.conv = torch.nn.Conv2d(3, 1, 3)
>>> self.leaf_module = LeafModule()
>>>
>>> def forward(self, x):
>>> leaf_function(x.shape[0])
>>> x = self.conv(x)
>>> return self.leaf_module(x)
>>>
>>> model = create_feature_extractor(
>>> MyModule(), return_nodes=['leaf_module'],
>>> tracer_kwargs={'leaf_modules': [LeafModule],
>>> 'autowrap_functions': [leaf_function]})
"""
tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
is_training = model.training
if all(arg is None for arg in [return_nodes, train_return_nodes, eval_return_nodes]):
raise ValueError(
"Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
)
if (train_return_nodes is None) ^ (eval_return_nodes is None):
raise ValueError(
"If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
)
if not ((return_nodes is None) ^ (train_return_nodes is None)):
raise ValueError("If `train_return_nodes` and `eval_return_nodes` are specified, then `return_nodes` should not be specified, and vice versa")
# Put *_return_nodes into Dict[str, str] format
def to_strdict(n) -> Dict[str, str]:
if isinstance(n, list):
return {str(i): str(i) for i in n}
return {str(k): str(v) for k, v in n.items()}
if train_return_nodes is None:
return_nodes = to_strdict(return_nodes)
train_return_nodes = deepcopy(return_nodes)
eval_return_nodes = deepcopy(return_nodes)
else:
train_return_nodes = to_strdict(train_return_nodes)
eval_return_nodes = to_strdict(eval_return_nodes)
# Repeat the tracing and graph rewriting for train and eval mode
tracers = {}
graphs = {}
mode_return_nodes: Dict[str, Dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
for mode in ["train", "eval"]:
if mode == "train":
model.train()
elif mode == "eval":
model.eval()
# Instantiate our NodePathTracer and use that to trace the model
tracer = NodePathTracer(**tracer_kwargs)
graph = tracer.trace(model)
name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
graph_module = fx.GraphModule(tracer.root, graph, name)
available_nodes = list(tracer.node_to_qualname.values())
# FIXME We don't know if we should expect this to happen
if len(set(available_nodes)) != len(available_nodes):
raise ValueError(
"There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
)
# Check that all outputs in return_nodes are present in the model
for query in mode_return_nodes[mode].keys():
# To check if a query is available we need to check that at least
# one of the available names starts with it up to a "." separator.
if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
raise ValueError(
f"node: '{query}' is not present in model. Hint: use "
"`get_graph_node_names` to make sure the "
"`return_nodes` you specified are present. It may even "
"be that you need to specify `train_return_nodes` and "
"`eval_return_nodes` separately."
)
# Remove existing output nodes (train mode)
orig_output_nodes = []
for n in reversed(graph_module.graph.nodes):
if n.op == "output":
orig_output_nodes.append(n)
if not orig_output_nodes:
raise ValueError("No output nodes found in graph_module.graph.nodes")
for n in orig_output_nodes:
graph_module.graph.erase_node(n)
# Find nodes corresponding to return_nodes and make them into output_nodes
nodes = [n for n in graph_module.graph.nodes]
output_nodes = OrderedDict()
for n in reversed(nodes):
module_qualname = tracer.node_to_qualname.get(n)
if module_qualname is None:
# NOTE - Known cases where this happens:
# - Node representing creation of a tensor constant - probably
# not interesting as a return node
# - When packing outputs into a named tuple like in InceptionV3
continue
for query in mode_return_nodes[mode]:
depth = query.count(".")
if ".".join(module_qualname.split(".")[: depth + 1]) == query:
output_nodes[mode_return_nodes[mode][query]] = n
mode_return_nodes[mode].pop(query)
break
output_nodes = OrderedDict(reversed(list(output_nodes.items())))
# And add them in the end of the graph
with graph_module.graph.inserting_after(nodes[-1]):
graph_module.graph.output(output_nodes)
# Remove unused modules / parameters
graph_module.graph.eliminate_dead_code()
graph_module.recompile()
# Keep track of the tracer and graph, so we can choose the main one
tracers[mode] = tracer
graphs[mode] = graph
# Warn user if there are any discrepancies between the graphs of the
# train and eval modes
if not suppress_diff_warning:
_warn_graph_differences(tracers["train"], tracers["eval"])
# Build the final graph module
graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)
# Restore original training mode
model.train(is_training)
graph_module.train(is_training)
return graph_module |
|
pytorch__vision | transforms.rst | Module doc / Tutorial | Generate documentation and example for this module | BSD 3-Clause New or Revised License | pytorch__vision/docs/source/transforms.rst | [
"pytorch__vision/torchvision/transforms/functional.py"
] | pytorch__vision/torchvision/transforms | Transforming and augmenting images
Torchvision supports common computer vision transformations in the
torchvision.transforms and torchvision.transforms.v2 modules. Transforms
can be used to transform or augment data for training or inference of
different tasks (image classification, detection, segmentation, video
classification).
# Image Classification
import torch
from torchvision.transforms import v2
H, W = 32, 32
img = torch.randint(0, 256, size=(3, H, W), dtype=torch.uint8)
transforms = v2.Compose([
v2.RandomResizedCrop(size=(224, 224), antialias=True),
v2.RandomHorizontalFlip(p=0.5),
v2.ToDtype(torch.float32, scale=True),
v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
img = transforms(img)
# Detection (re-using imports and transforms from above)
from torchvision import tv_tensors
img = torch.randint(0, 256, size=(3, H, W), dtype=torch.uint8)
boxes = torch.randint(0, H // 2, size=(3, 4))
boxes[:, 2:] += boxes[:, :2]
boxes = tv_tensors.BoundingBoxes(boxes, format="XYXY", canvas_size=(H, W))
# The same transforms can be used!
img, boxes = transforms(img, boxes)
# And you can pass arbitrary input structures
output_dict = transforms({"image": img, "boxes": boxes})
Transforms are typically passed as the transform or transforms argument
to the Datasets <datasets>.
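For instance, a pipeline can be handed to a dataset through its transform
argument. The sketch below is only an illustration and uses
torchvision.datasets.FakeData as a stand-in dataset; any dataset that
accepts a transform argument behaves the same way.
import torch
from torchvision import datasets
from torchvision.transforms import v2

transform = v2.Compose([
    v2.PILToTensor(),                       # FakeData yields PIL images
    v2.ToDtype(torch.float32, scale=True),
])
dataset = datasets.FakeData(size=10, transform=transform)
img, label = dataset[0]                     # img is a float32 tensor in [0, 1]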
Start here
Whether you're new to Torchvision transforms, or you're already
experienced with them, we encourage you to start with
sphx_glr_auto_examples_transforms_plot_transforms_getting_started.py in
order to learn more about what can be done with the new v2 transforms.
Then, browse the sections below for general information and
performance tips. The available transforms and functionals are listed in
the API reference <v2_api_ref>.
More information and tutorials can also be found in our example gallery
<gallery>, e.g. sphx_glr_auto_examples_transforms_plot_transforms_e2e.py
or sphx_glr_auto_examples_transforms_plot_custom_transforms.py.
Supported input types and conventions
Most transformations accept both PIL images and tensor inputs. Both CPU
and CUDA tensors are supported. The result of both backends (PIL or
Tensors) should be very close. In general, we recommend relying on the
tensor backend for
performance <transforms_perf>. The conversion transforms
<conversion_transforms> may be used to convert to and from PIL images,
or for converting dtypes and ranges.
Tensor images are expected to be of shape (C, H, W), where C is the
number of channels, and H and W refer to height and width. Most
transforms support batched tensor input. A batch of Tensor images is a
tensor of shape (N, C, H, W), where N is a number of images in the
batch. The v2 <v1_or_v2> transforms generally accept an arbitrary number
of leading dimensions (..., C, H, W) and can handle batched images or
batched videos.
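As a small illustration of the leading-dimension convention (the shapes
below are arbitrary), a batched uint8 tensor passes straight through a v2
transform and keeps its batch dimension:
import torch
from torchvision.transforms import v2

batch = torch.randint(0, 256, size=(4, 3, 32, 32), dtype=torch.uint8)  # (N, C, H, W)
out = v2.RandomHorizontalFlip(p=1.0)(batch)
print(out.shape)  # torch.Size([4, 3, 32, 32])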
Dtype and expected value range
The expected range of the values of a tensor image is implicitly defined
by the tensor dtype. Tensor images with a float dtype are expected to
have values in [0, 1]. Tensor images with an integer dtype are expected
to have values in [0, MAX_DTYPE] where MAX_DTYPE is the largest value
that can be represented in that dtype. Typically, images of dtype
torch.uint8 are expected to have values in [0, 255].
Use ~torchvision.transforms.v2.ToDtype to convert both the dtype and
range of the inputs.
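For example (a minimal sketch with an arbitrary image size), converting a
uint8 image to float32 with scale=True maps the [0, 255] range onto [0, 1]:
import torch
from torchvision.transforms import v2

img_u8 = torch.randint(0, 256, size=(3, 32, 32), dtype=torch.uint8)  # values in [0, 255]
img_f32 = v2.ToDtype(torch.float32, scale=True)(img_u8)              # values in [0.0, 1.0]
print(img_f32.dtype, float(img_f32.min()), float(img_f32.max()))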
V1 or V2? Which one should I use?
TL;DR We recommend using the torchvision.transforms.v2 transforms
instead of those in torchvision.transforms. They're faster and they can
do more things. Just change the import and you should be good to go.
In Torchvision 0.15 (March 2023), we released a new set of transforms
available in the torchvision.transforms.v2 namespace. These transforms
have a lot of advantages compared to the v1 ones (in
torchvision.transforms):
- They can transform images but also bounding boxes, masks, or videos.
This provides support for tasks beyond image classification:
detection, segmentation, video classification, etc. See
sphx_glr_auto_examples_transforms_plot_transforms_getting_started.py
and sphx_glr_auto_examples_transforms_plot_transforms_e2e.py.
- They support more transforms like ~torchvision.transforms.v2.CutMix
and ~torchvision.transforms.v2.MixUp. See
sphx_glr_auto_examples_transforms_plot_cutmix_mixup.py.
- They're faster <transforms_perf>.
- They support arbitrary input structures (dicts, lists, tuples,
etc.).
- Future improvements and features will be added to the v2 transforms
only.
These transforms are fully backward compatible with the v1 ones, so if
you're already using transforms from torchvision.transforms, all you need
to do is update the import to torchvision.transforms.v2. In terms
of output, there might be negligible differences due to implementation
differences.
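A minimal sketch of such a migration (the pipeline itself is arbitrary and
only shows that the import is the one thing that changes):
# v1: from torchvision import transforms
# v2: only the import changes, the pipeline stays the same
from torchvision.transforms import v2 as transforms

preprocess = transforms.Compose([
    transforms.Resize(256, antialias=True),
    transforms.CenterCrop(224),
])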
Note
The v2 transforms are still BETA, but at this point we do not expect
disruptive changes to be made to their public APIs. We're planning to
make them fully stable in version 0.17. Please submit any feedback you
may have here.
Performance considerations
We recommend the following guidelines to get the best performance out of
the transforms:
- Rely on the v2 transforms from torchvision.transforms.v2
- Use tensors instead of PIL images
- Use torch.uint8 dtype, especially for resizing
- Resize with bilinear or bicubic mode
This is what a typical transform pipeline could look like:
from torchvision.transforms import v2
transforms = v2.Compose([
v2.ToImage(), # Convert to tensor, only needed if you had a PIL image
v2.ToDtype(torch.uint8, scale=True), # optional, most input are already uint8 at this point
# ...
v2.RandomResizedCrop(size=(224, 224), antialias=True), # Or Resize(antialias=True)
# ...
v2.ToDtype(torch.float32, scale=True), # Normalize expects float input
v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
The above should give you the best performance in a typical training
environment that relies on the torch.utils.data.DataLoader with
num_workers > 0.
Transforms tend to be sensitive to the input strides / memory format.
Some transforms will be faster with channels-first images while others
prefer channels-last. Like torch operators, most transforms will
preserve the memory format of the input, but this may not always be
respected due to implementation details. You may want to experiment a
bit if you're chasing the very best performance. Using torch.compile on
individual transforms may also help factoring out the memory format
variable (e.g. on ~torchvision.transforms.v2.Normalize). Note that we're
talking about memory format, not tensor shape <conventions>.
Note that resize transforms like ~torchvision.transforms.v2.Resize and
~torchvision.transforms.v2.RandomResizedCrop typically prefer
channels-last input and tend not to benefit from torch.compile at this
time.
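To make the distinction concrete, the sketch below (with an arbitrary image
size) converts an image to channels-last memory format: the shape is
unchanged while the strides differ, and the transform accepts either layout.
import torch
from torchvision.transforms import v2

img = torch.randint(0, 256, size=(1, 3, 224, 224), dtype=torch.uint8)
img_cl = img.to(memory_format=torch.channels_last)  # same shape, different strides
print(img.shape == img_cl.shape, img.stride(), img_cl.stride())
out = v2.Resize(size=(256, 256), antialias=True)(img_cl)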
Transform classes, functionals, and kernels
Transforms are available as classes like
~torchvision.transforms.v2.Resize, but also as functionals like
~torchvision.transforms.v2.functional.resize in the
torchvision.transforms.v2.functional namespace. This is very much like
the torch.nn package which defines both classes and functional
equivalents in torch.nn.functional.
The functionals support PIL images, pure tensors, or TVTensors
<tv_tensors>, e.g. both resize(image_tensor) and resize(boxes) are
valid.
Note
Random transforms like ~torchvision.transforms.v2.RandomCrop will
randomly sample some parameter each time they're called. Their
functional counterpart (~torchvision.transforms.v2.functional.crop) does
not do any kind of random sampling and thus have a slighlty different
parametrization. The get_params() class method of the transforms class
can be used to perform parameter sampling when using the functional
APIs.
The torchvision.transforms.v2.functional namespace also contains what we
call the "kernels". These are the low-level functions that implement the
core functionalities for specific types, e.g. resize_bounding_boxes or
`resized_crop_mask. They are public, although not documented. Check the
code to see which ones are available (note that those starting with a
leading underscore are not public!). Kernels are only really useful if
you want torchscript support <transforms_torchscript> for types like
bounding boxes or masks.
Torchscript support
Most transform classes and functionals support torchscript. For
composing transforms, use torch.nn.Sequential instead of
~torchvision.transforms.v2.Compose:
transforms = torch.nn.Sequential(
CenterCrop(10),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
)
scripted_transforms = torch.jit.script(transforms)
Warning
v2 transforms support torchscript, but if you call torch.jit.script() on
a v2 class transform, you'll actually end up with its (scripted) v1
equivalent. This may lead to slightly different results between the
scripted and eager executions due to implementation differences between
v1 and v2.
If you really need torchscript support for the v2 transforms, we
recommend scripting the functionals from the
torchvision.transforms.v2.functional namespace to avoid surprises.
Also note that the functionals only support torchscript for pure
tensors, which are always treated as images. If you need torchscript
support for other types like bounding boxes or masks, you can rely on
the low-level kernels
<functional_transforms>.
For any custom transformations to be used with torch.jit.script, they
should be derived from torch.nn.Module. | import math
import numbers
import warnings
from enum import Enum
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import torch
from PIL import Image
from torch import Tensor
try:
import accimage
except ImportError:
accimage = None
from ..utils import _log_api_usage_once
from . import _functional_pil as F_pil, _functional_tensor as F_t
class InterpolationMode(Enum):
"""Interpolation modes
Available interpolation methods are ``nearest``, ``nearest-exact``, ``bilinear``, ``bicubic``, ``box``, ``hamming``,
and ``lanczos``.
"""
NEAREST = "nearest"
NEAREST_EXACT = "nearest-exact"
BILINEAR = "bilinear"
BICUBIC = "bicubic"
# For PIL compatibility
BOX = "box"
HAMMING = "hamming"
LANCZOS = "lanczos"
# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
inverse_modes_mapping = {
0: InterpolationMode.NEAREST,
2: InterpolationMode.BILINEAR,
3: InterpolationMode.BICUBIC,
4: InterpolationMode.BOX,
5: InterpolationMode.HAMMING,
1: InterpolationMode.LANCZOS,
}
return inverse_modes_mapping[i]
pil_modes_mapping = {
InterpolationMode.NEAREST: 0,
InterpolationMode.BILINEAR: 2,
InterpolationMode.BICUBIC: 3,
InterpolationMode.NEAREST_EXACT: 0,
InterpolationMode.BOX: 4,
InterpolationMode.HAMMING: 5,
InterpolationMode.LANCZOS: 1,
}
_is_pil_image = F_pil._is_pil_image
def get_dimensions(img: Tensor) -> List[int]:
"""Returns the dimensions of an image as [channels, height, width].
Args:
img (PIL Image or Tensor): The image to be checked.
Returns:
List[int]: The image dimensions.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_dimensions)
if isinstance(img, torch.Tensor):
return F_t.get_dimensions(img)
return F_pil.get_dimensions(img)
def get_image_size(img: Tensor) -> List[int]:
"""Returns the size of an image as [width, height].
Args:
img (PIL Image or Tensor): The image to be checked.
Returns:
List[int]: The image size.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_image_size)
if isinstance(img, torch.Tensor):
return F_t.get_image_size(img)
return F_pil.get_image_size(img)
def get_image_num_channels(img: Tensor) -> int:
"""Returns the number of channels of an image.
Args:
img (PIL Image or Tensor): The image to be checked.
Returns:
int: The number of channels.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_image_num_channels)
if isinstance(img, torch.Tensor):
return F_t.get_image_num_channels(img)
return F_pil.get_image_num_channels(img)
@torch.jit.unused
def _is_numpy(img: Any) -> bool:
return isinstance(img, np.ndarray)
@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
return img.ndim in {2, 3}
def to_tensor(pic) -> Tensor:
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
This function does not support torchscript.
See :class:`~torchvision.transforms.ToTensor` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_tensor)
if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")
if _is_numpy(pic) and not _is_numpy_image(pic):
raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")
default_float_dtype = torch.get_default_dtype()
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.to(dtype=default_float_dtype).div(255)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic).to(dtype=default_float_dtype)
# handle PIL Image
mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))
if pic.mode == "1":
img = 255 * img
img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
# put it from HWC to CHW format
img = img.permute((2, 0, 1)).contiguous()
if isinstance(img, torch.ByteTensor):
return img.to(dtype=default_float_dtype).div(255)
else:
return img
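# Usage sketch (editor's addition, not part of the original torchvision source):
# to_tensor() turns an HWC uint8 ndarray (or a PIL Image) into a CHW float tensor
# scaled to [0, 1]. The array shape below is an illustrative assumption.
def _editor_example_to_tensor():
    arr = np.zeros((4, 5, 3), dtype=np.uint8)
    t = to_tensor(arr)
    # t.shape == torch.Size([3, 4, 5]) and t.dtype == torch.float32
    return t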
def pil_to_tensor(pic: Any) -> Tensor:
"""Convert a ``PIL Image`` to a tensor of the same type.
This function does not support torchscript.
See :class:`~torchvision.transforms.PILToTensor` for more details.
.. note::
A deep copy of the underlying array is performed.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(pil_to_tensor)
if not F_pil._is_pil_image(pic):
raise TypeError(f"pic should be PIL Image. Got {type(pic)}")
if accimage is not None and isinstance(pic, accimage.Image):
# accimage format is always uint8 internally, so always return uint8 here
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
pic.copyto(nppic)
return torch.as_tensor(nppic)
# handle PIL Image
img = torch.as_tensor(np.array(pic, copy=True))
img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
# put it from HWC to CHW format
img = img.permute((2, 0, 1))
return img
def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
"""Convert a tensor image to the given ``dtype`` and scale the values accordingly
This function does not support PIL Image.
Args:
image (torch.Tensor): Image to be converted
dtype (torch.dtype): Desired data type of the output
Returns:
Tensor: Converted image
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(convert_image_dtype)
if not isinstance(image, torch.Tensor):
raise TypeError("Input img should be Tensor Image")
return F_t.convert_image_dtype(image, dtype)
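# Usage sketch (editor's addition, illustrative only): convert_image_dtype()
# rescales values when changing dtype, so uint8 in [0, 255] becomes float32 in [0, 1].
def _editor_example_convert_image_dtype():
    img = torch.tensor([[[0, 128, 255]]], dtype=torch.uint8)
    out = convert_image_dtype(img, torch.float32)
    # out is approximately tensor([[[0.000, 0.502, 1.000]]])
    return out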
def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_pil_image)
if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")
elif isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndimension()} dimensions.")
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
# check number of channels
if pic.shape[-3] > 4:
raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-3]} channels.")
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
# check number of channels
if pic.shape[-1] > 4:
raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.")
npimg = pic
if isinstance(pic, torch.Tensor):
if pic.is_floating_point() and mode!= "F":
pic = pic.mul(255).byte()
npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, not {type(npimg)}")
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = "L"
elif npimg.dtype == np.int16:
expected_mode = "I;16"
elif npimg.dtype == np.int32:
expected_mode = "I"
elif npimg.dtype == np.float32:
expected_mode = "F"
if mode is not None and mode!= expected_mode:
raise ValueError(f"Incorrect mode ({mode}) supplied for input type {np.dtype}. Should be {expected_mode}")
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ["LA"]
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs")
if mode is None and npimg.dtype == np.uint8:
mode = "LA"
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs")
if mode is None and npimg.dtype == np.uint8:
mode = "RGBA"
else:
permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs")
if mode is None and npimg.dtype == np.uint8:
mode = "RGB"
if mode is None:
raise TypeError(f"Input type {npimg.dtype} is not supported")
return Image.fromarray(npimg, mode=mode)
def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
"""Normalize a float tensor image with mean and standard deviation.
This transform does not support PIL Image.
.. note::
This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(normalize)
if not isinstance(tensor, torch.Tensor):
raise TypeError(f"img should be Tensor Image. Got {type(tensor)}")
return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace)
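# Usage sketch (editor's addition): normalize() expects a float tensor and applies
# (x - mean) / std per channel; the mean/std values below are the common ImageNet
# statistics, used here purely as an example.
def _editor_example_normalize():
    img = torch.rand(3, 8, 8)
    return normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])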
def _compute_resized_output_size(
image_size: Tuple[int, int], size: List[int], max_size: Optional[int] = None
) -> List[int]:
if len(size) == 1: # specified size only for the smallest edge
h, w = image_size
short, long = (w, h) if w <= h else (h, w)
requested_new_short = size if isinstance(size, int) else size[0]
new_short, new_long = requested_new_short, int(requested_new_short * long / short)
if max_size is not None:
if max_size <= requested_new_short:
raise ValueError(
f"max_size = {max_size} must be strictly greater than the requested "
f"size for the smaller edge size = {size}"
)
if new_long > max_size:
new_short, new_long = int(max_size * new_short / new_long), max_size
new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
else: # specified both h and w
new_w, new_h = size[1], size[0]
return [new_h, new_w]
def resize(
img: Tensor,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Tensor:
r"""Resize the input image to the given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions
.. warning::
The output image might be different depending on its type: when downsampling, the interpolation of PIL images
and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
types. See also below the ``antialias`` parameter, which can help making the output of PIL images and tensors
closer.
Args:
img (PIL Image or Tensor): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaining
the aspect ratio. i.e, if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image. If the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``,
``size`` will be overruled so that the longer edge is equal to
``max_size``.
As a result, the smaller edge may be shorter than ``size``. This
is only supported if ``size`` is an int (or a sequence of length
1 in torchscript mode).
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True``: will apply antialiasing for bilinear or bicubic modes.
Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
PIL doesn't support no antialias.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The current default is ``None`` **but will change to** ``True`` **in
v0.17** for the PIL and Tensor backends to be consistent.
Returns:
PIL Image or Tensor: Resized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(resize)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if isinstance(size, (list, tuple)):
if len(size) not in [1, 2]:
raise ValueError(
f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list"
)
if max_size is not None and len(size)!= 1:
raise ValueError(
"max_size should only be passed if size specifies the length of the smaller edge, "
"i.e. size should be an int or a sequence of length 1 in torchscript mode."
)
_, image_height, image_width = get_dimensions(img)
if isinstance(size, int):
size = [size]
output_size = _compute_resized_output_size((image_height, image_width), size, max_size)
if [image_height, image_width] == output_size:
return img
antialias = _check_antialias(img, antialias, interpolation)
if not isinstance(img, torch.Tensor):
if antialias is False:
warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.resize(img, size=output_size, interpolation=pil_interpolation)
return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)
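# Usage sketch (editor's addition, shapes are illustrative): passing a single-element
# size matches the smaller edge and keeps the aspect ratio, as described above.
def _editor_example_resize():
    img = torch.rand(3, 240, 320)
    out = resize(img, [224], interpolation=InterpolationMode.BILINEAR, antialias=True)
    # out.shape == torch.Size([3, 224, 298])  # 320 * 224 / 240 rounds down to 298
    return out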
def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means at most 2 leading dimensions for mode reflect and symmetric,
at most 3 leading dimensions for mode edge,
and an arbitrary number of leading dimensions for mode constant
Args:
img (PIL Image or Tensor): Image to be padded.
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
fill (number or tuple): Pixel fill value for constant fill. Default is 0.
If a tuple of length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image or Tensor: Padded image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(pad)
if not isinstance(img, torch.Tensor):
return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
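# Usage sketch (editor's addition): this reproduces the "reflect" example from the
# docstring above on a 1x1x4 float tensor; padding [2, 0] pads left/right by 2.
def _editor_example_pad():
    img = torch.tensor([[[1.0, 2.0, 3.0, 4.0]]])
    out = pad(img, [2, 0], padding_mode="reflect")
    # out[0, 0] == tensor([3., 2., 1., 2., 3., 4., 3., 2.])
    return out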
def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
"""Crop the given image at specified location and output size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then cropped.
Args:
img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(crop)
if not isinstance(img, torch.Tensor):
return F_pil.crop(img, top, left, height, width)
return F_t.crop(img, top, left, height, width)
def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
it is used for both directions.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(center_crop)
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
output_size = (output_size[0], output_size[0])
_, image_height, image_width = get_dimensions(img)
crop_height, crop_width = output_size
if crop_width > image_width or crop_height > image_height:
padding_ltrb = [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
img = pad(img, padding_ltrb, fill=0) # PIL uses fill value 0
_, image_height, image_width = get_dimensions(img)
if crop_width == image_width and crop_height == image_height:
return img
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, crop_top, crop_left, crop_height, crop_width)
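# Usage sketch (editor's addition, sizes are illustrative): center-cropping a
# 10x10 image down to 4x4 keeps rows/columns 3 through 6 of the input.
def _editor_example_center_crop():
    img = torch.rand(3, 10, 10)
    out = center_crop(img, [4, 4])
    # out.shape == torch.Size([3, 4, 4])
    return out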
def resized_crop(
img: Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
antialias: Optional[Union[str, bool]] = "warn",
) -> Tensor:
"""Crop the given image and resize it to desired size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True``: will apply antialiasing for bilinear or bicubic modes.
Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
PIL doesn't support no antialias.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The current default is ``None`` **but will change to** ``True`` **in
v0.17** for the PIL and Tensor backends to be consistent.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(resized_crop)
img = crop(img, top, left, height, width)
img = resize(img, size, interpolation, antialias=antialias)
return img
def hflip(img: Tensor) -> Tensor:
"""Horizontally flip the given image.
Args:
img (PIL Image or Tensor): Image to be flipped. If img
is a Tensor, it is expected to be in [..., H, W] format,
where... means it can have an arbitrary number of leading
dimensions.
Returns:
PIL Image or Tensor: Horizontally flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(hflip)
if not isinstance(img, torch.Tensor):
return F_pil.hflip(img)
return F_t.hflip(img)
def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
"""Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.
In Perspective Transform each pixel (x, y) in the original image gets transformed as,
(x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )
Args:
startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
Returns:
octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
"""
a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)
for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution
output: List[float] = res.tolist()
return output
def perspective(
img: Tensor,
startpoints: List[List[int]],
endpoints: List[List[int]],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
fill: Optional[List[float]] = None,
) -> Tensor:
"""Perform perspective transform of the given image.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): Image to be transformed.
startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
.. note::
In torchscript mode single int/float value is not supported, please use a sequence
of length 1: ``[value, ]``.
Returns:
PIL Image or Tensor: transformed Image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(perspective)
coeffs = _get_perspective_coeffs(startpoints, endpoints)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if not isinstance(img, torch.Tensor):
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)
return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)
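# Usage sketch (editor's addition): a mild perspective warp; the start/end points
# follow the [top-left, top-right, bottom-right, bottom-left] convention documented
# above, and the coordinates are illustrative assumptions.
def _editor_example_perspective():
    img = torch.rand(3, 100, 100)
    startpoints = [[0, 0], [99, 0], [99, 99], [0, 99]]
    endpoints = [[5, 5], [94, 10], [94, 94], [10, 89]]
    return perspective(img, startpoints, endpoints)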
def vflip(img: Tensor) -> Tensor:
"""Vertically flip the given image.
Args:
img (PIL Image or Tensor): Image to be flipped. If img
is a Tensor, it is expected to be in [..., H, W] format,
where... means it can have an arbitrary number of leading
dimensions.
Returns:
PIL Image or Tensor: Vertically flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(vflip)
if not isinstance(img, torch.Tensor):
return F_pil.vflip(img)
return F_t.vflip(img)
def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Crop the given image into four corners and the central crop.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (PIL Image or Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(five_crop)
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
size = (size[0], size[0])
if len(size)!= 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
_, image_height, image_width = get_dimensions(img)
crop_height, crop_width = size
if crop_width > image_width or crop_height > image_height:
msg = "Requested crop size {} is bigger than input size {}"
raise ValueError(msg.format(size, (image_height, image_width)))
tl = crop(img, 0, 0, crop_height, crop_width)
tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
center = center_crop(img, [crop_height, crop_width])
return tl, tr, bl, br, center
def ten_crop(
img: Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Generate ten cropped images from the given image.
Crop the given image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (PIL Image or Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and
center crop and same for the flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(ten_crop)
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
size = (size[0], size[0])
if len(size)!= 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
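# Usage sketch (editor's addition): ten_crop() returns a tuple of ten crops, which
# is commonly stacked into a batch for test-time augmentation.
def _editor_example_ten_crop():
    img = torch.rand(3, 64, 64)
    crops = ten_crop(img, [32, 32])
    batch = torch.stack(crops)
    # batch.shape == torch.Size([10, 3, 32, 32])
    return batch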
def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
"""Adjust brightness of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
brightness_factor (float): How much to adjust the brightness. Can be
any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image or Tensor: Brightness adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_brightness)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_brightness(img, brightness_factor)
return F_t.adjust_brightness(img, brightness_factor)
def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
"""Adjust contrast of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
contrast_factor (float): How much to adjust the contrast. Can be any
non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image or Tensor: Contrast adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_contrast)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_contrast(img, contrast_factor)
return F_t.adjust_contrast(img, contrast_factor)
def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
"""Adjust color saturation of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image or Tensor: Saturation adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_saturation)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_saturation(img, saturation_factor)
return F_t.adjust_saturation(img, saturation_factor)
def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
Note: the pixel values of the input image has to be non-negative for conversion to HSV space;
thus it does not work if you normalize your image to an interval with negative values,
or use an interpolation that generates negative values before using this function.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image or Tensor: Hue adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_hue)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_hue(img, hue_factor)
return F_t.adjust_hue(img, hue_factor)
def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
r"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (PIL Image or Tensor): PIL Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, modes with transparency (alpha channel) are not supported.
gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
gamma larger than 1 makes the shadows darker,
while gamma smaller than 1 makes dark regions lighter.
gain (float): The constant multiplier.
Returns:
PIL Image or Tensor: Gamma correction adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_gamma)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_gamma(img, gamma, gain)
return F_t.adjust_gamma(img, gamma, gain)
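# Worked example (editor's addition) of the gamma equation above: with gain=1 and
# gamma=2, a mid-gray float value of 0.5 maps to 0.5 ** 2 == 0.25, i.e. shadows darken.
def _editor_example_adjust_gamma():
    img = torch.full((1, 2, 2), 0.5)
    out = adjust_gamma(img, gamma=2.0)
    # every element of out equals 0.25
    return out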
def _get_inverse_affine_matrix(
center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
# Helper method to compute inverse matrix for affine transformation
# Pillow requires inverse affine transformation matrix:
# Affine matrix is : M = T * C * RotateScaleShear * C^-1
#
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RotateScaleShear is rotation with scale and shear matrix
#
# RotateScaleShear(a, s, (sx, sy)) =
# = R(a) * S(s) * SHy(sy) * SHx(sx)
# = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
# [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
# [ 0 , 0 , 1 ]
# where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
# SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0]
# [0, 1 ] [-tan(s), 1]
#
# Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1
rot = math.radians(angle)
sx = math.radians(shear[0])
sy = math.radians(shear[1])
cx, cy = center
tx, ty = translate
# RSS without scaling
a = math.cos(rot - sy) / math.cos(sy)
b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
c = math.sin(rot - sy) / math.cos(sy)
d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
if inverted:
# Inverted rotation matrix with scale and shear
# det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
matrix = [d, -b, 0.0, -c, a, 0.0]
matrix = [x / scale for x in matrix]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += cx
matrix[5] += cy
else:
matrix = [a, b, 0.0, c, d, 0.0]
matrix = [x * scale for x in matrix]
# Apply inverse of center translation: RSS * C^-1
matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
# Apply translation and center : T * C * RSS * C^-1
matrix[2] += cx + tx
matrix[5] += cy + ty
return matrix
def rotate(
img: Tensor,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[int]] = None,
fill: Optional[List[float]] = None,
) -> Tensor:
"""Rotate the image by angle.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): image to be rotated.
angle (number): rotation angle value in degrees, counter-clockwise.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
Default is the center of the image.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
.. note::
In torchscript mode single int/float value is not supported, please use a sequence
of length 1: ``[value, ]``.
Returns:
PIL Image or Tensor: Rotated image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(rotate)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if center is not None and not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
if not isinstance(img, torch.Tensor):
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)
center_f = [0.0, 0.0]
if center is not None:
_, height, width = get_dimensions(img)
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]
# due to current incoherence of rotation angle direction between affine and rotate implementations
# we need to set -angle.
matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
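# Usage sketch (editor's addition, angle and size are illustrative): with
# expand=True the output canvas grows (roughly by sqrt(2) for a 45 degree rotation)
# so the whole rotated image fits.
def _editor_example_rotate():
    img = torch.rand(3, 50, 50)
    return rotate(img, angle=45.0, expand=True)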
def affine(
img: Tensor,
angle: float,
translate: List[int],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
center: Optional[List[int]] = None,
) -> Tensor:
"""Apply affine transformation on the image keeping image center invariant.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): image to transform.
angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
If a sequence is specified, the first value corresponds to a shear parallel to the x-axis, while
the second value corresponds to a shear parallel to the y-axis.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
.. note::
In torchscript mode single int/float value is not supported, please use a sequence
of length 1: ``[value, ]``.
center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
Default is the center of the image.
Returns:
PIL Image or Tensor: Transformed image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(affine)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if not isinstance(translate, (list, tuple)):
raise TypeError("Argument translate should be a sequence")
if len(translate)!= 2:
raise ValueError("Argument translate should be a sequence of length 2")
if scale <= 0.0:
raise ValueError("Argument scale should be positive")
if not isinstance(shear, (numbers.Number, (list, tuple))):
raise TypeError("Shear should be either a single value or a sequence of two values")
if isinstance(angle, int):
angle = float(angle)
if isinstance(translate, tuple):
translate = list(translate)
if isinstance(shear, numbers.Number):
shear = [shear, 0.0]
if isinstance(shear, tuple):
shear = list(shear)
if len(shear) == 1:
shear = [shear[0], shear[0]]
if len(shear)!= 2:
raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")
if center is not None and not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
_, height, width = get_dimensions(img)
if not isinstance(img, torch.Tensor):
# center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
# it is visually better to estimate the center without 0.5 offset
# otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
if center is None:
center = [width * 0.5, height * 0.5]
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)
center_f = [0.0, 0.0]
if center is not None:
_, height, width = get_dimensions(img)
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]
translate_f = [1.0 * t for t in translate]
matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
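# Usage sketch (editor's addition, parameter values are illustrative): an affine
# transform combining a small rotation, a horizontal translation and a uniform
# scale, with the image center kept invariant by default.
def _editor_example_affine():
    img = torch.rand(3, 64, 64)
    return affine(img, angle=10.0, translate=[5, 0], scale=1.2, shear=[0.0])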
# Looks like to_grayscale() is a stand-alone functional that is never called
# from the transform classes. Perhaps it's still here for BC? I can't be
# bothered to dig.
@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
"""Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
This transform does not support torch Tensor.
Args:
img (PIL Image): PIL Image to be converted to grayscale.
num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.
Returns:
PIL Image: Grayscale version of the image.
- if num_output_channels = 1 : returned image is single channel
- if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_grayscale)
if isinstance(img, Image.Image):
return F_pil.to_grayscale(img, num_output_channels)
raise TypeError("Input should be PIL Image")
def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
"""Convert RGB image to grayscale version of image.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where... means an arbitrary number of leading dimensions
Note:
Please, note that this method supports only RGB images as input. For inputs in other color spaces,
please, consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.
Args:
img (PIL Image or Tensor): RGB Image to be converted to grayscale.
num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.
Returns:
PIL Image or Tensor: Grayscale version of the image.
- if num_output_channels = 1 : returned image is single channel
- if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(rgb_to_grayscale)
if not isinstance(img, torch.Tensor):
return F_pil.to_grayscale(img, num_output_channels)
return F_t.rgb_to_grayscale(img, num_output_channels)
def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
"""Erase the input Tensor Image with given value.
This transform does not support PIL Image.
Args:
img (Tensor Image): Tensor image of size (C, H, W) to be erased
i (int): i in (i,j) i.e coordinates of the upper left corner.
j (int): j in (i,j) i.e coordinates of the upper left corner.
h (int): Height of the erased region.
w (int): Width of the erased region.
v: Erasing value.
inplace(bool, optional): For in-place operations. By default, is set False.
Returns:
Tensor Image: Erased image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(erase)
if not isinstance(img, torch.Tensor):
raise TypeError(f"img should be Tensor Image. Got {type(img)}")
return F_t.erase(img, i, j, h, w, v, inplace=inplace)
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
"""Performs Gaussian blurring on the image by given kernel.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): Image to be blurred
kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
like ``(kx, ky)`` or a single integer for square kernels.
.. note::
In torchscript mode kernel_size as single int is not supported, use a sequence of
length 1: ``[ksize, ]``.
sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
same sigma in both X/Y directions. If None, then it is computed using
``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
Default, None.
.. note::
In torchscript mode sigma as single float is
not supported, use a sequence of length 1: ``[sigma, ]``.
Returns:
PIL Image or Tensor: Gaussian Blurred version of the image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(gaussian_blur)
if not isinstance(kernel_size, (int, list, tuple)):
raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if len(kernel_size)!= 2:
raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
for ksize in kernel_size:
if ksize % 2 == 0 or ksize < 0:
raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")
if sigma is None:
sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
if isinstance(sigma, (int, float)):
sigma = [float(sigma), float(sigma)]
if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
sigma = [sigma[0], sigma[0]]
if len(sigma)!= 2:
raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
for s in sigma:
if s <= 0.0:
raise ValueError(f"sigma should have positive values. Got {sigma}")
t_img = img
if not isinstance(img, torch.Tensor):
if not F_pil._is_pil_image(img):
raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
t_img = pil_to_tensor(img)
output = F_t.gaussian_blur(t_img, kernel_size, sigma)
if not isinstance(img, torch.Tensor):
output = to_pil_image(output, mode=img.mode)
return output
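# Usage sketch (editor's addition): blurring with a 5x5 kernel; sigma is omitted so
# it is derived from the kernel size using the formula quoted in the docstring above.
def _editor_example_gaussian_blur():
    img = torch.rand(1, 3, 32, 32)
    return gaussian_blur(img, kernel_size=[5, 5])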
def invert(img: Tensor) -> Tensor:
"""Invert the colors of an RGB/grayscale image.
Args:
img (PIL Image or Tensor): Image to have its colors inverted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Returns:
PIL Image or Tensor: Color inverted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(invert)
if not isinstance(img, torch.Tensor):
return F_pil.invert(img)
return F_t.invert(img)
def posterize(img: Tensor, bits: int) -> Tensor:
"""Posterize an image by reducing the number of bits for each color channel.
Args:
img (PIL Image or Tensor): Image to have its colors posterized.
If img is torch Tensor, it should be of type torch.uint8, and
it is expected to be in [..., 1 or 3, H, W] format, where... means
it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
bits (int): The number of bits to keep for each channel (0-8).
Returns:
PIL Image or Tensor: Posterized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(posterize)
if not (0 <= bits <= 8):
raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}")
if not isinstance(img, torch.Tensor):
return F_pil.posterize(img, bits)
return F_t.posterize(img, bits)
def solarize(img: Tensor, threshold: float) -> Tensor:
"""Solarize an RGB/grayscale image by inverting all pixel values above a threshold.
Args:
img (PIL Image or Tensor): Image to have its colors inverted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
threshold (float): All pixels equal or above this value are inverted.
Returns:
PIL Image or Tensor: Solarized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(solarize)
if not isinstance(img, torch.Tensor):
return F_pil.solarize(img, threshold)
return F_t.solarize(img, threshold)
def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
"""Adjust the sharpness of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
sharpness_factor (float): How much to adjust the sharpness. Can be
any non-negative number. 0 gives a blurred image, 1 gives the
original image while 2 increases the sharpness by a factor of 2.
Returns:
PIL Image or Tensor: Sharpness adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_sharpness)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_sharpness(img, sharpness_factor)
return F_t.adjust_sharpness(img, sharpness_factor)
def autocontrast(img: Tensor) -> Tensor:
"""Maximize contrast of an image by remapping its
pixels per channel so that the lowest becomes black and the lightest
becomes white.
Args:
img (PIL Image or Tensor): Image on which autocontrast is applied.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Returns:
PIL Image or Tensor: An image that was autocontrasted.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(autocontrast)
if not isinstance(img, torch.Tensor):
return F_pil.autocontrast(img)
return F_t.autocontrast(img)
def equalize(img: Tensor) -> Tensor:
"""Equalize the histogram of an image by applying
a non-linear mapping to the input in order to create a uniform
distribution of grayscale values in the output.
Args:
img (PIL Image or Tensor): Image on which equalize is applied.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
Returns:
PIL Image or Tensor: An image that was equalized.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(equalize)
if not isinstance(img, torch.Tensor):
return F_pil.equalize(img)
return F_t.equalize(img)
def elastic_transform(
img: Tensor,
displacement: Tensor,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
fill: Optional[List[float]] = None,
) -> Tensor:
"""Transform a tensor image with elastic transformations.
Given alpha and sigma, it will generate displacement
vectors for all pixels based on random offsets. Alpha controls the strength
and sigma controls the smoothness of the displacements.
The displacements are added to an identity grid and the resulting grid is
used to grid_sample from the image.
Applications:
Randomly transforms the morphology of objects in images and produces a
see-through-water-like effect.
Args:
img (PIL Image or Tensor): Image on which elastic_transform is applied.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
displacement (Tensor): The displacement field. Expected shape is [1, H, W, 2].
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
If a tuple of length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(elastic_transform)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
if not isinstance(displacement, torch.Tensor):
raise TypeError("Argument displacement should be a Tensor")
t_img = img
if not isinstance(img, torch.Tensor):
if not F_pil._is_pil_image(img):
raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
t_img = pil_to_tensor(img)
shape = t_img.shape
shape = (1,) + shape[-2:] + (2,)
if shape!= displacement.shape:
raise ValueError(f"Argument displacement shape should be {shape}, but given {displacement.shape}")
# TODO: if image shape is [N1, N2,..., C, H, W] and
# displacement is [1, H, W, 2] we need to reshape input image
# such grid_sampler takes internal code for 4D input
output = F_t.elastic_transform(
t_img,
displacement,
interpolation=interpolation.value,
fill=fill,
)
if not isinstance(img, torch.Tensor):
output = to_pil_image(output, mode=img.mode)
return output
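# Usage sketch (editor's addition): the displacement field must have the
# [1, H, W, 2] shape checked above; scaling random noise keeps the distortion mild.
# The image size and noise scale are illustrative assumptions.
def _editor_example_elastic_transform():
    img = torch.rand(3, 32, 32)
    displacement = torch.randn(1, 32, 32, 2) * 0.05
    return elastic_transform(img, displacement)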
# TODO in v0.17: remove this helper and change default of antialias to True everywhere
def _check_antialias(
img: Tensor, antialias: Optional[Union[str, bool]], interpolation: InterpolationMode
) -> Optional[bool]:
if isinstance(antialias, str): # it should be "warn", but we don't bother checking against that
if isinstance(img, Tensor) and (
interpolation == InterpolationMode.BILINEAR or interpolation == InterpolationMode.BICUBIC
):
warnings.warn(
"The default value of the antialias parameter of all the resizing transforms "
"(Resize(), RandomResizedCrop(), etc.) "
"will change from None to True in v0.17, "
"in order to be consistent across the PIL and Tensor backends. "
"To suppress this warning, directly pass "
"antialias=True (recommended, future default), antialias=None (current default, "
"which means False for Tensors and True for PIL), "
"or antialias=False (only works on Tensors - PIL will still use antialiasing). "
"This also applies if you are using the inference transforms from the models weights: "
"update the call to weights.transforms(antialias=True)."
)
antialias = None
return antialias |
pytables__pytables | filenode.rst | Module doc / Tutorial | Generate documentation for this module | BSD 3-Clause New or Revised License | pytables__pytables/doc/source/usersguide/filenode.rst | [
"pytables__pytables/tables/nodes/filenode.py"
] | filenode - simulating a filesystem with PyTables
What is filenode?
filenode is a module which enables you to create a PyTables database of
nodes which can be used like regular opened files in Python. In other
words, you can store a file in a PyTables database, and read and write
it as you would do with any other file in Python. Used in conjunction
with PyTables hierarchical database organization, you can have your
database turned into an open, extensible, efficient, high capacity,
portable and metadata-rich filesystem for data exchange with other
systems (including backup purposes).
Between the main features of filenode, one can list:
- Open: Since it relies on PyTables, which in turn, sits over HDF5
(see [HDFG1] <HDFG1>), a standard hierarchical data format from
NCSA.
- Extensible: You can define new types of nodes, and their instances
will be safely preserved (as are normal groups, leaves and
attributes) by PyTables applications having no knowledge of their
types. Moreover, the set of possible attributes for a node is not
fixed, so you can define your own node attributes.
- Efficient: Thanks to PyTables' proven extreme efficiency in handling
huge amounts of data, filenode can make use of PyTables' on-the-fly
compression and decompression of data.
- High capacity: Since PyTables and HDF5 are designed for massive data
storage (they use 64-bit addressing even where the platform does not
support it natively).
- Portable: Since the HDF5 format has an architecture-neutral design,
and the HDF5 libraries and PyTables are known to run under a variety
of platforms. Besides that, a PyTables database fits into a single
file, which poses no trouble for transportation.
- Metadata-rich: Since PyTables can store arbitrary key-value pairs
(even Python objects!) for every database node. Metadata may include
authorship, keywords, MIME types and encodings, ownership
information, access control lists (ACL), decoding functions and
anything you can imagine!
Finding a filenode node
filenode nodes can be recognized because they have a NODE_TYPE system
attribute with a 'file' value. It is recommended that you use the
File.get_node_attr method of the tables.File class to get the NODE_TYPE
attribute independently of the nature (group or leaf) of the node, so
you do not need to care about it.
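For instance, assuming an already opened PyTables file h5file that contains a
file node at /fnode_test (such as the one created later in this tutorial), the
check could look like this:
>>> h5file.get_node_attr('/fnode_test', 'NODE_TYPE') == filenode.NodeType
True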
filenode - simulating files inside PyTables
The filenode module is part of the nodes sub-package of PyTables. The
recommended way to import the module is:
>>> from tables.nodes import filenode
However, filenode exports very few symbols, so you can import * for
interactive usage. In fact, you will most probably only use the NodeType
constant and the new_node() and open_node() calls.
The NodeType constant contains the value that the NODE_TYPE system
attribute of a node file is expected to contain ('file', as we have
seen). Although this is not expected to change, you should use
filenode.NodeType instead of the literal 'file' when possible.
new_node() and open_node() are the equivalent of the Python file() call
(alias open()) for ordinary files. Their arguments differ from those of
file(), but this is the only point where you will note the difference
between working with a node file and working with an ordinary file.
For this little tutorial, we will assume that we have a PyTables
database opened for writing. Also, if you are somewhat lazy at typing
sentences, the code that we are going to explain is included in the
examples/filenodes1.py file.
You can create a brand new file with these sentences:
>>> import tables
>>> h5file = tables.open_file('fnode.h5', 'w')
Creating a new file node
Creation of a new file node is achieved with the new_node() call. You
must tell it in which PyTables file you want to create it, where in the
PyTables hierarchy you want to create the node and which will be its
name. The PyTables file is the first argument to new_node(); it will be
also called the 'host PyTables file'. The other two arguments must be
given as keyword arguments where and name, respectively. As a result of
the call, a brand new appendable and readable file node object is
returned.
So let us create a new node file in the previously opened h5file
PyTables file, named 'fnode_test' and placed right under the root of the
database hierarchy. This is that command:
>>> fnode = filenode.new_node(h5file, where='/', name='fnode_test')
That is basically all you need to create a file node. Simple, isn't it?
From that point on, you can use fnode as any opened Python file (i.e.
you can write data, read data, lines of text and so on).
new_node() accepts some more keyword arguments. You can give a title to
your file with the title argument. You can use PyTables' compression
features with the filters argument. If you know beforehand the size that
your file will have, you can give its final file size in bytes to the
expectedsize argument so that the PyTables library will be able to
optimize the data access.
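For instance, a hypothetical compressed node with an expected size of about one
megabyte could be created like this (the node name and filter settings below
are purely illustrative):
>>> zlib_filters = tables.Filters(complevel=5, complib='zlib')
>>> fnode2 = filenode.new_node(h5file, where='/', name='fnode_zlib',
...                            title='A compressed file node',
...                            filters=zlib_filters, expectedsize=1000000)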
new_node() creates a PyTables node where it is told to. To prove it, we
will try to get the NODE_TYPE attribute from the newly created node:
>>> print(h5file.get_node_attr('/fnode_test', 'NODE_TYPE'))
file
Using a file node
As stated above, you can use the new node file as any other opened file.
Let us try to write some text into it and read it back:
>>> print("This is a test text line.", file=fnode)
>>> print("And this is another one.", file=fnode)
>>> print(file=fnode)
>>> fnode.write("Of course, file methods can also be used.")
>>>
>>> fnode.seek(0) # Go back to the beginning of file.
>>>
>>> for line in fnode:
... print(repr(line))
'This is a test text line.\\n'
'And this is another one.\\n'
'\\n'
'Of course, file methods can also be used.'
This was run on a Unix system, so newlines are expressed as '\n'. In
fact, you can override the line separator for a file by setting its
line_separator property to any string you want.
While using a file node, you should take care of closing it before you
close the PyTables host file. Because of the way PyTables works, your
data will not be at risk, but every operation you execute after
closing the host file will fail with a ValueError. To close a file node,
simply delete it or call its close() method:
>>> fnode.close()
>>> print(fnode.closed)
True
Opening an existing file node
If you have a file node that you created using new_node(), you can open
it later by calling open_node(). Its arguments are similar to those of
file() or open(): the first argument is the PyTables node that you want
to open (i.e. a node with a NODE_TYPE attribute having a 'file' value),
and the second argument is a mode string indicating how to open the
file. Contrary to file(), open_node() can not be used to create a new
file node.
File nodes can be opened in read-only mode ('r') or in read-and-append
mode ('a+'). Reading from a file node is allowed in both modes, but
appending is only allowed in the second one. Just like Python files do,
writing data to an appendable file places it after the file pointer if
it is on or beyond the end of the file, or otherwise after the existing
data. Let us see an example:
>>> node = h5file.root.fnode_test
>>> fnode = filenode.open_node(node, 'a+')
>>> print(repr(fnode.readline()))
'This is a test text line.\\n'
>>> print(fnode.tell())
26
>>> print("This is a new line.", file=fnode)
>>> print(repr(fnode.readline()))
''
Of course, the data append process places the pointer at the end of the
file, so the last readline() call hit EOF. Let us seek to the beginning
of the file to see the whole contents of our file:
>>> fnode.seek(0)
>>> for line in fnode:
... print(repr(line))
'This is a test text line.\\n'
'And this is another one.\\n'
'\\n'
'Of course, file methods can also be used.This is a new line.\\n'
As you can check, the last string we wrote was correctly appended at the
end of the file, instead of overwriting the second line, where the file
pointer was positioned by the time of the appending.
Adding metadata to a file node
You can associate arbitrary metadata to any open node file, regardless
of its mode, as long as the host PyTables file is writable. Of course,
you could use the set_node_attr() method of tables.File to do it
directly on the proper node, but filenode offers a much more comfortable
way to do it. filenode objects have an attrs property which gives you
direct access to their corresponding AttributeSet object.
For instance, let us see how to associate MIME type metadata to our file
node:
>>> fnode.attrs.content_type = 'text/plain; charset=us-ascii'
As simple as A-B-C. You can put nearly anything in an attribute, which
opens the way to authorship, keywords, permissions and more. Moreover,
there is not a fixed list of attributes. However, you should avoid names
in all caps or starting with '_', since PyTables and filenode may use
them internally. Some valid examples:
>>> fnode.attrs.author = "Ivan Vilata i Balaguer"
>>> fnode.attrs.creation_date = '2004-10-20T13:25:25+0200'
>>> fnode.attrs.keywords_en = ["FileNode", "test", "metadata"]
>>> fnode.attrs.keywords_ca = ["FileNode", "prova", "metadades"]
>>> fnode.attrs.owner = 'ivan'
>>> fnode.attrs.acl = {'ivan': 'rw', '@users': 'r'}
You can check that these attributes get stored by running the ptdump
command on the host PyTables file.
$ ptdump -a fnode.h5:/fnode_test
/fnode_test (EArray(113,)) ''
/fnode_test.attrs (AttributeSet), 14 attributes:
[CLASS := 'EARRAY',
EXTDIM := 0,
FLAVOR := 'numpy',
NODE_TYPE := 'file',
NODE_TYPE_VERSION := 2,
TITLE := '',
VERSION := '1.2',
acl := {'ivan': 'rw', '@users': 'r'},
author := 'Ivan Vilata i Balaguer',
content_type := 'text/plain; charset=us-ascii',
creation_date := '2004-10-20T13:25:25+0200',
keywords_ca := ['FileNode', 'prova', 'metadades'],
keywords_en := ['FileNode', 'test', 'metadata'],
owner := 'ivan']
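Reading the metadata back from Python is just as direct; for instance:
>>> print(fnode.attrs.content_type)
text/plain; charset=us-ascii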
Note that filenode makes no assumptions about the meaning of your
metadata, so its handling is entirely left to your needs and
imagination.
Complementary notes
You can use file nodes and PyTables groups to mimic a filesystem with
files and directories. Since you can store nearly anything you want as
file metadata, this enables you to use a PyTables file as a portable
compressed backup, even between radically different platforms. Take this
with a grain of salt, since node files are restricted in their naming
(only valid Python identifiers are allowed); however, remember that you
can use node titles and metadata to overcome this limitation. Also, you
may need to devise some strategy to represent special files such as
devices, sockets and such (not necessarily using filenode).
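As a rough sketch of this idea (the group, node and attribute names below are
purely illustrative, and h5file is assumed to still be open for writing):
>>> docs = h5file.create_group('/', 'documents', 'A directory-like group')
>>> readme = filenode.new_node(h5file, where=docs, name='readme_txt')
>>> readme.attrs.original_name = 'README.txt'
>>> readme.write(b'Hello from a PyTables-backed filesystem!')
>>> readme.close()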
We are eager to hear your opinion about filenode and its potential uses.
Suggestions to improve filenode and create other node types are also
welcome. Do not hesitate to contact us!
Current limitations
filenode is still a young piece of software, so it lacks some
functionality. This is a list of known current limitations:
1. Node files can only be opened for read-only or read and append mode.
This should be enhanced in the future.
2. Near future?
3. Only binary I/O is supported currently (read/write strings of bytes)
4. There is no universal newline support yet. The only new-line
character used at the moment is \n. This is likely to be improved in
a near future.
5. Sparse files (files with lots of zeros) are not treated specially;
if you want them to take less space, you will be better off using
compression.
These limitations still make filenode entirely adequate to work with
most binary and text files. Of course, suggestions and patches are
welcome.
See filenode_classes for detailed documentation on the filenode
interface.
| """A file interface to nodes for PyTables databases.
The FileNode module provides a file interface for using inside of
PyTables database files. Use the new_node() function to create a brand
new file node which can be read and written as any ordinary Python
file. Use the open_node() function to open an existing (i.e. created
with new_node()) node for read-only or read-write access. Read access
is always available. Write access (enabled on new files and files
opened with mode 'a+') only allows appending data to a file node.
Currently only binary I/O is supported.
See :ref:`filenode_usersguide` for instructions on use.
.. versionchanged:: 3.0
In version 3.0 the module has been completely rewritten to be fully
compliant with the interfaces defined in the :mod:`io` module.
"""
import io
import os
import re
import warnings
from pathlib import Path
import numpy as np
import tables as tb
NodeType = 'file'
"""Value for NODE_TYPE node system attribute."""
NodeTypeVersions = [1, 2]
"""Supported values for NODE_TYPE_VERSION node system attribute."""
class RawPyTablesIO(io.RawIOBase):
"""Base class for raw binary I/O on HDF5 files using PyTables."""
# A lambda to turn a size into a shape, for each version.
_size_to_shape = [
None,
lambda l: (l, 1),
lambda l: (l, ),
]
def __init__(self, node, mode=None):
super().__init__()
self._check_node(node)
self._check_attributes(node)
if mode is None:
mode = node._v_file.mode
else:
self._check_mode(mode)
self._cross_check_mode(mode, node._v_file.mode)
self._node = node
self._mode = mode
self._pos = 0
self._version = int(node.attrs.NODE_TYPE_VERSION)
self._vshape = self._size_to_shape[self._version]
self._vtype = node.atom.dtype.base.type
# read only attribute
@property
def mode(self):
"""File mode."""
return self._mode
# def tell(self) -> int:
def tell(self):
"""Return current stream position."""
self._checkClosed()
return self._pos
# def seek(self, pos: int, whence: int = 0) -> int:
def seek(self, pos, whence=0):
"""Change stream position.
Change the stream position to byte offset offset. offset is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
self._checkClosed()
try:
pos = pos.__index__()
# except AttributeError as err:
# raise TypeError("an integer is required") from err
except AttributeError:
raise TypeError("an integer is required")
if whence == 0:
if pos < 0:
raise ValueError(f"negative seek position {pos!r}")
self._pos = pos
elif whence == 1:
self._pos = max(0, self._pos + pos)
elif whence == 2:
self._pos = max(0, self._node.nrows + pos)
else:
raise ValueError("invalid whence value")
return self._pos
# def seekable(self) -> bool:
def seekable(self):
"""Return whether object supports random access.
If False, seek(), tell() and truncate() will raise IOError. This
method may need to do a test seek().
"""
return True
# def fileno(self) -> int:
def fileno(self):
"""Returns underlying file descriptor if one exists.
An IOError is raised if the IO object does not use a file
descriptor.
"""
self._checkClosed()
return self._node._v_file.fileno()
# def close(self) -> None:
def close(self):
"""Flush and close the IO object.
This method has no effect if the file is already closed.
"""
if not self.closed:
if getattr(self._node, '_v_file', None) is None:
warnings.warn("host PyTables file is already closed!")
try:
super().close()
finally:
# Release node object to allow closing the file.
self._node = None
def flush(self):
"""Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
self._checkClosed()
self._node.flush()
# def truncate(self, pos: int = None) -> int:
def truncate(self, pos=None):
"""Truncate file to size bytes.
Size defaults to the current IO position as reported by tell().
Return the new size.
Currently, this method only makes sense to grow the file node,
since data can not be rewritten nor deleted.
"""
self._checkClosed()
self._checkWritable()
if pos is None:
pos = self._pos
elif pos < 0:
raise ValueError(f"negative truncate position {pos!r}")
if pos < self._node.nrows:
raise OSError("truncating is only allowed for growing a file")
self._append_zeros(pos - self._node.nrows)
return self.seek(pos)
# def readable(self) -> bool:
def readable(self):
"""Return whether object was opened for reading.
If False, read() will raise IOError.
"""
mode = self._mode
return 'r' in mode or '+' in mode
# def writable(self) -> bool:
def writable(self):
"""Return whether object was opened for writing.
If False, write() and truncate() will raise IOError.
"""
mode = self._mode
return 'w' in mode or 'a' in mode or '+' in mode
# def readinto(self, b: bytearray) -> int:
def readinto(self, b):
"""Read up to len(b) bytes into b.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block and has no data to read.
"""
self._checkClosed()
self._checkReadable()
if self._pos >= self._node.nrows:
return 0
n = len(b)
start = self._pos
stop = self._pos + n
# XXX optimized path
# if stop <= self._node.nrows and isinstance(b, np.ndarray):
# self._node.read(start, stop, out=b)
# self._pos += n
# return n
if stop > self._node.nrows:
stop = self._node.nrows
n = stop - start
# XXX This ought to work with anything that supports the buffer API
b[:n] = self._node.read(start, stop).tobytes()
self._pos += n
return n
# def readline(self, limit: int = -1) -> bytes:
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
The line terminator is always ``\\n`` for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
self._checkClosed()
self._checkReadable()
chunksize = self._node.chunkshape[0] if self._node.chunkshape else -1
# XXX: check
lsep = b'\n'
lseplen = len(lsep)
# Set the remaining bytes to read to the specified size.
remsize = limit
partial = []
finished = False
while not finished:
# Read a string limited by the remaining number of bytes.
if limit <= 0:
ibuff = self.read(chunksize)
else:
ibuff = self.read(min(remsize, chunksize))
ibufflen = len(ibuff)
remsize -= ibufflen
if ibufflen >= lseplen:
# Separator fits, look for EOL string.
eolindex = ibuff.find(lsep)
elif ibufflen == 0:
# EOF was immediately reached.
finished = True
continue
else: # ibufflen < lseplen
# EOF was hit and separator does not fit. ;)
partial.append(ibuff)
finished = True
continue
if eolindex >= 0:
# Found an EOL. If there are trailing characters,
# cut the input buffer and seek back;
# else add the whole input buffer.
trailing = ibufflen - lseplen - eolindex # Bytes beyond EOL.
if trailing > 0:
obuff = ibuff[:-trailing]
self.seek(-trailing, 1)
remsize += trailing
else:
obuff = ibuff
finished = True
elif lseplen > 1 and (limit <= 0 or remsize > 0):
# Seek back a little since the end of the read string
# may have fallen in the middle of the line separator.
obuff = ibuff[:-lseplen + 1]
self.seek(-lseplen + 1, 1)
remsize += lseplen - 1
else: # eolindex<0 and (lseplen<=1 or (limit>0 and remsize<=0))
# Did not find an EOL, add the whole input buffer.
obuff = ibuff
# Append (maybe cut) buffer.
partial.append(obuff)
# If a limit has been specified and the remaining count
# reaches zero, the reading is finished.
if limit > 0 and remsize <= 0:
finished = True
return b''.join(partial)
# def write(self, b: bytes) -> int:
def write(self, b):
"""Write the given buffer to the IO stream.
Returns the number of bytes written, which may be less than
len(b).
"""
self._checkClosed()
self._checkWritable()
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
n = len(b)
if n == 0:
return 0
pos = self._pos
# Is the pointer beyond the real end of data?
end2off = pos - self._node.nrows
if end2off > 0:
# Zero-fill the gap between the end of data and the pointer.
self._append_zeros(end2off)
# Append data.
self._node.append(
np.ndarray(buffer=b, dtype=self._vtype, shape=self._vshape(n)))
self._pos += n
return n
def _checkClosed(self):
"""Checks if file node is open.
Checks whether the file node is open or has been closed. In the
second case, a ValueError is raised. If the host PyTables has
been closed, ValueError is also raised.
"""
super()._checkClosed()
if getattr(self._node, '_v_file', None) is None:
raise ValueError("host PyTables file is already closed!")
def _check_node(self, node):
if not isinstance(node, tb.EArray):
raise TypeError('the "node" parameter should be a tables.EArray')
if not isinstance(node.atom, tb.UInt8Atom):
raise TypeError('only nodes with atom "UInt8Atom" are allowed')
def _check_mode(self, mode):
if not isinstance(mode, str):
raise TypeError("invalid mode: %r" % mode)
modes = set(mode)
if modes - set("arwb+tU") or len(mode) > len(modes):
raise ValueError("invalid mode: %r" % mode)
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
# updating = "+" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending:
raise ValueError("can't use U and writing mode at once")
reading = True
if text and binary:
raise ValueError("can't have text and binary mode at once")
if reading + writing + appending > 1:
raise ValueError("can't have read/write/append mode at once")
if not (reading or writing or appending):
raise ValueError("must have exactly one of read/write/append mode")
def _cross_check_mode(self, mode, h5filemode):
# XXX: check
# readable = bool('r' in mode or '+' in mode)
# h5readable = bool('r' in h5filemode or '+' in h5filemode)
#
# if readable and not h5readable:
# raise ValueError("RawPyTablesIO can't be open in read mode if "
# "the underlying hdf5 file is not readable")
writable = bool('w' in mode or 'a' in mode or '+' in mode)
h5writable = bool('w' in h5filemode or 'a' in h5filemode or
'+' in h5filemode)
if writable and not h5writable:
raise ValueError("RawPyTablesIO can't be open in write mode if "
"the underlying hdf5 file is not writable")
def _check_attributes(self, node):
"""Checks file node-specific attributes.
Checks for the presence and validity
of the system attributes 'NODE_TYPE' and 'NODE_TYPE_VERSION'
in the specified PyTables node (leaf).
ValueError is raised if an attribute is missing or incorrect.
"""
attrs = node.attrs
ltype = getattr(attrs, 'NODE_TYPE', None)
ltypever = getattr(attrs, 'NODE_TYPE_VERSION', None)
if ltype != NodeType:
raise ValueError(f"invalid type of node object: {ltype}")
if ltypever not in NodeTypeVersions:
raise ValueError(
f"unsupported type version of node object: {ltypever}")
def _append_zeros(self, size):
"""_append_zeros(size) -> None. Appends a string of zeros.
Appends a string of 'size' zeros to the array,
without moving the file pointer.
"""
# Appending an empty array would raise an error.
if size == 0:
return
# XXX This may be redone to avoid a potentially large in-memory array.
self._node.append(
np.zeros(dtype=self._vtype, shape=self._vshape(size)))
class FileNodeMixin:
"""Mixin class for FileNode objects.
It provides access to the attribute set of the node that becomes
available via the attrs property. You can add attributes there, but
try to avoid attribute names in all caps or starting with '_', since
they may clash with internal attributes.
"""
# The attribute set property methods.
def _get_attrs(self):
"""Returns the attribute set of the file node."""
# self._checkClosed()
return self._node.attrs
def _set_attrs(self, value):
"""set_attrs(string) -> None. Raises ValueError."""
raise ValueError("changing the whole attribute set is not allowed")
def _del_attrs(self):
"""del_attrs() -> None. Raises ValueError."""
raise ValueError("deleting the whole attribute set is not allowed")
# The attribute set property.
attrs = property(
_get_attrs, _set_attrs, _del_attrs,
"A property pointing to the attribute set of the file node.")
class ROFileNode(FileNodeMixin, RawPyTablesIO):
"""Creates a new read-only file node.
Creates a new read-only file node associated with the specified
PyTables node, providing a standard Python file interface to it.
The node has to have been created on a previous occasion
using the new_node() function.
The node used as storage is also made available via the read-only
attribute node. Please do not tamper with this object if it's
avoidable, since you may break the operation of the file node object.
The constructor is not intended to be used directly.
Use the open_node() function in read-only mode ('r') instead.
:Version 1:
implements the file storage as a UInt8 uni-dimensional EArray.
:Version 2:
uses an UInt8 N vector EArray.
.. versionchanged:: 3.0
The offset attribute is no more available, please use seek/tell
methods instead.
.. versionchanged:: 3.0
The line_separator property is no more available.
The only line separator used for binary I/O is ``\\n``.
"""
def __init__(self, node):
RawPyTablesIO.__init__(self, node, 'r')
self._checkReadable()
@property
def node(self):
return self._node
class RAFileNode(FileNodeMixin, RawPyTablesIO):
"""Creates a new read-write file node.
The first syntax opens the specified PyTables node, while the
second one creates a new node in the specified PyTables file.
In the second case, additional named arguments 'where' and 'name'
must be passed to specify where the file node is to be created.
Other named arguments such as 'title' and 'filters' may also be
passed. The special named argument 'expectedsize', indicating an
estimate of the file size in bytes, may also be passed.
Write access means reading as well as appending data is allowed.
The node used as storage is also made available via the read-only
attribute node. Please do not tamper with this object if it's
avoidable, since you may break the operation of the file node object.
The constructor is not intended to be used directly.
Use the new_node() or open_node() functions instead.
:Version 1:
implements the file storage as a UInt8 uni-dimensional EArray.
:Version 2:
uses an UInt8 N vector EArray.
.. versionchanged:: 3.0
The offset attribute is no more available, please use seek/tell
methods instead.
.. versionchanged:: 3.0
The line_separator property is no more available.
The only line separator used for binary I/O is ``\\n``.
"""
# The atom representing a byte in the array, for each version.
_byte_shape = [
None,
(0, 1),
(0,),
]
__allowed_init_kwargs = [
'where', 'name', 'title', 'filters', 'expectedsize']
def __init__(self, node, h5file, **kwargs):
if node is not None:
# Open an existing node and get its version.
self._check_attributes(node)
self._version = node.attrs.NODE_TYPE_VERSION
elif h5file is not None:
# Check for allowed keyword arguments,
# to avoid unwanted arguments falling through to array constructor.
for kwarg in kwargs:
if kwarg not in self.__allowed_init_kwargs:
raise TypeError(
"%s keyword argument is not allowed" % repr(kwarg))
# Turn 'expectedsize' into 'expectedrows'.
if 'expectedsize' in kwargs:
# These match since one byte is stored per row.
expectedrows = kwargs['expectedsize']
kwargs = kwargs.copy()
del kwargs['expectedsize']
kwargs['expectedrows'] = expectedrows
# Create a new array in the specified PyTables file.
self._version = NodeTypeVersions[-1]
shape = self._byte_shape[self._version]
node = h5file.create_earray(
atom=tb.UInt8Atom(), shape=shape, **kwargs)
# Set the node attributes, else remove the array itself.
try:
self._set_attributes(node)
except RuntimeError:
h5file.remove_node(kwargs['where'], kwargs['name'])
raise
RawPyTablesIO.__init__(self, node, 'a+')
self._checkReadable()
self._checkWritable()
@property
def node(self):
return self._node
def _set_attributes(self, node):
"""_set_attributes(node) -> None. Adds file node-specific attributes.
Sets the system attributes 'NODE_TYPE' and 'NODE_TYPE_VERSION'
in the specified PyTables node (leaf).
"""
attrs = node.attrs
attrs.NODE_TYPE = NodeType
attrs.NODE_TYPE_VERSION = NodeTypeVersions[-1]
def new_node(h5file, **kwargs):
"""Creates a new file node object in the specified PyTables file object.
Additional named arguments where and name must be passed to specify where
the file node is to be created. Other named arguments such as title and
filters may also be passed.
The special named argument expectedsize, indicating an estimate of the
file size in bytes, may also be passed. It returns the file node object.
"""
return RAFileNode(None, h5file, **kwargs)
def open_node(node, mode='r'):
"""Opens an existing file node.
Returns a file node object from the existing specified PyTables
node. If mode is not specified or it is 'r', the file can only be
read, and the pointer is positioned at the beginning of the file. If
mode is 'a+', the file can be read and appended, and the pointer is
positioned at the end of the file.
"""
if mode == 'r':
return ROFileNode(node)
elif mode == 'a+':
return RAFileNode(node, None)
else:
raise OSError(f"invalid mode: {mode}")
def save_to_filenode(h5file, filename, where, name=None, overwrite=False,
title="", filters=None):
"""Save a file's contents to a filenode inside a PyTables file.
.. versionadded:: 3.2
Parameters
----------
h5file
The PyTables file to be written to; can be either a string
giving the file's location or a :class:`File` object. If a file
with name *h5file* already exists, it will be opened in
mode ``a``.
filename
Path of the file which shall be stored within the PyTables file.
where, name
Location of the filenode where the data shall be stored. If
*name* is not given, and *where* is either a :class:`Group`
object or a string ending on ``/``, the leaf name will be set to
the file name of *filename*. The *name* will be modified to
adhere to Python's natural naming convention; the original
filename will be preserved in the filenode's *_filename*
attribute.
overwrite
Whether or not a possibly existing filenode of the specified
name shall be overwritten.
title
A description for this node (it sets the ``TITLE`` HDF5
attribute on disk).
filters
An instance of the :class:`Filters` class that provides
information about the desired I/O filters to be applied
during the life of this object.
"""
path = Path(filename).resolve()
# sanity checks
if not os.access(path, os.R_OK):
raise OSError(f"The file '{path}' could not be read")
if isinstance(h5file, tb.file.File) and h5file.mode == "r":
raise OSError(f"The file '{h5file.filename}' is opened read-only")
# guess filenode's name if necessary
if name is None:
if isinstance(where, tb.group.Group):
name = os.path.split(filename)[1]
if isinstance(where, str):
if where.endswith("/"):
name = os.path.split(filename)[1]
else:
nodepath = where.split("/")
where = "/" + "/".join(nodepath[:-1])
name = nodepath[-1]
# sanitize name if necessary
if not tb.path._python_id_re.match(name):
name = re.sub('(?![a-zA-Z0-9_]).', "_",
re.sub('^(?![a-zA-Z_]).', "_", name))
new_h5file = not isinstance(h5file, tb.file.File)
f = tb.File(h5file, "a") if new_h5file else h5file
# check for already existing filenode
try:
f.get_node(where=where, name=name)
if not overwrite:
if new_h5file:
f.close()
raise OSError(
f"Specified node already exists in file '{f.filename}'"
)
except tb.NoSuchNodeError:
pass
# read data from disk
data = path.read_bytes()
# remove existing filenode if present
try:
f.remove_node(where=where, name=name)
except tb.NoSuchNodeError:
pass
# write file's contents to filenode
fnode = new_node(f, where=where, name=name, title=title, filters=filters)
fnode.write(data)
fnode.attrs._filename = path.name
fnode.close()
# cleanup
if new_h5file:
f.close()
def read_from_filenode(h5file, filename, where, name=None, overwrite=False,
create_target=False):
r"""Read a filenode from a PyTables file and write its contents to a file.
.. versionadded:: 3.2
Parameters
----------
h5file
The PyTables file to be read from; can be either a string
giving the file's location or a :class:`File` object.
filename
Path of the file where the contents of the filenode shall be
written to. If *filename* points to a directory or ends with
``/`` (``\`` on Windows), the filename will be set to the
*_filename* (if present; otherwise the *name*) attribute of the
read filenode.
where, name
Location of the filenode where the data shall be read from. If
no node *name* can be found at *where*, the first node at
*where* whose *_filename* attribute matches *name* will be read.
overwrite
Whether or not a possibly existing file of the specified
*filename* shall be overwritten.
create_target
Whether or not the folder hierarchy needed to accommodate the
given target ``filename`` will be created.
"""
path = Path(filename).resolve()
new_h5file = not isinstance(h5file, tb.file.File)
f = tb.File(h5file, "r") if new_h5file else h5file
try:
fnode = open_node(f.get_node(where=where, name=name))
except tb.NoSuchNodeError:
fnode = None
for n in f.walk_nodes(where=where, classname="EArray"):
if n.attrs._filename == name:
fnode = open_node(n)
break
if fnode is None:
f.close()
raise tb.NoSuchNodeError("A filenode '%s' cannot be found at "
"'%s'" % (name, where))
# guess output filename if necessary
# TODO: pathlib.Path strips trailing slash automatically :-(
if path.is_dir() or filename.endswith(os.path.sep):
try:
path = path / fnode.node.attrs._filename
except Exception:
path = path / fnode.node.name
if os.access(path, os.R_OK) and not overwrite:
if new_h5file:
f.close()
raise OSError(f"The file '{path}' already exists")
# create folder hierarchy if necessary
if create_target:
path.parent.mkdir(parents=True, exist_ok=True)
if not os.access(path.parent, os.W_OK):
if new_h5file:
f.close()
raise OSError("The file '%s' cannot be written to" % filename)
# read data from filenode
data = fnode.read()
fnode.close()
# store data to file
path.write_bytes(data)
# cleanup
del data
if new_h5file:
f.close() |
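# A minimal usage sketch for save_to_filenode()/read_from_filenode() above
# (file and node names are illustrative only):
#
#     save_to_filenode('archive.h5', 'notes.txt', '/', overwrite=True)
#     read_from_filenode('archive.h5', 'notes_restored.txt', '/',
#                        name='notes_txt', overwrite=True)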
|
pyspeckit__pyspeckit | classfiles.rst | Module doc / Tutorial | Generate documentation for this module | MIT License | pyspeckit__pyspeckit/docs/classfiles.rst | [
"pyspeckit__pyspeckit/pyspeckit/spectrum/readers/read_class.py"
] | Gildas CLASS files
Pyspeckit is capable of reading files from some versions of CLASS. The
CLASS developers have stated that the GILDAS file format is private and
will remain so, and therefore there are no guarantees that the CLASS
reader will work for your file.
Nonetheless, if you want to develop in python instead of SIC, the
~pyspeckit.spectrum.readers.read_class module is probably the best way
to access CLASS data.
The CLASS file specification is incomplete, so much of the data reading
is hacked together. The code style is based on Tom Robitaille's
idlsave package.
An example usage follows. Note that telescope and line are NOT optional keyword
arguments; they are just specified as such for clarity:
n2hp = class_to_obsblocks(fn1, telescope=['SMT-F1M-HU','SMT-F1M-VU'],
line=['N2HP(3-2)','N2H+(3-2)'])
This will generate a ~pyspeckit.spectrum.ObsBlock from all data tagged
with the 'telescope' flags listed and lines matching either of those
above. The data selection is equivalent to a combination of:
find /telescope SMT-F1M-HU
find /telescope SMT-F1M-VU
find /line N2HP(3-2)
find /line N2H+(3-2)
ALL of the data matching those criteria will be included in an ObsBlock.
They will then be accessible through the ObsBlock's speclist attribute,
or just by indexing the ObsBlock directly.
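Once the ObsBlock exists, the individual spectra are easy to get at. For
example (a sketch only, reusing the n2hp block from above):
sp = n2hp.speclist[0] # the first spectrum in the ObsBlock
sp = n2hp[0] # equivalent: index the ObsBlock directly
sp.plotter() # plot it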
| """
------------------------
GILDAS CLASS file reader
------------------------
Read a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`
"""
from __future__ import print_function
from six.moves import xrange
from six import iteritems
import six
import astropy.io.fits as pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
# from astropy.time import Time
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
# 'range' is needed as a keyword
irange = range
def print_timing(func):
"""
Prints execution time of decorated function.
Included here because CLASS files can take a little while to read;
this should probably be replaced with a progressbar
"""
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
return res
wrapper.__doc__ = func.__doc__
return wrapper
def ensure_bytes(string):
"""
Ensure a given string is in byte form
"""
if six.PY3:
return bytes(string, 'utf-8')
else:
return str(string)
""" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html """
filetype_dict = {'1A ':'Multiple_IEEE',
'1 ':'Multiple_Vax',
'1B ':'Multiple_EEEI',
'2A ':'v2',
'2 ':'v2',
'2B ':'v2',
'9A ':'Single_IEEE',
'9 ':'Single_Vax',
'9B ':'Single_EEEI'}
for key in list(filetype_dict.keys()):
filetype_dict[ensure_bytes(key)] = filetype_dict[key]
fileversion_dict = {'1A ':'v1',
'2A ':'v2',
'9A ':'v1', # untested
}
for key in list(fileversion_dict.keys()):
fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]
record_lengths = {'1A': 512,
'2A': 1024*4}
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
-7: 'UNKNOWN-APEX',
# -8: 'SWITCH',
-9: 'GAUSSFIT', # "private"; see class-interfaces-private.f90
-10: 'DRIFT',
-11: 'BEAMSWITCH', # "private"; see class-interfaces-private.f90
-12: 'SHELLFIT', # "private"; see class-interfaces-private.f90
-13: 'NH3FIT', # "private"; see class-interfaces-private.f90
-14: 'CALIBRATION',
-18: 'ABSFIT', # "private"; see class-interfaces-private.f90
}
header_id_lengths = {-2: 9, # may really be 10?
-3: 17,
-4: 17,
-5: None, # variable length
-6: 3, # variable length
-14: 25,
}
# from packages/classic/lib/classic_mod.f90
filedescv2_nw1=14
"""
GENERAL
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3)! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
! Written in the entry
real(kind=8) :: ut ! 1-2 [ rad] UT of observation
real(kind=8) :: st ! 3-4 [ rad] LST of observation
real(kind=4) :: az ! 5 [ rad] Azimuth
real(kind=4) :: el ! 6 [ rad] Elevation
real(kind=4) :: tau ! 7 [neper] Opacity
real(kind=4) :: tsys ! 8 [ K] System temperature
real(kind=4) :: time ! 9 [ s] Integration time
! Not in this section in file
integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)
! NOT in data ---
character(len=12) :: cdobs ! [string] Duplicate of dobs
character(len=12) :: cdred ! [string] Duplicate of dred
"""
keys_lengths = {
'unknown': [
('NUM' ,1,'int32'), # Observation number
('VER' ,1,'int32'), # Version number
('TELES' ,3,'|S12'), # Telescope name
('DOBS' ,1,'int32'), # Date of observation
('DRED' ,1,'int32'), # Date of reduction
('TYPEC' ,1,'int32'), # Type of coordinates
('KIND' ,1,'int32'), # Type of data
('QUAL' ,1,'int32'), # Quality of data
('SCAN' ,1,'int32'), # Scan number
('SUBSCAN',1,'int32'), # Subscan number
],
'COMMENT': [ # -1
('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment
('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string
],
'GENERAL': [ # -2
('UT' ,2,'float64'), # rad UT of observation
('ST' ,2,'float64'), # rad LST of observation
('AZ' ,1,'float32'), # rad Azimuth
('EL' ,1,'float32'), # rad Elevation
('TAU' ,1,'float32'), # neper Opacity
('TSYS' ,1,'float32'), # K System temperature
('TIME' ,1,'float32'), # s Integration time
# XUNIT should not be there?
#( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)
],
'POSITION': [ # -3
('SOURC',3,'|S12') , # [ ] Source name
('EPOCH',1,'float32'), # [ ] Epoch of coordinates
('LAM' ,2,'float64'), #[rad] Lambda
('BET' ,2,'float64'), #[rad] Beta
('LAMOF',1,'float32'), # [rad] Offset in Lambda
('BETOF',1,'float32'), # [rad] Offset in Beta
('PROJ',1,'int32') , # [rad] Projection system
('SL0P',1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS
('SB0P',1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS
('SK0P',1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS
],
'SPECTRO': [ # -4
#('align' ,1,'int32'), # [ ] Alignment padding
('LINE' ,3,'|S12'), # [ ] Line name
('RESTF' ,2,'float64'), # [ MHz] Rest frequency
('NCHAN' ,1,'int32'), # [ ] Number of channels
('RCHAN' ,1,'float32'), # [ ] Reference channels
('FRES' ,1,'float32'), # [ MHz] Frequency resolution
('FOFF' ,1,'float32'), # [ MHz] Frequency offset
('VRES' ,1,'float32'), # [km/s] Velocity resolution
('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel
('BAD' ,1,'float32'), # [ ] Blanking value
#('ALIGN_1',1,'int32'), # [ ] Alignment padding
('IMAGE' ,2,'float64'), # [ MHz] Image frequency
#('ALIGN_2',1,'int32'), # [ ] Alignment padding
('VTYPE' ,1,'int32'), # [code] Type of velocity
('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)
],
'CALIBRATION': [ # -14
('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64'), #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32'), #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32'), #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32'), #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32'), #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32'), #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
'''Read the next `n` bytes (from idlsave)'''
return f.read(n)
"""
Warning: UNCLEAR what endianness should be!
Numpy seemed to get it right, and I think numpy assumes NATIVE endianness
"""
def _read_byte(f):
'''Read a single byte (from idlsave)'''
return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])
def _read_int16(f):
'''Read a signed 16-bit integer (from idlsave)'''
return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])
def _read_int32(f):
'''Read a signed 32-bit integer (from idlsave)'''
return numpy.int32(struct.unpack('=i', f.read(4))[0])
def _read_int64(f):
'''Read a signed 64-bit integer '''
return numpy.int64(struct.unpack('=q', f.read(8))[0])
def _read_float32(f):
'''Read a 32-bit float (from idlsave)'''
return numpy.float32(struct.unpack('=f', f.read(4))[0])
def _align_32(f):
'''Align to the next 32-bit position in a file (from idlsave)'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _read_word(f,length):
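'''Read a character field of `length` bytes and re-align the stream to the
next 32-bit boundary; returns None if `length` is not positive.'''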
if length > 0:
chars = _read_bytes(f, length)
_align_32(f)
else:
chars = None
return chars
def _read_int(f):
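'''Read a raw 32-bit integer; note that this returns the 1-element tuple
from struct.unpack, not a bare integer.'''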
return struct.unpack('i',f.read(4))
def is_ascii(s):
"""Check if there are non-ascii characters in Unicode string
Parameters
----------
s : str
The string to be checked
Returns
-------
is_ascii : bool
Returns True if all characters in the string are ascii. False
otherwise.
"""
return len(s) == len(s.decode('ascii').encode('utf-8'))
def is_all_null(s):
return all(x=='\x00' for x in s) or all(x==b'\x00' for x in s)
"""
from clic_file.f90: v1, v2
integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index
integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read
integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index
integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index
integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index
integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index
integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index
integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index
real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1
real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2
integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets
integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation
integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index
integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index
integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle
integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number
real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words
integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]
integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2)! 27 : project name
integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status
integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status
integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number
real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]
integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word
equivalently
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3)! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
"""
"""
index.f90:
call conv%read%i8(data(1), indl%bloc, 1) ! bloc
call conv%read%i4(data(3), indl%word, 1) ! word
call conv%read%i8(data(4), indl%num, 1) ! num
call conv%read%i4(data(6), indl%ver, 1) ! ver
call conv%read%cc(data(7), indl%csour, 3) ! csour
call conv%read%cc(data(10),indl%cline, 3) ! cline
call conv%read%cc(data(13),indl%ctele, 3) ! ctele
call conv%read%i4(data(16),indl%dobs, 1) ! dobs
call conv%read%i4(data(17),indl%dred, 1) ! dred
call conv%read%r4(data(18),indl%off1, 1) ! off1
call conv%read%r4(data(19),indl%off2, 1) ! off2
call conv%read%i4(data(20),indl%type, 1) ! type
call conv%read%i4(data(21),indl%kind, 1) ! kind
call conv%read%i4(data(22),indl%qual, 1) ! qual
call conv%read%r4(data(23),indl%posa, 1) ! posa
call conv%read%i8(data(24),indl%scan, 1) ! scan
call conv%read%i4(data(26),indl%subscan,1) ! subscan
if (isv3) then
call conv%read%r8(data(27),indl%ut, 1) ! ut
else
"""
def _read_indices(f, file_description):
#if file_description['version'] in (1,2):
# extension_positions = (file_description['aex']-1)*file_description['reclen']*4
# all_indices = {extension:
# [_read_index(f,
# filetype=file_description['version'],
# entry=ii,
# #position=position,
# )
# for ii in range(file_description['lex1'])]
# for extension,position in enumerate(extension_positions)
# if position > 0
# }
#elif file_description['version'] == 1:
extension_positions = ((file_description['aex'].astype('int64')-1)
*file_description['reclen']*4)
all_indices = [_read_index(f,
filetype=file_description['version'],
# 1-indexed files
entry_number=ii+1,
file_description=file_description,
)
for ii in range(file_description['xnext']-1)]
#else:
# raise ValueError("Invalid file version {0}".format(file_description['version']))
return all_indices
def _find_index(entry_number, file_description, return_position=False):
if file_description['gex'] == 10:
kex=(entry_number-1)//file_description['lex1'] + 1
else:
# exponential growth:
#kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
ken = entry_number - file_description['lexn'][kex-1]
#! Find ken (relative entry number in the extension, starts from 1)
#ken = entry_num - file%desc%lexn(kex-1)
kb = ((ken-1)*file_description['lind'])//file_description['reclen']
#kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the
# ! relative record position (as an offset, starts from 0) where the
# ! Entry Index starts. NB: there can be a non-integer number of Entry
# ! Indexes per record
# Subtract 1: 'aex' is 1-indexed
kbl = (file_description['aex'][kex-1]+kb)-1
# kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes
k = ((ken-1)*file_description['lind']) % file_description['reclen']
#k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the
# ! first word of the Entry Index of the entry number 'entry_num'
if return_position:
return (kbl*file_description['reclen']+k)*4
else:
return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
entry_number=None, file_description=None):
if position is not None:
f.seek(position)
if entry_number is not None:
indpos = _find_index(entry_number, file_description, return_position=True)
f.seek(indpos)
x0 = f.tell()
if filetype in ('1A ','v1', 1):
log.debug('Index filetype 1A')
index = {
"XBLOC":_read_int32(f),
"XNUM":_read_int32(f),
"XVER":_read_int32(f),
"XSOURC":_read_word(f,12),
"XLINE":_read_word(f,12),
"XTEL":_read_word(f,12),
"XDOBS":_read_int32(f),
"XDRED":_read_int32(f),
"XOFF1":_read_float32(f),# first offset (real, radians)
"XOFF2":_read_float32(f),# second offset (real, radians)
"XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
"XKIND":_read_int32(f),# Kind of observation (0: spectral, 1: continuum, )
"XQUAL":_read_int32(f),# Quality (0-9)
"XSCAN":_read_int32(f),# Scan number
}
index['BLOC'] = index['XBLOC'] # v2 compatibility
index['WORD'] = 1 # v2 compatibility
index['SOURC'] = index['CSOUR'] = index['XSOURC']
index['DOBS'] = index['CDOBS'] = index['XDOBS']
index['CTELE'] = index['XTEL']
index['LINE'] = index['XLINE']
index['OFF1'] = index['XOFF1']
index['OFF2'] = index['XOFF2']
index['QUAL'] = index['XQUAL']
index['SCAN'] = index['XSCAN']
index['KIND'] = index['XKIND']
if clic: # use header set up in clic
nextchunk = {
"XPROC":_read_int32(f),# "procedure type"
"XITYPE":_read_int32(f),#
"XHOURANG":_read_float32(f),#
"XPROJNAME":_read_int32(f),#
"XPAD1":_read_int32(f),
"XBPC" :_read_int32(f),
"XIC" :_read_int32(f),
"XRECEI" :_read_int32(f),
"XUT":_read_float32(f),
"XPAD2":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW
}
else:
nextchunk = {"XPOSA":_read_float32(f),
"XSUBSCAN":_read_int32(f),
'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
}
nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
nextchunk['POSA'] = nextchunk['XPOSA']
index.update(nextchunk)
if (f.tell() - x0 != 128):
missed_bits = (f.tell()-x0)
X = f.read(128-missed_bits)
if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
#raise IndexError("read_index did not successfully read 128 bytes at %i. Read %i bytes." % (x0,f.tell()-x0))
if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
raise ValueError("Invalid index read from {0}.".format(x0))
elif filetype in ('2A ','v2', 2):
log.debug('Index filetype 2A')
index = {
"BLOC" : _read_int64(f) , #(data(1), 1) ! bloc
"WORD" : _read_int32(f) , #(data(3), 1) ! word
"NUM" : _read_int64(f) , #(data(4), 1) ! num
"VER" : _read_int32(f) , #(data(6), 1) ! ver
"CSOUR" : _read_word(f,12), #(data(7), 3) ! csour
"CLINE" : _read_word(f,12), #(data(10), 3) ! cline
"CTELE" : _read_word(f,12), #(data(13), 3) ! ctele
"DOBS" : _read_int32(f) , #(data(16), 1) ! dobs
"DRED" : _read_int32(f) , #(data(17), 1) ! dred
"OFF1" : _read_float32(f), #(data(18), 1) ! off1
"OFF2" : _read_float32(f), #(data(19), 1) ! off2
"TYPE" : _read_int32(f) , #(data(20), 1) ! type
"KIND" : _read_int32(f) , #(data(21), 1) ! kind
"QUAL" : _read_int32(f) , #(data(22), 1) ! qual
"POSA" : _read_float32(f), #(data(23), 1) ! posa
"SCAN" : _read_int64(f) , #(data(24), 1) ! scan
"SUBSCAN": _read_int32(f) , #(data(26), 1) ! subscan
}
#last24bits = f.read(24)
#log.debug("Read 24 bits: '{0}'".format(last24bits))
if any((is_all_null(index[x]) or not is_ascii(index[x]))
for x in ('CSOUR','CLINE','CTELE')):
raise ValueError("Invalid index read from {0}.".format(x0))
index['SOURC'] = index['XSOURC'] = index['CSOUR']
index['LINE'] = index['XLINE'] = index['CLINE']
index['XKIND'] = index['KIND']
try:
index['DOBS'] = index['XDOBS'] = index['CDOBS']
except KeyError:
index['CDOBS'] = index['XDOBS'] = index['DOBS']
else:
raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
# from kernel/lib/gsys/date.f90: gag_julda
index['MJD'] = index['DOBS'] + 60549
class_dobs = index['DOBS']
index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
# SLOW
#index['DATEOBS'] = Time(index['DOBS'], format='jyear')
#index['DATEOBSS'] = index['DATEOBS'].iso
log.debug("Indexing finished at {0}".format(f.tell()))
return index
def _read_header(f, type=0, position=None):
"""
Read a header entry from a CLASS file
(helper function)
"""
if position is not None:
f.seek(position)
if type in keys_lengths:
hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])
for x in keys_lengths[type]]
return dict(hdrsec)
else:
return {}
raise ValueError("Unrecognized type {0}".format(type))
def _read_first_record(f):
f.seek(0)
filetype = f.read(4)
if fileversion_dict[filetype] == 'v1':
return _read_first_record_v1(f)
elif fileversion_dict[filetype] == 'v2':
return _read_first_record_v2(f)
else:
raise ValueError("Unrecognized filetype {0}".format(filetype))
def _read_first_record_v1(f, record_length_words=128):
r"""
Position & Parameter & Fortran Kind & Purpose \\
\hline
1 & {\tt code} & Character*4 & File code \\
2 & {\tt next} & Integer*4 & Next free record \\
3 & {\tt lex} & Integer*4 & Length of first extension (number of entries) \\
4 & {\tt nex} & Integer*4 & Number of extensions \\
5 & {\tt xnext} & Integer*4 & Next available entry number \\
6:2*{\tt reclen} & {\tt ex(:)} & Integer*4 & Array of extension addresses
from classic_mod.f90:
integer(kind=4) :: code ! 1 File code
integer(kind=4) :: next ! 2 Next free record
integer(kind=4) :: lex ! 3 Extension length (number of entries)
integer(kind=4) :: nex ! 4 Number of extensions
integer(kind=4) :: xnext ! 5 Next available entry number
integer(kind=4) :: aex(mex_v1) ! 6:256 Extension addresses
from old (<dec2013) class, file.f90:
read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &
& ibx%ilex,ibx%imex,ibx%xnext
also uses filedesc_v1tov2 from classic/lib/file.f90
"""
# OLD NOTES
# hdr = header
# hdr.update(obshead) # re-overwrite things
# hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})
# hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
# hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
# hdr.update({'OBJECT':hdr['SOURC'].strip()})
# hdr.update({'BUNIT':'Tastar'})
# hdr.update({'EXPOSURE':hdr['TIME']})
f.seek(0)
file_description = {
'code': f.read(4),
'next': _read_int32(f),
'lex': _read_int32(f),
'nex': _read_int32(f),
'xnext': _read_int32(f),
'gex': 10.,
'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90
'version': 1,
'nextrec': 3,
'nextword': 1,
'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90
'kind': 'unknown',
'flags': 0,
}
file_description['reclen'] = record_length_words # should be 128w = 512 bytes
ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
file_description['ex'] = ex[ex!=0]
file_description['nextrec'] = file_description['next'] # this can't be...
file_description['lex1'] = file_description['lex'] # number of entries
file_description['lexn'] = (np.arange(file_description['nex']+1) *
file_description['lex1'])
file_description['nentries'] = np.sum(file_description['lexn'])
file_description['aex'] = file_description['ex'][:file_description['nex']]
#file_description['version'] = fileversion_dict[file_description['code']]
assert f.tell() == 1024
# Something is not quite right with the 'ex' parsing
#assert len(file_description['ex']) == file_description['nex']
return file_description
def _read_first_record_v2(f):
r""" packages/classic/lib/file.f90
Position & Parameter & Fortran Kind & Purpose & Unit \\
\hline
1 & {\tt code} & Character*4 & File code & - \\
2 & {\tt reclen} & Integer*4 & Record length & words \\
3 & {\tt kind} & Integer*4 & File kind & - \\
4 & {\tt vind} & Integer*4 & Index version & - \\
5 & {\tt lind} & Integer*4 & Index length & words \\
6 & {\tt flags} & Integer*4 & Bit flags. \#1: single or multiple, & - \\
& & & \#2-32: provision (0-filled) & \\
\hline
7:8 & {\tt xnext} & Integer*8 & Next available entry number & - \\
9:10 & {\tt nextrec} & Integer*8 & Next record which contains free space & record \\
11 & {\tt nextword} & Integer*4 & Next free word in this record & word \\
\hline
12 & {\tt lex1} & Integer*4 & Length of first extension index & entries \\
13 & {\tt nex} & Integer*4 & Number of extensions & - \\
14 & {\tt gex} & Integer*4 & Extension growth rule & - \\
15:{\tt reclen} & {\tt aex(:)} & Integer*8 & Array of extension addresses & record
"""
f.seek(0)
file_description = {
'code': f.read(4),
'reclen': _read_int32(f),
'kind': _read_int32(f),
'vind': _read_int32(f),
'lind': _read_int32(f),
'flags': _read_int32(f),
'xnext': _read_int64(f),
'nextrec': _read_int64(f),
'nextword': _read_int32(f),
'lex1': _read_int32(f),
'nex': _read_int32(f),
'gex': _read_int32(f),
}
file_description['lexn'] = [0]
if file_description['gex'] == 10:
for ii in range(1, file_description['nex']+1):
file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
else:
#! Exponential growth. Only growth with mantissa 2.0 is supported
for ii in range(1, file_description['nex']):
# I don't know what the fortran does here!!!
# ahh, maybe 2_8 means int(2, dtype='int64')
nent = int(file_description['lex1'] * 2**(ii-1))
#nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)
file_description['lexn'].append(file_description['lexn'][-1]+nent)
#file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent
file_description['nentries'] = np.sum(file_description['lexn'])
record_length_words = file_description['reclen']
aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
file_description['aex'] = aex[aex!=0]
assert len(file_description['aex']) == file_description['nex']
file_description['version'] = 2
return file_description
def gi8_dicho(ninp,lexn,xval,ceil=True):
"""
! @ public
! Find ival such as
! X(ival-1) < xval <= X(ival) (ceiling mode)
! or
! X(ival) <= xval < X(ival+1) (floor mode)
! for input data ordered. Use a dichotomic search for that.
call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)
"""
#integer(kind=size_length), intent(in) :: np ! Number of input points
#integer(kind=8), intent(in) :: x(np) ! Input ordered Values
#integer(kind=8), intent(in) :: xval ! The value we search for
#logical, intent(in) :: ceil ! Ceiling or floor mode?
#integer(kind=size_length), intent(out) :: ival ! Position in the array
#logical, intent(inout) :: error ! Logical error flag
iinf = 1
isup = ninp
#! Ceiling mode
while isup > (iinf+1):
imid = int(np.floor((isup + iinf)/2.))
if (lexn[imid-1] < xval):
iinf = imid
else:
isup = imid
ival = isup
return ival
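# Illustrative check of gi8_dicho (not part of the original file): with
# cumulative extension boundaries lexn = [0, 100, 200, 300], entry number 150
# falls in the third extension when searching in ceiling mode, i.e.
#   gi8_dicho(4, [0, 100, 200, 300], 150) == 3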
def _read_obshead(f, file_description, position=None, verbose=False):
if file_description['version'] == 1:
return _read_obshead_v1(f, position=position, verbose=verbose)
if file_description['version'] == 2:
return _read_obshead_v2(f, position=position)
else:
raise ValueError("Invalid file version {0}.".
format(file_description['version']))
def _read_obshead_v2(f, position=None):
"""
! Version 2 (public)
integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part
integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part
type classic_entrydesc_t
sequence
integer(kind=4) :: code ! 1 : code observation icode
integer(kind=4) :: version ! 2 : observation version
integer(kind=4) :: nsec ! 3 : number of sections
integer(kind=4) :: pad1 ! - : memory padding (not in data)
integer(kind=8) :: nword ! 4- 5: number of words
integer(kind=8) :: adata ! 6- 7: data address
integer(kind=8) :: ldata ! 8- 9: data length
integer(kind=8) :: xnum ! 10-11: entry number
! Out of the'sequence' block:
integer(kind=4) :: msec ! Not in data: maximum number of sections the
! Observation Index can hold
integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment
integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)
integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)
integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)
end type classic_entrydesc_t
"""
if position is not None:
f.seek(position)
else:
position = f.tell()
IDcode = f.read(4)
    if IDcode.strip() != b'2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(position))
f.seek(position)
entrydescv2_nw1 = 11
entrydescv2_nw2 = 5
obshead = {
'CODE': f.read(4),
'VERSION': _read_int32(f),
'NSEC': _read_int32(f),
#'_blank': _read_int32(f),
'NWORD': _read_int64(f),
'ADATA': _read_int64(f),
'LDATA': _read_int64(f),
'XNUM': _read_int64(f),
#'MSEC': _read_int32(f),
#'_blank2': _read_int32(f),
}
section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
"""
Read the observation header of a CLASS file
(helper function for read_class; should not be used independently)
"""
if position is not None:
f.seek(position)
IDcode = f.read(4)
    if IDcode.strip() != b'2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(f.tell() - 4))
(nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
obsnum) = numpy.fromfile(f, count=8, dtype='int32')
if verbose:
print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
print("DATA_LENGTH: ",data_length)
seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
# Documentation says addresses then length: It is apparently wrong
seclen = numpy.fromfile(f,count=nsec,dtype='int32')
secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
if verbose:
print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
'NSEC':nsec, 'OBSNUM':obsnum}
#return obsnum,seccodes
return obsnum,hdr,dict(zip(seccodes,secaddr))
# THIS IS IN READ_OBSHEAD!!!
# def _read_preheader(f):
# """
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
# # 13 comes from counting 1, -2,....99 above
# numbers = np.fromfile(f, count=13, dtype='int32')
# sections = [n for n in numbers if n in header_id_numbers]
# return sections
def downsample_1d(myarr,factor,estimator=np.mean, weight=None):
"""
Downsample a 1D array by averaging over *factor* pixels.
Crops right side if the shape is not a multiple of factor.
This code is pure numpy and should be fast.
keywords:
estimator - default to mean. You can downsample by summing or
something else if you want a different estimator
(e.g., downsampling error: you want to sum & divide by sqrt(n))
weight: np.ndarray
An array of weights to use for the downsampling. If None,
assumes uniform 1
"""
    if myarr.ndim != 1:
raise ValueError("Only works on 1d data. Says so in the title.")
xs = myarr.size
crarr = myarr[:xs-(xs % int(factor))]
if weight is None:
dsarr = estimator(np.concatenate([[crarr[i::factor] for i in
range(factor)]]),axis=0)
else:
dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in
range(factor)]]),axis=0)
warr = estimator(np.concatenate([[weight[i::factor] for i in
range(factor)]]),axis=0)
dsarr = dsarr/warr
return dsarr
# unit test
def test_downsample1d():
data = np.arange(10)
weight = np.ones(10)
weight[5]=0
assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==
np.array([0.5, 2.5, 4.0, 6.5, 8.5]))
def read_observation(f, obsid, file_description=None, indices=None,
my_memmap=None, memmap=True, verbose=False):
if isinstance(f, str):
f = open(f,'rb')
opened = True
if memmap:
my_memmap = numpy.memmap(f, offset=0, dtype='float32',
mode='r')
else:
my_memmap = None
elif my_memmap is None and memmap:
raise ValueError("Must pass in a memmap object if passing in a file object.")
else:
opened = False
if file_description is None:
file_description = _read_first_record(f)
if indices is None:
indices = _read_indices(f, file_description)
index = indices[obsid]
obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
log.debug("Reading observation at position {0}".format(obs_position))
obsnum,obshead,sections = _read_obshead(f, file_description,
position=obs_position,
verbose=verbose)
header = obshead
datastart = 0
for section_id,section_address in iteritems(sections):
# Section addresses are 1-indexed byte addresses
# in the current "block"
sec_position = obs_position + (section_address-1)*4
temp_hdr = _read_header(f, type=header_id_numbers[section_id],
position=sec_position)
header.update(temp_hdr)
datastart = max(datastart,f.tell())
hdr = header
hdr.update(obshead) # re-overwrite things
hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})
hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
hdr.update({'OBJECT':hdr['SOURC'].strip()})
hdr.update({'BUNIT':'Tastar'})
hdr.update({'EXPOSURE':float(hdr['TIME'])})
hdr['HDRSTART'] = obs_position
hdr['DATASTART'] = datastart
hdr.update(indices[obsid])
# Define MJD as mid-exposure time in MJD
hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})
# Apparently the data are still valid in this case?
#if hdr['XNUM']!= obsid+1:
# log.error("The spectrum read was {0} but {1} was requested.".
# format(hdr['XNUM']-1, obsid))
if hdr['KIND'] == 1: # continuum
nchan = hdr['NPOIN']
elif 'NCHAN' in hdr:
nchan = hdr['NCHAN']
else:
log.error("No NCHAN in header. This is not a spectrum.")
import ipdb; ipdb.set_trace()
# There may be a 1-channel offset? CHECK!!!
# (changed by 1 pixel - October 14, 2014)
# (changed back - October 21, 2014 - I think the ends are just bad, but not
# zero.)
f.seek(datastart-1)
spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
memmap=memmap, my_memmap=my_memmap)
if opened:
f.close()
return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
    if position != f.tell():
log.warning("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
#spectrum = numpy.memmap(filename, offset=here, dtype='float32',
# mode='r', shape=(nchan,))
spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=header['NCHAN'] if 'NCHAN' in header else header['NPOIN'],
                          my_memmap=memmap)
def clean_header(header):
newheader = {}
for k in header:
if not isinstance(header[k], (int, float, str)):
if isinstance(header[k], np.ndarray) and header[k].size > 1:
if header[k].size > 10:
raise ValueError("Large array being put in header. That's no good. key={0}".format(k))
for ii,val in enumerate(header[k]):
newheader[k[:7]+str(ii)] = val
else:
newheader[k[:8]] = str(header[k])
else:
newheader[k[:8]] = header[k]
return newheader
class ClassObject(object):
def __init__(self, filename, verbose=False):
t0 = time.time()
self._file = open(filename, 'rb')
self.file_description = _read_first_record(self._file)
self.allind = _read_indices(self._file, self.file_description)
self._data = np.memmap(self._file, dtype='float32', mode='r')
if verbose: log.info("Setting _spectra")
self._spectra = LazyItem(self)
t1 = time.time()
if verbose: log.info("Setting posang. t={0}".format(t1-t0))
self.set_posang()
t2 = time.time()
if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
self._identify_otf_scans(verbose=verbose)
t3 = time.time()
#self._load_all_spectra()
if verbose:
log.info("Loaded CLASS object with {3} indices. Time breakdown:"
" {0}s for indices, "
"{1}s for posang, and {2}s for OTF scan identification"
.format(t1-t0, t2-t1, t3-t2, len(self.allind)))
def __repr__(self):
s = "\n".join(["{k}: {v}".format(k=k,v=v)
for k,v in iteritems(self.getinfo())])
return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
nspec=len(self.allind)) + s
def getinfo(self, allsources=False):
info = dict(
tels = self.tels,
lines = self.lines,
scans = self.scans,
sources = self.sources if allsources else self.sci_sources,
)
return info
def set_posang(self):
h0 = self.headers[0]
for h in self.headers:
dx = h['OFF1'] - h0['OFF1']
dy = h['OFF2'] - h0['OFF2']
h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
h0 = h
def _identify_otf_scans(self, verbose=False):
h0 = self.allind[0]
st = 0
otfscan = 0
posangs = [h['COMPPOSA'] for h in self.allind]
if verbose:
pb = ProgressBar(len(self.allind))
for ii,h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):
h0['FIRSTSCAN'] = st
cpa = np.median(posangs[st:ii])
for hh in self.allind[st:ii]:
hh['SCANPOSA'] = cpa % 180
st = ii
if h['SCAN'] == h0['SCAN']:
h0['OTFSCAN'] = otfscan
otfscan += 1
h['OTFSCAN'] = otfscan
else:
otfscan = 0
h['OTFSCAN'] = otfscan
else:
h['OTFSCAN'] = otfscan
if verbose:
pb.update(ii)
def listscans(self, source=None, telescope=None, out=sys.stdout):
minid=0
scan = -1
sourc = ""
#tel = ''
minoff1,maxoff1 = np.inf,-np.inf
minoff2,maxoff2 = np.inf,-np.inf
ttlangle,nangle = 0.0,0
print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
"[ {RAmin:>12s}, {RAmax:>12s} ] "
"[ {DECmin:>12s}, {DECmax:>12s} ] "
"{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
.format(entries='Scans', SOURC='Source', XTEL='Telescope',
SCAN='Scan', SUBSCAN='Subscan',
RAmin='min(RA)', RAmax='max(RA)',
DECmin='min(DEC)', DECmax='max(DEC)',
SCANPOSA='Scan PA',
angle='Angle', OTFSCAN='OTFscan',
TSYS='TSYS', UTD='UTD'),
file=out)
data_rows = []
for ii,row in enumerate(self.headers):
if (row['SCAN'] == scan
and row['SOURC'] == sourc
#and row['XTEL'] == tel
):
minoff1 = min(minoff1, row['OFF1'])
maxoff1 = max(maxoff1, row['OFF1'])
minoff2 = min(minoff2, row['OFF2'])
maxoff2 = max(maxoff2, row['OFF2'])
ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
row['OFF1'] - prevrow['OFF1'])%np.pi
nangle += 1
prevrow = row
else:
if scan == -1:
scan = row['SCAN']
sourc = row['SOURC']
#tel = row['XTEL']
prevrow = row
continue
ok = True
if source is not None:
if isinstance(source, (list,tuple)):
ok = ok and any(re.search((s), prevrow['SOURC'])
for s in source)
else:
ok = ok and re.search((source), prevrow['SOURC'])
if telescope is not None:
ok = ok and re.search((telescope), prevrow['XTEL'])
if ok:
data = dict(RAmin=minoff1*180/np.pi*3600,
RAmax=maxoff1*180/np.pi*3600,
DECmin=minoff2*180/np.pi*3600,
DECmax=maxoff2*180/np.pi*3600,
angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
e0=minid,
e1=ii-1,
#TSYS=row['TSYS'] if 'TSYS' in row else '--',
UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
**prevrow)
print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
"[ {RAmin:12f}, {RAmax:12f} ] "
"[ {DECmin:12f}, {DECmax:12f} ] "
"{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
" {TSYS:>8.1f} {UTD:12f}".
format(**data),
file=out)
data_rows.append(data)
minoff1,maxoff1 = np.inf,-np.inf
minoff2,maxoff2 = np.inf,-np.inf
ttlangle,nangle = 0.0,0
scan = row['SCAN']
sourc = row['SOURC']
#tel = row['XTEL']
minid = ii
return data
@property
def tels(self):
if hasattr(self,'_tels'):
return self._tels
else:
self._tels = set([h['XTEL'] for h in self.allind])
return self._tels
@property
def sources(self):
if hasattr(self,'_source'):
return self._source
else:
self._source = set([h['SOURC'] for h in self.allind])
return self._source
@property
def scans(self):
if hasattr(self,'_scan'):
return self._scan
else:
self._scan = set([h['SCAN'] for h in self.allind])
return self._scan
@property
def sci_sources(self):
return set([s for s in self.sources
if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
'COLD')])
@property
def lines(self):
if hasattr(self,'_lines'):
return self._lines
else:
self._lines = set([h['LINE'] for h in self.allind])
return self._lines
def _load_all_spectra(self, indices=None):
if indices is None:
indices = range(self.file_description['xnext']-1)
if hasattr(self, '_loaded_indices'):
indices_set = set(indices)
indices_to_load = (indices_set.difference(self._loaded_indices))
self._loaded_indices = self._loaded_indices.union(indices_set)
if any(indices_to_load):
pb = ProgressBar(len(indices_to_load))
                for ii, k in enumerate(indices_to_load):
self._spectra[k]
pb.update(ii)
else:
self._loaded_indices = set(indices)
self._spectra.load_all()
@property
def spectra(self):
return [x[0] for x in self._spectra]
@property
def headers(self):
return [self._spectra[ii][1]
if ii in self._spectra else x
for ii,x in enumerate(self.allind)]
def select_spectra(self,
all=None,
line=None,
linere=None,
linereflags=re.IGNORECASE,
number=None,
scan=None,
offset=None,
source=None,
sourcere=None,
sourcereflags=re.IGNORECASE,
range=None,
quality=None,
telescope=None,
telescopere=None,
telescopereflags=re.IGNORECASE,
subscan=None,
entry=None,
posang=None,
#observed=None,
#reduced=None,
frequency=None,
section=None,
user=None,
include_old_versions=False,
):
"""
Parameters
----------
include_old_versions: bool
Include spectra with XVER numbers <0? These are CLASS spectra that
have been "overwritten" (re-reduced?)
"""
if entry is not None and len(entry)==2:
return irange(entry[0], entry[1])
if frequency is not None:
self._load_all_spectra()
sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)
if line is not None else True) and
(re.search(ensure_bytes(linere), h['LINE'], linereflags)
if linere is not None else True) and
(h['SCAN'] == scan if scan is not None else True) and
((h['OFF1'] == offset or
h['OFF2'] == offset) if offset is not None else True) and
(re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)
if source is not None else True) and
(re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)
if sourcere is not None else True) and
(h['OFF1']>range[0] and h['OFF1'] < range[1] and
h['OFF2']>range[2] and h['OFF2'] < range[3]
if range is not None and len(range)==4 else True) and
(h['QUAL'] == quality if quality is not None else True) and
(re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)
if telescope is not None else True) and
(re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)
if telescopere is not None else True) and
(h['SUBSCAN']==subscan if subscan is not None else True) and
(h['NUM'] >= number[0] and h['NUM'] < number[1]
if number is not None else True) and
('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
h['RESTF'] > frequency[0] and
h['RESTF'] < frequency[1]
if frequency is not None and len(frequency)==2
else True) and
(h['COMPPOSA']%180 > posang[0] and
h['COMPPOSA']%180 < posang[1]
if posang is not None and len(posang)==2
else True) and
# 1A uses XVER, 2A uses VER. If neither are present, it's
# probably not a valid spectrum?
(h.get('XVER', h.get('VER', -999)) > 0
if not include_old_versions else True)
for h in self.headers
]
return [ii for ii,k in enumerate(sel) if k]
def get_spectra(self, progressbar=True, **kwargs):
selected_indices = self.select_spectra(**kwargs)
if not any(selected_indices):
raise ValueError("Selection yielded empty.")
self._spectra.load(selected_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in selected_indices]
def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
spdata = self.get_spectra(progressbar=progressbar, **kwargs)
spectra = [pyspeckit.Spectrum(data=data,
xarr=make_axis(header),
header=clean_header(header))
for data,header in spdata]
return spectra
def read_observations(self, observation_indices, progressbar=True):
self._spectra.load(observation_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
telescope=None, line=None, posang=None, verbose=False,
flag_array=None):
"""
Read a binary class file.
Based on the
`GILDAS CLASS file type Specification
<http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_
Parameters
----------
filename: str
downsample_factor: None or int
Factor by which to downsample data by averaging. Useful for
overresolved data.
sourcename: str or list of str
Source names to match to the data (uses regex)
telescope: str or list of str
'XTEL' or 'TELE' parameters: the telescope & instrument
line: str or list of str
The line name
posang: tuple of 2 floats
The first float is the minimum value for the position angle. The second
float is the maximum value for the position angle.
verbose: bool
Log messages with severity INFO
flag_array: np.ndarray
An array with the same shape as the data used to flag out
(remove) data when downsampling. True = flag out
"""
classobj = ClassObject(filename)
if not isinstance(sourcename, (list,tuple)):
sourcename = [sourcename]
if not isinstance(telescope, (list,tuple)):
telescope = [telescope]
if not isinstance(line, (list,tuple)):
line = [line]
spectra,headers = [],[]
if verbose:
log.info("Reading...")
selection = [ii
for source in sourcename
for tel in telescope
for li in line
for ii in classobj.select_spectra(sourcere=source,
telescope=tel,
line=li,
posang=posang)]
sphdr = classobj.read_observations(selection)
if len(sphdr) == 0:
return None
spec,hdr = zip(*sphdr)
spectra += spec
headers += hdr
indexes = headers
weight = ~flag_array if flag_array is not None else None
if downsample_factor is not None:
if verbose:
log.info("Downsampling...")
spectra = [downsample_1d(spec, downsample_factor,
weight=weight)
for spec in ProgressBar(spectra)]
headers = [downsample_header(h, downsample_factor)
for h in ProgressBar(headers)]
for hdr in headers:
stringify_header(hdr)
return spectra,headers,indexes
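# Example usage (a sketch; the filename and selection values below are
# hypothetical, not taken from this module):
#   spectra, headers, indexes = read_class('observations.smt',
#                                          sourcename='W51', line='HCOP(3-2)',
#                                          downsample_factor=2)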
def stringify_header(header):
from six import string_types, integer_types
import string
FITS_allowed_types = (string_types + integer_types +
(float, complex, bool, np.floating, np.integer,
np.complexfloating, np.bool_))
bad_chars = string.printable[96:]
badcharre = re.compile("[{0}]".format(bad_chars))
for key, value in header.items():
if isinstance(value, bytes):
header[key] = value.decode()
elif not isinstance(value, FITS_allowed_types):
header[key] = badcharre.sub("", str(header[key]))
def downsample_header(hdr, downsample_factor):
for k in ('NCHAN','NPOIN','DATALEN'):
if k in hdr:
hdr[k] = int((hdr[k] / downsample_factor))
# maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1
scalefactor = 1./downsample_factor
hdr['RCHAN'] = (hdr['RCHAN']-1)*scalefactor + 0.5 + scalefactor/2.
for kw in ['FRES','VRES']:
if kw in hdr:
hdr[kw] *= downsample_factor
return hdr
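# Worked check of the reference-channel formula above (illustrative): with
# downsample_factor=2, an original RCHAN of 1.5 (the midpoint of channels 1 and 2)
# maps to (1.5 - 1)*0.5 + 0.5 + 0.25 = 1.0, i.e. the first downsampled channel.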
def make_axis(header,imagfreq=False):
"""
Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS "header"
"""
    from .. import units
rest_frequency = header.get('RESTF')
xunits = 'MHz'
nchan = header.get('NCHAN')
voff = header.get('VOFF')
foff = header.get('FOFF')
doppler = header.get('DOPPLER')
fres = header.get('FRES')
refchan = header.get('RCHAN')
imfreq = header.get('IMAGE')
if foff in (None, 0.0) and voff not in (None, 0.0):
# Radio convention
foff = -voff/2.997924580e5 * rest_frequency
if not imagfreq:
xarr = rest_frequency + foff + (numpy.arange(1, nchan+1) - refchan) * fres
XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=rest_frequency*u.MHz)
else:
xarr = imfreq - (numpy.arange(1, nchan+1) - refchan) * fres
XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=imfreq*u.MHz)
return XAxis
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
imagfreq=False, DEBUG=False, **kwargs):
"""
Load an entire CLASS observing session into a list of ObsBlocks based on
    matches to the 'telescope', 'line' and 'source' names
Parameters
----------
filename : string
The Gildas CLASS data file to read the spectra from.
telescope : list
List of telescope names to be matched.
line : list
List of line names to be matched.
source : list (optional)
List of source names to be matched. Defaults to None.
imagfreq : bool
Create a SpectroscopicAxis with the image frequency.
"""
if datatuple is None:
spectra,header,indexes = read_class(filename, **kwargs)
else:
spectra,header,indexes = datatuple
obslist = []
lastscannum = -1
spectrumlist = None
for sp,hdr,ind in zip(spectra,header,indexes):
hdr.update(ind)
# this is slow but necessary...
H = pyfits.Header()
for k,v in iteritems(hdr):
if hasattr(v,"__len__") and not isinstance(v,str):
# make an array of header entries, but this
# supports only up to 10 of them...
if len(v) > 1:
if len(v) < 10:
for ii,vv in enumerate(v):
newkey = k[:7]+str(ii)
H[newkey] = vv
elif len(v) < 100:
for ii,vv in enumerate(v):
newkey = k[:6]+str(ii)
H[newkey] = vv
else:
raise ValueError("Too many entries for {0}".format(k))
else:
H[k] = v[0]
#elif not any(x in str(v).lower() for x in ('comment', 'end', 'history')):
# # do not try to add comments...
# This commented out block used to attempt to reject comments
# using a private regex in the old pyfits which no longer exists.
# I don't know if it was necessary.
else:
H[k] = v
scannum = hdr['SCAN']
if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
continue
if hdr['LINE'].strip() not in line:
continue
if (source is not None) and (hdr['SOURC'].strip() not in source):
continue
hdr['RESTFREQ'] = hdr.get('RESTF')
H['RESTFREQ'] = hdr.get('RESTF')
#print "Did not skip %s,%s. Scannum, last: %i,%i" % (hdr['XTEL'],hdr['LINE'],scannum,lastscannum)
        if scannum != lastscannum:
lastscannum = scannum
if spectrumlist is not None:
obslist.append(pyspeckit.ObsBlock(spectrumlist))
xarr = make_axis(hdr,imagfreq=imagfreq)
spectrumlist = [(
pyspeckit.Spectrum(xarr=xarr,
header=H,
data=sp))]
else:
spectrumlist.append(
pyspeckit.Spectrum(xarr=xarr,
header=H,
data=sp))
return obslist
class LazyItem(object):
"""
Simple lazy spectrum-retriever wrapper
"""
def __init__(self, parent):
self.parent = parent
self.sphdr = {}
self.nind = len(self.parent.allind)
self.nloaded = 0
def __repr__(self):
return ("Set of {0} spectra & headers, {1} loaded"
" ({2:0.2f}%)".format(self.nind, self.nloaded,
(float(self.nloaded)/self.nind)*100))
def load_all(self, progressbar=True):
self.load(range(self.nind))
def load(self, indices, progressbar=True):
pb = ProgressBar(len(indices))
counter = 0
for k in indices:
self[k]
counter += 1
pb.update(counter)
def __getitem__(self, key):
if key in self.sphdr:
return self.sphdr[key]
elif isinstance(key, slice):
return [self[k] for k in xrange(key.start or 0,
                                            key.stop or len(self.parent.allind),
key.step or 1)]
else:
sphd = read_observation(self.parent._file, key,
file_description=self.parent.file_description,
indices=self.parent.allind,
my_memmap=self.parent._data)
# Update the header with OTFSCAN and POSANG info
sphd[1].update(self.parent.allind[key])
self.sphdr[key] = sphd
self.nloaded += 1
return sphd
    def __iter__(self):
        return self.__next__()
    def __next__(self):
        for k in self.sphdr:
            yield self.sphdr[k]
def __contains__(self, key):
return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
"""
Load each individual spectrum within a CLASS file into a list of Spectrum
objects
"""
if datatuple is None:
spectra,header,indexes = read_class(filename, **kwargs)
else:
spectra,header,indexes = datatuple
spectrumlist = []
for sp,hdr,ind in zip(spectra,header,indexes):
hdr.update(ind)
xarr = make_axis(hdr)
spectrumlist.append(
pyspeckit.Spectrum(xarr=xarr,
header=hdr,
data=sp))
return pyspeckit.Spectra(spectrumlist)
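# Example (a sketch; the filename and line name are hypothetical):
#   spectra = class_to_spectra('observations.smt', line=['HCOP(3-2)'])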
def tests():
"""
Tests are specific to the machine on which this code was developed.
"""
fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
#fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'
#fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'
#fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'
#F1 = read_class(fn1)#,DEBUG=True)
#F2 = read_class(fn2)
n2hp = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])
hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)']) |
|
pyspeckit__pyspeckit | cubes.rst | Module doc / Tutorial | Generate documentation for this module | MIT License | pyspeckit__pyspeckit/docs/cubes.rst | [
"pyspeckit__pyspeckit/pyspeckit/cubes/mapplot.py",
"pyspeckit__pyspeckit/pyspeckit/cubes/cubes.py"
] | Cubes
Pyspeckit can do a few things with spectral cubes. The most interesting
is the spectral line fitting.
~pyspeckit.cubes.SpectralCube.Cube objects have a
~pyspeckit.cubes.SpectralCube.Cube.fiteach method that will fit each
spectral line within a cube. It can be made to do this in parallel with
the multicore option.
As of version 0.16, pyspeckit cubes can be read from SpectralCube
objects:
>>> pcube = pyspeckit.Cube(cube=mySpectralCube)
Otherwise, they can be created from FITS cubes on disk:
>>> pcube = pyspeckit.Cube(filename="mycube.fits")
or from arrays:
>>> mycube = np.random.randn(250,50,50)
>>> myxaxis = np.linspace(-100,100,250)
>>> pcube = pyspeckit.Cube(cube=mycube, xarr=myxaxis, xunit='km/s')
The most interesting features of the ~pyspeckit.cubes.SpectralCube.Cube
object are the ~pyspeckit.cubes.SpectralCube.Cube.fiteach method, which
fits a model spectrum to each element of the cube, and
mapplot <pyspeckit.cubes.mapplot.MapPlotter>, which plots up various
projections of the cube.
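For example, a single-Gaussian fit over the whole cube followed by a map of the
first fitted parameter plane might look like the sketch below (the fittype,
guesses, and multicore values are illustrative assumptions, not values taken
from this document):

>>> pcube.fiteach(fittype='gaussian', guesses=[1, 0, 10], multicore=4)
>>> pcube.mapplot(estimator=0)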
Cube.mapplot <pyspeckit.cubes.mapplot.MapPlotter> will create an
interactive plot window. You can click on any pixel shown in that window
and pull up a second window showing the spectrum at that pixel. If
you've fitted the cube, the associated best-fit model will also be
shown. This interactive setup can be a bit fragile, though, so please
report bugs aggressively so we can weed them out!
The interactive viewer has a few button interactions described here
<pyspeckit.cubes.mapplot.MapPlotter.mapplot>. | """
MapPlot
-------
Make plots of the cube and interactively connect them to spectrum plotting.
This is really an interactive component of the package; nothing in here is
meant for publication-quality plots, but more for user interactive analysis.
That said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,
so it is possible to make publication-quality plots.
:author: Adam Ginsburg
:date: 03/17/2011
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
import matplotlib
import matplotlib.figure
import numpy as np
import copy
import itertools
import six
try:
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
pywcsOK = True
except ImportError:
try:
import pyfits
import pywcs
pywcsOK = True
except ImportError:
pywcsOK = False
try:
import aplpy
icanhasaplpy = True
except: # aplpy fails with generic exceptions instead of ImportError
icanhasaplpy = False
from . import cubes
class MapPlotter(object):
"""
Class to plot a spectrum
See `mapplot` for use documentation; this docstring is only for
initialization.
"""
def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):
"""
Create a map figure for future plotting
"""
import matplotlib.pyplot
self._pyplot = matplotlib.pyplot
# figure out where to put the plot
if isinstance(figure,matplotlib.figure.Figure):
self.figure = figure
elif type(figure) is int:
self.figure = self._pyplot.figure(figure)
else:
self.figure = None
self.axis = None
self.FITSFigure = None
self._click_marks = []
self._circles = []
self._clickX = None
self._clickY = None
self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c','m', 'y'])
self.overplot_linestyle = '-'
self.Cube = Cube
if self.Cube is not None:
self.header = cubes.flatten_header(self.Cube.header, delete=True)
if pywcsOK:
self.wcs = pywcs.WCS(self.header)
if doplot: self.mapplot(**kwargs)
def __call__(self, **kwargs):
""" see mapplot """
return self.mapplot(**kwargs)
def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,
vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):
"""
Plot up a map based on an input data cube.
The map to be plotted is selected using `makeplane`.
The `estimator` keyword argument is passed to that function.
The plotted map, once shown, is interactive. You can click on it with any
of the three mouse buttons.
Button 1 or keyboard '1':
Plot the selected pixel's spectrum in another window. Mark the
clicked pixel with an 'x'
Button 2 or keyboard 'o':
Overplot a second (or third, fourth, fifth...) spectrum in the
external plot window
Button 3:
Disconnect the interactive viewer
You can also click-and-drag with button 1 to average over a circular
region. This same effect can be achieved by using the 'c' key to
set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,
hover over the center and press 'c', then hover some distance away and
press 'r').
Parameters
----------
convention : 'calabretta' or 'griesen'
The default projection to assume for Galactic data when plotting
with aplpy.
colorbar : bool
Whether to show a colorbar
plotkwargs : dict, optional
A dictionary of keyword arguments to pass to aplpy.show_colorscale
or matplotlib.pyplot.imshow
useaplpy : bool
Use aplpy if a FITS header is available
vmin, vmax: float or None
Override values for the vmin/vmax values. Will be automatically
determined if left as None
.. todo:
Allow mapplot in subfigure
"""
if (self.figure is None):
self.figure = self._pyplot.figure()
elif (not self._pyplot.fignum_exists(self.figure.number)):
self.figure = self._pyplot.figure()
else:
self._disconnect()
self.figure.clf()
# this is where the map is created; everything below this is just plotting
self.makeplane(**kwargs)
        # have to pop the estimator out so that the remaining kwargs can be passed to imshow
if 'estimator' in kwargs:
kwargs.pop('estimator')
# Below here is all plotting stuff
if vmin is None: vmin = self.plane[self.plane==self.plane].min()
if vmax is None: vmax = self.plane[self.plane==self.plane].max()
if icanhasaplpy and useaplpy:
self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)
self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)
self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if hasattr(self.FITSFigure, '_ax1'):
self.axis = self.FITSFigure._ax1
else:
self.axis = self.FITSFigure.ax
if colorbar:
try:
self.FITSFigure.add_colorbar()
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # FITS convention
# TODO: set _origin to 1 if using PIXEL units, not real wcs
else:
self.axis = self.figure.add_subplot(111)
if hasattr(self,'colorbar') and self.colorbar is not None:
if self.colorbar.ax in self.axis.figure.axes:
self.axis.figure.delaxes(self.colorbar.ax)
self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if colorbar:
try:
self.colorbar = self._pyplot.colorbar(self.axis.images[0])
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # normal convention
self.canvas = self.axis.figure.canvas
self._connect()
def _connect(self):
""" Connect click, click up (release click), and key press to events """
self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)
self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)
self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)
def _disconnect(self):
""" Disconnect click, click up (release click), and key press from events """
if hasattr(self,'canvas'):
self.canvas.mpl_disconnect(self.clickid)
self.canvas.mpl_disconnect(self.clickupid)
self.canvas.mpl_disconnect(self.keyid)
def makeplane(self, estimator=np.nanmean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
        estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
            a .fits filename will be read using pyfits (so you can make your own cover figure)
            an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
# estimator is NOT duck-typed
if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif isinstance(estimator, six.string_types):
            if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
def click(self,event):
"""
Record location of downclick
"""
if event.inaxes:
self._clickX = np.round(event.xdata) - self._origin
self._clickY = np.round(event.ydata) - self._origin
def plot_spectrum(self, event, plot_fit=True):
"""
Connects map cube to Spectrum...
"""
self.event = event
if event.inaxes:
clickX = np.round(event.xdata) - self._origin
clickY = np.round(event.ydata) - self._origin
# grab toolbar info so that we don't do anything if a tool is selected
tb = self.canvas.toolbar
            if tb.mode != '':
return
elif event.key is not None:
if event.key == 'c':
self._center = (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
elif event.key == 'r':
x,y = self._center
self._add_circle(x,y,clickX,clickY)
self.circle(x,y,clickX-1,clickY-1)
elif event.key == 'o':
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.key in ('1','2'):
event.button = int(event.key)
event.key = None
self.plot_spectrum(event)
elif (hasattr(event,'button') and event.button in (1,2)
and not (self._clickX == clickX and self._clickY == clickY)):
if event.button == 1:
self._remove_circle()
clear=True
color = 'k'
                    linestyle = 'steps-mid'
else:
color = next(self.overplot_colorcycle)
linestyle = self.overplot_linestyle
clear=False
rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5
print("Plotting circle from point %i,%i to %i,%i (r=%f)" % (self._clickX,self._clickY,clickX,clickY,rad))
self._add_circle(self._clickX,self._clickY,clickX,clickY)
self.circle(self._clickX,self._clickY,clickX,clickY,clear=clear,linestyle=linestyle,color=color)
elif hasattr(event,'button') and event.button is not None:
if event.button==1:
clickX,clickY = round(clickX),round(clickY)
print("Plotting spectrum from point %i,%i" % (clickX,clickY))
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
self.Cube.plot_spectrum(clickX,clickY,clear=True)
if plot_fit: self.Cube.plot_fit(clickX, clickY, silent=True)
elif event.button==2:
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX,clickY))
color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX,clickY,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.button==3:
print("Disconnecting GAIA-like tool")
self._disconnect()
else:
print("Call failed for some reason: ")
print("event: ",event)
else:
pass
# never really needed... warn("Click outside of axes")
def _add_click_mark(self,x,y,clear=False,color='k'):
"""
Add an X at some position
"""
if clear:
self._clear_click_marks()
if self.FITSFigure is not None:
label = 'xmark%i' % (len(self._click_marks)+1)
x,y = self.FITSFigure.pixel2world(x,y)
self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)
self._click_marks.append( label )
else:
self._click_marks.append( self.axis.plot(x,y,'kx') )
self.refresh()
def _clear_click_marks(self):
"""
Remove all marks added by previous clicks
"""
if self.FITSFigure is not None:
for mark in self._click_marks:
if mark in self.FITSFigure._layers:
self.FITSFigure.remove_layer(mark)
else:
            for mark in list(self._click_marks):  # iterate over a copy: items are removed inside the loop
self._click_marks.remove(mark)
if mark in self.axis.lines:
self.axis.lines.remove(mark)
self.refresh()
def _add_circle(self,x,y,x2,y2,**kwargs):
"""
"""
if self.FITSFigure is not None:
x,y = self.FITSFigure.pixel2world(x,y)
x2,y2 = self.FITSFigure.pixel2world(x2,y2)
r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))
#self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')
layername = "circle%02i" % len(self._circles)
self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)
self._circles.append(layername)
else:
r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))
circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)
self._circles.append( circle )
self.axis.patches.append(circle)
self.refresh()
def _remove_circle(self):
"""
"""
if self.FITSFigure is not None:
for layername in self._circles:
if layername in self.FITSFigure._layers:
self.FITSFigure.remove_layer(layername)
else:
            for circle in list(self._circles):  # iterate over a copy: items are removed inside the loop
if circle in self.axis.patches:
self.axis.patches.remove(circle)
self._circles.remove(circle)
self.refresh()
def refresh(self):
if self.axis is not None:
self.axis.figure.canvas.draw()
def circle(self,x1,y1,x2,y2,**kwargs):
"""
Plot the spectrum of a circular aperture
"""
r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))
self.Cube.plot_apspec([x1,y1,r],**kwargs)
#self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r], coordsys=None )
#self.Cube.plotter()
def copy(self, parent=None):
"""
Create a copy of the map plotter with blank (uninitialized) axis & figure
[ parent ]
A spectroscopic axis instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
newmapplot = copy.copy(self)
newmapplot.Cube = parent
newmapplot.axis = None
newmapplot.figure = None
return newmapplot
"""
~~~~~~~~
cubes.py
~~~~~~~~
From `agpy <http://code.google.com/p/agpy/source/browse/trunk/agpy/cubes.py>`_,
contains functions to perform various transformations on data cubes and their
headers.
"""
from __future__ import print_function
from six.moves import xrange
from numpy import sqrt,repeat,indices,newaxis,pi,cos,sin,array,mean,nansum
from math import acos,atan2,tan
import numpy
import numpy as np
import copy
import os
import astropy.io.fits as fits
import astropy.wcs as pywcs
import tempfile
import warnings
import astropy
from astropy import coordinates
from astropy import log
try:
from AG_fft_tools import smooth
smoothOK = True
except ImportError:
smoothOK = False
try:
from scipy.interpolate import UnivariateSpline
scipyOK = True
except ImportError:
scipyOK = False
from . import posang  # agpy code
from ..parallel_map import parallel_map
dtor = pi/180.0
def blfunc_generator(x=None, polyorder=None, splineorder=None,
sampling=1):
"""
Generate a function that will fit a baseline (polynomial or spline) to a
data set. Either ``splineorder`` or ``polyorder`` must be set
Parameters
----------
x : np.ndarray or None
The X-axis of the fitted array. Will be set to
``np.arange(len(data))`` if not specified
polyorder : None or int
The polynomial order.
splineorder : None or int
sampling : int
The sampling rate to use for the data. Can set to higher numbers to
effectively downsample the data before fitting
"""
def blfunc(args, x=x):
yfit,yreal = args
if hasattr(yfit,'mask'):
mask = ~yfit.mask
else:
mask = np.isfinite(yfit)
if x is None:
x = np.arange(yfit.size, dtype=yfit.dtype)
ngood = np.count_nonzero(mask)
if polyorder is not None:
if ngood < polyorder:
return yreal
else:
endpoint = ngood - (ngood % sampling)
y = np.mean([yfit[mask][ii:endpoint:sampling]
for ii in range(sampling)], axis=0)
                polypars = np.polyfit(x[mask][sampling//2:endpoint:sampling],
y, polyorder)
return yreal-np.polyval(polypars, x).astype(yreal.dtype)
elif splineorder is not None and scipyOK:
if splineorder < 1 or splineorder > 4:
raise ValueError("Spline order must be in {1,2,3,4}")
elif ngood <= splineorder:
return yreal
else:
log.debug("splinesampling: {0} "
"splineorder: {1}".format(sampling, splineorder))
endpoint = ngood - (ngood % sampling)
y = np.mean([yfit[mask][ii:endpoint:sampling]
for ii in range(sampling)], axis=0)
if len(y) <= splineorder:
raise ValueError("Sampling is too sparse. Use finer sampling or "
"decrease the spline order.")
                spl = UnivariateSpline(x[mask][sampling//2:endpoint:sampling],
y,
k=splineorder,
s=0)
return yreal-spl(x)
else:
raise ValueError("Must provide polyorder or splineorder")
return blfunc
def baseline_cube(cube, polyorder=None, cubemask=None, splineorder=None,
numcores=None, sampling=1):
"""
Given a cube, fit a polynomial to each spectrum
Parameters
----------
cube: np.ndarray
An ndarray with ndim = 3, and the first dimension is the spectral axis
polyorder: int
Order of the polynomial to fit and subtract
cubemask: boolean ndarray
Mask to apply to cube. Values that are True will be ignored when
fitting.
numcores : None or int
Number of cores to use for parallelization. If None, will be set to
the number of available cores.
"""
x = np.arange(cube.shape[0], dtype=cube.dtype)
#polyfitfunc = lambda y: np.polyfit(x, y, polyorder)
blfunc = blfunc_generator(x=x,
splineorder=splineorder,
polyorder=polyorder,
sampling=sampling)
reshaped_cube = cube.reshape(cube.shape[0], cube.shape[1]*cube.shape[2]).T
if cubemask is None:
log.debug("No mask defined.")
fit_cube = reshaped_cube
else:
        if cubemask.dtype != 'bool':
            raise TypeError("Cube mask *must* be a boolean array.")
        if cubemask.shape != cube.shape:
            raise ValueError("Mask shape does not match cube shape")
log.debug("Masking cube with shape {0} "
"with mask of shape {1}".format(cube.shape, cubemask.shape))
masked_cube = cube.copy()
masked_cube[cubemask] = np.nan
fit_cube = masked_cube.reshape(cube.shape[0], cube.shape[1]*cube.shape[2]).T
baselined = np.array(parallel_map(blfunc, zip(fit_cube,reshaped_cube), numcores=numcores))
blcube = baselined.T.reshape(cube.shape)
return blcube
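# Example usage (a sketch, not from the original module): remove a first-order
# baseline from every spectrum of a synthetic (spectral, y, x) cube.
#   cube = np.random.randn(100, 5, 5) + np.linspace(0, 1, 100)[:, None, None]
#   blcube = baseline_cube(cube, polyorder=1)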
def flatten_header(header,delete=False):
"""
Attempt to turn an N-dimensional fits header into a 2-dimensional header
Turns all CRPIX[>2] etc. into new keywords with suffix 'A'
header must be a fits.Header instance
"""
if not isinstance(header,fits.Header):
raise Exception("flatten_header requires a fits.Header instance")
newheader = header.copy()
    for key in list(newheader.keys()):  # list() so keys can be popped while iterating
try:
if delete and int(key[-1]) >= 3 and key[:2] in ['CD','CR','CT','CU','NA']:
newheader.pop(key)
elif (int(key[-1]) >= 3 or int(key[2])>=3) and key[:2] in ['CD','CR','CT','CU','NA','PC']:
newheader.rename_keyword(key,'A'+key,force=True)
if delete and (int(key[4]) >= 3 or int(key[7]) >= 3) and key[:2]=='PC' and key in newheader:
newheader.pop(key)
except ValueError:
# if key[-1] is not an int
pass
except IndexError:
# if len(key) < 2
pass
newheader['NAXIS'] = 2
if header.get('WCSAXES'):
newheader['WCSAXES'] = 2
return newheader
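# Typical use (a sketch; the filename is hypothetical): build a 2-D celestial
# WCS from a cube header, e.g. for plotting an integrated map.
#   header2d = flatten_header(fits.getheader('cube.fits'), delete=True)
#   wcs2d = pywcs.WCS(header2d)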
def speccen_header(header, lon=None, lat=None, proj='TAN', system='celestial',
spectral_axis=3, celestial_axes=[1,2]):
"""
Turn a cube header into a spectrum header, retaining RA/Dec vals where possible
(speccen is like flatten; spec-ify would be better but, specify? nah)
Assumes 3rd axis is velocity
"""
newheader = header.copy()
new_spectral_axis = 1
newheader['CRVAL{0}'.format(new_spectral_axis)] = header.get('CRVAL{0}'.format(spectral_axis))
newheader['CRPIX{0}'.format(new_spectral_axis)] = header.get('CRPIX{0}'.format(spectral_axis))
if 'CD{0}_{0}'.format(new_spectral_axis) in header:
newheader.rename_keyword('CD{0}_{0}'.format(new_spectral_axis),
'OLDCD{0}_{0}'.format(new_spectral_axis))
elif 'CDELT{0}'.format(new_spectral_axis) in header:
newheader.rename_keyword('CDELT{0}'.format(new_spectral_axis),'OLDCDEL{0}'.format(new_spectral_axis))
if 'CD{0}_{0}'.format(spectral_axis) in header:
newheader['CDELT{0}'.format(new_spectral_axis)] = header.get('CD{0}_{0}'.format(spectral_axis))
elif 'CDELT{0}'.format(spectral_axis) in header:
newheader['CDELT{0}'.format(new_spectral_axis)] = header.get('CDELT{0}'.format(spectral_axis))
newheader['CTYPE{0}'.format(new_spectral_axis)] = 'VRAD'
if header.get('CUNIT{0}'.format(spectral_axis)):
newheader['CUNIT{0}'.format(new_spectral_axis)] = header.get('CUNIT{0}'.format(spectral_axis))
else:
print("Assuming CUNIT3 is km/s in speccen_header")
newheader['CUNIT{0}'.format(new_spectral_axis)] = 'km/s'
newheader['CRPIX2'] = 1
newheader['CRPIX{0}'.format(spectral_axis)] = 1
if system == 'celestial':
c2 = 'RA---'
c3 = 'DEC--'
elif system == 'galactic':
c2 = 'GLON-'
c3 = 'GLAT-'
elif system == 'PIXEL':
c2 = 'PIX--'
c3 = 'PIX--'
newheader['CTYPE2'] = c2+proj
newheader['CTYPE{0}'.format(spectral_axis)] = c3+proj
if lon is not None:
newheader['CRVAL2'] = lon
if lat is not None:
newheader['CRVAL{0}'.format(spectral_axis)] = lat
if 'CD2_2' in header:
newheader.rename_keyword('CD2_2','OLDCD2_2')
if 'CD{0}_{0}'.format(spectral_axis) in header:
newheader.rename_keyword('CD{0}_{0}'.format(spectral_axis),
'OLDCD{0}_{0}'.format(spectral_axis))
if 'CROTA2' in header:
newheader.rename_keyword('CROTA2','OLDCROT2')
return newheader
def extract_aperture(cube, ap, r_mask=False, wcs=None,
coordsys='galactic', wunit='arcsec', debug=False,
method='mean'):
"""
Extract an aperture from a data cube. E.g. to acquire a spectrum
of an outflow that is extended.
Cube should have shape [z,y,x], e.g.
cube = fits.getdata('datacube.fits')
Apertures are specified in PIXEL units with an origin of 0,0 (NOT the 1,1
fits standard!) unless wcs and coordsys are specified
Parameters
----------
ap : list
For a circular aperture, len(ap)=3:
ap = [xcen,ycen,radius]
For an elliptical aperture, len(ap)=5:
ap = [xcen,ycen,height,width,PA]
wcs : wcs
a pywcs.WCS instance associated with the data cube
coordsys : str
the coordinate system the aperture is specified in.
Options are 'celestial' and 'galactic'. Default is 'galactic'
wunit : str
units of width/height. default 'arcsec', options 'arcmin' and 'degree'
method : str
        'mean' or 'sum' (average over spectra, or sum them)
or 'error' for sqrt(sum-of-squares / n)
Other Parameters
----------------
r_mask : bool
return mask in addition to spectrum (for error checking?)
"""
warnings.warn("SpectralCube can do what subimage_integ does much more easily!",
DeprecationWarning)
if wcs is not None and coordsys is not None:
if debug:
print("Converting aperture ",ap,)
ap = aper_world2pix(ap,wcs,coordsys=coordsys,wunit=wunit)
if debug:
print(" to ",ap)
if len(ap) == 3:
sh = cube.shape
yind,xind = indices(sh[1:3]) # recall that python indices are backwards
dis = sqrt((xind-ap[0])**2+(yind-ap[1])**2)
mask = dis < ap[2]
elif len(ap) == 5:
yinds,xinds = indices(cube.shape[1:3])
th = (ap[4])*dtor
xindr = (xinds-ap[0])*cos(th) + (yinds-ap[1])*sin(th)
yindr = (xinds-ap[0])*-sin(th) + (yinds-ap[1])*cos(th)
ratio = max(ap[2:4])/min(ap[2:4])
mask = ((xindr*ratio)**2 + yindr**2)**0.5 < max(ap[2:4])
else:
raise Exception("Wrong number of parameters. Need either 3 parameters "
"for a circular aperture or 5 parameters for an "
"elliptical aperture.")
npixinmask = mask.sum()
    if method == 'mean':
        specsum = nansum(cube[:, mask], axis=1)
        spec = specsum / npixinmask
    elif method == 'error':
        specsum = nansum(cube[:, mask]**2, axis=1)
        spec = (specsum)**0.5 / npixinmask
    else:
        # any other method (e.g. 'sum'): return the straight sum over the aperture
        spec = nansum(cube[:, mask], axis=1)
if r_mask:
return spec,mask
else:
return spec
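# Example (a sketch; the pixel values are hypothetical): mean spectrum within a
# 5-pixel-radius circle centered on pixel (x, y) = (24, 30) of a (z, y, x) cube.
# Note the deprecation warning above: spectral_cube's subcube tools are preferred.
#   spec = extract_aperture(cube, [24, 30, 5])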
def integ(file,vrange,xcen=None,xwidth=None,ycen=None,ywidth=None,**kwargs):
"""
wrapper of subimage_integ that defaults to using the full image
"""
if isinstance(file,fits.PrimaryHDU):
header = file.header
cube = file.data
elif isinstance(file,fits.HDUList):
header = file[0].header
cube = file[0].data
else:
file = fits.open(file)
header = file[0].header
cube = file[0].data
if None in [xcen,xwidth,ycen,ywidth]:
xcen = header['NAXIS1'] / 2
xwidth = xcen + header['NAXIS1'] % 2
ycen = header['NAXIS2'] / 2
ywidth = ycen + header['NAXIS2'] % 2
return subimage_integ(cube,xcen,xwidth,ycen,ywidth,vrange,header=header,**kwargs)
def subimage_integ(cube, xcen, xwidth, ycen, ywidth, vrange, header=None,
average=mean, dvmult=False, return_HDU=False,
units="pixels", zunits=None):
"""
Returns a sub-image from a data cube integrated over the specified velocity range
NOTE: With `spectral_cube <spectral-cube.rtfd.org>`_, subcube features can
be easily applied with the `.subcube` method, and integration is handled
separately.
Parameters
----------
cube : np.ndarray
A 3-dimensional numpy array with dimensions (velocity, y, x)
xcen,ycen : float
The center in the X,Y-dimension. See `units` below for unit information
xwidth,ywidth : float
The width in the X,Y-dimension. See `units` below for unit information
xwidth and ywidth are "radius" values, i.e. half the length that will be extracted
vrange : (float,float)
The velocity range to integrate over. See `zunits` below for unit information
header : `astropy.io.fits.Header` or None
If specified, will allow the use of WCS units
average : function
The function to apply when 'integrating' over the subcube
dvmult : bool
If dvmult is set, multiply the average by DV (this is useful if you set
average=sum and dvmult=True to get an integrated value, e.g. K km/s or
Jy km/s)
return_HDU : bool
If specified, will return an HDU object, otherwise will return the
array and header
units : 'pixels' or 'wcs'
If 'pixels', all units (xcen, ycen, xwidth, ywidth) will be in pixels.
If 'wcs', the values will be converted from WCS units to pixel units
using the WCS specified by the `header`
zunits : 'pixels' or 'wcs' or None
If None, will be set to be the same as `units`
Returns
-------
subim, hdu : tuple
A tuple (integrated array, header) if ``return_hdu`` is ``False``, or an HDU if
it is True
"""
if header:
flathead = flatten_header(header.copy())
wcs = pywcs.WCS(header=flathead)
if header.get('CD3_3'): CD3 = header.get('CD3_3')
else: CD3 = header.get('CDELT3')
if units=="pixels":
xlo = int( max([xcen-xwidth,0]) )
ylo = int( max([ycen-ywidth,0]) )
xhi = int( min([xcen+xwidth,cube.shape[2]]) )
yhi = int( min([ycen+ywidth,cube.shape[1]]) )
elif units=="wcs" and header:
newxcen,newycen = wcs.wcs_world2pix(xcen,ycen,0)
try:
newxwid,newywid = xwidth / abs(wcs.wcs.cd[0,0]), ywidth / abs(wcs.wcs.cd[1,1])
except AttributeError:
newxwid,newywid = xwidth / abs(wcs.wcs.cdelt[0]), ywidth / abs(wcs.wcs.cdelt[1])
xlo = int( max([newxcen-newxwid,0]) )
ylo = int( max([newycen-newywid,0]) )
xhi = int( min([newxcen+newxwid,cube.shape[2]]) )
yhi = int( min([newycen+newywid,cube.shape[1]]) )
else:
print("Can only use wcs if you pass a header.")
if zunits is None:
zunits = units
if zunits == 'pixels':
zrange = vrange
if zunits == 'wcs':
zrange = ( array(vrange)-header.get('CRVAL3') ) / CD3 - 1 + header.get('CRPIX3')
subim = average(cube[zrange[0]:zrange[1],ylo:yhi,xlo:xhi],axis=0)
if dvmult and CD3: subim *= CD3
elif dvmult:
print("Error: could not multiply by dv; CD3=",CD3)
if header is None:
return subim
else:
# Cannot set crval2!= 0 for Galactic coordinates: therefore, probably
# wrong approach in general
#crv1,crv2 = wcs.wcs_pix2world(xlo,ylo,0)
#try:
# flathead['CRVAL1'] = crv1[0]
# flathead['CRVAL2'] = crv2[0]
#except IndexError:
# flathead['CRVAL1'] = crv1.item() # np 0-d arrays are not scalar
# flathead['CRVAL2'] = crv2.item() # np 0-d arrays are not scalar
# xlo, ylo have been forced to integers already above
flathead['CRPIX1'] = flathead['CRPIX1'] - xlo
flathead['CRPIX2'] = flathead['CRPIX2'] - ylo
if return_HDU:
return fits.PrimaryHDU(data=subim,header=flathead)
else:
return subim,flathead
def subcube(cube, xcen, xwidth, ycen, ywidth, header=None,
dvmult=False, return_HDU=False, units="pixels",
widthunits="pixels"):
"""
Crops a data cube
All units assumed to be pixel units
cube has dimensions (velocity, y, x)
xwidth and ywidth are "radius" values, i.e. half the length that will be extracted
if dvmult is set, multiply the average by DV (this is useful if you set
average=sum and dvmult=True to get an integrated value)
"""
if header:
newheader = header.copy()
flathead = flatten_header(header.copy())
wcs = pywcs.WCS(header=flathead)
if widthunits == "pixels":
newxwid, newywid = xwidth, ywidth
elif widthunits == "wcs":
try:
newxwid,newywid = xwidth / abs(wcs.wcs.cd[0,0]), ywidth / abs(wcs.wcs.cd[1,1])
except AttributeError:
newxwid,newywid = xwidth / abs(wcs.wcs.cdelt[0]), ywidth / abs(wcs.wcs.cdelt[1])
else:
raise Exception("widthunits must be either 'wcs' or 'pixels'")
if units=="pixels":
newxcen,newycen = xcen,ycen
elif units=="wcs" and header:
newxcen,newycen = wcs.wcs_world2pix(xcen,ycen,0)
else:
raise Exception("units must be either 'wcs' or 'pixels'")
x1 = int( numpy.floor( max([newxcen-newxwid,0]) ) )
y1 = int( numpy.floor( max([newycen-newywid,0]) ) )
x2 = int( numpy.ceil( min([newxcen+newxwid,cube.shape[2]]) ) )
y2 = int( numpy.ceil( min([newycen+newywid,cube.shape[1]]) ) )
xhi = max(x1,x2)
xlo = min(x1,x2)
yhi = max(y1,y2)
ylo = min(y1,y2)
subim = cube[:,ylo:yhi,xlo:xhi]
if return_HDU:
xmid_sky,ymid_sky = wcs.wcs_pix2world(xlo+xwidth,ylo+ywidth,0)
try:
newheader['CRVAL1'] = xmid_sky[0]
newheader['CRVAL2'] = ymid_sky[0]
except IndexError:
newheader['CRVAL1'] = float(xmid_sky)
newheader['CRVAL2'] = float(ymid_sky)
newheader['CRPIX1'] = 1+xwidth
newheader['CRPIX2'] = 1+ywidth
newHDU = fits.PrimaryHDU(data=subim,header=newheader)
if newHDU.header.get('NAXIS1') == 0 or newHDU.header.get('NAXIS2') == 0:
raise Exception("Cube has been cropped to 0 in one dimension")
return newHDU
else:
return subim
def aper_world2pix(ap,wcs,coordsys='galactic',wunit='arcsec'):
"""
Converts an elliptical aperture (x,y,width,height,PA) from
WCS to pixel coordinates given an input wcs (an instance
of the pywcs.WCS class). Must be a 2D WCS header.
"""
convopt = {'arcsec':3600.0,'arcmin':60.0,'degree':1.0}
try:
conv = convopt[wunit]
except:
raise Exception("Must specify wunit='arcsec','arcmin', or 'degree'")
if len(wcs.wcs.cdelt)!= 2:
raise Exception("WCS header is not strictly 2-dimensional. Look for 3D keywords.")
if '' in wcs.wcs.ctype:
raise Exception("WCS header has no CTYPE.")
if coordsys.lower() == 'galactic':
pos = coordinates.SkyCoord(ap[0],ap[1],unit=('deg','deg'), frame='galactic')
elif coordsys.lower() in ('radec','fk5','icrs','celestial'):
pos = coordinates.SkyCoord(ap[0],ap[1],unit=('deg','deg'), frame='fk5')
if wcs.wcs.ctype[0][:2] == 'RA':
ra,dec = pos.icrs.ra.deg,pos.icrs.dec.deg
elif wcs.wcs.ctype[0][:4] == 'GLON':
ra,dec = pos.galactic.l.deg,pos.galactic.b.deg
else:
raise Exception("WCS CTYPE has no match.")
# workaround for a broken wcs.wcs_sky2pix
try:
radif = (wcs.wcs.crval[0]-ra)*dtor
gamma = acos(cos(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)*cos(radif)+sin(dec*dtor)*sin(wcs.wcs.crval[1]*dtor)) / dtor
theta = atan2( sin(radif), ( tan(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)-sin(wcs.wcs.crval[1]*dtor)*cos(radif) ) )
x = -gamma * sin(theta) / wcs.wcs.cd[0,0] + wcs.wcs.crpix[0]
y = gamma * cos(theta) / wcs.wcs.cd[1,1] + wcs.wcs.crpix[1]
except:
radif = (wcs.wcs.crval[0]-ra)*dtor
gamma = acos(cos(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)*cos(radif)+sin(dec*dtor)*sin(wcs.wcs.crval[1]*dtor)) / dtor
theta = atan2( sin(radif), ( tan(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)-sin(wcs.wcs.crval[1]*dtor)*cos(radif) ) )
x = -gamma * sin(theta) / wcs.wcs.cdelt[0] + wcs.wcs.crpix[0]
y = gamma * cos(theta) / wcs.wcs.cdelt[1] + wcs.wcs.crpix[1]
#print "DEBUG: x,y from math (vectors): ",x,y
#x,y = wcs.wcs_world2pix(ra,dec,0) # convert WCS coordinate to pixel coordinate (0 is origin, do not use fits convention)
#print "DEBUG: x,y from wcs: ",x,y
try:
x=x[0] - 1 # change from FITS to python convention
y=y[0] - 1 # change from FITS to python convention
#print "DEBUG: x,y from math: ",x,y
except:
pass
# cd is default, cdelt is backup
if len(ap) > 3:
try:
width = ap[2] / conv / abs(wcs.wcs.cd[0,0]) # first is width, second is height in DS9 PA convention
height = ap[3] / conv / abs(wcs.wcs.cd[0,0])
except:
width = ap[2] / conv / abs(wcs.wcs.cdelt[0]) # first is width, second is height in DS9 PA convention
height = ap[3] / conv / abs(wcs.wcs.cdelt[0])
apold = copy.copy(ap)
if len(ap) == 5:
PA = ap[4]
ap = [x,y,width,height,PA]
else:
ap = [x,y,width,height]
elif len(ap) == 3:
try:
width = ap[2] / conv / abs(wcs.wcs.cd[0,0]) # first is width, second is height in DS9 PA convention
except:
width = ap[2] / conv / abs(wcs.wcs.cdelt[0]) # first is width, second is height in DS9 PA convention
apold = copy.copy(ap)
ap = [x,y,width]
else:
raise TypeError("Aperture length is incorrect.")
return ap
def getspec(lon,lat,rad,cube,header,r_fits=True,inherit=True,wunit='arcsec'):
"""
Given a longitude, latitude, aperture radius (arcsec), and a cube file,
return a.fits file or a spectrum.
Parameters
----------
lon: float
lat: float
longitude and latitude center of a circular aperture in WCS coordinates
must be in coordinate system of the file
rad: float
radius (default degrees) of aperture
"""
convopt = {'arcsec':1.0,'arcmin':60.0,'degree':3600.0}
flathead = flatten_header(header)
wcs = pywcs.WCS(flathead)
if wcs.wcs.ctype[0][:2] == 'RA':
coordsys='celestial'
elif wcs.wcs.ctype[0][:4] == 'GLON':
coordsys='galactic'
spec = extract_aperture(cube,[lon,lat,rad],wcs=wcs,
coordsys=coordsys,wunit=wunit)
if nansum(spec) == 0:
print("Total of extracted spectrum was zero. lon,lat,rad: ",lon,lat,rad)
#import pdb; pdb.set_trace()
if r_fits:
if inherit:
newhead = header.copy()
else:
newhead = fits.Header()
try:
newhead['CD1_1'] = header['CD3_3']
except KeyError:
newhead['CD1_1'] = header['CDELT3']
newhead['CRPIX1'] = header['CRPIX3']
newhead['CRVAL1'] = header['CRVAL3']
try:
newhead['CTYPE1'] = header['CTYPE3']
except KeyError:
newhead['CTYPE1'] = "VRAD"
try:
newhead['CUNIT1'] = header['CUNIT3']
except KeyError:
print("Header did not contain CUNIT3 keyword. Defaulting to km/s")
newhead['CUNIT1'] = "km/s"
newhead['BUNIT'] = header['BUNIT']
newhead['APGLON'] = lon
newhead['APGLAT'] = lat
newhead['APRAD'] = (rad*convopt[wunit],'arcseconds') # radius in arcsec
newfile = fits.PrimaryHDU(data=spec,header=newhead)
return newfile
else:
return spec
def getspec_reg(cubefilename,region,**kwargs):
"""
Aperture extraction from a cube using a pyregion circle region
The region must be in the same coordinate system as the cube header
.. warning:: The second argument of getspec_reg requires a pyregion region list,
and therefore this code depends on `pyregion`_.
"""
ds9tocoords = {'fk5':'celestial','galactic':'galactic','icrs':'celestial'}
if region.name!= 'circle':
raise Exception("Only circular apertures are implemented so far")
l,b,r = region.coord_list
#pos = coords.Position([l,b],system=ds9tocoords[region.coord_format])
if isinstance(cubefilename,fits.HDUList):
cubefile = cubefilename
else:
cubefile = fits.open(cubefilename)
header = cubefile[0].header
cube = cubefile[0].data
if len(cube.shape) == 4: cube = cube[0,:,:,:]
sp = getspec(l,b,r,cube,header,wunit='degree',**kwargs)
return sp
def coords_in_image(fitsfile,lon,lat,system='galactic'):
"""
Determine whether the coordinates are inside the image
"""
if not isinstance(fitsfile,fits.HDUList):
fitsfile = fits.open(fitsfile)
wcs = pywcs.WCS(flatten_header(fitsfile[0].header))
if 'RA' in wcs.wcs.ctype[0]:
pos = coordinates.Position((lon,lat),system=system)
lon,lat = pos.j2000()
if 'GLON' in wcs.wcs.ctype[0]:
pos = coordinates.Position((lon,lat),system=system)
lon,lat = pos.galactic()
x,y = wcs.wcs_world2pix(lon,lat,0)
#DEBUG print x,y,wcs.naxis1,wcs.naxis2
if (0 < x < wcs.naxis1) and (0 < y < wcs.naxis2):
return True
else:
return False
def spectral_smooth(cube, smooth_factor, downsample=True, parallel=True,
numcores=None, **kwargs):
"""
Smooth the cube along the spectral direction
"""
yy,xx = numpy.indices(cube.shape[1:])
if downsample:
newshape = cube[::smooth_factor,:,:].shape
else:
newshape = cube.shape
# need to make the cube "flat" along dims 1&2 for iteration in the "map"
flatshape = (cube.shape[0],cube.shape[1]*cube.shape[2])
Ssmooth = lambda x: smooth.smooth(x, smooth_factor, downsample=downsample, **kwargs)
if parallel:
newcube = numpy.array(parallel_map(Ssmooth, cube.reshape(flatshape).T, numcores=numcores)).T.reshape(newshape)
else:
newcube = numpy.array(map(Ssmooth, cube.reshape(flatshape).T)).T.reshape(newshape)
#naive, non-optimal version
# for (x,y) in zip(xx.flat,yy.flat):
# newcube[:,y,x] = smooth.smooth(cube[:,y,x], smooth_factor,
# downsample=downsample, **kwargs)
return newcube
def plane_smooth(cube,cubedim=0,parallel=True,numcores=None,**kwargs):
"""
parallel-map the smooth function
Parameters
----------
parallel: bool
defaults True. Set to false if you want serial (for debug purposes?)
numcores: int
pass to parallel_map (None = use all available)
"""
if not smoothOK:
return
if cubedim!= 0:
cube = cube.swapaxes(0,cubedim)
cubelist = [cube[ii,:,:] for ii in xrange(cube.shape[0])]
Psmooth = lambda C: smooth.smooth(C,**kwargs)
if parallel:
smoothcube = array(parallel_map(Psmooth,cubelist,numcores=numcores))
else:
smoothcube = array(map(Psmooth,cubelist))
if cubedim!= 0:
smoothcube = smoothcube.swapaxes(0,cubedim)
return smoothcube
try:
import montage
def rotcrop_cube(x1, y1, x2, y2, cubename, outname, xwidth=25, ywidth=25,
in_system='galactic', out_system='equatorial',
overwrite=True, newheader=None, xcen=None, ycen=None):
"""
Crop a data cube and then rotate it with montage
"""
cubefile = fits.open(cubename)
if xcen is None and ycen is None:
pos1 = coordinates.Position([x1,y1],system=in_system)
pos2 = coordinates.Position([x2,y2],system=in_system)
if cubefile[0].header.get('CTYPE1')[:2] == 'RA':
x1,y1 = pos1.j2000()
x2,y2 = pos2.j2000()
coord_system = 'celestial'
elif cubefile[0].header.get('CTYPE1')[:4] == 'GLON':
x1,y1 = pos1.galactic()
x2,y2 = pos2.galactic()
coord_system = 'galactic'
xcen = (x1+x2)/2.0
ycen = (y1+y2)/2.0
print(xcen,ycen,xwidth,ywidth,coord_system)
else:
coord_system = in_system
sc = subcube(cubefile[0].data, xcen, xwidth, ycen, ywidth,
widthunits='pixels', units="wcs", header=cubefile[0].header,
return_HDU=True)
# note: there should be no security risk here because fits' writeto
# will not overwrite by default
tempcube = tempfile.mktemp(suffix='.fits')
sc.writeto(tempcube)
pa = posang.posang(x1,y1,x2,y2,system=coord_system) - 90
if newheader is None:
newheader = sc.header.copy()
cd11 = newheader.get('CDELT1') if newheader.get('CDELT1') else newheader.get('CD1_1')
cd22 = newheader.get('CDELT2') if newheader.get('CDELT2') else newheader.get('CD2_2')
cd12 = newheader.get('CD1_2') if newheader.get('CD1_2') else 0.0
cd21 = newheader.get('CD2_1') if newheader.get('CD2_1') else 0.0
cdelt = numpy.sqrt(cd11**2+cd12**2)
tempheader = tempfile.mktemp(suffix='.hdr')
ycensign = "+" if numpy.sign(ycen) >= 0 else "-"
montage.mHdr("%s %1s%s" % (xcen, ycensign, numpy.abs(ycen)), xwidth*cdelt,
tempheader, system=out_system, height=ywidth*cdelt,
pix_size=cdelt*3600.0, rotation=pa)
os.system("sed -i bck '/END/d' %s" % (tempheader))
newheader2 = fits.Header()
newheader2.fromTxtFile(tempheader)
#newheader2.fromtextfile(tempheader)
for key in ('CRPIX3','CRVAL3','CDELT3','CD3_3','CUNIT3','WCSTYPE3','CTYPE3'):
if newheader.get(key):
newheader2[key] = newheader.get(key)
if newheader.get('CD3_3') and newheader2.get('CDELT3') is None:
newheader2['CDELT3'] = newheader.get('CD3_3')
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
newheader2.toTxtFile(tempheader,overwrite=True)
else:
newheader2.toTxtFile(tempheader,clobber=True)
#if newheader2.get('CDELT3') is None:
# raise Exception("No CD3_3 or CDELT3 in header.")
else:
if isinstance(newheader,str):
newheader2 = fits.Header()
newheader2.fromTxtFile(newheader)
tempheader = tempfile.mktemp(suffix='.hdr')
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
newheader2.toTxtFile(tempheader,overwrite=True)
else:
newheader2.toTxtFile(tempheader,clobber=True)
montage.wrappers.reproject_cube(tempcube,outname,header=tempheader,clobber=overwrite)
#print "\n",outname
#os.system('imhead %s | grep CDELT' % outname)
# AWFUL hack because montage removes CDELT3
tempcube = fits.open(outname)
tempcube.header = newheader2
#if tempcube.header.get('CDELT3') is None:
# raise Exception("No CD3_3 or CDELT3 in header.")
#print tempcube.header.get('CDELT3')
if astropy.version.major >= 2 or (astropy.version.major==1 and astropy.version.minor>=3):
tempcube.writeto(outname,overwrite=True)
else:
tempcube.writeto(outname,clobber=True)
#print tempcube.get('CDELT3')
#print "\n",outname
#os.system('imhead %s | grep CDELT' % outname)
return
def resample_cube(cubefilename, header):
inhdr = fits.getheader(cubefilename)
except:
pass |
|
pyspeckit__pyspeckit | models.rst | Module doc / Tutorial | Generate documentation for this module | MIT License | pyspeckit__pyspeckit/docs/models.rst | [
"pyspeckit__pyspeckit/pyspeckit/spectrum/models/fitter.py",
"pyspeckit__pyspeckit/pyspeckit/spectrum/models/model.py"
] | Models
See parameters for information on how to restrict/modify model
parameters.
The generic SpectralModel class is a wrapper for model functions. A
model should take in an X-axis and some number of parameters. In order
to declare a SpectralModel, you give SpectralModel the function name and
the number of parameters it requires. The rest of the options are
optional, though parnames & shortvarnames are strongly recommended. If
you do not specify fitunits, your fitting code must deal with units
internally.
Here are some examples of how to make your own fitters:
hill5_fitter = model.SpectralModel(hill5_model, 5,
parnames=['tau', 'v_lsr', 'v_infall', 'sigma', 'tpeak'],
parlimited=[(True,False),(False,False),(True,False),(True,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0), (0,0)],
# specify the parameter names (TeX is OK)
shortvarnames=("\\tau","v_{lsr}","v_{infall}","\\sigma","T_{peak}"),
fitunits='Hz' )
gaussfitter = model.SpectralModel(gaussian, 3,
parnames=['amplitude','shift','width'],
parlimited=[(False,False),(False,False),(True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=('A',r'\Delta x',r'\sigma'))
Then you can register these fitters.
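As a minimal sketch of registration (assuming sp is an existing pyspeckit.Spectrum instance and the usual Registry.add_fitter interface; the fitter names here are only illustrative):
# Register the custom fitters so that specfit can refer to them by name.
sp.Registry.add_fitter('hill5', hill5_fitter, 5)
sp.Registry.add_fitter('gaussian_custom', gaussfitter, 3)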
Fitting
Once you have a model defined, you can fit it using the
pyspeckit.Spectrum.specfit module. Documents on fitting have not been
prepared yet, but you can learn most of the tricks by looking at the
various fitting examples and the parameters documentation.
See also fitting.
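For instance, a hedged sketch of running a fit with a registered model (the guesses are placeholders, ordered as tau, v_lsr, v_infall, sigma, tpeak):
# Fit the registered 'hill5' model with initial parameter guesses.
sp.specfit(fittype='hill5', guesses=[1.0, 0.0, 0.3, 1.0, 1.0])
# The best-fit values and errors are then available on the parinfo object.
print(sp.specfit.parinfo)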
TODO: Implement the gaussian-hermite profile described here:
http://pipelinesandarchives.blogspot.com/2012/09/fit1d-new-smurf-command-for-acsis-data.html
Specific Models
Ammonia Temperature and Hyperfine model <ammonia_model>
Formaldehyde model <formaldehyde_model>
HCN model <hcn_model>
hill5infall_model
n2hp_model
hydrogen_model
API Documentation for Models
We include the API documentation for the generic model and fitter
wrappers here.
| """
====================
SimpleFitter wrapper
====================
Adds a variable height (background) component to any model
Module API
^^^^^^^^^^
"""
import numpy
from pyspeckit.mpfit import mpfit
from numpy.ma import median
from pyspeckit.spectrum.moments import moments
class SimpleFitter(object):
def __init__(self):
pass
def moments(self, *args, **kwargs):
"""
Get the spectral moments from the moments package
"""
return moments(*args,**kwargs)
def vheightmodel(zeroheightmodel):
def vhm(xax, *pars,**kwargs):
"""
Wrapper function vhm to set variable height.
Parameter order: height, amplitude, shift, width
"""
vheight=True
if 'vheight' in kwargs:
vheight = kwargs.pop('vheight')
if vheight:
return zeroheightmodel(xax, *pars[1:],**kwargs) + pars[0]
else:
return zeroheightmodel(xax, *pars[1:],**kwargs)
vhm.__doc__ += zeroheightmodel.__doc__
return vhm
"""
=============================
Generic SpectralModel wrapper
=============================
.. moduleauthor:: Adam Ginsburg <[email protected]>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit,mpfitException
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import copy
from astropy import log
import matplotlib.cbook as mpcb
from. import fitter
from. import mpfit_messages
from pyspeckit.specwarnings import warn
from pyspeckit.spectrum.units import SpectroscopicAxis
import itertools
import operator
import six
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
except ImportError:
warn("OrderedDict is required for modeling. "
"If you have python <2.7, install the ordereddict module.")
# define the allowed guess types and the order in which they are received
valid_guess_types = ('amplitude', 'center', 'width')
class SpectralModel(fitter.SimpleFitter):
"""
A wrapper class for a spectra model. Includes internal functions to
generate multi-component models, annotations, integrals, and individual
components. The declaration can be complex, since you should name
individual variables, set limits on them, set the units the fit will be
performed in, and set the annotations to be used. Check out some
of the hyperfine codes (hcn, n2hp) for examples.
"""
def __init__(self, modelfunc, npars,
shortvarnames=("A","\\Delta x","\\sigma"),
fitunit=None,
centroid_par=None,
fwhm_func=None,
fwhm_pars=None,
integral_func=None,
use_lmfit=False,
guess_types=('amplitude', 'center', 'width'),
**kwargs):
"""
Spectral Model Initialization
Create a Spectral Model class for data fitting
Parameters
----------
modelfunc : function
the model function to be fitted. Should take an X-axis
(spectroscopic axis) as an input followed by input parameters.
Returns an array with the same shape as the input X-axis
npars : int
number of parameters required by the model
use_lmfit: bool
Use lmfit instead of mpfit to do the fitting
parnames : list (optional)
a list or tuple of the parameter names
parvalues : list (optional)
the initial guesses for the input parameters (defaults to ZEROS)
parlimits : list (optional)
the upper/lower limits for each variable (defaults to ZEROS)
parfixed : list (optional)
Can declare any variables to be fixed (defaults to ZEROS)
parerror : list (optional)
technically an output parameter. Specifying it here will have no
effect. (defaults to ZEROS)
partied : list (optional)
not the past tense of party. Can declare, via text, that
some parameters are tied to each other. Defaults to zeros like the
others, but it's not clear if that's a sensible default
fitunit : str (optional)
convert X-axis to these units before passing to model
parsteps : list (optional)
minimum step size for each parameter (defaults to ZEROS)
npeaks : list (optional)
default number of peaks to assume when fitting (can be overridden)
shortvarnames : list (optional)
TeX names of the variables to use when annotating
guess_types : tuple
A tuple listing the types of the different parameters when guessing.
The valid values are 'amplitude', 'width', and 'center'. These are
handled by parse_3par_guesses, which translate these into input
guess lists for the fitter. For a "standard" 3-parameter Gaussian
fitter, nothing changes, but for other models that have more than
3 parameters, some translation is needed.
Returns
-------
A tuple containing (model best-fit parameters, the model, parameter
errors, chi^2 value)
"""
self.modelfunc = modelfunc
if self.__doc__ is None:
self.__doc__ = modelfunc.__doc__
elif modelfunc.__doc__ is not None:
self.__doc__ += modelfunc.__doc__
self.npars = npars
self.default_npars = npars
self.fitunit = fitunit
# this needs to be set once only
self.shortvarnames = shortvarnames
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
self.use_lmfit = use_lmfit
# default name of parameter that represents the profile centroid
self.centroid_par = centroid_par
# FWHM function and parameters
self.fwhm_func = fwhm_func
self.fwhm_pars = fwhm_pars
# analytic integral function
self.integral_func = integral_func
for gt in guess_types:
if not isinstance(gt, float) and not any(g in gt for g in valid_guess_types):
raise ValueError("Guess type must be one of {0} or a float"
.format(valid_guess_types))
self.guess_types = guess_types
def __copy__(self):
# http://stackoverflow.com/questions/1500718/what-is-the-right-way-to-override-the-copy-deepcopy-operations-on-an-object-in-p
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
def __call__(self, *args, **kwargs):
log.debug("Fitter called with args={0} and kwargs={1}".format(args, kwargs))
use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit
if use_lmfit:
return self.lmfitter(*args,**kwargs)
return self.fitter(*args,**kwargs)
@property
def npeaks(self):
return int(self._npeaks)
@npeaks.setter
def npeaks(self, value):
if int(value)!= value:
raise ValueError("npeaks must be an integer")
self._npeaks = int(value)
def make_parinfo(self, **kwargs):
return self._make_parinfo(**kwargs)[0]
def _make_parinfo(self, params=None, parnames=None, parvalues=None,
parlimits=None, parlimited=None, parfixed=None,
parerror=None, partied=None, fitunit=None,
parsteps=None, npeaks=1, parinfo=None, names=None,
values=None, limits=None, limited=None, fixed=None,
error=None, tied=None, steps=None, negamp=None,
limitedmin=None, limitedmax=None, minpars=None,
maxpars=None, vheight=False, debug=False, **kwargs):
"""
Generate a `ParinfoList` that matches the inputs
This code is complicated - it can take inputs in a variety of different
forms with different priority. It will return a `ParinfoList` (and
therefore must have values within parameter ranges)
"""
log.debug("BEGIN _make_parinfo")
# for backwards compatibility - partied = tied, etc.
locals_dict = locals()
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",","):
shortvarname = varname.replace("par","")
if locals_dict.get(shortvarname) is not None and locals_dict.get(varname) is not None:
raise ValueError("Cannot specify both {0} and {1}".format(varname, shortvarname))
input_pardict = {k: locals_dict.get(k)
for k in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")}
_tip = {'par'+k: locals_dict.get(k)
for k in str.split("names,values,steps,limits,limited,fixed,error,tied",",")
if locals_dict.get(k)
}
input_pardict.update(_tip)
if params is not None and parvalues is not None:
raise ValueError("parvalues and params both specified; they're redundant so that's not allowed.")
elif params is not None and parvalues is None:
input_pardict['parvalues'] = params
log.debug("Parvalues = {0}, npeaks = {1}".format(input_pardict['parvalues'], npeaks))
# this is used too many damned times to keep referencing a dict.
parnames = input_pardict['parnames']
parlimited = input_pardict['parlimited']
parlimits = input_pardict['parlimits']
parvalues = input_pardict['parvalues']
if parnames is not None:
self.parnames = parnames
elif parnames is None and hasattr(self,'parnames') and self.parnames is not None:
parnames = self.parnames
elif self.default_parinfo is not None and parnames is None:
parnames = [p['parname'] for p in self.default_parinfo]
input_pardict['parnames'] = parnames
assert input_pardict['parnames'] is not None
if limitedmin is not None:
if limitedmax is not None:
parlimited = list(zip(limitedmin,limitedmax))
else:
parlimited = list(zip(limitedmin,(False,)*len(parnames)))
elif limitedmax is not None:
parlimited = list(zip((False,)*len(parnames),limitedmax))
elif self.default_parinfo is not None and parlimited is None:
parlimited = [p['limited'] for p in self.default_parinfo]
input_pardict['parlimited'] = parlimited
if minpars is not None:
if maxpars is not None:
parlimits = list(zip(minpars,maxpars))
else:
parlimits = list(zip(minpars,(False,)*len(parnames)))
elif maxpars is not None:
parlimits = list(zip((False,)*len(parnames),maxpars))
elif limits is not None:
parlimits = limits
elif self.default_parinfo is not None and parlimits is None:
parlimits = [p['limits'] for p in self.default_parinfo]
input_pardict['parlimits'] = parlimits
self.npeaks = int(npeaks)
# the height / parvalue popping needs to be done before the temp_pardict is set in order to make sure
# that the height guess isn't assigned to the amplitude
self.vheight = vheight
if ((vheight and len(self.parinfo) == self.default_npars and
len(parvalues) == self.default_npars + 1)):
# if the right number of parameters are passed, the first is the height
self.parinfo = [{'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed':False, 'parname':'HEIGHT',
'error': 0, 'tied':""}]
elif vheight and len(self.parinfo) == self.default_npars and len(parvalues) == self.default_npars:
# if you're one par short, guess zero
self.parinfo = [{
'n':0, 'value': 0, 'limits':(0,0), 'limited': (False,False),
'fixed':False, 'parname':'HEIGHT', 'error': 0, 'tied':""
}]
elif vheight and len(self.parinfo) == self.default_npars+1 and len(parvalues) == self.default_npars+1:
# the right numbers are passed *AND* there is already a height param
self.parinfo = [{
'n':0, 'value':parvalues.pop(0), 'limits':(0,0),
'limited': (False,False), 'fixed': False, 'parname':'HEIGHT',
'error': 0, 'tied':""
}]
#heightparnum = (i for i,s in self.parinfo if 'HEIGHT' in s['parname'])
#for hpn in heightparnum:
# self.parinfo[hpn]['value'] = parvalues[0]
elif vheight:
raise ValueError('VHEIGHT is specified but a case was found that did not allow it to be included.')
else:
self.parinfo = []
log.debug("After VHEIGHT parse len(parinfo): %i vheight: %s" % (len(self.parinfo), vheight))
# this is a clever way to turn the parameter lists into a dict of lists
# clever = hard to read
temp_pardict = OrderedDict([(varname, np.zeros(self.npars*self.npeaks,
dtype='bool'))
if input_pardict.get(varname) is None else
(varname, list(input_pardict.get(varname)))
for varname in str.split("parnames,parvalues,parsteps,parlimits,parlimited,parfixed,parerror,partied",",")])
temp_pardict['parlimits'] = parlimits if parlimits is not None else [(0,0)] * (self.npars*self.npeaks)
temp_pardict['parlimited'] = parlimited if parlimited is not None else [(False,False)] * (self.npars*self.npeaks)
for k,v in temp_pardict.items():
if (self.npars*self.npeaks) / len(v) > 1:
n_components = ((self.npars*self.npeaks) / len(v))
if n_components!= int(n_components):
raise ValueError("The number of parameter values is not a "
"multiple of the number of allowed "
"parameters.")
temp_pardict[k] = list(v) * int(n_components)
# generate the parinfo dict
# note that 'tied' must be a blank string (i.e. ""), not False, if it is not set
# parlimited, parfixed, and parlimits are all two-element items (tuples or lists)
self.parinfo += [{'n':ii+self.npars*jj+vheight,
'value':float(temp_pardict['parvalues'][ii+self.npars*jj]),
'step':temp_pardict['parsteps'][ii+self.npars*jj],
'limits':temp_pardict['parlimits'][ii+self.npars*jj],
'limited':temp_pardict['parlimited'][ii+self.npars*jj],
'fixed':temp_pardict['parfixed'][ii+self.npars*jj],
'parname':temp_pardict['parnames'][ii].upper()+"%0i" % int(jj),
'error':float(temp_pardict['parerror'][ii+self.npars*jj]),
'tied':temp_pardict['partied'][ii+self.npars*jj] if temp_pardict['partied'][ii+self.npars*jj] else ""}
for jj in range(self.npeaks)
for ii in range(self.npars) ] # order matters!
log.debug("After Generation step len(parinfo): %i vheight: %s "
"parinfo: %s" % (len(self.parinfo), vheight, self.parinfo))
if debug > True:
import pdb; pdb.set_trace()
# special keyword to specify emission/absorption lines
if negamp is not None:
if negamp:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (p['limited'][0], True)
p['limits'] = (p['limits'][0], 0)
else:
for p in self.parinfo:
if 'AMP' in p['parname']:
p['limited'] = (True, p['limited'][1])
p['limits'] = (0, p['limits'][1])
# This is effectively an override of all that junk above (3/11/2012)
# Much of it is probably unnecessary, but it was easier to do this than
# rewrite the above
self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# New feature: scaleability
for par in self.parinfo:
if par.parname.lower().strip('0123456789') in ('amplitude','amp'):
par.scaleable = True
log.debug("Parinfo has been set: {0}".format(self.parinfo))
log.debug("kwargs {0} were passed.".format(kwargs))
assert self.parinfo!= []
return self.parinfo, kwargs
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Simple wrapper to deal with N independent peaks for a given spectral model
"""
if pars is None:
pars = self.parinfo
elif not isinstance(pars, ParinfoList):
try:
partemp = copy.copy(self.parinfo)
partemp._from_Parameters(pars)
pars = partemp
except AttributeError:
log.log(5, "Reading pars {0} as LMPar failed.".format(pars))
if debug > 1:
import pdb; pdb.set_trace()
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = list(zip(*list(pars.items())))
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
else:
parvals = list(pars)
if np.any(np.isnan(parvals)):
raise ValueError("A parameter is NaN. Unless you gave a NaN "
"value directly, this is a bug and should be "
"reported. If you specified a NaN parameter, "
"don't do that.")
log.debug("pars to n_modelfunc: {0}, parvals:{1}".format(pars, parvals))
def L(x):
if hasattr(x, 'value') and not hasattr(x, 'x_to_coord'):
x = SpectroscopicAxis(x)
v = np.zeros(len(x))
if self.vheight:
v += parvals[0]
# use len(pars) instead of self.npeaks because we want this to work
# independent of the current best fit
for jj in range(int((len(parvals)-self.vheight)/self.npars)):
lower_parind = jj*self.npars+self.vheight
upper_parind = (jj+1)*self.npars+self.vheight
v += self.modelfunc(x, *parvals[lower_parind:upper_parind], **kwargs)
return v
return L
def mpfitfun(self,x,y,err=None):
"""
Wrapper function to compute the fit residuals in an mpfit-friendly format
"""
if err is None:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))
return [0,residuals]
else:
def f(p,fjac=None):
residuals = (y-self.n_modelfunc(p, **self.modelfunc_kwargs)(x))/err
return [0,residuals]
return f
def lmfitfun(self,x,y,err=None,debug=False):
"""
Wrapper function to compute the fit residuals in an lmfit-friendly format
"""
def f(p):
#pars = [par.value for par in p.values()]
kwargs = {}
kwargs.update(self.modelfunc_kwargs)
log.debug("Pars, kwarg keys: {0},{1}".format(p,list(kwargs.keys())))
if err is None:
return (y-self.n_modelfunc(p,**kwargs)(x))
else:
return (y-self.n_modelfunc(p,**kwargs)(x))/err
return f
def lmfitter(self, xax, data, err=None, parinfo=None, quiet=True, debug=False, **kwargs):
"""
Use lmfit instead of mpfit to do the fitting
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
If false, print out some messages about the fitting
"""
try:
import lmfit
except ImportError as e:
raise ImportError("Could not import lmfit, try using mpfit instead.")
log.debug("lmfit called with parinfo=\n{0}".format(parinfo))
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'convert_to_unit') and self.fitunit is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
xax.convert_to_unit(self.fitunit, quiet=quiet)
elif self.fitunit is not None:
raise TypeError("X axis does not have a convert method")
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
err[np.isnan(data) + np.isinf(data)] = np.inf
data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
log.debug("Parinfo created from _make_parinfo: {0}".format(parinfo))
LMParams = parinfo.as_Parameters()
log.debug("LMParams: "+"\n".join([repr(p) for p in list(LMParams.values())]))
log.debug("parinfo: {0}".format(parinfo))
log.debug("BEGIN MINIMIZER")
minimizer = lmfit.minimize(self.lmfitfun(xax,np.array(data),err,debug=debug),LMParams,**kwargs)
log.debug("END MINIMIZER")
if not quiet:
log.info("There were %i function evaluations" % (minimizer.nfev))
#modelpars = [p.value for p in parinfo.values()]
#modelerrs = [p.stderr for p in parinfo.values() if p.stderr is not None else 0]
self.LMParams = minimizer.params
# Force consistency w/earlier versions of lmfit: if error == 0 exactly,
# change it to None
for par in self.LMParams:
if hasattr(par,'stderr') and par.stderr == 0:
#assert minimizer.ier == 4
par.stderr = None
self.parinfo._from_Parameters(self.LMParams)
log.debug("LMParams: {0}".format(self.LMParams))
log.debug("parinfo: {0}".format(parinfo))
self.mp = minimizer
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
modelkwargs = {}
modelkwargs.update(self.modelfunc_kwargs)
self.model = self.n_modelfunc(self.parinfo, **modelkwargs)(xax)
if hasattr(minimizer,'chisqr'):
chi2 = minimizer.chisqr
else:
try:
chi2 = (((data-self.model)/err)**2).sum()
except TypeError:
chi2 = ((data-self.model)**2).sum()
if np.isnan(chi2):
warn("Warning: chi^2 is nan")
if hasattr(self.mp,'ier') and self.mp.ier not in [1,2,3,4]:
log.warning("Fitter failed: %s, %s" % (self.mp.message, self.mp.lmdif_message))
return self.mpp,self.model,self.mpperr,chi2
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, **kwargs):
"""
Run the fitter using mpfit.
kwargs will be passed to _make_parinfo and mpfit.
Parameters
----------
xax : SpectroscopicAxis
The X-axis of the spectrum
data : ndarray
The data to fit
err : ndarray (optional)
The error on the data. If unspecified, will be uniform unity
parinfo : ParinfoList
The guesses, parameter limits, etc. See
`pyspeckit.spectrum.parinfo` for details
quiet : bool
pass to mpfit. If False, will print out the parameter values for
each iteration of the fitter
veryverbose : bool
print out a variety of mpfit output parameters
debug : bool
raise an exception (rather than a warning) if chi^2 is nan
"""
if parinfo is None:
parinfo, kwargs = self._make_parinfo(debug=debug, **kwargs)
else:
log.debug("Using user-specified parinfo dict")
# clean out disallowed kwargs (don't want to pass them to mpfit)
#throwaway, kwargs = self._make_parinfo(debug=debug, **kwargs)
self.xax = xax # the 'stored' xax is just a link to the original
if hasattr(xax,'as_unit') and self.fitunit is not None:
# some models will depend on the input units. For these, pass in an X-axis in those units
# (gaussian, voigt, lorentz profiles should not depend on units. Ammonia, formaldehyde,
# H-alpha, etc. should)
xax = copy.copy(xax)
# xax.convert_to_unit(self.fitunit, quiet=quiet)
xax = xax.as_unit(self.fitunit, quiet=quiet, **kwargs)
elif self.fitunit is not None:
raise TypeError("X axis does not have a convert method")
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
err[np.isnan(data) + np.isinf(data)] = np.inf
data[np.isnan(data) + np.isinf(data)] = 0
if np.any(np.isnan(err)):
raise ValueError("One or more of the error values is NaN."
" This is not allowed. Errors can be infinite "
"(which is equivalent to giving zero weight to "
"a data point), but otherwise they must be positive "
"floats.")
elif np.any(err<0):
raise ValueError("At least one error value is negative, which is "
"not allowed as negative errors are not "
"meaningful in the optimization process.")
for p in parinfo: log.debug( p )
log.debug( "\n".join(["%s %i: tied: %s value: %s" % (p['parname'],p['n'],p['tied'],p['value']) for p in parinfo]) )
mp = mpfit(self.mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet,debug=debug,**kwargs)
mpp = mp.params
if mp.perror is not None:
mpperr = mp.perror
else:
mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
if "parameters are not within PARINFO limits" in mp.errmsg:
log.warning(parinfo)
raise mpfitException(mp.errmsg)
for i,(p,e) in enumerate(zip(mpp,mpperr)):
self.parinfo[i]['value'] = p
# for consistency w/lmfit, and because it makes more sense, errors
# of 0 will instead be None
self.parinfo[i]['error'] = e if (e!= 0 or mp.status!= 4) else None
# sanity check: if status==4, errors could not be computed
# Apparently some parameters can have errors estimated even if all can't?
#if mp.status == 4:
# assert all([self.parinfo[ii]['error'] is None
# for ii in range(len(mpp))])
if veryverbose:
log.info("Fit status: {0}".format(mp.status))
log.info("Fit error message: {0}".format(mp.errmsg))
log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
for i,p in enumerate(mpp):
log.info("{0}: {1} +/- {2}".format(self.parinfo[i]['parname'],
p,mpperr[i]))
log.info("Chi2: {0} Reduced Chi2: {1} DOF:{2}".format(mp.fnorm,
mp.fnorm/(len(data)-len(mpp)),
len(data)-len(mpp)))
self.mp = mp
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_modelfunc(self.parinfo,**self.modelfunc_kwargs)(xax)
log.debug("Modelpars: {0}".format(self.mpp))
if np.isnan(chi2):
if debug:
raise ValueError("Error: chi^2 is nan")
else:
log.warning("Warning: chi^2 is nan")
return mpp,self.model,mpperr,chi2
def slope(self, xinp):
"""
Find the local slope of the model at location x
(x must be in xax's units)
"""
if hasattr(self,'model'):
dm = np.diff(self.model)
# convert requested x to pixels
xpix = self.xax.x_to_pix(xinp)
dmx = np.average(dm[xpix-1:xpix+1])
if np.isfinite(dmx):
return dmx
else:
return 0
def annotations(self, shortvarnames=None, debug=False):
"""
Return a list of TeX-formatted labels
The values and errors are formatted so that only the significant digits
are displayed. Rounding is performed using the decimal package.
Parameters
----------
shortvarnames : list
A list of variable names (tex is allowed) to include in the
annotations. Defaults to self.shortvarnames
Examples
--------
>>> # Annotate a Gaussian
>>> sp.specfit.annotate(shortvarnames=['A','\\Delta x','\\sigma'])
"""
from decimal import Decimal # for formatting
svn = self.shortvarnames if shortvarnames is None else shortvarnames
# if pars need to be replicated....
if len(svn) < self.npeaks*self.npars:
svn = svn * self.npeaks
parvals = self.parinfo.values
parerrs = self.parinfo.errors
loop_list = [(parvals[ii+jj*self.npars+self.vheight],
parerrs[ii+jj*self.npars+self.vheight],
svn[ii+jj*self.npars],
self.parinfo.fixed[ii+jj*self.npars+self.vheight],
jj)
for jj in range(self.npeaks) for ii in range(self.npars)]
label_list = []
for (value, error, varname, fixed, varnumber) in loop_list:
log.debug(", ".join([str(x) for x in (value, error, varname, fixed, varnumber)]))
if None in (value, error):
label = "{0}({1})=None".format(varname, varnumber)
elif fixed or error==0:
label = ("$%s(%i)$=%8s" % (varname, varnumber,
Decimal("%g" % value).quantize(Decimal("%0.6g" % (value)))))
else:
label = ("$%s(%i)$=%8s $\\pm$ %8s" % (varname, varnumber,
Decimal("%g" % value).quantize(Decimal("%0.2g" % (min(np.abs([value,error]))))),
Decimal("%g" % error).quantize(Decimal("%0.2g" % (error))),))
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
def components(self, xarr, pars, **kwargs):
"""
Return a numpy ndarray of shape [npeaks x modelshape] of the
independent components of the fits
"""
modelcomponents = np.array(
[self.modelfunc(xarr,
*pars[i*self.npars:(i+1)*self.npars],
**dict(list(self.modelfunc_kwargs.items())+list(kwargs.items())))
for i in range(self.npeaks)])
if len(modelcomponents.shape) == 3:
newshape = [modelcomponents.shape[0]*modelcomponents.shape[1], modelcomponents.shape[2]]
modelcomponents = np.reshape(modelcomponents, newshape)
return modelcomponents
def integral(self, modelpars, dx=None, **kwargs):
"""
Extremely simple integrator:
IGNORES modelpars;
just sums self.model
"""
if not hasattr(self,'model'):
raise ValueError("Must fit (or compute) a model before computing"
" its integral.")
if dx is not None:
return (self.model*dx).sum()
else:
return self.model.sum()
def analytic_integral(self, modelpars=None, npeaks=None, npars=None):
"""
Placeholder for analyic integrals; these must be defined for individual models
"""
if self.integral_func is None:
raise NotImplementedError("Analytic integrals must be implemented independently for each model type")
# all of these parameters are allowed to be overwritten
if modelpars is None:
modelpars = self.parinfo.values
if npeaks is None:
npeaks = self.npeaks
if npars is None:
npars = self.npars
return np.sum([
self.integral_func(modelpars[npars*ii:npars*(1+ii)])
for ii in range(npeaks)])
def component_integrals(self, xarr, dx=None):
"""
Compute the integrals of each component
"""
components = self.components(xarr, self.parinfo.values)
if dx is None:
dx = 1
integrals = [com.sum()*dx for com in components]
return integrals
def analytic_fwhm(self, parinfo=None):
"""
Return the FWHMa of the model components *if* a fwhm_func has been
defined
Done with incomprehensible list comprehensions instead of nested for
loops... readability sacrificed for speed and simplicity. This is
unpythonic.
"""
if self.fwhm_func is None and self.fwhm_pars is None:
raise TypeError("fwhm_func not implemented for model %s" % self.__name__)
if parinfo is None:
parinfo = self.parinfo
fwhm = [self.fwhm_func(
*[self.parinfo[str.upper(p+'%i' % n)] for p in self.fwhm_pars]
)
for n in range(self.npeaks)]
return fwhm
def analytic_centroids(self, centroidpar=None):
"""
Return the *analytic* centroids of the model components
Parameters
----------
centroidpar : None or string
The name of the parameter in the fit that represents the centroid
*some models have default centroid parameters - these will be used
if centroidpar is unspecified*
Returns
-------
List of the centroid values (even if there's only 1)
"""
if centroidpar is None:
centroidpar = self.centroid_par
centr = [par.value
for par in self.parinfo
if str.upper(centroidpar) in par.parname]
return centr
def computed_centroid(self, xarr=None):
"""
Return the *computed* centroid of the model
Parameters
----------
xarr : None or np.ndarray
The X coordinates of the model over which the centroid should be
computed. If unspecified, the centroid will be in pixel units
"""
if not hasattr(self,'model'):
raise ValueError("Must fit (or compute) a model before measuring "
"its centroid")
if xarr is None:
xarr = np.arange(self.model.size)
centr = (self.model*xarr).sum() / self.model.sum()
return centr
def logp(self, xarr, data, error, pars=None):
"""
Return the log probability of the model. If the parameter is out of
range, return -inf
"""
if pars is None:
pars = self.parinfo
else:
parinfo = copy.copy(self.parinfo)
for value,parameter in zip(pars,parinfo):
try:
parameter.value = value
except ValueError:
return -np.inf
model = self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
difference = np.abs(data-model)
# prob = 1/(2*np.pi)**0.5/error * exp(-difference**2/(2.*error**2))
#logprob = np.log(1./(2.*np.pi)**0.5/error) * (-difference**2/(2.*error**2))
logprob = (-difference**2/(2.*error**2))
totallogprob = np.sum(logprob)
return totallogprob
def get_emcee_sampler(self, xarr, data, error, **kwargs):
"""
Get an emcee walker for the data & model
Parameters
----------
xarr : pyspeckit.units.SpectroscopicAxis
data : np.ndarray
error : np.ndarray
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> emcee_sampler = sp.specfit.fitter.get_emcee_sampler(sp.xarr, sp.data, sp.error)
>>> p0 = sp.specfit.parinfo
>>> emcee_sampler.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
raise NotImplementedError("emcee's metropolis-hastings sampler is not implemented; use pymc")
sampler = emcee.MHSampler(self.npars*self.npeaks+self.vheight, probfunc, **kwargs)
return sampler
def get_emcee_ensemblesampler(self, xarr, data, error, nwalkers, **kwargs):
"""
Get an emcee walker ensemble for the data & model
Parameters
----------
data : np.ndarray
error : np.ndarray
nwalkers : int
Number of walkers to use
Examples
--------
>>> import pyspeckit
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> nwalkers = sp.specfit.fitter.npars * 2
>>> emcee_ensemble = sp.specfit.fitter.get_emcee_ensemblesampler(sp.xarr, sp.data, sp.error, nwalkers)
>>> p0 = np.array([sp.specfit.parinfo.values] * nwalkers)
>>> p0 *= np.random.randn(*p0.shape) / 10. + 1.0
>>> pos,logprob,state = emcee_ensemble.run_mcmc(p0,100)
"""
try:
import emcee
except ImportError:
return
def probfunc(pars):
return self.logp(xarr, data, error, pars=pars)
sampler = emcee.EnsembleSampler(nwalkers,
self.npars*self.npeaks+self.vheight,
probfunc, **kwargs)
return sampler
def get_pymc(self, xarr, data, error, use_fitted_values=False, inf=np.inf,
use_adaptive=False, return_dict=False, **kwargs):
"""
Create a pymc MCMC sampler. Defaults to 'uninformative' priors
Parameters
----------
data : np.ndarray
error : np.ndarray
use_fitted_values : bool
Each parameter with a measured error will have a prior defined by
the Normal distribution with sigma = par.error and mean = par.value
use_adaptive : bool
Use the Adaptive Metropolis-Hastings sampler?
Examples
--------
>>> x = pyspeckit.units.SpectroscopicAxis(np.linspace(-10,10,50), unit='km/s')
>>> e = np.random.randn(50)
>>> d = np.exp(-np.asarray(x)**2/2.)*5 + e
>>> sp = pyspeckit.Spectrum(data=d, xarr=x, error=np.ones(50)*e.std())
>>> sp.specfit(fittype='gaussian')
>>> MCuninformed = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error)
>>> MCwithpriors = sp.specfit.fitter.get_pymc(sp.xarr, sp.data, sp.error, use_fitted_values=True)
>>> MCuninformed.sample(1000)
>>> MCuninformed.stats()['AMPLITUDE0']
>>> # WARNING: This will fail because width cannot be set <0, but it may randomly reach that...
>>> # How do you define a likelihood distribution with a lower limit?!
>>> MCwithpriors.sample(1000)
>>> MCwithpriors.stats()['AMPLITUDE0']
"""
old_errsettings = np.geterr()
try:
import pymc
finally:
# pymc breaks error settings
np.seterr(**old_errsettings)
#def lowerlimit_like(x,lolim):
# "lower limit (log likelihood - set very positive for unacceptable values)"
# return (x>=lolim) / 1e10
#def upperlimit_like(x,uplim):
# "upper limit"
# return (x<=uplim) / 1e10
#LoLim = pymc.distributions.stochastic_from_dist('lolim', logp=lowerlimit_like, dtype=np.float, mv=False)
#UpLim = pymc.distributions.stochastic_from_dist('uplim', logp=upperlimit_like, dtype=np.float, mv=False)
funcdict = {}
# very, very worrisome: pymc changes the values of parinfo
parcopy = copy.deepcopy(self.parinfo)
for par in parcopy:
lolim = par.limits[0] if par.limited[0] else -inf
uplim = par.limits[1] if par.limited[1] else inf
if par.fixed:
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, par.value, par.value, value=par.value)
elif use_fitted_values:
if par.error > 0:
if any(par.limited):
try:
funcdict[par.parname] = pymc.distributions.TruncatedNormal(par.parname, par.value, 1./par.error**2, lolim, uplim)
except AttributeError:
# old versions used this?
funcdict[par.parname] = pymc.distributions.TruncNorm(par.parname, par.value, 1./par.error**2, lolim, uplim)
else:
funcdict[par.parname] = pymc.distributions.Normal(par.parname, par.value, 1./par.error**2)
else:
if any(par.limited):
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lolim, uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
elif any(par.limited):
lolim = par.limits[0] if par.limited[0] else -1e10
uplim = par.limits[1] if par.limited[1] else 1e10
funcdict[par.parname] = pymc.distributions.Uniform(par.parname, lower=lolim, upper=uplim, value=par.value)
else:
funcdict[par.parname] = pymc.distributions.Uninformative(par.parname, value=par.value)
d = dict(funcdict)
def modelfunc(xarr, pars=parcopy, **kwargs):
for k,v in kwargs.items():
if k in list(pars.keys()):
pars[k].value = v
return self.n_modelfunc(pars, **self.modelfunc_kwargs)(xarr)
funcdict['xarr'] = xarr
funcdet=pymc.Deterministic(name='f',eval=modelfunc,parents=funcdict,doc="The model function")
d['f'] = funcdet
datamodel = pymc.distributions.Normal('data', mu=funcdet,
tau=1/np.asarray(error)**2,
observed=True,
value=np.asarray(data))
d['data']=datamodel
if return_dict:
return d
mc = pymc.MCMC(d)
if use_adaptive:
mc.use_step_method(pymc.AdaptiveMetropolis,[d[p] for p in self.parinfo.names])
return mc
def parse_3par_guesses(self, guesses):
"""
Try to convert a set of interactive guesses (peak, center, width) into
guesses appropriate to the model.
"""
if len(guesses) % 3!= 0:
raise ValueError("Guesses passed to parse_3par_guesses must have "
"length % 3 == 0")
npeaks_guessed = len(guesses) // 3
gtypes = [parse_offset_guess(gtype, gval)[0]
for gtype, gval in zip(itertools.cycle(self.guess_types),
[0]*len(self.guess_types))]
guess_dict = {(valid_guess_types[ii % 3], ii // 3): gval
for ii, gval in enumerate(guesses)}
new_guesses = [guess_dict[(gtype, ii)]
if isinstance(gtype, str)
else gtype
for ii in range(npeaks_guessed)
for gtype in gtypes
]
new_guesses = [parse_offset_guess(gtype, gval)[1]
for gtype, gval in zip(itertools.cycle(self.guess_types),
new_guesses)]
assert len(new_guesses) % len(self.guess_types) == 0
return new_guesses
class AstropyModel(SpectralModel):
def __init__(self, model, shortvarnames=None, **kwargs):
"""
Override the SpectralModel initialization
"""
if self.__doc__ is not None: # how do you extend a docstring really?
self.__doc__ += SpectralModel.__doc__
if shortvarnames is None:
shortvarnames = model.param_names
super(AstropyModel,self).__init__(model, len(model.parameters),
shortvarnames=shortvarnames,
model=model, **kwargs)
self.mp = None
self.vheight = False
self.npeaks = 1
def _make_parinfo(self, model=None):
self.parinfo = ParinfoList([
Parinfo(parname=name,value=value)
for name,value in zip(model.param_names,model.parameters)])
return self.parinfo, {}
def _parse_parinfo(self, parinfo):
"""
Parse a ParinfoList into astropy.models parameters
"""
if len(parinfo) > self.npars:
if len(parinfo) % self.npars!= 0:
raise ValueError("Need to have an integer number of models")
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
else:
self.modelfunc.param_names = parinfo.names
self.modelfunc.parameters = parinfo.values
def fitter(self, xax, data, err=None, quiet=True, veryverbose=False,
debug=False, parinfo=None, params=None, npeaks=None, **kwargs):
import astropy.models as models
if npeaks is not None and npeaks > 1:
raise NotImplementedError("Astropy models cannot be used to fit multiple peaks yet")
if parinfo is not None:
self._parse_parinfo(parinfo)
if params is not None:
self.modelfunc.parameters = params
self.astropy_fitter = models.fitting.NonLinearLSQFitter(self.modelfunc)
if err is None:
self.astropy_fitter(xax, data, **kwargs)
else:
self.astropy_fitter(xax, data, weights=1./err**2, **kwargs)
mpp = self.astropy_fitter.fitpars
cov = self.astropy_fitter.covar
if cov is None:
mpperr = np.zeros(len(mpp))
else:
mpperr = cov.diagonal()
self.model = self.astropy_fitter.model(xax)
if err is None:
chi2 = ((data-self.model)**2).sum()
else:
chi2 = ((data-self.model)**2/err**2).sum()
# update object parameters
self.modelfunc.parameters = mpp
self._make_parinfo(self.modelfunc)
return mpp,self.model,mpperr,chi2
def n_modelfunc(self, pars=None, debug=False, **kwargs):
"""
Only deals with single-peak functions
"""
try:
self._parse_parinfo(pars)
except AttributeError:
self.modelfunc.parameters = pars
return self.modelfunc
def parse_offset_guess(gname, gval):
"""
Utility function for handling guesses. Allows guess types to be specified
as 'amplitude*2' or 'width+3'.
"""
operators = '+-*/'
if not isinstance(gname, six.string_types):
return gname, gval
ops = [x for x in operators if x in gname]
if len(ops)>1:
raise ValueError("Invalid offset guess")
elif len(ops) == 0:
return gname,gval
else:
opmap = {"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
}
op = ops[0]
pars = gname.split(op)
gname = [p for p in gname.split(op) if p in valid_guess_types][0]
pars = [gval if p in valid_guess_types else float(p)
for p in pars]
gval = opmap[op](*pars)
return gname, gval |
|
piccolo-orm__piccolo | baseuser.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/authentication/baseuser.rst | [
"piccolo-orm__piccolo/piccolo/apps/user/tables.py"
] | BaseUser
BaseUser is a Table you can use to store and authenticate your users.
------------------------------------------------------------------------
Creating the Table
Run the migrations:
piccolo migrations forwards user
------------------------------------------------------------------------
Commands
The app comes with some useful commands.
create
Creates a new user. It presents an interactive prompt, asking for the
username, password etc.
piccolo user create
If you'd prefer to create a user without the interactive prompt (perhaps
in a script), you can pass all of the arguments in as follows:
piccolo user create --username=bob --password=bob123 [email protected] --is_admin=t --is_superuser=t --is_active=t
Warning
If you choose this approach then be careful, as the password will be in
the shell's history.
change_password
Change a user's password.
piccolo user change_password
change_permissions
Change a user's permissions. The options are --admin, --superuser and
--active, which change the corresponding attributes on BaseUser.
For example:
piccolo user change_permissions some_user --active=true
The Piccolo Admin<PiccoloAdmin> uses these attributes to control who can
log in and what they can do.
- active and admin - must be true for a user to be able to log in.
- superuser - must be true for a user to be able to change other
users' passwords.
------------------------------------------------------------------------
Within your code
create_user / create_user_sync
To create a new user:
# From within a coroutine:
await BaseUser.create_user(username="bob", password="abc123", active=True)
# When not in an event loop:
BaseUser.create_user_sync(username="bob", password="abc123", active=True)
It saves the user in the database, and returns the created BaseUser
instance.
Note
It is preferable to use this rather than instantiating and saving
BaseUser directly, as we add additional validation.
login / login_sync
To check a user's credentials, do the following:
from piccolo.apps.user.tables import BaseUser
# From within a coroutine:
>>> await BaseUser.login(username="bob", password="abc123")
1
# When not in an event loop:
>>> BaseUser.login_sync(username="bob", password="abc123")
1
If the login is successful, the user's id is returned, otherwise None is
returned.
update_password / update_password_sync
To change a user's password:
# From within a coroutine:
await BaseUser.update_password(username="bob", password="abc123")
# When not in an event loop:
BaseUser.update_password_sync(username="bob", password="abc123")
Warning
Don't use bulk updates for passwords - use update_password /
update_password_sync, and they'll correctly hash the password.
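As a rough illustration (the table values here are just examples), a
bulk update like the first line below would store the raw string rather
than a hash, whereas update_password hashes it for you:
# Don't do this - the password is saved as plain text:
await BaseUser.update({BaseUser.password: "abc123"}).where(BaseUser.username == "bob")
# Do this instead:
await BaseUser.update_password("bob", "abc123")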
------------------------------------------------------------------------
Limits
The maximum password length allowed is 128 characters. This should be
sufficiently long for most use cases.
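For example, passing an overly long password raises a ValueError (the
200-character value below is arbitrary):
>>> BaseUser.create_user_sync(username="bob", password="x" * 200)
ValueError: The password is too long.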
------------------------------------------------------------------------
Extending BaseUser
If you want to extend BaseUser with additional fields, we recommend
creating a Profile table with a ForeignKey to BaseUser, which can
include any custom fields.
from piccolo.apps.user.tables import BaseUser
from piccolo.columns import ForeignKey, Text, Varchar
from piccolo.table import Table
class Profile(Table):
custom_user = ForeignKey(BaseUser)
phone_number = Varchar()
bio = Text()
Alternatively, you can copy the entire user app into your project, and
customise it to fit your needs.
------------------------------------------------------------------------
| """
A User model, used for authentication.
"""
from __future__ import annotations
import datetime
import hashlib
import logging
import secrets
import typing as t
from piccolo.columns import Boolean, Secret, Timestamp, Varchar
from piccolo.columns.column_types import Serial
from piccolo.columns.readable import Readable
from piccolo.table import Table
from piccolo.utils.sync import run_sync
logger = logging.getLogger(__name__)
class BaseUser(Table, tablename="piccolo_user"):
"""
Provides a basic user, with authentication support.
"""
id: Serial
username = Varchar(length=100, unique=True)
password = Secret(length=255)
first_name = Varchar(null=True)
last_name = Varchar(null=True)
email = Varchar(length=255, unique=True)
active = Boolean(default=False)
admin = Boolean(
default=False, help_text="An admin can log into the Piccolo admin GUI."
)
superuser = Boolean(
default=False,
help_text=(
"If True, this user can manage other users's passwords in the "
"Piccolo admin GUI."
),
)
last_login = Timestamp(
null=True,
default=None,
required=False,
help_text="When this user last logged in.",
)
_min_password_length = 6
_max_password_length = 128
# The number of hash iterations recommended by OWASP:
# https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
_pbkdf2_iteration_count = 600_000
def __init__(self, **kwargs):
# Generating passwords upfront is expensive, so might need reworking.
password = kwargs.get("password", None)
if password:
if not password.startswith("pbkdf2_sha256"):
kwargs["password"] = self.__class__.hash_password(password)
super().__init__(**kwargs)
@classmethod
def get_salt(cls):
return secrets.token_hex(16)
@classmethod
def get_readable(cls) -> Readable:
"""
Used to get a readable string, representing a table row.
"""
return Readable(template="%s", columns=[cls.username])
###########################################################################
@classmethod
def _validate_password(cls, password: str):
"""
Validate the raw password. Used by :meth:`update_password` and
:meth:`create_user`.
:param password:
The raw password e.g. ``'hello123'``.
:raises ValueError:
If the password fails any of the criteria.
"""
if not password:
raise ValueError("A password must be provided.")
if len(password) < cls._min_password_length:
raise ValueError("The password is too short.")
if len(password) > cls._max_password_length:
raise ValueError("The password is too long.")
if password.startswith("pbkdf2_sha256"):
logger.warning(
"Tried to create a user with an already hashed password."
)
raise ValueError("Do not pass a hashed password.")
###########################################################################
@classmethod
def update_password_sync(cls, user: t.Union[str, int], password: str):
"""
A sync equivalent of :meth:`update_password`.
"""
return run_sync(cls.update_password(user, password))
@classmethod
async def update_password(cls, user: t.Union[str, int], password: str):
"""
The password is the raw password string e.g. ``'password123'``.
The user can be a user ID, or a username.
"""
if isinstance(user, str):
clause = cls.username == user
elif isinstance(user, int):
clause = cls.id == user
else:
raise ValueError(
"The `user` arg must be a user id, or a username."
)
cls._validate_password(password=password)
password = cls.hash_password(password)
await cls.update({cls.password: password}).where(clause).run()
###########################################################################
@classmethod
def hash_password(
cls, password: str, salt: str = "", iterations: t.Optional[int] = None
) -> str:
"""
Hashes the password, ready for storage, and for comparing during
login.
:raises ValueError:
If an excessively long password is provided.
"""
if len(password) > cls._max_password_length:
logger.warning("Excessively long password provided.")
raise ValueError("The password is too long.")
if not salt:
salt = cls.get_salt()
if iterations is None:
iterations = cls._pbkdf2_iteration_count
hashed = hashlib.pbkdf2_hmac(
"sha256",
bytes(password, encoding="utf-8"),
bytes(salt, encoding="utf-8"),
iterations,
).hex()
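# Stored format: 'pbkdf2_sha256$<iterations>$<salt>$<hashed hex>' - see split_stored_password() below.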
return f"pbkdf2_sha256${iterations}${salt}${hashed}"
def __setattr__(self, name: str, value: t.Any):
"""
Make sure that if the password is set, it's stored in a hashed form.
"""
if name == "password" and not value.startswith("pbkdf2_sha256"):
value = self.__class__.hash_password(value)
super().__setattr__(name, value)
@classmethod
def split_stored_password(cls, password: str) -> t.List[str]:
elements = password.split("$")
if len(elements) != 4:
raise ValueError("Unable to split hashed password")
return elements
###########################################################################
@classmethod
def login_sync(cls, username: str, password: str) -> t.Optional[int]:
"""
A sync equivalent of :meth:`login`.
"""
return run_sync(cls.login(username, password))
@classmethod
async def login(cls, username: str, password: str) -> t.Optional[int]:
"""
Make sure the user exists and the password is valid. If so, the
``last_login`` value is updated in the database.
:returns:
The id of the user if a match is found, otherwise ``None``.
"""
if len(username) > cls.username.length:
logger.warning("Excessively long username provided.")
return None
if len(password) > cls._max_password_length:
logger.warning("Excessively long password provided.")
return None
response = (
await cls.select(cls._meta.primary_key, cls.password)
.where(cls.username == username)
.first()
.run()
)
if not response:
# No match found
return None
stored_password = response["password"]
algorithm, iterations_, salt, hashed = cls.split_stored_password(
stored_password
)
iterations = int(iterations_)
if cls.hash_password(password, salt, iterations) == stored_password:
# If the password was hashed in an earlier Piccolo version, update
# it so it's hashed with the currently recommended number of
# iterations:
if iterations != cls._pbkdf2_iteration_count:
await cls.update_password(username, password)
await cls.update({cls.last_login: datetime.datetime.now()}).where(
cls.username == username
)
return response["id"]
else:
return None
###########################################################################
@classmethod
def create_user_sync(
cls, username: str, password: str, **extra_params
) -> BaseUser:
"""
A sync equivalent of :meth:`create_user`.
"""
return run_sync(
cls.create_user(
username=username, password=password, **extra_params
)
)
@classmethod
async def create_user(
cls, username: str, password: str, **extra_params
) -> BaseUser:
"""
Creates a new user, and saves it in the database. It is recommended to
use this rather than instantiating and saving ``BaseUser`` directly, as
we add extra validation.
:raises ValueError:
If the username or password is invalid.
:returns:
The created ``BaseUser`` instance.
"""
if not username:
raise ValueError("A username must be provided.")
cls._validate_password(password=password)
user = cls(username=username, password=password, **extra_params)
await user.save()
return user |
|
piccolo-orm__piccolo | cockroach_engine.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/engines/cockroach_engine.rst | [
"piccolo-orm__piccolo/piccolo/engine/cockroach.py"
] | CockroachEngine
Configuration
# piccolo_conf.py
from piccolo.engine.cockroach import CockroachEngine
DB = CockroachEngine(config={
'host': 'localhost',
'database': 'piccolo',
'user': 'root',
'password': '',
'port': '26257',
})
config
The config dictionary is passed directly to the underlying database
adapter, asyncpg. See the asyncpg docs to learn more.
------------------------------------------------------------------------
Connection pool
To use a connection pool, you need to first initialise it. The best
place to do this is in the startup event handler of whichever web
framework you are using.
Here's an example using Starlette. Notice that we also close the
connection pool in the shutdown event handler.
from piccolo.engine import engine_finder
from starlette.applications import Starlette
app = Starlette()
@app.on_event('startup')
async def open_database_connection_pool():
engine = engine_finder()
await engine.start_connection_pool()
@app.on_event('shutdown')
async def close_database_connection_pool():
engine = engine_finder()
await engine.close_connection_pool()
Hint
Using a connection pool helps with performance, since connections are
reused instead of being created for each query.
Once a connection pool has been started, the engine will use it for
making queries.
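You don't need to change your queries to take advantage of it - for
example, a query such as the following (Band is just an illustrative
table) goes through the pool automatically:
await Band.select().run()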
Hint
If you're running several instances of an app on the same server, you
may prefer an external connection pooler - like pgbouncer.
Configuration
The connection pool uses the same configuration as your engine. You can
also pass in additional parameters, which are passed to the underlying
database adapter. Here's an example:
# To increase the number of connections available:
await engine.start_connection_pool(max_size=20)
------------------------------------------------------------------------ | from __future__ import annotations
import typing as t
from piccolo.utils.lazy_loader import LazyLoader
from piccolo.utils.warnings import Level, colored_warning
from .postgres import PostgresEngine
asyncpg = LazyLoader("asyncpg", globals(), "asyncpg")
class CockroachEngine(PostgresEngine):
"""
An extension of
:class:`PostgresEngine <piccolo.engine.postgres.PostgresEngine>`.
"""
engine_type = "cockroach"
min_version_number = 0 # Doesn't seem to work with cockroach versioning.
def __init__(
self,
config: t.Dict[str, t.Any],
extensions: t.Sequence[str] = (),
log_queries: bool = False,
log_responses: bool = False,
extra_nodes: t.Dict[str, CockroachEngine] = None,
) -> None:
super().__init__(
config=config,
extensions=extensions,
log_queries=log_queries,
log_responses=log_responses,
extra_nodes=extra_nodes,
)
async def prep_database(self):
try:
await self._run_in_new_connection(
"SET CLUSTER SETTING sql.defaults.experimental_alter_column_type.enabled = true;" # noqa: E501
)
except asyncpg.exceptions.InsufficientPrivilegeError:
colored_warning(
"=> Unable to set up Cockroach DB "
"functionality may not behave as expected. Make sure "
"your database user has permission to set cluster options.",
level=Level.medium,
) |
|
piccolo-orm__piccolo | piccolo_apps.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/projects_and_apps/piccolo_apps.rst | [
"piccolo-orm__piccolo/piccolo/conf/apps.py"
] | Piccolo Apps
By leveraging Piccolo apps you can:
- Modularise your code.
- Share your apps with other Piccolo users.
- Unlock some useful functionality like auto migrations.
------------------------------------------------------------------------
Creating an app
Run the following command within your project:
piccolo app new my_app
Where my_app is your new app's name. This will create a folder like
this:
my_app/
__init__.py
piccolo_app.py
piccolo_migrations/
__init__.py
tables.py
It's important to register your new app with the APP_REGISTRY in
piccolo_conf.py.
# piccolo_conf.py
APP_REGISTRY = AppRegistry(apps=['my_app.piccolo_app'])
Anytime you invoke the piccolo command, you will now be able to perform
operations on your app, such as Migrations.
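For example, to run the migrations for the new app:
piccolo migrations forwards my_app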
------------------------------------------------------------------------
AppConfig
Inside your app's piccolo_app.py file is an AppConfig instance. This is
how you customise your app's settings.
# piccolo_app.py
import os
from piccolo.conf.apps import AppConfig
from .tables import (
Author,
Post,
Category,
CategoryToPost,
)
CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
APP_CONFIG = AppConfig(
app_name='blog',
migrations_folder_path=os.path.join(
CURRENT_DIRECTORY,
'piccolo_migrations'
),
table_classes=[Author, Post, Category, CategoryToPost],
migration_dependencies=[],
commands=[]
)
app_name
This is used to identify your app, when using the piccolo CLI, for
example:
piccolo migrations forwards blog
migrations_folder_path
Specifies where your app's migrations are stored. By default, a folder
called piccolo_migrations is used.
table_classes
Use this to register your app's Table subclasses. This is important for
auto migrations <Migrations>.
You can register them manually (see the example above), or you can use
table_finder <TableFinder>.
migration_dependencies
Used to specify other Piccolo apps whose migrations need to be run
before the current app's migrations.
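For example, an app whose tables have a ForeignKey to BaseUser might
declare a dependency on the bundled user app (the module path below
follows the usual convention, and is shown as an assumption):
migration_dependencies=['piccolo.apps.user.piccolo_app']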
commands
You can register functions and coroutines, which are automatically added
to the piccolo CLI.
The targ library is used under the hood. It makes it really easy to
write command line tools - just use type annotations and docstrings.
Here's an example:
def say_hello(name: str):
"""
Say hello.
:param name:
The person to greet.
"""
print("hello,", name)
We then register it with the AppConfig.
# piccolo_app.py
APP_CONFIG = AppConfig(
# ...
commands=[say_hello]
)
And from the command line:
>>> piccolo my_app say_hello bob
hello, bob
If the code contains an error, add the --trace flag to the command line
to see more details in the output.
>>> piccolo my_app say_hello bob --trace
By convention, store the command definitions in a commands folder in
your app.
my_app/
__init__.py
piccolo_app.py
commands/
__init__.py
say_hello.py
Piccolo itself is bundled with several apps - have a look at the source
code for inspiration.
------------------------------------------------------------------------
table_finder
Instead of manually registering Table subclasses, you can use
table_finder to automatically import any Table subclasses from a given
list of modules.
from piccolo.conf.apps import table_finder
APP_CONFIG = AppConfig(
app_name='blog',
migrations_folder_path=os.path.join(
CURRENT_DIRECTORY,
'piccolo_migrations'
),
table_classes=table_finder(modules=['blog.tables']),
migration_dependencies=[],
commands=[]
)
The module path should be from the root of the project (i.e. the
directory containing your piccolo_conf.py file), rather than a relative
path.
You can filter the Table subclasses returned using tags <TableTags>.
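For example, a minimal sketch of filtering by tag (the tag name here is
made up for illustration):
table_classes=table_finder(modules=['blog.tables'], exclude_tags=['internal'])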
------------------------------------------------------------------------
Sharing Apps
By breaking up your project into apps, the project becomes more
maintainable. You can also share these apps between projects, and they
can even be installed using pip.
| from __future__ import annotations
import inspect
import itertools
import os
import pathlib
import traceback
import typing as t
from dataclasses import dataclass, field
from importlib import import_module
from types import ModuleType
from piccolo.engine.base import Engine
from piccolo.table import Table
from piccolo.utils.graphlib import TopologicalSorter
from piccolo.utils.warnings import Level, colored_warning
class MigrationModule(ModuleType):
ID: str
VERSION: str
DESCRIPTION: str
@staticmethod
async def forwards() -> None:
pass
class PiccoloAppModule(ModuleType):
APP_CONFIG: AppConfig
def table_finder(
modules: t.Sequence[str],
include_tags: t.Sequence[str] = None,
exclude_tags: t.Sequence[str] = None,
exclude_imported: bool = False,
) -> t.List[t.Type[Table]]:
"""
Rather than explicitly importing and registering table classes with the
``AppConfig``, ``table_finder`` can be used instead. It imports any ``Table``
subclasses in the given modules. Tags can be used to limit which ``Table``
subclasses are imported.
:param modules:
The module paths to check for ``Table`` subclasses. For example,
``['blog.tables']``. The path should be from the root of your project,
not a relative path.
:param include_tags:
If the ``Table`` subclass has one of these tags, it will be
imported. The special tag ``'__all__'`` will import all ``Table``
subclasses found.
:param exclude_tags:
If the ``Table`` subclass has any of these tags, it won't be
imported. ``exclude_tags`` overrides ``include_tags``.
:param exclude_imported:
If ``True``, only ``Table`` subclasses defined within the module are
used. Any ``Table`` subclasses imported by that module from other
modules are ignored. For example:
.. code-block:: python
from piccolo.table import Table
from piccolo.column import Varchar, ForeignKey
from piccolo.apps.user.tables import BaseUser # excluded
class Task(Table): # included
title = Varchar()
creator = ForeignKey(BaseUser)
""" # noqa: E501
if include_tags is None:
include_tags = ["__all__"]
if exclude_tags is None:
exclude_tags = []
if isinstance(modules, str):
# Guard against the user just entering a string, for example
# 'blog.tables', instead of ['blog.tables'].
modules = [modules]
table_subclasses: t.List[t.Type[Table]] = []
for module_path in modules:
try:
module = import_module(module_path)
except ImportError as exception:
print(f"Unable to import {module_path}")
raise exception from exception
object_names = [i for i in dir(module) if not i.startswith("_")]
for object_name in object_names:
_object = getattr(module, object_name)
if (
inspect.isclass(_object)
and issubclass(_object, Table)
and _object is not Table
):
table: Table = _object # type: ignore
if exclude_imported and table.__module__ != module_path:
continue
if exclude_tags and set(table._meta.tags).intersection(
set(exclude_tags)
):
continue
elif "__all__" in include_tags:
table_subclasses.append(_object)
elif set(table._meta.tags).intersection(set(include_tags)):
table_subclasses.append(_object)
return table_subclasses
@dataclass
class Command:
callable: t.Callable
aliases: t.List[str] = field(default_factory=list)
@dataclass
class AppConfig:
"""
Each app needs an AppConfig, which is defined in piccolo_app.py.
:param app_name:
The name of the app, for example ``'article'``.
:param migrations_folder_path:
The path of the folder containing this app's migration files.
:param table_classes:
By registering table classes, Piccolo's auto migrations can detect
changes to tables.
:param migration_dependencies:
A list of Piccolo apps whose migrations this app depends on. For
example: ``['piccolo.apps.user.piccolo_conf']``. The migrations for
those apps will be run before the migrations for this app.
:param commands:
A list of functions and coroutines, which are then registered with
the Piccolo CLI. For example, with a Piccolo app called ``'article'``,
and a command called ``new``, it can be called on the command line
using ``piccolo article new``.
"""
app_name: str
migrations_folder_path: str
table_classes: t.List[t.Type[Table]] = field(default_factory=list)
migration_dependencies: t.List[str] = field(default_factory=list)
commands: t.List[t.Union[t.Callable, Command]] = field(
default_factory=list
)
def __post_init__(self):
self.commands = [
i if isinstance(i, Command) else Command(i) for i in self.commands
]
if isinstance(self.migrations_folder_path, pathlib.Path):
self.migrations_folder_path = str(self.migrations_folder_path)
self._migration_dependency_app_configs: t.Optional[
t.List[AppConfig]
] = None
def register_table(self, table_class: t.Type[Table]):
self.table_classes.append(table_class)
return table_class
@property
def migration_dependency_app_configs(self) -> t.List[AppConfig]:
"""
Get all of the ``AppConfig`` instances from this app's migration
dependencies.
"""
# We cache the value so it's more efficient, and also so we can set the
# underlying value in unit tests for easier mocking.
if self._migration_dependency_app_configs is None:
modules: t.List[PiccoloAppModule] = [
t.cast(PiccoloAppModule, import_module(module_path))
for module_path in self.migration_dependencies
]
self._migration_dependency_app_configs = [
i.APP_CONFIG for i in modules
]
return self._migration_dependency_app_configs
def get_table_with_name(self, table_class_name: str) -> t.Type[Table]:
"""
Returns a ``Table`` subclass with the given name from this app, if it
exists. Otherwise raises a ``ValueError``.
"""
filtered = [
table_class
for table_class in self.table_classes
if table_class.__name__ == table_class_name
]
if not filtered:
raise ValueError(
f"No table with class name {table_class_name} exists."
)
return filtered[0]
class AppRegistry:
"""
Records all of the Piccolo apps in your project. Kept in
``piccolo_conf.py``.
:param apps:
A list of paths to Piccolo apps, e.g. ``['blog.piccolo_app']``.
"""
def __init__(self, apps: t.List[str] = None):
self.apps = apps or []
self.app_configs: t.Dict[str, AppConfig] = {}
app_names = []
for app in self.apps:
try:
app_conf_module = import_module(app)
app_config: AppConfig = getattr(app_conf_module, "APP_CONFIG")
except (ImportError, AttributeError) as e:
if app.endswith(".piccolo_app"):
raise e from e
app += ".piccolo_app"
app_conf_module = import_module(app)
app_config = getattr(app_conf_module, "APP_CONFIG")
colored_warning(
f"App {app[:-12]} should end with `.piccolo_app`",
level=Level.medium,
)
self.app_configs[app_config.app_name] = app_config
app_names.append(app_config.app_name)
self._validate_app_names(app_names)
@staticmethod
def _validate_app_names(app_names: t.List[str]):
"""
Raise a ValueError if an app_name is repeated.
"""
app_names.sort()
grouped = itertools.groupby(app_names)
for key, value in grouped:
count = len(list(value))
if count > 1:
raise ValueError(
f"There are {count} apps with the name `{key}`. This can "
"cause unexpected behavior. Make sure each app has a "
"unique name, and you haven't registered the same app "
"multiple times."
)
def get_app_config(self, app_name: str) -> t.Optional[AppConfig]:
return self.app_configs.get(app_name)
def get_table_classes(self, app_name: str) -> t.List[t.Type[Table]]:
"""
Returns each Table subclass defined in the given app if it exists.
Otherwise raises a ValueError.
:raises ValueError:
If an AppConfig can't be found for the given app_name.
"""
app_config = self.get_app_config(app_name=app_name)
if not app_config:
raise ValueError(f"Unrecognised app_name: {app_name}")
return app_config.table_classes
def get_table_with_name(
self, app_name: str, table_class_name: str
) -> t.Optional[t.Type[Table]]:
"""
Returns a Table subclass registered with the given app if it exists.
Otherwise raises a ValueError.
"""
app_config = self.get_app_config(app_name=app_name)
if app_config is None:
raise ValueError(f"Can't find an app_config for {app_name}")
else:
return app_config.get_table_with_name(
table_class_name=table_class_name
)
class PiccoloConfModule(ModuleType):
DB: Engine
APP_REGISTRY: AppRegistry
DEFAULT_MODULE_NAME = "piccolo_conf"
ENVIRONMENT_VARIABLE = "PICCOLO_CONF"
ENGINE_VAR = "DB"
class Finder:
"""
Contains useful methods for locating and loading apps within your project,
and tables within apps.
"""
def __init__(self, diagnose: bool = False):
"""
:param diagnose:
If True, when trying to import piccolo_conf, a traceback will be
printed out if an error occurs.
"""
self.diagnose = diagnose
def _deduplicate(
self, config_modules: t.List[PiccoloAppModule]
) -> t.List[PiccoloAppModule]:
"""
Remove all duplicates - just leaving the first instance.
"""
# Deduplicate, but preserve order - which is why set() isn't used.
return list({c: None for c in config_modules}.keys())
def _import_app_modules(
self, config_module_paths: t.List[str]
) -> t.List[PiccoloAppModule]:
"""
Import all piccolo_app.py modules within your apps, and all
dependencies.
"""
config_modules = []
for config_module_path in config_module_paths:
try:
config_module = t.cast(
PiccoloAppModule, import_module(config_module_path)
)
except ImportError as e:
raise Exception(
f"Unable to import {config_module_path}"
) from e
app_config: AppConfig = getattr(config_module, "APP_CONFIG")
dependency_config_modules = self._import_app_modules(
app_config.migration_dependencies
)
config_modules.extend(dependency_config_modules + [config_module])
return config_modules
def get_piccolo_conf_module(
self, module_name: t.Optional[str] = None
) -> t.Optional[PiccoloConfModule]:
"""
Searches the path for a 'piccolo_conf.py' module to import. The
location searched can be overridden by:
* Explicitly passing a module name into this method.
* Setting the PICCOLO_CONF environment variable.
An example override is 'my_folder.piccolo_conf'.
"""
env_module_name = os.environ.get(ENVIRONMENT_VARIABLE, None)
if not module_name and env_module_name:
module_name = env_module_name
if not module_name:
module_name = DEFAULT_MODULE_NAME
try:
module = t.cast(PiccoloConfModule, import_module(module_name))
except ModuleNotFoundError as exc:
if self.diagnose:
colored_warning(
(
f"{module_name} either doesn't exist or the import "
"failed. Traceback:"
),
level=Level.high,
)
print(traceback.format_exc())
if str(exc) == "No module named 'asyncpg'":
raise ModuleNotFoundError(
"PostgreSQL driver not found. "
"Try running `pip install 'piccolo[postgres]'`"
) from exc
elif str(exc) == "No module named 'aiosqlite'":
raise ModuleNotFoundError(
"SQLite driver not found. "
"Try running `pip install 'piccolo[sqlite]'`"
) from exc
else:
raise exc from exc
else:
return module
def get_app_registry(self) -> AppRegistry:
"""
Returns the ``AppRegistry`` instance within piccolo_conf.
"""
piccolo_conf_module = self.get_piccolo_conf_module()
return getattr(piccolo_conf_module, "APP_REGISTRY")
def get_engine(
self, module_name: t.Optional[str] = None
) -> t.Optional[Engine]:
piccolo_conf = self.get_piccolo_conf_module(module_name=module_name)
engine: t.Optional[Engine] = getattr(piccolo_conf, ENGINE_VAR, None)
if not engine:
colored_warning(
f"{module_name} doesn't define a {ENGINE_VAR} variable.",
level=Level.high,
)
elif not isinstance(engine, Engine):
colored_warning(
f"{module_name} contains a {ENGINE_VAR} variable of the "
"wrong type - it should be an Engine subclass.",
level=Level.high,
)
return engine
def get_app_modules(self) -> t.List[PiccoloAppModule]:
"""
Returns the ``piccolo_app.py`` modules for each registered Piccolo app
in your project.
"""
app_registry = self.get_app_registry()
app_modules = self._import_app_modules(app_registry.apps)
# Now deduplicate any dependencies
app_modules = self._deduplicate(app_modules)
return app_modules
def get_app_names(
self, sort_by_migration_dependencies: bool = True
) -> t.List[str]:
"""
Return all of the app names.
:param sort_by_migration_dependencies:
If True, sorts the app names using the migration dependencies, so
dependencies are before dependents in the list.
"""
return [
i.app_name
for i in self.get_app_configs(
sort_by_migration_dependencies=sort_by_migration_dependencies
)
]
def get_sorted_app_names(self) -> t.List[str]:
"""
Just here for backwards compatibility - use ``get_app_names`` directly.
"""
return self.get_app_names(sort_by_migration_dependencies=True)
def sort_app_configs(
self, app_configs: t.List[AppConfig]
) -> t.List[AppConfig]:
app_config_map = {
app_config.app_name: app_config for app_config in app_configs
}
sorted_app_names = TopologicalSorter(
{
app_config.app_name: [
i.app_name
for i in app_config.migration_dependency_app_configs
]
for app_config in app_config_map.values()
}
).static_order()
return [app_config_map[i] for i in sorted_app_names]
def get_app_configs(
self, sort_by_migration_dependencies: bool = True
) -> t.List[AppConfig]:
"""
Returns a list of ``AppConfig``, optionally sorted by migration
dependencies.
"""
app_configs = [i.APP_CONFIG for i in self.get_app_modules()]
return (
self.sort_app_configs(app_configs=app_configs)
if sort_by_migration_dependencies
else app_configs
)
def get_app_config(self, app_name: str) -> AppConfig:
"""
Returns an ``AppConfig`` for the given app name.
"""
for app_config in self.get_app_configs():
if app_config.app_name == app_name:
return app_config
raise ValueError(f"No app found with name {app_name}")
def get_table_with_name(
self, app_name: str, table_class_name: str
) -> t.Type[Table]:
"""
Returns a ``Table`` class registered with the given app if it exists.
Otherwise it raises a ``ValueError``.
"""
app_config = self.get_app_config(app_name=app_name)
return app_config.get_table_with_name(
table_class_name=table_class_name
)
def get_table_classes(
self,
include_apps: t.Optional[t.List[str]] = None,
exclude_apps: t.Optional[t.List[str]] = None,
) -> t.List[t.Type[Table]]:
"""
Returns all ``Table`` classes registered with the given apps. If
``include_apps`` is ``None``, then ``Table`` classes will be returned
for all apps.
"""
if include_apps and exclude_apps:
raise ValueError("Only specify `include_apps` or `exclude_apps`.")
if include_apps:
app_names = include_apps
else:
app_names = self.get_app_names()
if exclude_apps:
app_names = [i for i in app_names if i not in exclude_apps]
tables: t.List[t.Type[Table]] = []
for app_name in app_names:
app_config = self.get_app_config(app_name=app_name)
tables.extend(app_config.table_classes)
return tables |
|
piccolo-orm__piccolo | piccolo_projects.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/projects_and_apps/piccolo_projects.rst | [
"piccolo-orm__piccolo/piccolo/conf/apps.py"
] | Piccolo Projects
A Piccolo project is a collection of apps.
------------------------------------------------------------------------
piccolo_conf.py
A project requires a piccolo_conf.py file. To create this, use the
following command:
piccolo project new
The file serves two important purposes:
- Contains your database settings.
- Is used for registering PiccoloApps.
Location
By convention, the piccolo_conf.py file should be at the root of your
project:
my_project/
piccolo_conf.py
my_app/
piccolo_app.py
This means that when you use the piccolo CLI from the my_project folder
it can import piccolo_conf.py.
If you prefer to keep piccolo_conf.py in a different location, or to
give it a different name, you can do so using the PICCOLO_CONF
environment variable (see PICCOLO_CONF<PICCOLO_CONF>). For example:
my_project/
conf/
piccolo_conf_local.py
my_app/
piccolo_app.py
export PICCOLO_CONF=conf.piccolo_conf_local
------------------------------------------------------------------------
Example
Here's an example:
from piccolo.engine.postgres import PostgresEngine
from piccolo.conf.apps import AppRegistry
DB = PostgresEngine(
config={
"database": "piccolo_project",
"user": "postgres",
"password": "",
"host": "localhost",
"port": 5432,
}
)
APP_REGISTRY = AppRegistry(
apps=["home.piccolo_app", "piccolo_admin.piccolo_app"]
)
------------------------------------------------------------------------
DB
The DB setting is an Engine instance (see the Engine docs <Engines>).
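You can retrieve it at runtime with engine_finder, as shown in the
connection pool examples elsewhere in these docs:
from piccolo.engine import engine_finder
engine = engine_finder()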
------------------------------------------------------------------------
APP_REGISTRY
The APP_REGISTRY setting is an AppRegistry instance.
See the AppRegistry class in piccolo.conf.apps for the full API.
| from __future__ import annotations
import inspect
import itertools
import os
import pathlib
import traceback
import typing as t
from dataclasses import dataclass, field
from importlib import import_module
from types import ModuleType
from piccolo.engine.base import Engine
from piccolo.table import Table
from piccolo.utils.graphlib import TopologicalSorter
from piccolo.utils.warnings import Level, colored_warning
class MigrationModule(ModuleType):
ID: str
VERSION: str
DESCRIPTION: str
@staticmethod
async def forwards() -> None:
pass
class PiccoloAppModule(ModuleType):
APP_CONFIG: AppConfig
def table_finder(
modules: t.Sequence[str],
include_tags: t.Sequence[str] = None,
exclude_tags: t.Sequence[str] = None,
exclude_imported: bool = False,
) -> t.List[t.Type[Table]]:
"""
Rather than explicitly importing and registering table classes with the
``AppConfig``, ``table_finder`` can be used instead. It imports any ``Table``
subclasses in the given modules. Tags can be used to limit which ``Table``
subclasses are imported.
:param modules:
The module paths to check for ``Table`` subclasses. For example,
``['blog.tables']``. The path should be from the root of your project,
not a relative path.
:param include_tags:
If the ``Table`` subclass has one of these tags, it will be
imported. The special tag ``'__all__'`` will import all ``Table``
subclasses found.
:param exclude_tags:
If the ``Table`` subclass has any of these tags, it won't be
imported. ``exclude_tags`` overrides ``include_tags``.
:param exclude_imported:
If ``True``, only ``Table`` subclasses defined within the module are
used. Any ``Table`` subclasses imported by that module from other
modules are ignored. For example:
.. code-block:: python
from piccolo.table import Table
from piccolo.column import Varchar, ForeignKey
from piccolo.apps.user.tables import BaseUser # excluded
class Task(Table): # included
title = Varchar()
creator = ForeignKey(BaseUser)
""" # noqa: E501
if include_tags is None:
include_tags = ["__all__"]
if exclude_tags is None:
exclude_tags = []
if isinstance(modules, str):
# Guard against the user just entering a string, for example
# 'blog.tables', instead of ['blog.tables'].
modules = [modules]
table_subclasses: t.List[t.Type[Table]] = []
for module_path in modules:
try:
module = import_module(module_path)
except ImportError as exception:
print(f"Unable to import {module_path}")
raise exception from exception
object_names = [i for i in dir(module) if not i.startswith("_")]
for object_name in object_names:
_object = getattr(module, object_name)
if (
inspect.isclass(_object)
and issubclass(_object, Table)
and _object is not Table
):
table: Table = _object # type: ignore
if exclude_imported and table.__module__ != module_path:
continue
if exclude_tags and set(table._meta.tags).intersection(
set(exclude_tags)
):
continue
elif "__all__" in include_tags:
table_subclasses.append(_object)
elif set(table._meta.tags).intersection(set(include_tags)):
table_subclasses.append(_object)
return table_subclasses
@dataclass
class Command:
callable: t.Callable
aliases: t.List[str] = field(default_factory=list)
@dataclass
class AppConfig:
"""
Each app needs an AppConfig, which is defined in piccolo_app.py.
:param app_name:
The name of the app, for example ``'article'``.
:param migrations_folder_path:
The path of the folder containing this app's migration files.
:param table_classes:
By registering table classes, Piccolo's auto migrations can detect
changes to tables.
:param migration_dependencies:
A list of Piccolo apps whose migrations this app depends on. For
example: ``['piccolo.apps.user.piccolo_conf']``. The migrations for
those apps will be run before the migrations for this app.
:param commands:
A list of functions and coroutines, which are then registered with
the Piccolo CLI. For example, with a Piccolo app called ``'article'``,
and a command called ``new``, it can be called on the command line
using ``piccolo article new``.
"""
app_name: str
migrations_folder_path: str
table_classes: t.List[t.Type[Table]] = field(default_factory=list)
migration_dependencies: t.List[str] = field(default_factory=list)
commands: t.List[t.Union[t.Callable, Command]] = field(
default_factory=list
)
def __post_init__(self):
self.commands = [
i if isinstance(i, Command) else Command(i) for i in self.commands
]
if isinstance(self.migrations_folder_path, pathlib.Path):
self.migrations_folder_path = str(self.migrations_folder_path)
self._migration_dependency_app_configs: t.Optional[
t.List[AppConfig]
] = None
def register_table(self, table_class: t.Type[Table]):
self.table_classes.append(table_class)
return table_class
@property
def migration_dependency_app_configs(self) -> t.List[AppConfig]:
"""
Get all of the ``AppConfig`` instances from this app's migration
dependencies.
"""
# We cache the value so it's more efficient, and also so we can set the
# underlying value in unit tests for easier mocking.
if self._migration_dependency_app_configs is None:
modules: t.List[PiccoloAppModule] = [
t.cast(PiccoloAppModule, import_module(module_path))
for module_path in self.migration_dependencies
]
self._migration_dependency_app_configs = [
i.APP_CONFIG for i in modules
]
return self._migration_dependency_app_configs
def get_table_with_name(self, table_class_name: str) -> t.Type[Table]:
"""
Returns a ``Table`` subclass with the given name from this app, if it
exists. Otherwise raises a ``ValueError``.
"""
filtered = [
table_class
for table_class in self.table_classes
if table_class.__name__ == table_class_name
]
if not filtered:
raise ValueError(
f"No table with class name {table_class_name} exists."
)
return filtered[0]
class AppRegistry:
"""
Records all of the Piccolo apps in your project. Kept in
``piccolo_conf.py``.
:param apps:
A list of paths to Piccolo apps, e.g. ``['blog.piccolo_app']``.
"""
def __init__(self, apps: t.List[str] = None):
self.apps = apps or []
self.app_configs: t.Dict[str, AppConfig] = {}
app_names = []
for app in self.apps:
try:
app_conf_module = import_module(app)
app_config: AppConfig = getattr(app_conf_module, "APP_CONFIG")
except (ImportError, AttributeError) as e:
if app.endswith(".piccolo_app"):
raise e from e
app += ".piccolo_app"
app_conf_module = import_module(app)
app_config = getattr(app_conf_module, "APP_CONFIG")
colored_warning(
f"App {app[:-12]} should end with `.piccolo_app`",
level=Level.medium,
)
self.app_configs[app_config.app_name] = app_config
app_names.append(app_config.app_name)
self._validate_app_names(app_names)
@staticmethod
def _validate_app_names(app_names: t.List[str]):
"""
Raise a ValueError if an app_name is repeated.
"""
app_names.sort()
grouped = itertools.groupby(app_names)
for key, value in grouped:
count = len(list(value))
if count > 1:
raise ValueError(
f"There are {count} apps with the name `{key}`. This can "
"cause unexpected behavior. Make sure each app has a "
"unique name, and you haven't registered the same app "
"multiple times."
)
def get_app_config(self, app_name: str) -> t.Optional[AppConfig]:
return self.app_configs.get(app_name)
def get_table_classes(self, app_name: str) -> t.List[t.Type[Table]]:
"""
Returns each Table subclass defined in the given app if it exists.
Otherwise raises a ValueError.
:raises ValueError:
If an AppConfig can't be found for the given app_name.
"""
app_config = self.get_app_config(app_name=app_name)
if not app_config:
raise ValueError(f"Unrecognised app_name: {app_name}")
return app_config.table_classes
def get_table_with_name(
self, app_name: str, table_class_name: str
) -> t.Optional[t.Type[Table]]:
"""
Returns a Table subclass registered with the given app if it exists.
Otherwise raises a ValueError.
"""
app_config = self.get_app_config(app_name=app_name)
if app_config is None:
raise ValueError(f"Can't find an app_config for {app_name}")
else:
return app_config.get_table_with_name(
table_class_name=table_class_name
)
class PiccoloConfModule(ModuleType):
DB: Engine
APP_REGISTRY: AppRegistry
DEFAULT_MODULE_NAME = "piccolo_conf"
ENVIRONMENT_VARIABLE = "PICCOLO_CONF"
ENGINE_VAR = "DB"
class Finder:
"""
Contains useful methods for locating and loading apps within your project,
and tables within apps.
"""
def __init__(self, diagnose: bool = False):
"""
:param diagnose:
If True, when trying to import piccolo_conf, a traceback will be
printed out if an error occurs.
"""
self.diagnose = diagnose
def _deduplicate(
self, config_modules: t.List[PiccoloAppModule]
) -> t.List[PiccoloAppModule]:
"""
Remove all duplicates - just leaving the first instance.
"""
# Deduplicate, but preserve order - which is why set() isn't used.
return list({c: None for c in config_modules}.keys())
def _import_app_modules(
self, config_module_paths: t.List[str]
) -> t.List[PiccoloAppModule]:
"""
Import all piccolo_app.py modules within your apps, and all
dependencies.
"""
config_modules = []
for config_module_path in config_module_paths:
try:
config_module = t.cast(
PiccoloAppModule, import_module(config_module_path)
)
except ImportError as e:
raise Exception(
f"Unable to import {config_module_path}"
) from e
app_config: AppConfig = getattr(config_module, "APP_CONFIG")
dependency_config_modules = self._import_app_modules(
app_config.migration_dependencies
)
config_modules.extend(dependency_config_modules + [config_module])
return config_modules
def get_piccolo_conf_module(
self, module_name: t.Optional[str] = None
) -> t.Optional[PiccoloConfModule]:
"""
Searches the path for a 'piccolo_conf.py' module to import. The
location searched can be overridden by:
* Explicitly passing a module name into this method.
* Setting the PICCOLO_CONF environment variable.
An example override is 'my_folder.piccolo_conf'.
"""
env_module_name = os.environ.get(ENVIRONMENT_VARIABLE, None)
if not module_name and env_module_name:
module_name = env_module_name
if not module_name:
module_name = DEFAULT_MODULE_NAME
try:
module = t.cast(PiccoloConfModule, import_module(module_name))
except ModuleNotFoundError as exc:
if self.diagnose:
colored_warning(
(
f"{module_name} either doesn't exist or the import "
"failed. Traceback:"
),
level=Level.high,
)
print(traceback.format_exc())
if str(exc) == "No module named 'asyncpg'":
raise ModuleNotFoundError(
"PostgreSQL driver not found. "
"Try running `pip install 'piccolo[postgres]'`"
) from exc
elif str(exc) == "No module named 'aiosqlite'":
raise ModuleNotFoundError(
"SQLite driver not found. "
"Try running `pip install 'piccolo[sqlite]'`"
) from exc
else:
raise exc from exc
else:
return module
def get_app_registry(self) -> AppRegistry:
"""
Returns the ``AppRegistry`` instance within piccolo_conf.
"""
piccolo_conf_module = self.get_piccolo_conf_module()
return getattr(piccolo_conf_module, "APP_REGISTRY")
def get_engine(
self, module_name: t.Optional[str] = None
) -> t.Optional[Engine]:
piccolo_conf = self.get_piccolo_conf_module(module_name=module_name)
engine: t.Optional[Engine] = getattr(piccolo_conf, ENGINE_VAR, None)
if not engine:
colored_warning(
f"{module_name} doesn't define a {ENGINE_VAR} variable.",
level=Level.high,
)
elif not isinstance(engine, Engine):
colored_warning(
f"{module_name} contains a {ENGINE_VAR} variable of the "
"wrong type - it should be an Engine subclass.",
level=Level.high,
)
return engine
def get_app_modules(self) -> t.List[PiccoloAppModule]:
"""
Returns the ``piccolo_app.py`` modules for each registered Piccolo app
in your project.
"""
app_registry = self.get_app_registry()
app_modules = self._import_app_modules(app_registry.apps)
# Now deduplicate any dependencies
app_modules = self._deduplicate(app_modules)
return app_modules
def get_app_names(
self, sort_by_migration_dependencies: bool = True
) -> t.List[str]:
"""
Return all of the app names.
:param sort_by_migration_dependencies:
If True, sorts the app names using the migration dependencies, so
dependencies are before dependents in the list.
"""
return [
i.app_name
for i in self.get_app_configs(
sort_by_migration_dependencies=sort_by_migration_dependencies
)
]
def get_sorted_app_names(self) -> t.List[str]:
"""
Just here for backwards compatibility - use ``get_app_names`` directly.
"""
return self.get_app_names(sort_by_migration_dependencies=True)
def sort_app_configs(
self, app_configs: t.List[AppConfig]
) -> t.List[AppConfig]:
app_config_map = {
app_config.app_name: app_config for app_config in app_configs
}
sorted_app_names = TopologicalSorter(
{
app_config.app_name: [
i.app_name
for i in app_config.migration_dependency_app_configs
]
for app_config in app_config_map.values()
}
).static_order()
return [app_config_map[i] for i in sorted_app_names]
def get_app_configs(
self, sort_by_migration_dependencies: bool = True
) -> t.List[AppConfig]:
"""
Returns a list of ``AppConfig``, optionally sorted by migration
dependencies.
"""
app_configs = [i.APP_CONFIG for i in self.get_app_modules()]
return (
self.sort_app_configs(app_configs=app_configs)
if sort_by_migration_dependencies
else app_configs
)
def get_app_config(self, app_name: str) -> AppConfig:
"""
Returns an ``AppConfig`` for the given app name.
"""
for app_config in self.get_app_configs():
if app_config.app_name == app_name:
return app_config
raise ValueError(f"No app found with name {app_name}")
def get_table_with_name(
self, app_name: str, table_class_name: str
) -> t.Type[Table]:
"""
Returns a ``Table`` class registered with the given app if it exists.
Otherwise it raises a ``ValueError``.
"""
app_config = self.get_app_config(app_name=app_name)
return app_config.get_table_with_name(
table_class_name=table_class_name
)
def get_table_classes(
self,
include_apps: t.Optional[t.List[str]] = None,
exclude_apps: t.Optional[t.List[str]] = None,
) -> t.List[t.Type[Table]]:
"""
Returns all ``Table`` classes registered with the given apps. If
``include_apps`` is ``None``, then ``Table`` classes will be returned
for all apps.
"""
if include_apps and exclude_apps:
raise ValueError("Only specify `include_apps` or `exclude_apps`.")
if include_apps:
app_names = include_apps
else:
app_names = self.get_app_names()
if exclude_apps:
app_names = [i for i in app_names if i not in exclude_apps]
tables: t.List[t.Type[Table]] = []
for app_name in app_names:
app_config = self.get_app_config(app_name=app_name)
tables.extend(app_config.table_classes)
return tables |
|
piccolo-orm__piccolo | postgres_engine.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/engines/postgres_engine.rst | [
"piccolo-orm__piccolo/piccolo/engine/postgres.py"
] | PostgresEngine
Configuration
# piccolo_conf.py
from piccolo.engine.postgres import PostgresEngine
DB = PostgresEngine(config={
'host': 'localhost',
'database': 'my_app',
'user': 'postgres',
'password': ''
})
config
The config dictionary is passed directly to the underlying database
adapter, asyncpg. See the asyncpg docs to learn more.
------------------------------------------------------------------------
Connection pool
To use a connection pool, you need to first initialise it. The best
place to do this is in the startup event handler of whichever web
framework you are using.
Here's an example using Starlette. Notice that we also close the
connection pool in the shutdown event handler.
from piccolo.engine import engine_finder
from starlette.applications import Starlette
app = Starlette()
@app.on_event('startup')
async def open_database_connection_pool():
engine = engine_finder()
await engine.start_connection_pool()
@app.on_event('shutdown')
async def close_database_connection_pool():
engine = engine_finder()
await engine.close_connection_pool()
Hint
Using a connection pool helps with performance, since connections are
reused instead of being created for each query.
Once a connection pool has been started, the engine will use it for
making queries.
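You don't need to change your queries to take advantage of it - for
example, a query such as the following (Band is just an illustrative
table) goes through the pool automatically:
await Band.select().run()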
Hint
If you're running several instances of an app on the same server, you
may prefer an external connection pooler - like pgbouncer.
Configuration
The connection pool uses the same configuration as your engine. You can
also pass in additional parameters, which are passed to the underlying
database adapter. Here's an example:
# To increase the number of connections available:
await engine.start_connection_pool(max_size=20)
------------------------------------------------------------------------
| from __future__ import annotations
import contextvars
import typing as t
from dataclasses import dataclass
from piccolo.engine.base import Batch, Engine
from piccolo.engine.exceptions import TransactionError
from piccolo.query.base import DDL, Query
from piccolo.querystring import QueryString
from piccolo.utils.lazy_loader import LazyLoader
from piccolo.utils.sync import run_sync
from piccolo.utils.warnings import Level, colored_warning
asyncpg = LazyLoader("asyncpg", globals(), "asyncpg")
if t.TYPE_CHECKING: # pragma: no cover
from asyncpg.connection import Connection
from asyncpg.cursor import Cursor
from asyncpg.pool import Pool
@dataclass
class AsyncBatch(Batch):
connection: Connection
query: Query
batch_size: int
# Set internally
_transaction = None
_cursor: t.Optional[Cursor] = None
@property
def cursor(self) -> Cursor:
if not self._cursor:
raise ValueError("_cursor not set")
return self._cursor
async def next(self) -> t.List[t.Dict]:
data = await self.cursor.fetch(self.batch_size)
return await self.query._process_results(data)
def __aiter__(self):
return self
async def __anext__(self):
response = await self.next()
if response == []:
raise StopAsyncIteration()
return response
async def __aenter__(self):
self._transaction = self.connection.transaction()
await self._transaction.start()
querystring = self.query.querystrings[0]
template, template_args = querystring.compile_string()
self._cursor = await self.connection.cursor(template, *template_args)
return self
async def __aexit__(self, exception_type, exception, traceback):
if exception:
await self._transaction.rollback()
else:
await self._transaction.commit()
await self.connection.close()
return exception is not None
###############################################################################
class Atomic:
"""
This is useful if you want to build up a transaction programmatically, by
adding queries to it.
Usage::
transaction = engine.atomic()
transaction.add(Foo.create_table())
# Either:
transaction.run_sync()
await transaction.run()
"""
__slots__ = ("engine", "queries")
def __init__(self, engine: PostgresEngine):
self.engine = engine
self.queries: t.List[Query] = []
def add(self, *query: Query):
self.queries += list(query)
async def run(self):
from piccolo.query.methods.objects import Create, GetOrCreate
try:
async with self.engine.transaction():
for query in self.queries:
if isinstance(query, (Query, DDL, Create, GetOrCreate)):
await query.run()
else:
raise ValueError("Unrecognised query")
self.queries = []
except Exception as exception:
self.queries = []
raise exception from exception
def run_sync(self):
return run_sync(self.run())
def __await__(self):
return self.run().__await__()
###############################################################################
class Savepoint:
def __init__(self, name: str, transaction: PostgresTransaction):
self.name = name
self.transaction = transaction
async def rollback_to(self):
await self.transaction.connection.execute(
f"ROLLBACK TO SAVEPOINT {self.name}"
)
async def release(self):
await self.transaction.connection.execute(
f"RELEASE SAVEPOINT {self.name}"
)
class PostgresTransaction:
"""
Used for wrapping queries in a transaction, using a context manager.
Currently it's async only.
Usage::
async with engine.transaction():
# Run some queries:
await Band.select().run()
"""
__slots__ = (
"engine",
"transaction",
"context",
"connection",
"_savepoint_id",
"_parent",
"_committed",
"_rolled_back",
)
def __init__(self, engine: PostgresEngine, allow_nested: bool = True):
"""
:param allow_nested:
If ``True`` then if we try creating a new transaction when another
is already active, we treat this as a no-op::
async with DB.transaction():
async with DB.transaction():
pass
If we want to disallow this behaviour, then setting
``allow_nested=False`` will cause a ``TransactionError`` to be
raised.
"""
self.engine = engine
current_transaction = self.engine.current_transaction.get()
self._savepoint_id = 0
self._parent = None
self._committed = False
self._rolled_back = False
if current_transaction:
if allow_nested:
self._parent = current_transaction
else:
raise TransactionError(
"A transaction is already active - nested transactions "
"aren't allowed."
)
async def __aenter__(self) -> PostgresTransaction:
if self._parent is not None:
return self._parent
self.connection = await self.get_connection()
self.transaction = self.connection.transaction()
await self.begin()
self.context = self.engine.current_transaction.set(self)
return self
async def get_connection(self):
if self.engine.pool:
return await self.engine.pool.acquire()
else:
return await self.engine.get_new_connection()
async def begin(self):
await self.transaction.start()
async def commit(self):
await self.transaction.commit()
self._committed = True
async def rollback(self):
await self.transaction.rollback()
self._rolled_back = True
async def rollback_to(self, savepoint_name: str):
"""
Used to rollback to a savepoint just using the name.
"""
await Savepoint(name=savepoint_name, transaction=self).rollback_to()
###########################################################################
def get_savepoint_id(self) -> int:
self._savepoint_id += 1
return self._savepoint_id
async def savepoint(self, name: t.Optional[str] = None) -> Savepoint:
name = name or f"savepoint_{self.get_savepoint_id()}"
await self.connection.execute(f"SAVEPOINT {name}")
return Savepoint(name=name, transaction=self)
###########################################################################
async def __aexit__(self, exception_type, exception, traceback):
if self._parent:
return exception is None
if exception:
# The user may have manually rolled it back.
if not self._rolled_back:
await self.rollback()
else:
# The user may have manually committed it.
if not self._committed and not self._rolled_back:
await self.commit()
if self.engine.pool:
await self.engine.pool.release(self.connection)
else:
await self.connection.close()
self.engine.current_transaction.reset(self.context)
return exception is None
###############################################################################
class PostgresEngine(Engine[t.Optional[PostgresTransaction]]):
"""
Used to connect to PostgreSQL.
:param config:
The config dictionary is passed to the underlying database adapter,
asyncpg. Common arguments you're likely to need are:
* host
* port
* user
* password
* database
For example, ``{'host': 'localhost', 'port': 5432}``.
See the `asyncpg docs <https://magicstack.github.io/asyncpg/current/api/index.html#connection>`_
for all available options.
:param extensions:
When the engine starts, it will try and create these extensions
in Postgres. If you're using a read only database, set this value to an
empty tuple ``()``.
:param log_queries:
If ``True``, all SQL and DDL statements are printed out before being
run. Useful for debugging.
:param log_responses:
If ``True``, the raw response from each query is printed out. Useful
for debugging.
:param extra_nodes:
If you have additional database nodes (e.g. read replicas) for the
server, you can specify them here. It's a mapping of a memorable name
to a ``PostgresEngine`` instance. For example::
DB = PostgresEngine(
config={'database':'main_db'},
extra_nodes={
'read_replica_1': PostgresEngine(
config={
'database':'main_db',
                            'host': 'read_replica.my_db.com'
},
extensions=()
)
}
)
Note how we set ``extensions=()``, because it's a read only database.
When executing a query, you can specify one of these nodes instead
of the main database. For example::
>>> await MyTable.select().run(node="read_replica_1")
""" # noqa: E501
__slots__ = (
"config",
"extensions",
"log_queries",
"log_responses",
"extra_nodes",
"pool",
"current_transaction",
)
engine_type = "postgres"
min_version_number = 10
def __init__(
self,
config: t.Dict[str, t.Any],
extensions: t.Sequence[str] = ("uuid-ossp",),
log_queries: bool = False,
log_responses: bool = False,
        extra_nodes: t.Optional[t.Mapping[str, PostgresEngine]] = None,
) -> None:
if extra_nodes is None:
extra_nodes = {}
self.config = config
self.extensions = extensions
self.log_queries = log_queries
self.log_responses = log_responses
self.extra_nodes = extra_nodes
self.pool: t.Optional[Pool] = None
database_name = config.get("database", "Unknown")
self.current_transaction = contextvars.ContextVar(
f"pg_current_transaction_{database_name}", default=None
)
super().__init__()
@staticmethod
def _parse_raw_version_string(version_string: str) -> float:
"""
The format of the version string isn't always consistent. Sometimes
it's just the version number e.g. '9.6.18', and sometimes
it contains specific build information e.g.
'12.4 (Ubuntu 12.4-0ubuntu0.20.04.1)'. Just extract the major and
minor version numbers.
"""
version_segment = version_string.split(" ")[0]
major, minor = version_segment.split(".")[:2]
return float(f"{major}.{minor}")
async def get_version(self) -> float:
"""
Returns the version of Postgres being run.
"""
try:
response: t.Sequence[t.Dict] = await self._run_in_new_connection(
"SHOW server_version"
)
except ConnectionRefusedError as exception:
# Suppressing the exception, otherwise importing piccolo_conf.py
# containing an engine will raise an ImportError.
colored_warning(f"Unable to connect to database - {exception}")
return 0.0
else:
version_string = response[0]["server_version"]
return self._parse_raw_version_string(
version_string=version_string
)
def get_version_sync(self) -> float:
return run_sync(self.get_version())
async def prep_database(self):
for extension in self.extensions:
try:
await self._run_in_new_connection(
f'CREATE EXTENSION IF NOT EXISTS "{extension}"',
)
except asyncpg.exceptions.InsufficientPrivilegeError:
colored_warning(
f"=> Unable to create {extension} extension - some "
"functionality may not behave as expected. Make sure "
"your database user has permission to create "
"extensions, or add it manually using "
f'`CREATE EXTENSION "{extension}";`',
level=Level.medium,
)
###########################################################################
# These typos existed in the codebase for a while, so leaving these proxy
# methods for now to ensure backwards compatibility.
async def start_connnection_pool(self, **kwargs) -> None:
colored_warning(
"`start_connnection_pool` is a typo - please change it to "
"`start_connection_pool`.",
category=DeprecationWarning,
)
return await self.start_connection_pool()
async def close_connnection_pool(self, **kwargs) -> None:
colored_warning(
"`close_connnection_pool` is a typo - please change it to "
"`close_connection_pool`.",
category=DeprecationWarning,
)
return await self.close_connection_pool()
###########################################################################
async def start_connection_pool(self, **kwargs) -> None:
if self.pool:
colored_warning(
"A pool already exists - close it first if you want to create "
"a new pool.",
)
else:
config = dict(self.config)
config.update(**kwargs)
self.pool = await asyncpg.create_pool(**config)
async def close_connection_pool(self) -> None:
if self.pool:
await self.pool.close()
self.pool = None
else:
colored_warning("No pool is running.")
###########################################################################
async def get_new_connection(self) -> Connection:
"""
Returns a new connection - doesn't retrieve it from the pool.
"""
return await asyncpg.connect(**self.config)
###########################################################################
async def batch(
self,
query: Query,
batch_size: int = 100,
node: t.Optional[str] = None,
) -> AsyncBatch:
"""
:param query:
The database query to run.
:param batch_size:
How many rows to fetch on each iteration.
:param node:
Which node to run the query on (see ``extra_nodes``). If not
specified, it runs on the main Postgres node.
"""
engine: t.Any = self.extra_nodes.get(node) if node else self
connection = await engine.get_new_connection()
return AsyncBatch(
connection=connection, query=query, batch_size=batch_size
)
###########################################################################
async def _run_in_pool(self, query: str, args: t.Sequence[t.Any] = None):
if args is None:
args = []
if not self.pool:
raise ValueError("A pool isn't currently running.")
async with self.pool.acquire() as connection:
response = await connection.fetch(query, *args)
return response
async def _run_in_new_connection(
self, query: str, args: t.Sequence[t.Any] = None
):
if args is None:
args = []
connection = await self.get_new_connection()
try:
results = await connection.fetch(query, *args)
except asyncpg.exceptions.PostgresError as exception:
await connection.close()
raise exception
await connection.close()
return results
async def run_querystring(
self, querystring: QueryString, in_pool: bool = True
):
query, query_args = querystring.compile_string(
engine_type=self.engine_type
)
query_id = self.get_query_id()
if self.log_queries:
self.print_query(query_id=query_id, query=querystring.__str__())
# If running inside a transaction:
current_transaction = self.current_transaction.get()
if current_transaction:
response = await current_transaction.connection.fetch(
query, *query_args
)
elif in_pool and self.pool:
response = await self._run_in_pool(query, query_args)
else:
response = await self._run_in_new_connection(query, query_args)
if self.log_responses:
self.print_response(query_id=query_id, response=response)
return response
async def run_ddl(self, ddl: str, in_pool: bool = True):
query_id = self.get_query_id()
if self.log_queries:
self.print_query(query_id=query_id, query=ddl)
# If running inside a transaction:
current_transaction = self.current_transaction.get()
if current_transaction:
response = await current_transaction.connection.fetch(ddl)
elif in_pool and self.pool:
response = await self._run_in_pool(ddl)
else:
response = await self._run_in_new_connection(ddl)
if self.log_responses:
self.print_response(query_id=query_id, response=response)
return response
def atomic(self) -> Atomic:
return Atomic(engine=self)
def transaction(self, allow_nested: bool = True) -> PostgresTransaction:
return PostgresTransaction(engine=self, allow_nested=allow_nested) |
|
piccolo-orm__piccolo | running.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/migrations/running.rst | [
"piccolo-orm__piccolo/piccolo/apps/migrations/commands/backwards.py",
"piccolo-orm__piccolo/piccolo/apps/migrations/commands/check.py",
"piccolo-orm__piccolo/piccolo/apps/migrations/commands/forwards.py"
] | Running migrations
Hint
To see all available options for these commands, use the --help flag,
for example piccolo migrations forwards --help.
Forwards
When the migration is run, the forwards function is executed. To do
this:
piccolo migrations forwards my_app
Multiple apps
If you have multiple apps you can run them all using:
piccolo migrations forwards all
Fake
We can 'fake' running a migration - we record that it ran in the
database without actually running it.
piccolo migrations forwards my_app 2022-09-04T19:44:09 --fake
This is useful if we started from an existing database using
piccolo schema generate: the initial migration we generated is for
tables which already exist, so we fake-run it.
------------------------------------------------------------------------
Reversing migrations
To reverse the migration, run the following command, specifying the ID
of a migration:
piccolo migrations backwards my_app 2022-09-04T19:44:09
Piccolo will then reverse the migrations for the given app, starting
with the most recent migration, up to and including the migration with
the specified ID.
You can try going forwards and backwards a few times to make sure it
works as expected.
------------------------------------------------------------------------
Preview
To see the SQL queries of a migration without actually running them, use
the --preview flag.
This works when running migrations forwards:
piccolo migrations forwards my_app --preview
Or backwards:
piccolo migrations backwards my_app 2022-09-04T19:44:09 --preview
------------------------------------------------------------------------
Checking migrations
You can easily check which migrations have and haven't run using the
following:
piccolo migrations check
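------------------------------------------------------------------------
Programmatic use
The CLI commands above are thin wrappers around coroutines which can also be called directly - for example from a unit test. Below is a minimal sketch based on the run_forwards and run_backwards functions defined in the code further down; the app name my_app is only a placeholder.
import asyncio

from piccolo.apps.migrations.commands.backwards import run_backwards
from piccolo.apps.migrations.commands.forwards import run_forwards


async def main():
    # Apply all outstanding migrations for the app.
    response = await run_forwards(app_name="my_app", migration_id="all")
    assert response.success

    # Reverse just the most recent migration, skipping the interactive prompt.
    response = await run_backwards(
        app_name="my_app", migration_id="1", auto_agree=True
    )
    assert response.success


asyncio.run(main())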
| from __future__ import annotations
import os
import sys
import typing as t
from piccolo.apps.migrations.auto.migration_manager import MigrationManager
from piccolo.apps.migrations.commands.base import (
BaseMigrationManager,
MigrationResult,
)
from piccolo.apps.migrations.tables import Migration
from piccolo.conf.apps import AppConfig, MigrationModule
from piccolo.utils.printing import print_heading
class BackwardsMigrationManager(BaseMigrationManager):
def __init__(
self,
app_name: str,
migration_id: str,
auto_agree: bool = False,
clean: bool = False,
preview: bool = False,
):
self.migration_id = migration_id
self.app_name = app_name
self.auto_agree = auto_agree
self.clean = clean
self.preview = preview
super().__init__()
async def run_migrations_backwards(self, app_config: AppConfig):
migration_modules: t.Dict[
str, MigrationModule
] = self.get_migration_modules(app_config.migrations_folder_path)
ran_migration_ids = await Migration.get_migrations_which_ran(
app_name=self.app_name
)
if len(ran_migration_ids) == 0:
# Make sure a success is returned, as we don't want this
# to appear as an error in automated scripts.
message = "🏁 No migrations to reverse!"
print(message)
return MigrationResult(success=True, message=message)
#######################################################################
if self.migration_id == "all":
earliest_migration_id = ran_migration_ids[0]
elif self.migration_id == "1":
earliest_migration_id = ran_migration_ids[-1]
else:
earliest_migration_id = self.migration_id
if earliest_migration_id not in ran_migration_ids:
message = (
"Unrecognized migration name - must be one of "
f"{ran_migration_ids}"
)
print(message, file=sys.stderr)
return MigrationResult(success=False, message=message)
#######################################################################
latest_migration_id = ran_migration_ids[-1]
start_index = ran_migration_ids.index(earliest_migration_id)
end_index = ran_migration_ids.index(latest_migration_id) + 1
subset = ran_migration_ids[start_index:end_index]
reversed_migration_ids = list(reversed(subset))
#######################################################################
n = len(reversed_migration_ids)
_continue = (
"y"
if self.auto_agree
else input(f"Reverse {n} migration{'s' if n!= 1 else ''}? [y/N] ")
)
if _continue in "yY":
for migration_id in reversed_migration_ids:
migration_module = migration_modules[migration_id]
response = await migration_module.forwards()
if isinstance(response, MigrationManager):
if self.preview:
response.preview = True
await response.run(backwards=True)
if not self.preview:
await Migration.delete().where(
Migration.name == migration_id
).run()
if self.clean and migration_module.__file__:
os.unlink(migration_module.__file__)
print("ok! ✔️")
return MigrationResult(success=True)
else: # pragma: no cover
message = "Not proceeding."
print(message, file=sys.stderr)
return MigrationResult(success=False, message=message)
async def run(self) -> MigrationResult:
await self.create_migration_table()
app_config = self.get_app_config(self.app_name)
return await self.run_migrations_backwards(app_config=app_config)
async def run_backwards(
app_name: str,
migration_id: str = "1",
auto_agree: bool = False,
clean: bool = False,
preview: bool = False,
) -> MigrationResult:
if app_name == "all":
sorted_app_names = BaseMigrationManager().get_sorted_app_names()
sorted_app_names.reverse()
names = [f"'{name}'" for name in sorted_app_names]
_continue = (
"y"
if auto_agree
else input(
"You are about to undo the migrations for the following "
"apps:\n"
f"{', '.join(names)}\n"
"Are you sure you want to continue? [y/N] "
)
)
if _continue not in "yY":
return MigrationResult(success=False, message="user cancelled")
for _app_name in sorted_app_names:
print_heading(_app_name)
manager = BackwardsMigrationManager(
app_name=_app_name,
migration_id="all",
auto_agree=auto_agree,
preview=preview,
)
await manager.run()
return MigrationResult(success=True)
else:
manager = BackwardsMigrationManager(
app_name=app_name,
migration_id=migration_id,
auto_agree=auto_agree,
clean=clean,
preview=preview,
)
return await manager.run()
async def backwards(
app_name: str,
migration_id: str = "1",
auto_agree: bool = False,
clean: bool = False,
preview: bool = False,
):
"""
Undo migrations up to a specific migration.
:param app_name:
The app to reverse migrations for. Specify a value of 'all' to reverse
migrations for all apps.
:param migration_id:
Migrations will be reversed up to and including this migration_id.
Specify a value of 'all' to undo all of the migrations. Specify a
value of '1' to undo the most recent migration.
:param auto_agree:
If true, automatically agree to any input prompts.
:param clean:
If true, the migration files which have been run backwards are deleted
from the disk after completing.
:param preview:
If true, don't actually run the migration, just print the SQL queries.
"""
response = await run_backwards(
app_name=app_name,
migration_id=migration_id,
auto_agree=auto_agree,
clean=clean,
preview=preview,
)
if not response.success:
sys.exit(1)
import dataclasses
import typing as t
from piccolo.apps.migrations.commands.base import BaseMigrationManager
from piccolo.apps.migrations.tables import Migration
from piccolo.utils.printing import get_fixed_length_string
@dataclasses.dataclass
class MigrationStatus:
app_name: str
migration_id: str
description: str
has_ran: bool
class CheckMigrationManager(BaseMigrationManager):
def __init__(self, app_name: str):
self.app_name = app_name
super().__init__()
async def get_migration_statuses(self) -> t.List[MigrationStatus]:
# Make sure the migration table exists, otherwise we'll get an error.
await self.create_migration_table()
migration_statuses: t.List[MigrationStatus] = []
app_modules = self.get_app_modules()
for app_module in app_modules:
app_config = app_module.APP_CONFIG
app_name = app_config.app_name
if self.app_name not in ["all", app_name]:
continue
migration_modules = self.get_migration_modules(
app_config.migrations_folder_path
)
ids = self.get_migration_ids(migration_modules)
for _id in ids:
has_ran = (
await Migration.exists()
.where(
(Migration.name == _id)
& (Migration.app_name == app_name)
)
.run()
)
description = getattr(
migration_modules[_id], "DESCRIPTION", "-"
)
migration_statuses.append(
MigrationStatus(
app_name=app_name,
migration_id=_id,
description=description,
has_ran=has_ran,
)
)
return migration_statuses
async def have_ran_count(self) -> int:
"""
:returns:
            The number of migrations which have been run.
"""
migration_statuses = await self.get_migration_statuses()
return len([i for i in migration_statuses if i.has_ran])
async def havent_ran_count(self) -> int:
"""
:returns:
            The number of migrations which haven't been run.
"""
migration_statuses = await self.get_migration_statuses()
return len([i for i in migration_statuses if not i.has_ran])
async def run(self):
"""
        Prints out the migrations which have and haven't run.
"""
print("Listing migrations...")
desc_length = 40
id_length = 26
print(
f'{get_fixed_length_string("APP NAME")} | '
f'{get_fixed_length_string("MIGRATION_ID", id_length)} | '
f'{get_fixed_length_string("DESCRIPTION", desc_length)} | RAN'
)
migration_statuses = await self.get_migration_statuses()
for migration_status in migration_statuses:
fixed_length_app_name = get_fixed_length_string(
migration_status.app_name
)
fixed_length_id = get_fixed_length_string(
migration_status.migration_id, length=id_length
)
fixed_length_description = get_fixed_length_string(
migration_status.description, desc_length
)
has_ran = migration_status.has_ran
print(
f"{fixed_length_app_name} | "
f"{fixed_length_id} | "
f"{fixed_length_description} | "
f"{has_ran}"
)
async def check(app_name: str = "all"):
"""
    Lists all migrations which have and haven't run.
:param app_name:
The name of the app to check. Specify a value of 'all' to check
the migrations for all apps.
"""
await CheckMigrationManager(app_name=app_name).run()
from __future__ import annotations
import sys
import typing as t
from piccolo.apps.migrations.auto.migration_manager import MigrationManager
from piccolo.apps.migrations.commands.base import (
BaseMigrationManager,
MigrationResult,
)
from piccolo.apps.migrations.tables import Migration
from piccolo.conf.apps import AppConfig, MigrationModule
from piccolo.utils.printing import print_heading
class ForwardsMigrationManager(BaseMigrationManager):
def __init__(
self,
app_name: str,
migration_id: str = "all",
fake: bool = False,
preview: bool = False,
):
self.app_name = app_name
self.migration_id = migration_id
self.fake = fake
self.preview = preview
super().__init__()
async def run_migrations(self, app_config: AppConfig) -> MigrationResult:
already_ran = await Migration.get_migrations_which_ran(
app_name=app_config.app_name
)
migration_modules: t.Dict[
str, MigrationModule
] = self.get_migration_modules(app_config.migrations_folder_path)
ids = self.get_migration_ids(migration_modules)
        n = len(already_ran)
print(f"👍 {n} migration{'s' if n!= 1 else ''} already complete")
havent_run = sorted(set(ids) - set(already_ran))
if len(havent_run) == 0:
# Make sure this still appears successful, as we don't want this
# to appear as an error in automated scripts.
message = "🏁 No migrations need to be run"
print(message)
return MigrationResult(success=True, message=message)
else:
n = len(havent_run)
print(f"⏩ {n} migration{'s' if n!= 1 else ''} not yet run")
if self.migration_id == "all":
subset = havent_run
elif self.migration_id == "1":
subset = havent_run[:1]
else:
try:
index = havent_run.index(self.migration_id)
except ValueError:
message = f"{self.migration_id} is unrecognised"
print(message, file=sys.stderr)
return MigrationResult(success=False, message=message)
else:
subset = havent_run[: index + 1]
if subset:
n = len(subset)
print(f"🚀 Running {n} migration{'s' if n!= 1 else ''}:")
for _id in subset:
if self.fake:
print(f"- {_id}: faked! ⏭️")
else:
migration_module = migration_modules[_id]
response = await migration_module.forwards()
if isinstance(response, MigrationManager):
if self.preview:
response.preview = True
await response.run()
print("ok! ✔️")
if not self.preview:
await Migration.insert().add(
Migration(name=_id, app_name=app_config.app_name)
).run()
return MigrationResult(success=True, message="migration succeeded")
async def run(self) -> MigrationResult:
await self.create_migration_table()
app_config = self.get_app_config(app_name=self.app_name)
return await self.run_migrations(app_config)
async def run_forwards(
app_name: str,
migration_id: str = "all",
fake: bool = False,
preview: bool = False,
) -> MigrationResult:
"""
    Run the migrations. This function can be used to programmatically run
migrations - for example, in a unit test.
"""
if app_name == "all":
sorted_app_names = BaseMigrationManager().get_sorted_app_names()
for _app_name in sorted_app_names:
print_heading(_app_name)
manager = ForwardsMigrationManager(
app_name=_app_name,
migration_id="all",
fake=fake,
preview=preview,
)
response = await manager.run()
if not response.success:
return response
return MigrationResult(success=True)
else:
manager = ForwardsMigrationManager(
app_name=app_name,
migration_id=migration_id,
fake=fake,
preview=preview,
)
return await manager.run()
async def forwards(
app_name: str,
migration_id: str = "all",
fake: bool = False,
preview: bool = False,
):
"""
Runs any migrations which haven't been run yet.
:param app_name:
The name of the app to migrate. Specify a value of 'all' to run
migrations for all apps.
:param migration_id:
Migrations will be ran up to and including this migration_id.
Specify a value of 'all' to run all of the migrations. Specify a
value of '1' to just run the next migration.
:param fake:
If set, will record the migrations as being run without actually
running them.
:param preview:
If true, don't actually run the migration, just print the SQL queries
"""
response = await run_forwards(
app_name=app_name,
migration_id=migration_id,
fake=fake,
preview=preview,
)
if not response.success:
sys.exit(1) |
|
piccolo-orm__piccolo | using_sqlite_and_asyncio_effectively.rst | Module doc | Generate documentation for this module | MIT License | piccolo-orm__piccolo/docs/src/piccolo/tutorials/using_sqlite_and_asyncio_effectively.rst | [
"piccolo-orm__piccolo/piccolo/engine/sqlite.py"
] | Using SQLite and asyncio effectively
When using Piccolo with SQLite, there are some best practices to follow.
asyncio => lots of connections
With asyncio, we can potentially open lots of database connections, and
attempt to perform concurrent database writes.
SQLite doesn't support such concurrent behavior as effectively as
Postgres, so we need to be careful.
One write at a time
SQLite can easily support lots of transactions concurrently if they are
reading, but only one write can be performed at a time.
------------------------------------------------------------------------
Transactions
SQLite has several transaction types, as specified by Piccolo's
TransactionType enum:
piccolo.engine.sqlite
TransactionType
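For reference, this is the enum as defined in the engine code included below:
class TransactionType(enum.Enum):
    deferred = "DEFERRED"
    immediate = "IMMEDIATE"
    exclusive = "EXCLUSIVE"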
Which to use?
When creating a transaction, Piccolo uses DEFERRED by default (to be
consistent with SQLite).
This means that the first SQL query executed within the transaction
determines whether it's a READ or WRITE:
- READ - if the first query is a SELECT
- WRITE - if the first query is something like an INSERT / UPDATE /
DELETE
If a transaction starts off with a SELECT, but then tries to perform an
INSERT / UPDATE / DELETE, SQLite tries to 'promote' the transaction so
it can write.
The problem is, if multiple concurrent connections try doing this at the
same time, SQLite will return a database locked error.
So if you're creating a transaction which you know will perform writes,
then create an IMMEDIATE transaction:
from piccolo.engine.sqlite import TransactionType
async with Band._meta.db.transaction(
transaction_type=TransactionType.immediate
):
# We perform a SELECT first, but as it's an IMMEDIATE transaction,
# we can later perform writes without getting a database locked
# error.
if not await Band.exists().where(Band.name == 'Pythonistas'):
await Band.objects().create(name="Pythonistas")
Multiple IMMEDIATE transactions can exist concurrently - SQLite uses a
lock to make sure only one of them writes at a time.
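As a rough sketch (reusing the example Band table from above), two coroutines can each open their own IMMEDIATE transaction and write concurrently - the second writer simply waits for the lock:
import asyncio

from piccolo.engine.sqlite import TransactionType


async def add_band(name: str):
    # Each writer gets its own IMMEDIATE transaction; SQLite's lock
    # serialises the actual writes.
    async with Band._meta.db.transaction(
        transaction_type=TransactionType.immediate
    ):
        await Band.objects().create(name=name)


async def main():
    await asyncio.gather(add_band("Pythonistas"), add_band("Rustaceans"))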
If your transaction will just be performing SELECT queries, then just
use the default DEFERRED transactions - you will get improved
performance, as no locking is involved:
async with Band._meta.db.transaction():
bands = await Band.select()
managers = await Manager.select()
------------------------------------------------------------------------
timeout
It's recommended to specify the timeout argument in
SQLiteEngine <piccolo.engine.sqlite.SQLiteEngine>.
DB = SQLiteEngine(timeout=60)
Imagine you have a web app, and each endpoint creates a transaction
which runs multiple queries. With SQLite, only a single write operation
can happen at a time, so if several connections are open, they may be
queued for a while.
Increasing the timeout means that queries are less likely to time out.
To find out more about timeout see the Python
sqlite3 docs <sqlite3.connect>.
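As a sketch, assuming the engine is configured in the usual piccolo_conf.py module, the timeout value (in seconds) is simply forwarded to the underlying sqlite3 / aiosqlite connection via connection_kwargs (see SQLiteEngine.__init__ in the code below):
from piccolo.engine.sqlite import SQLiteEngine

# timeout is passed straight through to the database adapter.
DB = SQLiteEngine(path="my_app.sqlite", timeout=60)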
| from __future__ import annotations
import contextvars
import datetime
import enum
import os
import sqlite3
import typing as t
import uuid
from dataclasses import dataclass
from decimal import Decimal
from piccolo.engine.base import Batch, Engine
from piccolo.engine.exceptions import TransactionError
from piccolo.query.base import DDL, Query
from piccolo.querystring import QueryString
from piccolo.utils.encoding import dump_json, load_json
from piccolo.utils.lazy_loader import LazyLoader
from piccolo.utils.sync import run_sync
aiosqlite = LazyLoader("aiosqlite", globals(), "aiosqlite")
if t.TYPE_CHECKING: # pragma: no cover
from aiosqlite import Connection, Cursor # type: ignore
from piccolo.table import Table
###############################################################################
# We need to register some adapters so sqlite returns types which are more
# consistent with the Postgres engine.
# In
def convert_numeric_in(value):
"""
Convert any Decimal values into floats.
"""
return float(value)
def convert_uuid_in(value) -> str:
"""
Converts the UUID value being passed into sqlite.
"""
return str(value)
def convert_time_in(value: datetime.time) -> str:
"""
Converts the time value being passed into sqlite.
"""
return value.isoformat()
def convert_date_in(value: datetime.date):
"""
Converts the date value being passed into sqlite.
"""
return value.isoformat()
def convert_datetime_in(value: datetime.datetime) -> str:
"""
Converts the datetime into a string. If it's timezone aware, we want to
convert it to UTC first. This is to replicate Postgres, which stores
timezone aware datetimes in UTC.
"""
if value.tzinfo is not None:
value = value.astimezone(datetime.timezone.utc)
return str(value)
def convert_timedelta_in(value: datetime.timedelta):
"""
Converts the timedelta value being passed into sqlite.
"""
return value.total_seconds()
def convert_array_in(value: list):
"""
Converts a list value into a string.
"""
if value and type(value[0]) not in [str, int, float]:
raise ValueError("Can only serialise str, int and float.")
return dump_json(value)
# Out
def convert_numeric_out(value: bytes) -> Decimal:
"""
Convert float values into Decimals.
"""
return Decimal(value.decode("ascii"))
def convert_int_out(value: bytes) -> int:
"""
Make sure Integer values are actually of type int.
"""
return int(float(value))
def convert_uuid_out(value: bytes) -> uuid.UUID:
"""
If the value is a uuid, convert it to a UUID instance.
"""
return uuid.UUID(value.decode("utf8"))
def convert_date_out(value: bytes) -> datetime.date:
return datetime.date.fromisoformat(value.decode("utf8"))
def convert_time_out(value: bytes) -> datetime.time:
"""
    If the value is a time, convert it to a time instance.
"""
return datetime.time.fromisoformat(value.decode("utf8"))
def convert_seconds_out(value: bytes) -> datetime.timedelta:
"""
If the value is from a seconds column, convert it to a timedelta instance.
"""
return datetime.timedelta(seconds=float(value.decode("utf8")))
def convert_boolean_out(value: bytes) -> bool:
"""
If the value is from a boolean column, convert it to a bool value.
"""
_value = value.decode("utf8")
return _value == "1"
def convert_timestamp_out(value: bytes) -> datetime.datetime:
"""
If the value is from a timestamp column, convert it to a datetime value.
"""
return datetime.datetime.fromisoformat(value.decode("utf8"))
def convert_timestamptz_out(value: bytes) -> datetime.datetime:
"""
If the value is from a timestamptz column, convert it to a datetime value,
with a timezone of UTC.
"""
_value = datetime.datetime.fromisoformat(value.decode("utf8"))
_value = _value.replace(tzinfo=datetime.timezone.utc)
return _value
def convert_array_out(value: bytes) -> t.List:
"""
    If the value is from an array column, deserialise the string back into a
list.
"""
return load_json(value.decode("utf8"))
def convert_M2M_out(value: bytes) -> t.List:
_value = value.decode("utf8")
return _value.split(",")
sqlite3.register_converter("Numeric", convert_numeric_out)
sqlite3.register_converter("Integer", convert_int_out)
sqlite3.register_converter("UUID", convert_uuid_out)
sqlite3.register_converter("Date", convert_date_out)
sqlite3.register_converter("Time", convert_time_out)
sqlite3.register_converter("Seconds", convert_seconds_out)
sqlite3.register_converter("Boolean", convert_boolean_out)
sqlite3.register_converter("Timestamp", convert_timestamp_out)
sqlite3.register_converter("Timestamptz", convert_timestamptz_out)
sqlite3.register_converter("Array", convert_array_out)
sqlite3.register_converter("M2M", convert_M2M_out)
sqlite3.register_adapter(Decimal, convert_numeric_in)
sqlite3.register_adapter(uuid.UUID, convert_uuid_in)
sqlite3.register_adapter(datetime.time, convert_time_in)
sqlite3.register_adapter(datetime.date, convert_date_in)
sqlite3.register_adapter(datetime.datetime, convert_datetime_in)
sqlite3.register_adapter(datetime.timedelta, convert_timedelta_in)
sqlite3.register_adapter(list, convert_array_in)
###############################################################################
@dataclass
class AsyncBatch(Batch):
connection: Connection
query: Query
batch_size: int
# Set internally
_cursor: t.Optional[Cursor] = None
@property
def cursor(self) -> Cursor:
if not self._cursor:
raise ValueError("_cursor not set")
return self._cursor
async def next(self) -> t.List[t.Dict]:
data = await self.cursor.fetchmany(self.batch_size)
return await self.query._process_results(data)
def __aiter__(self):
return self
async def __anext__(self):
response = await self.next()
if response == []:
raise StopAsyncIteration()
return response
async def __aenter__(self):
querystring = self.query.querystrings[0]
template, template_args = querystring.compile_string()
self._cursor = await self.connection.execute(template, *template_args)
return self
async def __aexit__(self, exception_type, exception, traceback):
await self._cursor.close()
await self.connection.close()
return exception is not None
###############################################################################
class TransactionType(enum.Enum):
"""
See the `SQLite <https://www.sqlite.org/lang_transaction.html>`_ docs for
more info.
"""
deferred = "DEFERRED"
immediate = "IMMEDIATE"
exclusive = "EXCLUSIVE"
class Atomic:
"""
Usage:
transaction = engine.atomic()
transaction.add(Foo.create_table())
# Either:
transaction.run_sync()
await transaction.run()
"""
__slots__ = ("engine", "queries", "transaction_type")
def __init__(
self,
engine: SQLiteEngine,
transaction_type: TransactionType = TransactionType.deferred,
):
self.engine = engine
self.transaction_type = transaction_type
self.queries: t.List[Query] = []
def add(self, *query: Query):
self.queries += list(query)
async def run(self):
from piccolo.query.methods.objects import Create, GetOrCreate
try:
async with self.engine.transaction(
transaction_type=self.transaction_type
):
for query in self.queries:
if isinstance(query, (Query, DDL, Create, GetOrCreate)):
await query.run()
else:
raise ValueError("Unrecognised query")
self.queries = []
except Exception as exception:
self.queries = []
raise exception from exception
def run_sync(self):
return run_sync(self.run())
def __await__(self):
return self.run().__await__()
###############################################################################
class Savepoint:
def __init__(self, name: str, transaction: SQLiteTransaction):
self.name = name
self.transaction = transaction
async def rollback_to(self):
await self.transaction.connection.execute(
f"ROLLBACK TO SAVEPOINT {self.name}"
)
async def release(self):
await self.transaction.connection.execute(
f"RELEASE SAVEPOINT {self.name}"
)
class SQLiteTransaction:
"""
Used for wrapping queries in a transaction, using a context manager.
Currently it's async only.
Usage::
async with engine.transaction():
# Run some queries:
await Band.select().run()
"""
__slots__ = (
"engine",
"context",
"connection",
"transaction_type",
"allow_nested",
"_savepoint_id",
"_parent",
"_committed",
"_rolled_back",
)
def __init__(
self,
engine: SQLiteEngine,
transaction_type: TransactionType = TransactionType.deferred,
allow_nested: bool = True,
):
"""
:param transaction_type:
If your transaction just contains ``SELECT`` queries, then use
``TransactionType.deferred``. This will give you the best
performance. When performing ``INSERT``, ``UPDATE``, ``DELETE``
queries, we recommend using ``TransactionType.immediate`` to
avoid database locks.
"""
self.engine = engine
self.transaction_type = transaction_type
current_transaction = self.engine.current_transaction.get()
self._savepoint_id = 0
self._parent = None
self._committed = False
self._rolled_back = False
if current_transaction:
if allow_nested:
self._parent = current_transaction
else:
raise TransactionError(
"A transaction is already active - nested transactions "
"aren't allowed."
)
async def __aenter__(self) -> SQLiteTransaction:
if self._parent is not None:
return self._parent
self.connection = await self.get_connection()
await self.begin()
self.context = self.engine.current_transaction.set(self)
return self
async def get_connection(self):
return await self.engine.get_connection()
async def begin(self):
await self.connection.execute(f"BEGIN {self.transaction_type.value}")
async def commit(self):
await self.connection.execute("COMMIT")
self._committed = True
async def rollback(self):
await self.connection.execute("ROLLBACK")
self._rolled_back = True
async def rollback_to(self, savepoint_name: str):
"""
Used to rollback to a savepoint just using the name.
"""
await Savepoint(name=savepoint_name, transaction=self).rollback_to()
###########################################################################
def get_savepoint_id(self) -> int:
self._savepoint_id += 1
return self._savepoint_id
async def savepoint(self, name: t.Optional[str] = None) -> Savepoint:
name = name or f"savepoint_{self.get_savepoint_id()}"
await self.connection.execute(f"SAVEPOINT {name}")
return Savepoint(name=name, transaction=self)
###########################################################################
async def __aexit__(self, exception_type, exception, traceback):
if self._parent:
return exception is None
if exception:
# The user may have manually rolled it back.
if not self._rolled_back:
await self.rollback()
else:
# The user may have manually committed it.
if not self._committed and not self._rolled_back:
await self.commit()
await self.connection.close()
self.engine.current_transaction.reset(self.context)
return exception is None
###############################################################################
def dict_factory(cursor, row) -> t.Dict:
return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
class SQLiteEngine(Engine[t.Optional[SQLiteTransaction]]):
__slots__ = (
"connection_kwargs",
"current_transaction",
"log_queries",
"log_responses",
)
engine_type = "sqlite"
min_version_number = 3.25
def __init__(
self,
path: str = "piccolo.sqlite",
log_queries: bool = False,
log_responses: bool = False,
**connection_kwargs,
) -> None:
"""
:param path:
            A relative or absolute path to the SQLite database file (it
will be created if it doesn't already exist).
:param log_queries:
If ``True``, all SQL and DDL statements are printed out before
being run. Useful for debugging.
:param log_responses:
If ``True``, the raw response from each query is printed out.
Useful for debugging.
:param connection_kwargs:
These are passed directly to the database adapter. We recommend
setting ``timeout`` if you expect your application to process a
large number of concurrent writes, to prevent queries timing out.
See Python's `sqlite3 docs <https://docs.python.org/3/library/sqlite3.html#sqlite3.connect>`_
for more info.
""" # noqa: E501
default_connection_kwargs = {
"database": path,
"detect_types": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
"isolation_level": None,
}
self.log_queries = log_queries
self.log_responses = log_responses
self.connection_kwargs = {
**default_connection_kwargs,
**connection_kwargs,
}
self.current_transaction = contextvars.ContextVar(
f"sqlite_current_transaction_{path}", default=None
)
super().__init__()
@property
def path(self):
return self.connection_kwargs["database"]
@path.setter
def path(self, value: str):
self.connection_kwargs["database"] = value
async def get_version(self) -> float:
return self.get_version_sync()
def get_version_sync(self) -> float:
major, minor, _ = sqlite3.sqlite_version_info
return float(f"{major}.{minor}")
async def prep_database(self):
pass
###########################################################################
def remove_db_file(self):
"""
Use with caution - removes the SQLite file. Useful for testing
purposes.
"""
if os.path.exists(self.path):
os.unlink(self.path)
def create_db_file(self):
"""
Create the database file. Useful for testing purposes.
"""
if os.path.exists(self.path):
raise Exception(f"Database at {self.path} already exists")
with open(self.path, "w"):
pass
###########################################################################
async def batch(
self, query: Query, batch_size: int = 100, node: t.Optional[str] = None
) -> AsyncBatch:
"""
:param query:
The database query to run.
:param batch_size:
How many rows to fetch on each iteration.
:param node:
This is ignored currently, as SQLite runs off a single node. The
value is here so the API is consistent with Postgres.
"""
connection = await self.get_connection()
return AsyncBatch(
connection=connection, query=query, batch_size=batch_size
)
###########################################################################
async def get_connection(self) -> Connection:
connection = await aiosqlite.connect(**self.connection_kwargs)
connection.row_factory = dict_factory # type: ignore
await connection.execute("PRAGMA foreign_keys = 1")
return connection
###########################################################################
async def _get_inserted_pk(self, cursor, table: t.Type[Table]) -> t.Any:
"""
If the `pk` column is a non-integer then `ROWID` and `pk` will return
different types. Need to query by `lastrowid` to get `pk`s in SQLite
prior to 3.35.0.
"""
await cursor.execute(
f"SELECT {table._meta.primary_key._meta.db_column_name} FROM "
f"{table._meta.tablename} WHERE ROWID = {cursor.lastrowid}"
)
response = await cursor.fetchone()
return response[table._meta.primary_key._meta.db_column_name]
async def _run_in_new_connection(
self,
query: str,
args: t.List[t.Any] = None,
query_type: str = "generic",
table: t.Optional[t.Type[Table]] = None,
):
if args is None:
args = []
async with aiosqlite.connect(**self.connection_kwargs) as connection:
await connection.execute("PRAGMA foreign_keys = 1")
connection.row_factory = dict_factory # type: ignore
async with connection.execute(query, args) as cursor:
await connection.commit()
if query_type == "insert" and self.get_version_sync() < 3.35:
# We can't use the RETURNING clause on older versions
# of SQLite.
assert table is not None
pk = await self._get_inserted_pk(cursor, table)
return [{table._meta.primary_key._meta.db_column_name: pk}]
else:
return await cursor.fetchall()
async def _run_in_existing_connection(
self,
connection,
query: str,
args: t.List[t.Any] = None,
query_type: str = "generic",
table: t.Optional[t.Type[Table]] = None,
):
"""
This is used when a transaction is currently active.
"""
if args is None:
args = []
await connection.execute("PRAGMA foreign_keys = 1")
connection.row_factory = dict_factory
async with connection.execute(query, args) as cursor:
response = await cursor.fetchall()
if query_type == "insert" and self.get_version_sync() < 3.35:
# We can't use the RETURNING clause on older versions
# of SQLite.
assert table is not None
pk = await self._get_inserted_pk(cursor, table)
return [{table._meta.primary_key._meta.db_column_name: pk}]
else:
return response
async def run_querystring(
self, querystring: QueryString, in_pool: bool = False
):
"""
Connection pools aren't currently supported - the argument is there
for consistency with other engines.
"""
query_id = self.get_query_id()
if self.log_queries:
self.print_query(query_id=query_id, query=querystring.__str__())
query, query_args = querystring.compile_string(
engine_type=self.engine_type
)
# If running inside a transaction:
current_transaction = self.current_transaction.get()
if current_transaction:
response = await self._run_in_existing_connection(
connection=current_transaction.connection,
query=query,
args=query_args,
query_type=querystring.query_type,
table=querystring.table,
)
else:
response = await self._run_in_new_connection(
query=query,
args=query_args,
query_type=querystring.query_type,
table=querystring.table,
)
if self.log_responses:
self.print_response(query_id=query_id, response=response)
return response
async def run_ddl(self, ddl: str, in_pool: bool = False):
"""
Connection pools aren't currently supported - the argument is there
for consistency with other engines.
"""
query_id = self.get_query_id()
if self.log_queries:
self.print_query(query_id=query_id, query=ddl)
# If running inside a transaction:
current_transaction = self.current_transaction.get()
if current_transaction:
response = await self._run_in_existing_connection(
connection=current_transaction.connection,
query=ddl,
)
else:
response = await self._run_in_new_connection(
query=ddl,
)
if self.log_responses:
self.print_response(query_id=query_id, response=response)
return response
def atomic(
self, transaction_type: TransactionType = TransactionType.deferred
) -> Atomic:
return Atomic(engine=self, transaction_type=transaction_type)
def transaction(
self,
transaction_type: TransactionType = TransactionType.deferred,
allow_nested: bool = True,
) -> SQLiteTransaction:
"""
Create a new database transaction. See :class:`Transaction`.
"""
return SQLiteTransaction(
engine=self,
transaction_type=transaction_type,
allow_nested=allow_nested,
) |
|
nvidia__dali | augmentations.rst | Module doc | Generate documentation for this module | Apache License 2.0 | nvidia__dali/docs/auto_aug/augmentations.rst | [
"nvidia__dali/dali/python/nvidia/dali/auto_aug/augmentations.py"
] | Augmentation operations
In terms of automatic augmentations, an augmentation is an image
processing function that meets the following requirements:
1. Its first argument is the input batch for the processing.
2. The second argument is the parameter controlling the operation (for
example, the angle of rotation).
3. It can take additional keyword arguments.
4. It is implemented in terms of DALI operators <operation reference>.
5. It is decorated with
@augmentation <nvidia.dali.auto_aug.core.augmentation>
Here is an example of defining a simplified rotate augmentation:
from nvidia.dali.auto_aug.core import augmentation
from nvidia.dali import fn
@augmentation(mag_range=(0, 30), randomly_negate=True)
def rotate_aug(data, angle, fill_value=128, rotate_keep_size=True):
return fn.rotate(data, angle=angle, fill_value=fill_value, keep_size=True)
Based on an existing augmentation, a new one with adjusted parameters
can be created:
rotate_aug_60 = rotate_aug.augmentation(mag_range=(0, 60), randomly_negate=False)
To learn more about how to build a policy using the augmentations listed
here, check the documentation for the specific automatic augmentation
scheme: AutoAugment, RandAugment, or TrivialAugment.
Decorator
nvidia.dali.auto_aug.core.augmentation
nvidia.dali.auto_aug._augmentation
The result of decorating a function with
@augmentation <nvidia.dali.auto_aug.core.augmentation> is an instance of
the class ~nvidia.dali.auto_aug.core._augmentation.Augmentation. The class
should not be instantiated directly; it needs to be created with the
decorator.
Once obtained, those objects become callables that can be used to
specify a policy for AutoAugment, RandAugment or TrivialAugment.
def augmentation(self, mag_range, randomly_negate, mag_to_param,
param_device, name) -> Augmentation
You can call this method to create a new
~nvidia.dali.auto_aug.core._augmentation.Augmentation instance based on
an existing one, with the parameters adjusted. All parameters are
optional - those that were specified replace the ones that were
previously passed to
@augmentation <nvidia.dali.auto_aug.core.augmentation>.
param mag_range
optional, see
@augmentation <nvidia.dali.auto_aug.core.augmentation>.
param randomly_negate
optional, see
@augmentation <nvidia.dali.auto_aug.core.augmentation>.
param mag_to_param
optional, see
@augmentation <nvidia.dali.auto_aug.core.augmentation>.
param param_device
optional, see
@augmentation <nvidia.dali.auto_aug.core.augmentation>.
param name
optional, see
@augmentation <nvidia.dali.auto_aug.core.augmentation>.
Augmentations
Here is a list of callable
~nvidia.dali.auto_aug.core._augmentation.Augmentation instances defined
by DALI. Note that the mag_to_param, param_device and name parameters
were omitted from the
@augmentation <nvidia.dali.auto_aug.core.augmentation> decorator listing
for simplicity.
To adjust the parameter range, use the augmentation method on an
existing ~nvidia.dali.auto_aug.core._augmentation.Augmentation instance
listed below, for example:
# Create a steeper shear operation based on an existing one
steep_shear_x = shear_x.augmentation(mag_range=(0, 0.5), name="steep_shear_x")
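Each of these instances can also be called directly inside a pipeline definition; instead of the raw parameter it accepts the magnitude_bin and num_magnitude_bins arguments (see the module docstring in the implementation below). A rough sketch, assuming a directory of images at a placeholder path:
from nvidia.dali import fn, pipeline_def, types
from nvidia.dali.auto_aug import augmentations

@pipeline_def(batch_size=16, num_threads=4, device_id=None)
def sheared_images():
    jpegs, _ = fn.readers.file(file_root="images/")  # placeholder path
    images = fn.decoders.image(jpegs, output_type=types.RGB)
    # Pick the strongest of 31 magnitude bins; shear_x randomly negates
    # the magnitude, so the shear direction varies per sample.
    return augmentations.shear_x(
        images, magnitude_bin=30, num_magnitude_bins=31
    )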
nvidia.dali.auto_aug.augmentations
shear_x(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies nvidia.dali.fn.transforms.shear with shear_x factor using
nvidia.dali.fn.warp_affine.
@augmentation(mag_range=(0, 0.3), randomly_negate=True, ...)
def shear_x(data, shear, fill_value=128, interp_type=None)
shear_y(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies nvidia.dali.fn.transforms.shear with shear_y factor using
nvidia.dali.fn.warp_affine.
@augmentation(mag_range=(0, 0.3), randomly_negate=True, ...)
def shear_y(data, shear, fill_value=128, interp_type=None)
translate_x(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies nvidia.dali.fn.transforms.translation with shape-relative offset
in x-axis using nvidia.dali.fn.warp_affine.
@augmentation(mag_range=(0., 1.), randomly_negate=True, ...)
def translate_x(data, rel_offset, shape, fill_value=128, interp_type=None)
translate_x_no_shape(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies nvidia.dali.fn.transforms.translation with absolute offset in
x-axis using nvidia.dali.fn.warp_affine.
@augmentation(mag_range=(0, 250), randomly_negate=True, ...)
def translate_x_no_shape(data, offset, fill_value=128, interp_type=None)
translate_y(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies nvidia.dali.fn.transforms.translation with shape-relative offset
in y-axis using nvidia.dali.fn.warp_affine.
@augmentation(mag_range=(0., 1.), randomly_negate=True, ...)
def translate_y(data, rel_offset, shape, fill_value=128, interp_type=None)
translate_y_no_shape(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies nvidia.dali.fn.transforms.translation with absolute offset in
y-axis using nvidia.dali.fn.warp_affine.
@augmentation(mag_range=(0, 250), randomly_negate=True, ...)
def translate_y_no_shape(data, offset, fill_value=128, interp_type=None)
rotate(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Rotates the image using nvidia.dali.fn.rotate.
@augmentation(mag_range=(0, 30), randomly_negate=True)
def rotate(data, angle, fill_value=128, interp_type=None, rotate_keep_size=True)
brightness(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Adjusts the brightness with nvidia.dali.fn.brightness. The magnitude is
mapped to a [0, 2] parameter range.
@augmentation(mag_range=(0, 0.9), randomly_negate=True, ...)
def brightness(data, parameter)
contrast(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Adjusts the contrast using a channel-weighted mean as a contrast
center. The magnitude is mapped to a [0, 2] parameter range.
@augmentation(mag_range=(0, 0.9), randomly_negate=True, ...)
def contrast(data, parameter)
color(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Adjusts the color with nvidia.dali.fn.saturation. The magnitude is
mapped to a [0, 2] parameter range.
@augmentation(mag_range=(0, 0.9), randomly_negate=True, ...)
def color(data, parameter)
sharpness(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
The outputs correspond to PIL's ImageEnhance.Sharpness.
@augmentation(mag_range=(0, 0.9), randomly_negate=True, ...)
def sharpness(data, kernel)
posterize(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Posterizes the image by masking out the lower input bits.
@augmentation(mag_range=(0, 4), ...)
def posterize(data, mask)
solarize(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Inverts the pixels whose values are at or above a threshold.
@augmentation(mag_range=(256, 0), ...)
def solarize(data, threshold)
solarize_add(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies the shift to the pixels of value lower than 128.
@augmentation(mag_range=(0, 110), ...)
def solarize_add(data, shift)
invert(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Inverts the image.
@augmentation
def invert(data, _)
equalize(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies histogram equalization using
nvidia.dali.fn.experimental.equalize.
@augmentation
def equalize(data, _)
"""
DALI's equalize follows OpenCV's histogram equalization.
The PIL uses slightly different formula when transforming histogram's
cumulative sum into lookup table.
auto_contrast(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Applies automatic contrast adjustment.
@augmentation
def auto_contrast(data, _)
identity(data, *, magnitude_bin=None, num_magnitude_bins=None, **kwargs)
Identity operation - no processing is applied.
@augmentation
def identity(data, _)
| # Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import numpy as np
except ImportError:
raise RuntimeError(
"Could not import numpy. DALI's automatic augmentation examples depend on numpy. "
"Please install numpy to use the examples.")
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug.core import augmentation
"""
This module contains a standard suite of augmentations used by AutoAugment policy for ImageNet,
RandAugment and TrivialAugmentWide. The augmentations are implemented in terms of DALI operators.
The `@augmentation` decorator handles computation of the decorated transformations's parameter.
When called, the decorated augmentation expects:
* a single positional argument: batch of samples
* `magnitude_bin` and `num_magnitude_bins` instead of the parameter.
The parameter is computed as if by calling
`mag_to_param(magnitudes[magnitude_bin] * ((-1) ** random_sign))`, where
`magnitudes=linspace(mag_range[0], mag_range[1], num_magnitude_bins)`.
The augmentations in this module are defined with example setups passed
to `@augmentation`. The parameters can be easily adjusted. For instance, to increase
the magnitudes range of `shear_x` from 0.3 to 0.5, you can create
`my_shear_x = shear_x.augmentation(mag_range=(0, 0.5))`.
"""
def warp_x_param(magnitude):
return [magnitude, 0]
def warp_y_param(magnitude):
return [0, magnitude]
@augmentation(mag_range=(0, 0.3), randomly_negate=True, mag_to_param=warp_x_param)
def shear_x(data, shear, fill_value=128, interp_type=None):
mt = fn.transforms.shear(shear=shear)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 0.3), randomly_negate=True, mag_to_param=warp_y_param)
def shear_y(data, shear, fill_value=128, interp_type=None):
mt = fn.transforms.shear(shear=shear)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0., 1.), randomly_negate=True, mag_to_param=warp_x_param)
def translate_x(data, rel_offset, shape, fill_value=128, interp_type=None):
offset = rel_offset * shape[1]
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 250), randomly_negate=True, mag_to_param=warp_x_param,
name="translate_x")
def translate_x_no_shape(data, offset, fill_value=128, interp_type=None):
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0., 1.), randomly_negate=True, mag_to_param=warp_y_param)
def translate_y(data, rel_offset, shape, fill_value=128, interp_type=None):
offset = rel_offset * shape[0]
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 250), randomly_negate=True, mag_to_param=warp_y_param,
name="translate_y")
def translate_y_no_shape(data, offset, fill_value=128, interp_type=None):
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 30), randomly_negate=True)
def rotate(data, angle, fill_value=128, interp_type=None, rotate_keep_size=True):
return fn.rotate(data, angle=angle, fill_value=fill_value, interp_type=interp_type,
keep_size=rotate_keep_size)
def shift_enhance_range(magnitude):
"""The `enhance` operations (brightness, contrast, color, sharpness) accept magnitudes
from [0, 2] range. However, the neutral magnitude is not 0 but 1 and the intuitive strength
of the operation increases the further the magnitude is from 1. So, we specify magnitudes range
to be in [0, 1] range, expect it to be randomly negated and then shift it by 1"""
return 1 + magnitude
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=shift_enhance_range)
def brightness(data, parameter):
return fn.brightness(data, brightness=parameter)
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=shift_enhance_range)
def contrast(data, parameter):
"""
It follows PIL implementation of Contrast enhancement which uses a channel-weighted
mean as a contrast center.
"""
# assumes FHWC or HWC layout
mean = fn.reductions.mean(data, axis_names="HW", keep_dims=True)
rgb_weights = types.Constant(np.array([0.299, 0.587, 0.114], dtype=np.float32))
center = fn.reductions.sum(mean * rgb_weights, axis_names="C", keep_dims=True)
# it could be just `fn.contrast(data, contrast=parameter, contrast_center=center)`
# but for GPU `data` the `center` is in GPU mem, and that cannot be passed
# as named arg (i.e. `contrast_center`) to the operator
return fn.cast_like(center + (data - center) * parameter, data)
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=shift_enhance_range)
def color(data, parameter):
return fn.saturation(data, saturation=parameter)
def sharpness_kernel(magnitude):
# assumes magnitude: [-1, 1]
blur = np.array([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=np.float32) / 13
ident = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
return -magnitude * blur + (1 + magnitude) * ident
def sharpness_kernel_shifted(magnitude):
# assumes magnitude: [0, 2]
return sharpness_kernel(magnitude - 1)
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=sharpness_kernel,
param_device="auto")
def sharpness(data, kernel):
"""
The outputs correspond to PIL's ImageEnhance.Sharpness with the exception for 1px
border around the output. PIL computes convolution with smoothing filter only for
valid positions (no out-of-bounds filter positions) and pads the output with the input.
"""
return fn.experimental.filter(data, kernel)
def poster_mask_uint8(magnitude):
# expects [0..8] where 8 yields identity mask and a 0
# would be a mask that zeros all bits,
# however, following the implementation for AA and RA referred
# in the paper https://arxiv.org/pdf/1909.13719.pdf, we remap 0 to 1,
# to avoid completely blank images
magnitude = np.round(magnitude).astype(np.uint32)
if magnitude <= 0:
magnitude = 1
elif magnitude > 8:
magnitude = 8
nbits = np.round(8 - magnitude).astype(np.uint32)
removal_mask = np.uint8(2)**nbits - 1
return np.array(np.uint8(255) ^ removal_mask, dtype=np.uint8)
@augmentation(mag_range=(0, 4), mag_to_param=poster_mask_uint8, param_device="auto")
def posterize(data, mask):
return data & mask
@augmentation(mag_range=(256, 0), param_device="auto")
def solarize(data, threshold):
sample_inv = types.Constant(255, dtype=types.UINT8) - data
mask_unchanged = data < threshold
mask_inverted = mask_unchanged ^ True
return mask_unchanged * data + mask_inverted * sample_inv
def solarize_add_shift(shift):
if shift >= 128:
raise Exception("The solarize_add augmentation accepts shifts from 0 to 128")
return np.uint8(shift)
@augmentation(mag_range=(0, 110), param_device="auto", mag_to_param=solarize_add_shift)
def solarize_add(data, shift):
mask_shifted = data < types.Constant(128, dtype=types.UINT8)
mask_id = mask_shifted ^ True
sample_shifted = data + shift
return mask_shifted * sample_shifted + mask_id * data
@augmentation
def invert(data, _):
return types.Constant(255, dtype=types.UINT8) - data
@augmentation
def equalize(data, _):
"""
DALI's equalize follows OpenCV's histogram equalization.
The PIL uses slightly different formula when transforming histogram's
cumulative sum into lookup table.
"""
return fn.experimental.equalize(data)
@augmentation
def auto_contrast(data, _):
# assumes FHWC or HWC layout
lo = fn.reductions.min(data, axis_names="HW", keep_dims=True)
hi = fn.reductions.max(data, axis_names="HW", keep_dims=True)
diff = hi - lo
mask_scale = diff > 0
mask_id = mask_scale ^ True
# choose div so that scale ends up being 255 / diff if diff > 0 and 1 otherwise
div_by = diff * mask_scale + types.Constant(255, dtype=types.UINT8) * mask_id
scale = 255 / div_by
scaled = (data - lo * mask_scale) * scale
return fn.cast_like(scaled, data)
@augmentation
def identity(data, _):
    return data

nvidia__dali | auto_augment.rst | Module doc | Generate documentation for this module | Apache License 2.0 | nvidia__dali/docs/auto_aug/auto_augment.rst | ["nvidia__dali/dali/python/nvidia/dali/auto_aug/auto_augment.py"]

AutoAugment
AutoAugment, as described in https://arxiv.org/abs/1805.09501, builds
policies out of pairs of augmentations <Augmentation operations> called
subpolicies. Each subpolicy specifies a sequence of operations with the
probability of application and the magnitude parameter. When AutoAugment
is used, for each sample a random subpolicy is selected and applied.
To use the predefined policy that was discovered on ImageNet, import and
invoke ~nvidia.dali.auto_aug.auto_augment.auto_augment inside the
pipeline definition, for example:
from nvidia.dali import pipeline_def
from nvidia.dali.auto_aug import auto_augment
@pipeline_def(enable_conditionals=True)
def training_pipe(data_dir, image_size):
jpegs, labels = fn.readers.file(file_root=data_dir, ...)
shapes = fn.peek_image_shape(jpegs)
images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
augmented_images = auto_augment.auto_augment(images, shape=shapes)
resized_images = fn.resize(augmented_images, size=[image_size, image_size])
return resized_images, labels
Warning
You need to define the pipeline with the
@pipeline_def <nvidia.dali.pipeline_def> decorator and set
enable_conditionals to True to use automatic augmentations.
Refer to this <Building and invoking custom policies> section to read
more about using custom policies.
Invoking predefined AutoAugment policies
To invoke one of the predefined policies, use the following functions.
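For example, assuming a pipeline like the one above, where images holds
decoded frames and shapes was obtained with fn.peek_image_shape, the
ImageNet policy can be applied with a single call (a minimal sketch):
augmented_images = auto_augment.auto_augment_image_net(images, shape=shapes)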
Building and invoking custom policies
DALI's AutoAugment implementation relies on the
~nvidia.dali.auto_aug.core.Policy class to define the policies to
execute, which can be invoked within the pipeline using the
~nvidia.dali.auto_aug.auto_augment.apply_auto_augment function.
The best way is to wrap your policy creation into a function:
from nvidia.dali.auto_aug import augmentations
from nvidia.dali.auto_aug.core import Policy
def my_custom_policy() -> Policy:
"""
Creates a simple AutoAugment policy with 3 sub-policies using custom magnitude ranges.
"""
shear_x = augmentations.shear_x.augmentation((0, 0.5), True)
shear_y = augmentations.shear_y.augmentation((0, 0.5), True)
rotate = augmentations.rotate.augmentation((0, 40), True)
invert = augmentations.invert
return Policy(
name="SimplePolicy", num_magnitude_bins=11, sub_policies=[
[(shear_x, 0.8, 7), (shear_y, 0.8, 4)],
[(invert, 0.4, None), (rotate, 0.6, 8)],
[(rotate, 0.6, 7), (shear_y, 0.6, 3)],
])
Each tuple within the subpolicy definition specifies:
- the augmentation to use,
- the probability of applying that augmentation (if this subpolicy is
selected),
- the magnitude to be used.
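A policy built this way can then be run inside a pipeline with
~nvidia.dali.auto_aug.auto_augment.apply_auto_augment. The following is
a minimal sketch rather than a complete training pipeline; it assumes a
data_dir with images readable by fn.readers.file and reuses the
my_custom_policy function defined above:
from nvidia.dali import fn, types, pipeline_def
from nvidia.dali.auto_aug import auto_augment
@pipeline_def(enable_conditionals=True)
def custom_policy_pipe(data_dir):
    jpegs, labels = fn.readers.file(file_root=data_dir)
    images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
    # no `shape` is passed because the custom policy above contains no
    # translation augmentations that would need it
    augmented = auto_augment.apply_auto_augment(my_custom_policy(), images)
    return augmented, labels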
Accessing predefined policies
To obtain a predefined policy definition, refer to the following
functions.
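For instance, the ImageNet policy can be fetched and later run with
~nvidia.dali.auto_aug.auto_augment.apply_auto_augment; the snippet below
is only a sketch, and because use_shape=True is requested, the `shape`
argument must be passed when the policy is applied:
from nvidia.dali.auto_aug import auto_augment
image_net_policy = auto_augment.get_image_net_policy(use_shape=True)
# inside a pipeline definition:
# augmented = auto_augment.apply_auto_augment(image_net_policy, images, shape=shapes)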
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core import _Augmentation, Policy, signed_bin
from nvidia.dali.auto_aug.core._args import forbid_unused_kwargs as _forbid_unused_kwargs
from nvidia.dali.auto_aug.core._utils import \
get_translations as _get_translations, \
pretty_select as _pretty_select
from nvidia.dali.data_node import DataNode as _DataNode
try:
import numpy as np
except ImportError:
raise RuntimeError(
"Could not import numpy. DALI's automatic augmentation examples depend on numpy. "
"Please install numpy to use the examples.")
def auto_augment(
data: _DataNode,
policy_name: str = 'image_net',
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
) -> _DataNode:
"""
Applies one of the predefined policies from the AutoAugment
paper (https://arxiv.org/abs/1805.09501) to the provided batch of samples.
Args
----
data : DataNode
A batch of samples to be processed. The supported samples are images
of `HWC` layout and videos of `FHWC` layout, the supported data type is `uint8`.
policy_name : str, optional
The name of predefined policy. Acceptable values are: `image_net`,
`reduced_image_net`, `svhn`, `reduced_cifar10`. Defaults to `image_net`.
shape: DataNode or Tuple[int, int], optional
The size (height and width) of the image or frames in the video sequence
passed as the `data`. If specified, the magnitude of `translation` operations
depends on the image/frame shape and spans from 0 to `max_translate_rel * shape`.
Otherwise, the magnitude range is `[0, max_translate_abs]` for any sample.
fill_value: int, optional
A value to be used as a padding for images/frames transformed with warp_affine ops
(translation, shear and rotate). If `None` is specified, the images/frames are padded
with the border value repeated (clamped).
interp_type: types.DALIInterpType, optional
Interpolation method used by the warp_affine ops (translation, shear and rotate).
Supported values are `types.INTERP_LINEAR` (default) and `types.INTERP_NN`.
max_translate_abs: int or (int, int), optional
Only valid when `shape` is not provided. Specifies the maximal shift (in pixels)
in the translation augmentation. If a tuple is specified, the first component limits
height, the second the width. Defaults to 250, which means the maximal magnitude
shifts the image by 250 pixels.
max_translate_rel: float or (float, float), optional
Only valid when `shape` argument is provided. Specifies the maximal shift as a
fraction of image shape in the translation augmentations.
If a tuple is specified, the first component limits the height, the second the width.
Defaults to 1, which means the maximal magnitude shifts the image entirely out of
the canvas.
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
Returns
-------
DataNode
A batch of transformed samples.
"""
predefined_policies = {
'image_net': get_image_net_policy,
'reduced_image_net': get_reduced_image_net_policy,
'svhn': get_svhn_policy,
'reduced_cifar10': get_reduced_cifar10_policy,
}
policies_without_translation = ('reduced_image_net', )
shape_related_args = (
        (shape, 'shape'),
        (max_translate_abs, 'max_translate_abs'),
        (max_translate_rel, 'max_translate_rel'),
)
if not isinstance(policy_name, str) or policy_name not in predefined_policies:
policies_str = ", ".join([f"`{name}`" for name in predefined_policies.keys()])
raise Exception(
f"The `policy_name` must be a string that takes one of the values: {policies_str}")
if policy_name in policies_without_translation:
shape_arg = next((name for arg, name in shape_related_args if arg is not None), None)
if shape_arg is not None:
raise Exception(
f"The policy `{policy_name}` does not contain any augmentations that rely on the "
f"image shape. The `{shape_arg}` argument must not be specified in that case.")
aug_kwargs = {"fill_value": fill_value, "interp_type": interp_type}
use_shape = shape is not None
if use_shape:
aug_kwargs["shape"] = shape
if policy_name in policies_without_translation:
policy = predefined_policies[policy_name]()
else:
policy = predefined_policies[policy_name](use_shape=use_shape,
max_translate_abs=max_translate_abs,
max_translate_rel=max_translate_rel)
return apply_auto_augment(policy, data, seed, **aug_kwargs)
def auto_augment_image_net(
data: _DataNode,
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
) -> _DataNode:
"""
Applies `image_net_policy` in AutoAugment (https://arxiv.org/abs/1805.09501)
fashion to the provided batch of samples.
Equivalent to :meth:`~nvidia.dali.auto_aug.auto_augment.auto_augment` call with ``policy_name``
specified to ``'image_net'``.
See :meth:`~nvidia.dali.auto_aug.auto_augment.auto_augment` function for details.
"""
return auto_augment(data, "image_net", shape, fill_value, interp_type, max_translate_abs,
max_translate_rel, seed)
def apply_auto_augment(policy: Policy, data: _DataNode, seed: Optional[int] = None,
**kwargs) -> _DataNode:
"""
Applies AutoAugment (https://arxiv.org/abs/1805.09501) augmentation scheme to the
provided batch of samples.
Args
----
policy: Policy
Set of sequences of augmentations to be applied in AutoAugment fashion.
data : DataNode
A batch of samples to be processed.
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
kwargs:
A dictionary of extra parameters to be passed when calling augmentations.
The signature of each augmentation is checked for any extra arguments and if
the name of the argument matches one from the `kwargs`, the value is
passed as an argument. For example, some augmentations from the default
AutoAugment suite accept ``shape``, ``fill_value`` and ``interp_type``.
Returns
-------
DataNode
A batch of transformed samples.
"""
if len(policy.sub_policies) == 0:
raise Exception(f"Cannot run empty policy. Got {policy} in `apply_auto_augment` call.")
max_policy_len = max(len(sub_policy) for sub_policy in policy.sub_policies)
should_run = fn.random.uniform(range=[0, 1], shape=(max_policy_len, ), dtype=types.FLOAT,
seed=seed)
sub_policy_id = fn.random.uniform(values=list(range(len(policy.sub_policies))), seed=seed,
dtype=types.INT32)
run_probabilities = _sub_policy_to_probability_map(policy)[sub_policy_id]
magnitude_bins = _sub_policy_to_magnitude_bin_map(policy)[sub_policy_id]
aug_ids, augmentations = _sub_policy_to_augmentation_map(policy)
aug_ids = aug_ids[sub_policy_id]
if any(aug.randomly_negate for aug in policy.augmentations.values()):
magnitude_bins = signed_bin(magnitude_bins, seed=seed, shape=(max_policy_len, ))
_forbid_unused_kwargs(policy.augmentations.values(), kwargs, 'apply_auto_augment')
for stage_id in range(max_policy_len):
if should_run[stage_id] < run_probabilities[stage_id]:
op_kwargs = dict(data=data, magnitude_bin=magnitude_bins[stage_id],
num_magnitude_bins=policy.num_magnitude_bins, **kwargs)
data = _pretty_select(augmentations[stage_id], aug_ids[stage_id], op_kwargs,
auto_aug_name='apply_auto_augment',
ref_suite_name='get_image_net_policy')
return data
def get_image_net_policy(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> Policy:
"""
Creates augmentation policy tuned for the ImageNet as described in
AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 250.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 250, 1.
_, translate_y = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
shear_x = a.shear_x.augmentation((0, 0.3), True)
shear_y = a.shear_y.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
color = a.color.augmentation((0.1, 1.9), False, None)
posterize = a.posterize.augmentation((0, 4), False, a.poster_mask_uint8)
solarize = a.solarize.augmentation((0, 256), False)
solarize_add = a.solarize_add.augmentation((0, 110), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="ImageNetPolicy", num_magnitude_bins=11, sub_policies=[
[(equalize, 0.8, None), (shear_y, 0.8, 4)],
[(color, 0.4, 9), (equalize, 0.6, None)],
[(color, 0.4, 1), (rotate, 0.6, 8)],
[(solarize, 0.8, 3), (equalize, 0.4, None)],
[(solarize, 0.4, 2), (solarize, 0.6, 2)],
[(color, 0.2, 0), (equalize, 0.8, None)],
[(equalize, 0.4, None), (solarize_add, 0.8, 3)],
[(shear_x, 0.2, 9), (rotate, 0.6, 8)],
[(color, 0.6, 1), (equalize, 1.0, None)],
[(invert, 0.4, None), (rotate, 0.6, 0)],
[(equalize, 1.0, None), (shear_y, 0.6, 3)],
[(color, 0.4, 7), (equalize, 0.6, None)],
[(posterize, 0.4, 6), (auto_contrast, 0.4, None)],
[(solarize, 0.6, 8), (color, 0.6, 9)],
[(solarize, 0.2, 4), (rotate, 0.8, 9)],
[(rotate, 1.0, 7), (translate_y, 0.8, 9)],
[(solarize, 0.8, 4)],
[(shear_y, 0.8, 0), (color, 0.6, 4)],
[(color, 1.0, 0), (rotate, 0.6, 2)],
[(equalize, 0.8, None)],
[(equalize, 1.0, None), (auto_contrast, 0.6, None)],
[(shear_y, 0.4, 7), (solarize_add, 0.6, 7)],
[(posterize, 0.8, 2), (solarize, 0.6, 10)],
[(solarize, 0.6, 8), (equalize, 0.6, None)],
[(color, 0.8, 6), (rotate, 0.4, 5)],
])
def get_reduced_cifar10_policy(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> Policy:
"""
Creates augmentation policy tuned with the reduced CIFAR-10 as described
in AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 250.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 250, 1.
translate_x, translate_y = _get_translations(use_shape, default_translate_abs,
default_translate_rel, max_translate_abs,
max_translate_rel)
shear_y = a.shear_y.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
brightness = a.brightness.augmentation((0.1, 1.9), False, None)
color = a.color.augmentation((0.1, 1.9), False, None)
contrast = a.contrast.augmentation((0.1, 1.9), False, None)
sharpness = a.sharpness.augmentation((0.1, 1.9), False, a.sharpness_kernel_shifted)
posterize = a.posterize.augmentation((0, 4), False, a.poster_mask_uint8)
solarize = a.solarize.augmentation((0, 256), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="ReducedCifar10Policy", num_magnitude_bins=11, sub_policies=[
[(invert, 0.1, None), (contrast, 0.2, 6)],
[(rotate, 0.7, 2), (translate_x, 0.3, 9)],
[(sharpness, 0.8, 1), (sharpness, 0.9, 3)],
[(shear_y, 0.5, 8), (translate_y, 0.7, 9)],
[(auto_contrast, 0.5, None), (equalize, 0.9, None)],
[(shear_y, 0.2, 7), (posterize, 0.3, 7)],
[(color, 0.4, 3), (brightness, 0.6, 7)],
[(sharpness, 0.3, 9), (brightness, 0.7, 9)],
[(equalize, 0.6, None), (equalize, 0.5, None)],
[(contrast, 0.6, 7), (sharpness, 0.6, 5)],
[(color, 0.7, 7), (translate_x, 0.5, 8)],
[(equalize, 0.3, None), (auto_contrast, 0.4, None)],
[(translate_y, 0.4, 3), (sharpness, 0.2, 6)],
[(brightness, 0.9, 6), (color, 0.2, 8)],
[(solarize, 0.5, 2)],
[(equalize, 0.2, None), (auto_contrast, 0.6, None)],
[(equalize, 0.2, None), (equalize, 0.6, None)],
[(color, 0.9, 9), (equalize, 0.6, None)],
[(auto_contrast, 0.8, None), (solarize, 0.2, 8)],
[(brightness, 0.1, 3), (color, 0.7, 0)],
[(solarize, 0.4, 5), (auto_contrast, 0.9, None)],
[(translate_y, 0.9, 9), (translate_y, 0.7, 9)],
[(auto_contrast, 0.9, None), (solarize, 0.8, 3)],
[(equalize, 0.8, None), (invert, 0.1, None)],
[(translate_y, 0.7, 9), (auto_contrast, 0.9, None)],
])
def get_svhn_policy(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> Policy:
"""
Creates augmentation policy tuned with the SVHN as described
in AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 250.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 250, 1.
translate_x, translate_y = _get_translations(use_shape, default_translate_abs,
default_translate_rel, max_translate_abs,
max_translate_rel)
shear_x = a.shear_x.augmentation((0, 0.3), True)
shear_y = a.shear_y.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
contrast = a.contrast.augmentation((0.1, 1.9), False, None)
solarize = a.solarize.augmentation((0, 256), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="SvhnPolicy", num_magnitude_bins=11, sub_policies=[
[(shear_x, 0.9, 4), (invert, 0.2, None)],
[(shear_y, 0.9, 8), (invert, 0.7, None)],
[(equalize, 0.6, None), (solarize, 0.6, 6)],
[(invert, 0.9, None), (equalize, 0.6, None)],
[(equalize, 0.6, None), (rotate, 0.9, 3)],
[(shear_x, 0.9, 4), (auto_contrast, 0.8, None)],
[(shear_y, 0.9, 8), (invert, 0.4, None)],
[(shear_y, 0.9, 5), (solarize, 0.2, 6)],
[(invert, 0.9, None), (auto_contrast, 0.8, None)],
[(equalize, 0.6, None), (rotate, 0.9, 3)],
[(shear_x, 0.9, 4), (solarize, 0.3, 3)],
[(shear_y, 0.8, 8), (invert, 0.7, None)],
[(equalize, 0.9, None), (translate_y, 0.6, 6)],
[(invert, 0.9, None), (equalize, 0.6, None)],
[(contrast, 0.3, 3), (rotate, 0.8, 4)],
[(invert, 0.8, None)],
[(shear_y, 0.7, 6), (solarize, 0.4, 8)],
[(invert, 0.6, None), (rotate, 0.8, 4)],
[(shear_y, 0.3, 7), (translate_x, 0.9, 3)],
[(shear_x, 0.1, 6), (invert, 0.6, None)],
[(solarize, 0.7, 2), (translate_y, 0.6, 7)],
[(shear_y, 0.8, 4), (invert, 0.8, None)],
[(shear_x, 0.7, 9), (translate_y, 0.8, 3)],
[(shear_y, 0.8, 5), (auto_contrast, 0.7, None)],
[(shear_x, 0.7, 2), (invert, 0.1, None)],
])
def get_reduced_image_net_policy() -> Policy:
"""
Creates augmentation policy tuned with the reduced ImageNet as described in
AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
"""
shear_x = a.shear_x.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
color = a.color.augmentation((0.1, 1.9), False, None)
contrast = a.contrast.augmentation((0.1, 1.9), False, None)
sharpness = a.sharpness.augmentation((0.1, 1.9), False, a.sharpness_kernel_shifted)
posterize = a.posterize.augmentation((0, 4), False, a.poster_mask_uint8)
solarize = a.solarize.augmentation((0, 256), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="ReducedImageNetPolicy",
num_magnitude_bins=11, sub_policies=[[(posterize, 0.4, 8), (rotate, 0.6, 9)],
[(solarize, 0.6, 5), (auto_contrast, 0.6, None)],
[(equalize, 0.8, None), (equalize, 0.6, None)],
[(posterize, 0.6, 7), (posterize, 0.6, 6)],
[(equalize, 0.4, None), (solarize, 0.2, 4)],
[(equalize, 0.4, None), (rotate, 0.8, 8)],
[(solarize, 0.6, 3), (equalize, 0.6, None)],
[(posterize, 0.8, 5), (equalize, 1.0, None)],
[(rotate, 0.2, 3), (solarize, 0.6, 8)],
[(equalize, 0.6, None), (posterize, 0.4, 6)],
[(rotate, 0.8, 8), (color, 0.4, 0)],
[(rotate, 0.4, 9), (equalize, 0.6, None)],
[(equalize, 0.8, None)],
[(invert, 0.6, None), (equalize, 1.0, None)],
[(color, 0.6, 4), (contrast, 1.0, 8)],
[(rotate, 0.8, 8), (color, 1.0, 2)],
[(color, 0.8, 8), (solarize, 0.8, 7)],
[(sharpness, 0.4, 7), (invert, 0.6, None)],
[(shear_x, 0.6, 5), (equalize, 1.0, None)],
[(color, 0.4, 0), (equalize, 0.6, None)],
[(equalize, 0.4, None), (solarize, 0.2, 4)],
[(solarize, 0.6, 5), (auto_contrast, 0.6, None)],
[(invert, 0.6, None), (equalize, 1.0, None)],
[(color, 0.6, 4), (contrast, 1.0, 8)],
[(equalize, 0.8, None), (equalize, 0.6, None)]])
def _sub_policy_to_probability_map(policy: Policy) -> _DataNode:
sub_policies = policy.sub_policies
max_policy_len = max(len(sub_policy) for sub_policy in sub_policies)
prob = np.array([[0. for _ in range(max_policy_len)] for _ in range(len(sub_policies))],
dtype=np.float32)
for sub_policy_id, sub_policy in enumerate(sub_policies):
for stage_idx, (aug_name, p, mag) in enumerate(sub_policy):
prob[sub_policy_id, stage_idx] = p
return types.Constant(prob)
def _sub_policy_to_magnitude_bin_map(policy: Policy) -> _DataNode:
sub_policies = policy.sub_policies
max_policy_len = max(len(sub_policy) for sub_policy in sub_policies)
magnitude_bin = np.array([[0 for _ in range(max_policy_len)] for _ in range(len(sub_policies))],
dtype=np.int32)
for sub_policy_id, sub_policy in enumerate(sub_policies):
for stage_idx, (aug_name, p, mag) in enumerate(sub_policy):
# use dummy value instead of None, it will be ignored anyway
val = mag if mag is not None else -999
magnitude_bin[sub_policy_id, stage_idx] = val
return types.Constant(magnitude_bin)
def _sub_policy_to_augmentation_matrix_map(
policy: Policy) -> Tuple[np.ndarray, List[List[_Augmentation]]]:
"""
Creates a matrix of operators to be called for given sub policy at given stage.
The output is a tuple `(m, augments)`, where `augments` is a list of augmentations per stage
- each entry contains a reduced list of unique augmentations used in a corresponding stage.
The `m` matrix contains the mapping from the original sub_policy_id, to the index within the
reduced list, for every stage. I.e., for policy `sub_policy_idx`, as the `stage_idx`-ith
operation in a sequence, the `augments[stage_idx][m[sub_policy_idx][stage_idx]]` operator
should be called.
"""
sub_policies = policy.sub_policies
max_policy_len = max(len(sub_policy) for sub_policy in sub_policies)
augmentations = [] # list of augmentations in each stage
for stage_idx in range(max_policy_len):
stage_augments = set()
stage_augments_list = []
for sub_policy in sub_policies:
if stage_idx < len(sub_policy):
aug, _, _ = sub_policy[stage_idx]
if aug not in stage_augments:
stage_augments.add(aug)
stage_augments_list.append(aug)
augmentations.append(stage_augments_list + [a.identity])
identity_id = [len(stage_augments) - 1 for stage_augments in augmentations]
augment_to_id = [{augmentation: i
for i, augmentation in enumerate(stage_augments)}
for stage_augments in augmentations]
augments_by_id = np.array([[identity_id[stage_idx] for stage_idx in range(max_policy_len)]
for _ in range(len(sub_policies))], dtype=np.int32)
for sub_policy_id, sub_policy in enumerate(sub_policies):
for stage_idx, (augment, p, mag) in enumerate(sub_policy):
augments_by_id[sub_policy_id, stage_idx] = augment_to_id[stage_idx][augment]
return augments_by_id, augmentations
def _sub_policy_to_augmentation_map(policy: Policy) -> Tuple[_DataNode, List[List[_Augmentation]]]:
matrix, augments = _sub_policy_to_augmentation_matrix_map(policy)
    return types.Constant(matrix), augments

nvidia__dali | rand_augment.rst | Module doc | Generate documentation for this module | Apache License 2.0 | nvidia__dali/docs/auto_aug/rand_augment.rst | ["nvidia__dali/dali/python/nvidia/dali/auto_aug/rand_augment.py"]

RandAugment
RandAugment, as described in https://arxiv.org/abs/1909.13719, is an
automatic augmentation scheme that simplifies AutoAugment. For
RandAugment, the policy is just a list of
augmentations <Augmentation operations> with a search space limited to
two parameters n and m.
- n describes how many randomly selected augmentations are applied to
an input sample.
- m is a fixed magnitude used for all of the augmentations.
For example, to use 3 random operations for each sample, each with a
fixed magnitude of 17, you can call
~nvidia.dali.auto_aug.rand_augment.rand_augment, as follows:
from nvidia.dali import pipeline_def
from nvidia.dali.auto_aug import rand_augment
@pipeline_def(enable_conditionals=True)
def training_pipe(data_dir, image_size):
jpegs, labels = fn.readers.file(file_root=data_dir, ...)
shapes = fn.peek_image_shape(jpegs)
images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
augmented_images = rand_augment.rand_augment(images, shape=shapes, n=3, m=17)
resized_images = fn.resize(augmented_images, size=[image_size, image_size])
return resized_images, labels
The ~nvidia.dali.auto_aug.rand_augment.rand_augment uses the set of
augmentations described in the paper. To apply custom augmentations,
refer to this section <Invoking custom RandAugment policies>.
Warning
You need to define the pipeline with the
@pipeline_def <nvidia.dali.pipeline_def> decorator and set
enable_conditionals to True to use automatic augmentations.
Invoking custom RandAugment policies
Thanks to the simpler nature of RandAugment, its policies are defined as
lists of augmentations <Augmentation operations> that can be passed as
the first argument to
~nvidia.dali.auto_aug.rand_augment.apply_rand_augment when it is invoked
inside a pipeline definition.
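A minimal sketch of such a custom list is shown below; the operations
come from the nvidia.dali.auto_aug.augmentations module, while data_dir
and the overridden magnitude ranges are only illustrative assumptions:
from nvidia.dali import fn, types, pipeline_def
from nvidia.dali.auto_aug import augmentations, rand_augment
def my_augmentations():
    # reuse predefined operations, optionally overriding their magnitude ranges
    return [
        augmentations.shear_x.augmentation((0, 0.5), True),
        augmentations.rotate.augmentation((0, 40), True),
        augmentations.equalize,
    ]
@pipeline_def(enable_conditionals=True)
def training_pipe(data_dir):
    jpegs, labels = fn.readers.file(file_root=data_dir)
    images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
    augmented = rand_augment.apply_rand_augment(my_augmentations(), images, n=2, m=15)
    return augmented, labels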
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List, Optional, Tuple, Union
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core import signed_bin, _Augmentation
from nvidia.dali.auto_aug.core._args import \
forbid_unused_kwargs as _forbid_unused_kwargs
from nvidia.dali.auto_aug.core._utils import \
get_translations as _get_translations, \
pretty_select as _pretty_select
from nvidia.dali.data_node import DataNode as _DataNode
def rand_augment(
data: _DataNode,
n: int,
m: int,
num_magnitude_bins: int = 31,
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
monotonic_mag: bool = True,
excluded: Optional[List[str]] = None,
) -> _DataNode:
"""
Applies RandAugment (https://arxiv.org/abs/1909.13719) augmentation scheme to the
provided batch of samples.
Args
----
data : DataNode
A batch of samples to be processed. The supported samples are images
of `HWC` layout and videos of `FHWC` layout, the supported data type is `uint8`.
n: int
The number of randomly sampled operations to be applied to a sample.
m: int
A magnitude (strength) of each operation to be applied, it must be an integer
within ``[0, num_magnitude_bins - 1]``.
num_magnitude_bins: int, optional
The number of bins to divide the magnitude ranges into.
shape: DataNode or Tuple[int, int], optional
The size (height and width) of the image or frames in the video sequence
passed as the `data`. If specified, the magnitude of `translation` operations
depends on the image/frame shape and spans from 0 to `max_translate_rel * shape`.
Otherwise, the magnitude range is `[0, max_translate_abs]` for any sample.
fill_value: int, optional
A value to be used as a padding for images/frames transformed with warp_affine ops
(translation, shear and rotate). If `None` is specified, the images/frames are padded
with the border value repeated (clamped).
interp_type: types.DALIInterpType, optional
Interpolation method used by the warp_affine ops (translation, shear and rotate).
Supported values are `types.INTERP_LINEAR` (default) and `types.INTERP_NN`.
max_translate_abs: int or (int, int), optional
Only valid when ``shapes`` is not provided. Specifies the maximal shift (in pixels)
in the translation augmentation. If a tuple is specified, the first component limits
height, the second the width. Defaults to 100, which means the maximal magnitude
shifts the image by 100 pixels.
max_translate_rel: float or (float, float), optional
Only valid when ``shapes`` argument is provided. Specifies the maximal shift as a
fraction of image shape in the translation augmentations.
If a tuple is specified, the first component limits the height, the second the width.
Defaults to around `0.45` (100/224).
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
monotonic_mag: bool, optional
There are two flavours of RandAugment available in different frameworks. For the default
``monotonic_mag=True`` the strength of operations that accept magnitude bins increases with
the increasing bins. If set to False, the magnitude ranges for some color operations differ.
There, the :meth:`~nvidia.dali.auto_aug.augmentations.posterize` and
:meth:`~nvidia.dali.auto_aug.augmentations.solarize` strength decreases with increasing
magnitude bins and enhance operations (
:meth:`~nvidia.dali.auto_aug.augmentations.brightness`,
:meth:`~nvidia.dali.auto_aug.augmentations.contrast`,
:meth:`~nvidia.dali.auto_aug.augmentations.color`,
:meth:`~nvidia.dali.auto_aug.augmentations.sharpness`) use (0.1, 1.9) range,
which means that the strength decreases the closer the magnitudes are to the center
of the range. See
:meth:`~nvidia.dali.auto_aug.rand_augment.get_rand_augment_non_monotonic_suite`.
excluded: List[str], optional
A list of names of the operations to be excluded from the default suite of augmentations.
If, instead of just limiting the set of operations, you need to include some custom
operations or fine-tune the existing ones, you can use the
:meth:`~nvidia.dali.auto_aug.rand_augment.apply_rand_augment` directly, which accepts
a list of augmentations.
Returns
-------
DataNode
A batch of transformed samples.
"""
aug_kwargs = {"fill_value": fill_value, "interp_type": interp_type}
use_shape = shape is not None
if use_shape:
aug_kwargs["shape"] = shape
if monotonic_mag:
augmentations = get_rand_augment_suite(use_shape, max_translate_abs, max_translate_rel)
else:
augmentations = get_rand_augment_non_monotonic_suite(use_shape, max_translate_abs,
max_translate_rel)
augmentation_names = set(aug.name for aug in augmentations)
assert len(augmentation_names) == len(augmentations)
excluded = excluded or []
for name in excluded:
if name not in augmentation_names:
raise Exception(f"The `{name}` was specified in `excluded`, but the RandAugment suite "
f"does not contain augmentation with this name. "
f"The augmentations in the suite are: {', '.join(augmentation_names)}.")
selected_augments = [aug for aug in augmentations if aug.name not in excluded]
return apply_rand_augment(selected_augments, data, n, m,
num_magnitude_bins=num_magnitude_bins, seed=seed, **aug_kwargs)
def apply_rand_augment(augmentations: List[_Augmentation], data: _DataNode, n: int, m: int,
num_magnitude_bins: int = 31, seed: Optional[int] = None,
**kwargs) -> _DataNode:
"""
Applies the list of ``augmentations`` in RandAugment (https://arxiv.org/abs/1909.13719) fashion.
Each sample is transformed with ``n`` operations in a sequence randomly selected from the
``augmentations`` list. Each operation uses ``m`` as the magnitude bin.
Args
----
augmentations : List[core._Augmentation]
List of augmentations to be sampled and applied in RandAugment fashion.
data : DataNode
A batch of samples to be processed.
n: int
The number of randomly sampled operations to be applied to a sample.
m: int
A magnitude bin (strength) of each operation to be applied, it must be an integer
within ``[0, num_magnitude_bins - 1]``.
num_magnitude_bins: int
The number of bins to divide the magnitude ranges into.
seed: int
Seed to be used to randomly sample operations (and to negate magnitudes).
kwargs:
Any extra parameters to be passed when calling `augmentations`.
The signature of each augmentation is checked for any extra arguments and if
the name of the argument matches one from the `kwargs`, the value is
passed as an argument. For example, some augmentations from the default
RandAugment suite accept ``shapes``, ``fill_value`` and ``interp_type``.
Returns
-------
DataNode
A batch of transformed samples.
"""
if not isinstance(n, int) or n < 0:
raise Exception(
f"The number of operations to apply `n` must be a non-negative integer, got {n}.")
if not isinstance(num_magnitude_bins, int) or num_magnitude_bins < 1:
raise Exception(
f"The `num_magnitude_bins` must be a positive integer, got {num_magnitude_bins}.")
if not isinstance(m, int) or not 0 <= m < num_magnitude_bins:
raise Exception(f"The magnitude bin `m` must be an integer from "
f"`[0, {num_magnitude_bins - 1}]` range. Got {m}.")
if n == 0:
warnings.warn(
"The `apply_rand_augment` was called with `n=0`, "
"no augmentation will be applied.", Warning)
return data
if len(augmentations) == 0:
raise Exception("The `augmentations` list cannot be empty, unless n=0. "
"Got empty list in `apply_rand_augment` call.")
shape = tuple() if n == 1 else (n, )
op_idx = fn.random.uniform(values=list(range(len(augmentations))), seed=seed, shape=shape,
dtype=types.INT32)
use_signed_magnitudes = any(aug.randomly_negate for aug in augmentations)
mag_bin = signed_bin(m, seed=seed, shape=shape) if use_signed_magnitudes else m
_forbid_unused_kwargs(augmentations, kwargs, 'apply_rand_augment')
for level_idx in range(n):
level_mag_bin = mag_bin if not use_signed_magnitudes or n == 1 else mag_bin[level_idx]
op_kwargs = dict(data=data, magnitude_bin=level_mag_bin,
num_magnitude_bins=num_magnitude_bins, **kwargs)
level_op_idx = op_idx if n == 1 else op_idx[level_idx]
data = _pretty_select(augmentations, level_op_idx, op_kwargs,
auto_aug_name='apply_rand_augment',
ref_suite_name='get_rand_augment_suite')
return data
def get_rand_augment_suite(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> List[_Augmentation]:
"""
Creates a list of RandAugment augmentations.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults 100.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to around `0.45` (100/224).
"""
default_translate_abs, default_translate_rel = 100, 100 / 224
# translations = [translate_x, translate_y] with adjusted magnitude range
translations = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
# [.augmentation((mag_low, mag_high), randomly_negate_mag, magnitude_to_param_custom_mapping]
return translations + [
a.shear_x.augmentation((0, 0.3), True),
a.shear_y.augmentation((0, 0.3), True),
a.rotate.augmentation((0, 30), True),
a.brightness.augmentation((0, 0.9), True, a.shift_enhance_range),
a.contrast.augmentation((0, 0.9), True, a.shift_enhance_range),
a.color.augmentation((0, 0.9), True, a.shift_enhance_range),
a.sharpness.augmentation((0, 0.9), True, a.sharpness_kernel),
a.posterize.augmentation((8, 4), False, a.poster_mask_uint8),
# solarization strength increases with decreasing magnitude (threshold)
a.solarize.augmentation((256, 0)),
a.equalize,
a.auto_contrast,
a.identity,
]
def get_rand_augment_non_monotonic_suite(
use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> List[_Augmentation]:
"""
Similarly to :meth:`~nvidia.dali.auto_aug.rand_augment.get_rand_augment_suite` creates a list
of RandAugment augmentations.
This variant uses brightness, contrast, color, sharpness, posterize, and solarize
with magnitude ranges as used by the AutoAugment. However, those ranges do not meet
the intuition that the bigger magnitude bin corresponds to stronger operation.
"""
default_translate_abs, default_translate_rel = 100, 100 / 224
# translations = [translate_x, translate_y] with adjusted magnitude range
translations = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
return translations + [
a.shear_x.augmentation((0, 0.3), True),
a.shear_y.augmentation((0, 0.3), True),
a.rotate.augmentation((0, 30), True),
a.brightness.augmentation((0.1, 1.9), False, None),
a.contrast.augmentation((0.1, 1.9), False, None),
a.color.augmentation((0.1, 1.9), False, None),
a.sharpness.augmentation((0.1, 1.9), False, a.sharpness_kernel_shifted),
a.posterize.augmentation((0, 4), False, a.poster_mask_uint8),
a.solarize.augmentation((0, 256), False, None),
a.equalize,
a.auto_contrast,
a.identity,
    ]

numpy__numpy | distutils.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | numpy__numpy/doc/source/reference/distutils.rst | ["numpy__numpy/numpy/distutils/misc_util.py"] | numpy__numpy/numpy/distutils

NumPy provides enhanced distutils functionality to make it easier to
build and install sub-packages, auto-generate code, and extension
modules that use Fortran-compiled libraries. To use features of NumPy
distutils, use the setup <core.setup> command from numpy.distutils.core.
A useful Configuration
<misc_util.Configuration> class is also provided in
numpy.distutils.misc_util that can make it easier to construct keyword
arguments to pass to the setup function (by passing the dictionary
obtained from the todict() method of the class). More information is
available in the distutils-user-guide.
The choice and location of linked libraries such as BLAS and LAPACK as
well as include paths and other such build options can be specified in a
site.cfg file located in the NumPy root repository or a .numpy-site.cfg
file in your home directory. See the site.cfg.example example file
included in the NumPy repository or sdist for documentation.
Configuration class
Construct a configuration instance for the given package name. If
parent_name is not None, then construct the package as a sub-package of
the parent_name package. If top_path and package_path are None then they
are assumed equal to the path of the file this instance was created in.
The setup.py files in the numpy distribution are good examples of how to
use the Configuration instance.
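A typical setup.py built on numpy.distutils follows the pattern below;
the package and extension names are placeholders used only for
illustration:
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('mypkg', parent_package, top_path)
    config.add_extension('_core', sources=['_core.c'])
    return config
if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(configuration=configuration)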
Building Installable C libraries
Conventional C libraries (installed through add_library) are not
installed, and are just used during the build (they are statically
linked). An installable C library is a pure C library, which does not
depend on the python C runtime, and is installed such that it may be
used by third-party packages. To build and install the C library, you
just use the method add_installed_library instead of add_library, which
takes the same arguments except for an additional install_dir argument:
.. hidden in a comment so as to be included in refguide but not rendered documentation
>>> import numpy.distutils.misc_util
>>> config = np.distutils.misc_util.Configuration(None, '', '.')
>>> with open('foo.c', 'w') as f: pass
>>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib')
npy-pkg-config files
To make the necessary build options available to third parties, you
could use the npy-pkg-config mechanism implemented in numpy.distutils.
This mechanism is based on a .ini file which contains all the options. A
.ini file is very similar to .pc files as used by the pkg-config unix
utility:
[meta]
Name: foo
Version: 1.0
Description: foo library
[variables]
prefix = /home/user/local
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir} -lfoo
Generally, the file needs to be generated during the build, since it
needs some information known at build time only (e.g. prefix). This is
mostly automatic if one uses the Configuration method
add_npy_pkg_config. Assuming we have a template file foo.ini.in as
follows:
[meta]
Name: foo
Version: @version@
Description: foo library
[variables]
prefix = @prefix@
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir} -lfoo
and the following code in setup.py:
>>> config.add_installed_library('foo', sources=['foo.c'], install_dir='lib')
>>> subst = {'version': '1.0'}
>>> config.add_npy_pkg_config('foo.ini.in', 'lib', subst_dict=subst)
This will install the file foo.ini into the directory package_dir/lib,
and the foo.ini file will be generated from foo.ini.in, where each
@version@ will be replaced by subst_dict['version']. The dictionary has
an additional prefix substitution rule automatically added, which
contains the install prefix (since this is not easy to get from
setup.py). npy-pkg-config files can also be installed at the same
location as used for numpy, using the path returned from get_npy_pkg_dir
function.
Reusing a C library from another package
The build information is easily retrieved from the get_info function in
`numpy.distutils.misc_util`:
>>> info = np.distutils.misc_util.get_info('npymath')
>>> config.add_extension('foo', sources=['foo.c'], extra_info=info)
<numpy.distutils.extension.Extension('foo') at 0x...>
An additional list of paths to look for .ini files can be given to
get_info.
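For example, assuming foo.ini was installed into a non-default
directory, that directory can be passed explicitly (the package name and
the path below are hypothetical):
>>> info = np.distutils.misc_util.get_info('foo', ['build/lib'])  # doctest: +SKIP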
Conversion of .src files
NumPy distutils supports automatic conversion of source files named
<somefile>.src. This facility can be used to maintain very similar code
blocks requiring only simple changes between blocks. During the build
phase of setup, if a template file named <somefile>.src is encountered,
a new file named <somefile> is constructed from the template and placed
in the build directory to be used instead. Two forms of template
conversion are supported. The first form occurs for files named
<file>.ext.src where ext is a recognized Fortran extension (f, f90, f95,
f77, for, ftn, pyf). The second form is used for all other cases. See
templating.
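For the non-Fortran case the template syntax uses named repeat blocks;
the short, illustrative example below (the function name is arbitrary)
shows what a .c.src file could contain:
/**begin repeat
 * #TYPE = FLOAT, DOUBLE#
 * #type = float, double#
 */
static @type@
@TYPE@_square(@type@ x)
{
    return x * x;
}
/**end repeat**/
During the build this expands into FLOAT_square and DOUBLE_square
functions operating on float and double values, respectively.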
import os
import re
import sys
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
import multiprocessing
import textwrap
import importlib.util
from threading import local as tlocal
from functools import reduce
import distutils
from distutils.errors import DistutilsError
# stores temporary directory of each thread to only create one per thread
_tdata = tlocal()
# store all created temporary directories so they can be deleted on exit
_tmpdirs = []
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
atexit.register(clean_up_temporary_directory)
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors','red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32','mingw32', 'all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame','minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs','sanitize_cxx_flags',
'exec_mod_from_location']
class InstallableLib:
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def get_num_build_jobs():
"""
Get number of parallel build jobs set by the --parallel command line
argument of setup.py
If the command did not receive a setting the environment variable
NPY_NUM_BUILD_JOBS is checked. If that is unset, return the number of
processors on the system, with a maximum of 8 (to prevent
overloading the system if there a lot of CPUs).
Returns
-------
out : int
number of parallel jobs that can be run
"""
from numpy.distutils.core import get_distribution
try:
cpu_count = len(os.sched_getaffinity(0))
except AttributeError:
cpu_count = multiprocessing.cpu_count()
cpu_count = min(cpu_count, 8)
envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count))
dist = get_distribution()
# may be None during configuration
if dist is None:
return envjobs
# any of these three may have the job set, take the largest
cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None),
getattr(dist.get_command_obj('build_ext'), 'parallel', None),
getattr(dist.get_command_obj('build_clib'), 'parallel', None))
if all(x is None for x in cmdattr):
return envjobs
else:
return max(x for x in cmdattr if x is not None)
def quote_args(args):
"""Quote list of arguments.
.. deprecated:: 1.22.
"""
import warnings
warnings.warn('"quote_args" is deprecated.',
DeprecationWarning, stacklevel=2)
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
        if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
split = name.split('/')
return os.path.join(*split)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
# Use realpath to avoid issues with symlinked dirs (see gh-7707)
pd = os.path.realpath(os.path.abspath(parent_path))
apath = os.path.realpath(os.path.abspath(path))
if len(apath) < len(pd):
return path
if apath == pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep!= '/':
joined = joined.replace('/', os.path.sep)
return minrelpath(joined)
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path, '_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path, '_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
with open(config_file) as fid:
mathlibs = []
s = '#define MATHLIB'
for line in fid:
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.', 1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..', j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i], l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
def sorted_glob(fileglob):
"""sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
def _fix_paths(paths, local_path, include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths), repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = sorted_glob(n)
p2 = sorted_glob(njoin(local_path, n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' %
(local_path, n))
else:
n2 = njoin(local_path, n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' %
(local_path, n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n, local_path, include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
def make_temp_file(suffix='', prefix='', text=True):
if not hasattr(_tdata, 'tempdir'):
_tdata.tempdir = tempfile.mkdtemp()
_tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None, bold=False):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s,'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
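# Editor's note -- illustrative only: when terminal_has_colors() is true these
# helpers wrap text in ANSI escape sequences, e.g. green_text('ok') returns
# '\x1b[32mok\x1b[0m'; on terminals without colour support the text is
# returned unchanged.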
#########################
def cyg2win32(path: str) -> str:
"""Convert a path from Cygwin-native to Windows-native.
Uses the cygpath utility (part of the Base install) to do the
actual conversion. Falls back to returning the original path if
this fails.
Handles the default ``/cygdrive`` mount prefix as well as the
``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such
as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or
``/home/username``
Parameters
----------
path : str
The path to convert
Returns
-------
converted_path : str
The converted path
Notes
-----
Documentation for cygpath utility:
https://cygwin.com/cygwin-ug-net/cygpath.html
Documentation for the C function it wraps:
https://cygwin.com/cygwin-api/func-cygwin-conv-path.html
"""
if sys.platform!= "cygwin":
return path
return subprocess.check_output(
["/usr/bin/cygpath", "--windows", path], text=True
)
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE', '')=='msys':
return True
if os.environ.get('MSYSTEM', '')=='MINGW32':
return True
return False
def msvc_runtime_version():
"Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
msc_pos = sys.version.find('MSC v.')
if msc_pos!= -1:
msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
else:
msc_ver = None
return msc_ver
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
ver = msvc_runtime_major ()
if ver:
if ver < 140:
return "msvcr%i" % ver
else:
return "vcruntime%i" % ver
else:
return None
def msvc_runtime_major():
"Return major version of MSVC runtime coded like get_build_msvc_version"
major = {1300: 70, # MSVC 7.0
1310: 71, # MSVC 7.1
1400: 80, # MSVC 8
1500: 90, # MSVC 9 (aka 2008)
1600: 100, # MSVC 10 (aka 2010)
1900: 140, # MSVC 14 (aka 2015)
}.get(msvc_runtime_version(), None)
return major
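# Editor's note -- illustrative only: on a CPython built with "MSC v.1900"
# (Visual Studio 2015) the helpers above would report
#     msvc_runtime_version() -> 1900
#     msvc_runtime_major()   -> 140
#     msvc_runtime_library() -> 'vcruntime140'
# and all three return None on non-MSVC builds.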
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match
fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match
f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)', re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
with open(source) as f:
for line in f:
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
return isinstance(s, str)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except Exception:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
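# Editor's illustrative sketch (not part of the upstream module) of the small
# predicates above:
#     >>> is_string('abc'), is_sequence('abc'), is_sequence(['a', 'b'])
#     (True, False, True)
#     >>> is_glob_pattern('src/*.c'), is_glob_pattern('src/a.c')
#     (True, False)
#     >>> as_list('abc'), as_list(('a', 'b'))
#     (['abc'], ['a', 'b'])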
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
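# Editor's illustrative sketch (not part of the upstream module): sources are
# routed by file extension; note that .f90/.f95 files are additionally opened
# and scanned for `module` statements, so they must exist on disk.  With
# plain C/C++/F77 names no file access is needed:
#     >>> filter_sources(['foo.c', 'bar.cpp', 'baz.f'])
#     (['foo.c'], ['bar.cpp'], ['baz.f'], [])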
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0]!= '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def _commandline_dep_string(cc_args, extra_postargs, pp_opts):
"""
Return commandline representation used to determine if a file needs
to be recompiled
"""
cmdline = 'commandline: '
cmdline += ' '.join(cc_args)
cmdline += ' '.join(extra_postargs)
cmdline += ' '.join(pp_opts) + '\n'
return cmdline
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(), abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS', '.svn', 'build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath, f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = [_m for _m in ext.sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = [_m for _m in scripts if is_string(_m)]
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources', [])
sources = [_m for _m in sources if is_string(_m)]
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends', [])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_shared_lib_extension(is_python_ext=False):
"""Return the correct file extension for shared libraries.
Parameters
----------
is_python_ext : bool, optional
Whether the shared library is a Python extension. Default is False.
Returns
-------
so_ext : str
The shared library extension.
Notes
-----
For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X,
and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on
POSIX systems according to PEP 3149.
"""
confvars = distutils.sysconfig.get_config_vars()
so_ext = confvars.get('EXT_SUFFIX', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
# unreliable (see #3182)
# darwin, windows and debug linux are wrong in 3.3.1 and older
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
# fall back to config vars for unknown platforms
# fix long extension for Python >=3.2, see PEP 3149.
if 'SOABI' in confvars:
# Does nothing unless SOABI config var exists
so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1)
return so_ext
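# Editor's note -- illustrative only: typical return values would be
#     get_shared_lib_extension(is_python_ext=True)   e.g. '.cpython-39-x86_64-linux-gnu.so' on Linux,
#                                                    or a '.pyd'-suffixed tag on Windows (from EXT_SUFFIX)
#     get_shared_lib_extension(is_python_ext=False)  '.so', '.dylib' or '.dll' depending on the platform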
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:', s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
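# Editor's note -- illustrative only: empty components are dropped, e.g.
#     dot_join('numpy', '', 'distutils') -> 'numpy.distutils'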
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
######################
class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers','scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name','version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = exec_mod_from_location(
'_'.join(n.split('.')), setup_py)
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None, the subpackage is
assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
'it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. ``*``.txt -> parent/a.txt, parent/b.txt
#. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
#. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
car.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
'/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path)!= 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
'it may be too late to add an extension '+name)
return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
'it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
When cross-compiling with numpy distutils, it might be necessary to
use modified npy-pkg-config files. Using the default/generated files
will link with the host libraries (i.e. libnpymath.a). For
cross-compilation you of course need to link with target libraries,
while using the host Python installation.
You can copy out the numpy/core/lib/npy-pkg-config directory, add a
pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
variable to point to the directory with the modified npy-pkg-config
files.
Example npymath.ini modified for cross-compilation::
[meta]
Name=npymath
Description=Portable, core math library implementing C99 standard
Version=0.1
[variables]
pkgname=numpy.core
pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
prefix=${pkgdir}
libdir=${prefix}/lib
includedir=${prefix}/include
[default]
Libs=-L${libdir} -lnpymath
Cflags=-I${includedir}
Requires=mlib
[msvc]
Libs=/LIBPATH:${libdir} npymath.lib
Cflags=/INCLUDE:${includedir}
Requires=mlib
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
try:
output = subprocess.check_output(['svnversion'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
with open(entries) as f:
fstr = f.read()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
return int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
return int(m.group('revision'))
return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
try:
output = subprocess.check_output(
['hg', 'identify', '--num'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
m = re.match(rb'(?P<revision>\d+)', output)
if m:
return int(m.group('revision'))
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
with open(branch_fn) as f:
revision0 = f.read().strip()
branch_map = {}
with open(branch_cache_fn) as f:
for line in f:
branch1, revision1 = line.split()[:2]
if revision1==revision0:
branch0 = branch1
try:
revision1 = int(revision1)
except ValueError:
continue
branch_map[branch1] = revision1
return branch_map.get(branch0)
return None
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self,'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = exec_mod_from_location(
'_'.join(n.split('.')), fn)
except ImportError as e:
self.warn(str(e))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
# Try if versioneer module
try:
version = version_module.get_versions()['version']
except AttributeError:
pass
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path, '__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in a Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory.
If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
is returned. Otherwise, a path inside the location of the numpy module is
returned.
The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining
customized npy-pkg-config.ini files for the cross-compilation
environment, and using them when cross-compiling.
"""
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d is not None:
return d
spec = importlib.util.find_spec('numpy')
d = os.path.join(os.path.dirname(spec.origin),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories where to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension extra_info argument is ANAL
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
import builtins
try:
builtins.__NUMPY_SETUP__
return True
except AttributeError:
return False
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
), stacklevel=2)
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov, str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
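# Editor's illustrative sketch (not part of the upstream module): list values
# are extended, string values are overwritten and unknown keys are added:
#     >>> d = {'libraries': ['m'], 'name': 'old'}
#     >>> dict_append(d, libraries=['npymath'], name='new', define_macros=[('A', '1')])
#     >>> d
#     {'libraries': ['m', 'npymath'], 'name': 'new', 'define_macros': [('A', '1')]}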
def appendpath(prefix, path):
if os.path.sep!= '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):])!= absprefix \
or os.path.join(path[:len(d)], path[len(d):])!= path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
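# Editor's illustrative sketch (not part of the upstream module), POSIX paths:
#     >>> appendpath('build/temp', 'src/module.c')
#     'build/temp/src/module.c'
# An absolute `path` argument is re-rooted under `prefix` by stripping the
# common prefix it shares with os.path.abspath(prefix).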
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
with open(target, 'w') as f:
f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write(textwrap.dedent("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.add_dll_directory(extra_dll_dir)
"""))
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(textwrap.dedent(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
"""
Show libraries in the system on which NumPy was built.
Print information about various resources (libraries, library
directories, include directories, etc.) in the system on which
NumPy was built.
See Also
--------
get_include : Returns the directory containing NumPy C
header files.
Notes
-----
1. Classes specifying the information to be printed are defined
in the `numpy.distutils.system_info` module.
Information may include:
* ``language``: language used to write the libraries (mostly
C or f77)
* ``libraries``: names of libraries found in the system
* ``library_dirs``: directories containing the libraries
* ``include_dirs``: directories containing library header files
* ``src_dirs``: directories containing library source files
* ``define_macros``: preprocessor macros used by
``distutils.setup``
* ``baseline``: minimum CPU features required
* ``found``: dispatched features supported in the system
* ``not found``: dispatched features that are not supported
in the system
2. NumPy BLAS/LAPACK Installation Notes
Installing a numpy wheel (``pip install numpy`` or force it
via ``pip install numpy --only-binary :numpy: numpy``) includes
an OpenBLAS implementation of the BLAS and LAPACK linear algebra
APIs. In this case, ``library_dirs`` reports the original build
time configuration as compiled with gcc/gfortran; at run time
the OpenBLAS library is in
``site-packages/numpy.libs/`` (linux), or
``site-packages/numpy/.dylibs/`` (macOS), or
``site-packages/numpy/.libs/`` (windows).
Installing numpy from source
(``pip install numpy --no-binary numpy``) searches for BLAS and
LAPACK dynamic link libraries at build time as influenced by
environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and
NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER;
or the optional file ``~/.numpy-site.cfg``.
NumPy remembers those locations and expects to load the same
libraries at run-time.
In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS
library) is in the default build-time search order after
'openblas'.
Examples
--------
>>> import numpy as np
>>> np.show_config()
blas_opt_info:
language = c
define_macros = [('HAVE_CBLAS', None)]
libraries = ['openblas', 'openblas']
library_dirs = ['/usr/local/lib']
"""
from numpy.core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + "...\n... " + v[-60:]
print(" %s = %s" % (k,v))
features_found, features_not_found = [], []
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
features_found.append(feature)
else:
features_not_found.append(feature)
print("Supported SIMD extensions in this NumPy install:")
print(" baseline = %s" % (','.join(__cpu_baseline__)))
print(" found = %s" % (','.join(features_found)))
print(" not found = %s" % (','.join(features_not_found)))
'''))
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
def get_build_architecture():
# Importing distutils.msvccompiler triggers a warning on non-Windows
# systems, so delay the import to here.
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'}
def sanitize_cxx_flags(cxxflags):
'''
Some flags are valid for C but not C++. Prune them.
'''
return [flag for flag in cxxflags if flag not in _cxx_ignore_flags]
def exec_mod_from_location(modname, modfile):
'''
Use importlib machinery to import a module `modname` from the file
`modfile`. Depending on the `spec.loader`, the module may not be
registered in sys.modules.
'''
spec = importlib.util.spec_from_file_location(modname, modfile)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
return foo |
numpy__numpy | basics.indexing.rst | Module doc | Generate documentation for this module | BSD 3-Clause New or Revised License | numpy__numpy/doc/source/user/basics.indexing.rst | [
"numpy__numpy/numpy/lib/recfunctions.py"
] | numpy__numpy/numpy | Structured arrays
Introduction
Structured arrays are ndarrays whose datatype is a composition of
simpler datatypes organized as a sequence of named fields <field>. For
example, :
>>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
>>> x
array([('Rex', 9, 81.), ('Fido', 3, 27.)],
dtype=[('name', '<U10'), ('age', '<i4'), ('weight', '<f4')])
Here x is a one-dimensional array of length two whose datatype is a
structure with three fields: 1. A string of length 10 or less named
'name', 2. a 32-bit integer named 'age', and 3. a 32-bit float named
'weight'.
If you index x at position 1 you get a structure:
>>> x[1]
np.void(('Fido', 3, 27.0), dtype=[('name', '<U10'), ('age', '<i4'), ('weight', '<f4')])
You can access and modify individual fields of a structured array by
indexing with the field name:
>>> x['age']
array([9, 3], dtype=int32)
>>> x['age'] = 5
>>> x
array([('Rex', 5, 81.), ('Fido', 5, 27.)],
dtype=[('name', '<U10'), ('age', '<i4'), ('weight', '<f4')])
Structured datatypes are designed to be able to mimic 'structs' in the C
language, and share a similar memory layout. They are meant for
interfacing with C code and for low-level manipulation of structured
buffers, for example for interpreting binary blobs. For these purposes
they support specialized features such as subarrays, nested datatypes,
and unions, and allow control over the memory layout of the structure.
Users looking to manipulate tabular data, such as stored in csv files,
may find other pydata projects more suitable, such as xarray, pandas, or
DataArray. These provide a high-level interface for tabular data
analysis and are better optimized for that use. For instance, the
C-struct-like memory layout of structured arrays in numpy can lead to
poor cache behavior in comparison.
Structured Datatypes
A structured datatype can be thought of as a sequence of bytes of a
certain length (the structure's itemsize) which is interpreted as a
collection of fields. Each field has a name, a datatype, and a byte
offset within the structure. The datatype of a field may be any numpy
datatype including other structured datatypes, and it may also be a
subarray data type which behaves like an ndarray of a specified shape.
The offsets of the fields are arbitrary, and fields may even overlap.
These offsets are usually determined automatically by numpy, but can
also be specified.
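For instance, the automatically chosen layout of a simple two-field
datatype can be inspected through its fields mapping (a minimal sketch;
the variable and field names are only illustrative):
>>> dt = np.dtype([('a', 'i4'), ('b', 'f8')])
>>> dt.itemsize                    # total bytes per element
12
>>> [(name, dt.fields[name][1]) for name in dt.names]   # byte offsets
[('a', 0), ('b', 4)]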
Structured Datatype Creation
Structured datatypes may be created using the function numpy.dtype.
There are 4 alternative forms of specification which vary in flexibility
and conciseness. These are further documented in the
Data Type Objects <arrays.dtypes.constructing> reference page, and in
summary they are:
1. A list of tuples, one tuple per field
Each tuple has the form (fieldname, datatype, shape) where shape is
optional. fieldname is a string (or tuple if titles are used, see
Field Titles <titles> below), datatype may be any object convertible
to a datatype, and shape is a tuple of integers specifying subarray
shape.
>>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
If fieldname is the empty string '', the field will be given a
default name of the form f#, where # is the integer index of the
field, counting from 0 from the left:
>>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
The byte offsets of the fields within the structure and the total
structure itemsize are determined automatically.
2. A string of comma-separated dtype specifications
In this shorthand notation any of the string dtype specifications
<arrays.dtypes.constructing> may be used in a string and separated
by commas. The itemsize and byte offsets of the fields are
determined automatically, and the field names are given the default
names f0, f1, etc. :
>>> np.dtype('i8, f4, S3')
dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
>>> np.dtype('3int8, float32, (2, 3)float64')
dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
3. A dictionary of field parameter arrays
This is the most flexible form of specification since it allows
control over the byte-offsets of the fields and the itemsize of the
structure.
The dictionary has two required keys, 'names' and 'formats', and
four optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'.
The values for 'names' and 'formats' should respectively be a list
of field names and a list of dtype specifications, of the same
length. The optional 'offsets' value should be a list of integer
byte-offsets, one for each field within the structure. If 'offsets'
is not given the offsets are determined automatically. The optional
'itemsize' value should be an integer describing the total size in
bytes of the dtype, which must be large enough to contain all the
fields. :
>>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
dtype([('col1', '<i4'), ('col2', '<f4')])
>>> np.dtype({'names': ['col1', 'col2'],
... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
dtype({'names': ['col1', 'col2'], 'formats': ['<i4', '<f4'], 'offsets': [0, 4], 'itemsize': 12})
Offsets may be chosen such that the fields overlap, though this will
mean that assigning to one field may clobber any overlapping field's
data. As an exception, fields of numpy.object_ type cannot overlap
with other fields, because of the risk of clobbering the internal
object pointer and then dereferencing it.
The optional 'aligned' value can be set to True to make the
automatic offset computation use aligned offsets (see
offsets-and-alignment), as if the 'align' keyword argument of
numpy.dtype had been set to True.
The optional 'titles' value should be a list of titles of the same
length as 'names', see Field Titles <titles> below.
4. A dictionary of field names
The keys of the dictionary are the field names and the values are
tuples specifying type and offset:
>>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
dtype([('col1', 'i1'), ('col2', '<f4')])
This form was discouraged because Python dictionaries did not
preserve order in Python versions before Python 3.6.
Field Titles <titles> may be specified by using a 3-tuple, see
below.
Manipulating and Displaying Structured Datatypes
The list of field names of a structured datatype can be found in the
names attribute of the dtype object:
>>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
>>> d.names
('x', 'y')
The dtype of each individual field can be looked up by name:
>>> d['x']
dtype('int64')
The field names may be modified by assigning to the names attribute
using a sequence of strings of the same length.
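For example (a small sketch with illustrative names), both fields of a
fresh dtype can be renamed in place:
>>> dr = np.dtype([('x', 'i8'), ('y', 'f4')])
>>> dr.names = ('a', 'b')
>>> dr
dtype([('a', '<i8'), ('b', '<f4')])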
The dtype object also has a dictionary-like attribute, fields, whose
keys are the field names (and Field Titles <titles>, see below) and
whose values are tuples containing the dtype and byte offset of each
field. :
>>> d.fields
mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
Both the names and fields attributes will equal None for unstructured
arrays. The recommended way to test if a dtype is structured is with if
dt.names is not None rather than if dt.names, to account for dtypes with
0 fields.
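A minimal sketch of such a check (the helper name is only illustrative),
including the zero-field case that a plain truthiness test would
misclassify:
>>> def is_structured(dt):
...     return dt.names is not None
>>> is_structured(np.dtype('f8'))
False
>>> is_structured(np.dtype([]))    # structured dtype with 0 fields
True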
The string representation of a structured datatype is shown in the "list
of tuples" form if possible, otherwise numpy falls back to using the
more general dictionary form.
Automatic Byte Offsets and Alignment
Numpy uses one of two methods to automatically determine the field byte
offsets and the overall itemsize of a structured datatype, depending on
whether align=True was specified as a keyword argument to numpy.dtype.
By default (align=False), numpy will pack the fields together such that
each field starts at the byte offset the previous field ended, and the
fields are contiguous in memory. :
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
offsets: [0, 1, 2, 6, 7, 15]
itemsize: 17
If align=True is set, numpy will pad the structure in the same way many
C compilers would pad a C-struct. Aligned structures can give a
performance improvement in some cases, at the cost of increased datatype
size. Padding bytes are inserted between fields such that each field's
byte offset will be a multiple of that field's alignment, which is
usually equal to the field's size in bytes for simple datatypes, see
PyArray_Descr.alignment. The structure will also have trailing padding
added so that its itemsize is a multiple of the largest field's
alignment. :
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
offsets: [0, 1, 4, 8, 16, 24]
itemsize: 32
Note that although almost all modern C compilers pad in this way by
default, padding in C structs is C-implementation-dependent so this
memory layout is not guaranteed to exactly match that of a corresponding
struct in a C program. Some work may be needed, either on the numpy side
or the C side, to obtain exact correspondence.
If offsets were specified using the optional offsets key in the
dictionary-based dtype specification, setting align=True will check that
each field's offset is a multiple of its size and that the itemsize is a
multiple of the largest field size, and raise an exception if not.
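As a rough sketch of that check (the exact exception type and message may
vary between numpy versions), a 4-byte field placed at offset 1 is
rejected once align=True is requested:
>>> try:
...     np.dtype({'names': ['a', 'b'], 'formats': ['u1', 'i4'],
...               'offsets': [0, 1]}, align=True)
... except Exception:
...     print("rejected: offset 1 is not a multiple of 4")
rejected: offset 1 is not a multiple of 4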
If the offsets of the fields and itemsize of a structured array satisfy
the alignment conditions, the array will have the ALIGNED flag
<numpy.ndarray.flags> set.
A convenience function numpy.lib.recfunctions.repack_fields converts an
aligned dtype or array to a packed one and vice versa. It takes either a
dtype or structured ndarray as an argument, and returns a copy with
fields re-packed, with or without padding bytes.
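A short sketch of one direction of that round trip, applied to a dtype
(arrays are handled the same way):
>>> from numpy.lib.recfunctions import repack_fields
>>> aligned = np.dtype('u1, i4', align=True)
>>> aligned.itemsize               # padded up to the alignment of 'i4'
8
>>> repack_fields(aligned).itemsize   # padding bytes removed
5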
Field Titles
In addition to field names, fields may also have an associated title, an
alternate name, which is sometimes used as an additional description or
alias for the field. The title may be used to index an array, just like
a field name.
To add titles when using the list-of-tuples form of dtype specification,
the field name may be specified as a tuple of two strings instead of a
single string, which will be the field's title and field name
respectively. For example:
>>> np.dtype([(('my title', 'name'), 'f4')])
dtype([(('my title', 'name'), '<f4')])
When using the first form of dictionary-based specification, the titles
may be supplied as an extra 'titles' key as described above. When using
the second (discouraged) dictionary-based specification, the title can
be supplied by providing a 3-element tuple (datatype, offset, title)
instead of the usual 2-element tuple:
>>> np.dtype({'name': ('i4', 0, 'my title')})
dtype([(('my title', 'name'), '<i4')])
The dtype.fields dictionary will contain titles as keys, if any titles
are used. This means effectively that a field with a title will be
represented twice in the fields dictionary. The tuple values for these
fields will also have a third element, the field title. Because of this,
and because the names attribute preserves the field order while the
fields attribute may not, it is recommended to iterate through the
fields of a dtype using the names attribute of the dtype, which will not
list titles, as in:
>>> for name in d.names:
... print(d.fields[name][:2])
(dtype('int64'), 0)
(dtype('float32'), 8)
Union types
Structured datatypes are implemented in numpy to have base type
numpy.void by default, but it is possible to interpret other numpy types
as structured types using the (base_dtype, dtype) form of dtype
specification described in
Data Type Objects <arrays.dtypes.constructing>. Here, base_dtype is the
desired underlying dtype, and fields and flags will be copied from
dtype. This dtype is similar to a 'union' in C.
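A minimal sketch of such a union-style dtype, reinterpreting one 32-bit
integer as four single-byte fields (the RGBA field names are purely
illustrative, and the printed value assumes a little-endian machine):
>>> dt = np.dtype(('i4', [('r', 'u1'), ('g', 'u1'), ('b', 'u1'), ('a', 'u1')]))
>>> x = np.zeros(2, dtype=dt)
>>> x['r'] = 1                     # write one byte of each element
>>> int(x.view('i4')[0])           # the same bytes read as a plain int32
1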
Indexing and Assignment to Structured arrays
Assigning data to a Structured Array
There are a number of ways to assign values to a structured array: Using
python tuples, using scalar values, or using other structured arrays.
Assignment from Python Native Types (Tuples)
The simplest way to assign values to a structured array is using python
tuples. Each assigned value should be a tuple of length equal to the
number of fields in the array, and not a list or array as these will
trigger numpy's broadcasting rules. The tuple's elements are assigned to
the successive fields of the array, from left to right:
>>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
>>> x[1] = (7, 8, 9)
>>> x
array([(1, 2., 3.), (7, 8., 9.)],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
Assignment from Scalars
A scalar assigned to a structured element will be assigned to all
fields. This happens when a scalar is assigned to a structured array, or
when an unstructured array is assigned to a structured array:
>>> x = np.zeros(2, dtype='i8, f4, ?, S1')
>>> x[:] = 3
>>> x
array([(3, 3., True, b'3'), (3, 3., True, b'3')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
>>> x[:] = np.arange(2)
>>> x
array([(0, 0., False, b'0'), (1, 1., True, b'1')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
Structured arrays can also be assigned to unstructured arrays, but only
if the structured datatype has just a single field:
>>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
>>> onefield = np.zeros(2, dtype=[('A', 'i4')])
>>> nostruct = np.zeros(2, dtype='i4')
>>> nostruct[:] = twofield
Traceback (most recent call last):
...
TypeError: Cannot cast array data from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
Assignment from other Structured Arrays
Assignment between two structured arrays occurs as if the source
elements had been converted to tuples and then assigned to the
destination elements. That is, the first field of the source array is
assigned to the first field of the destination array, and the second
field likewise, and so on, regardless of field names. Structured arrays
with a different number of fields cannot be assigned to each other.
Bytes of the destination structure which are not included in any of the
fields are unaffected. :
>>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
>>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
>>> b[:] = a
>>> b
array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
Assignment involving subarrays
When assigning to fields which are subarrays, the assigned value will
first be broadcast to the shape of the subarray.
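For example (a small sketch), a scalar assigned to a length-3 subarray
field is repeated across the whole subarray:
>>> x = np.zeros(2, dtype=[('a', 'i4', (3,))])
>>> x[0] = (5,)                    # the scalar 5 is broadcast to shape (3,)
>>> x
array([([5, 5, 5],), ([0, 0, 0],)], dtype=[('a', '<i4', (3,))])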
Indexing Structured Arrays
Accessing Individual Fields
Individual fields of a structured array may be accessed and modified by
indexing the array with the field name. :
>>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> x['foo']
array([1, 3])
>>> x['foo'] = 10
>>> x
array([(10, 2.), (10, 4.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
The resulting array is a view into the original array. It shares the
same memory locations and writing to the view will modify the original
array. :
>>> y = x['bar']
>>> y[:] = 11
>>> x
array([(10, 11.), (10, 11.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
This view has the same dtype and itemsize as the indexed field, so it is
typically a non-structured array, except in the case of nested
structures.
>>> y.dtype, y.shape, y.strides
(dtype('float32'), (2,), (12,))
If the accessed field is a subarray, the dimensions of the subarray are
appended to the shape of the result:
>>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
>>> x['a'].shape
(2, 2)
>>> x['b'].shape
(2, 2, 3, 3)
Accessing Multiple Fields
One can index and assign to a structured array with a multi-field index,
where the index is a list of field names.
Warning
The behavior of multi-field indexes changed from Numpy 1.15 to Numpy
1.16.
The result of indexing with a multi-field index is a view into the
original array, as follows:
>>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
array([(0, 0.), (0, 0.), (0, 0.)],
dtype={'names': ['a', 'c'], 'formats': ['<i4', '<f4'], 'offsets': [0, 8], 'itemsize': 12})
Assignment to the view modifies the original array. The view's fields
will be in the order they were indexed. Note that unlike for
single-field indexing, the dtype of the view has the same itemsize as
the original array, and has fields at the same offsets as in the
original array, and unindexed fields are merely missing.
Warning
In Numpy 1.15, indexing an array with a multi-field index returned a
copy of the result above, but with fields packed together in memory as
if passed through numpy.lib.recfunctions.repack_fields.
The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
location of unindexed fields compared to 1.15. You will need to update
any code which depends on the data having a "packed" layout. For
instance code such as:
>>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
will need to be changed. This code has raised a FutureWarning since
Numpy 1.12, and similar code has raised FutureWarning since 1.7.
In 1.16 a number of functions have been introduced in the
numpy.lib.recfunctions module to help users account for this change.
These are numpy.lib.recfunctions.repack_fields.
numpy.lib.recfunctions.structured_to_unstructured,
numpy.lib.recfunctions.unstructured_to_structured,
numpy.lib.recfunctions.apply_along_fields,
numpy.lib.recfunctions.assign_fields_by_name, and
numpy.lib.recfunctions.require_fields.
The function numpy.lib.recfunctions.repack_fields can always be used to
reproduce the old behavior, as it will return a packed copy of the
structured array. The code above, for example, can be replaced with:
>>> from numpy.lib.recfunctions import repack_fields
>>> repack_fields(a[['a', 'c']]).view('i8')  # supported in 1.16
array([0, 0, 0])
Furthermore, numpy now provides a new function
numpy.lib.recfunctions.structured_to_unstructured which is a safer and
more efficient alternative for users who wish to convert structured
arrays to unstructured arrays, as the view above is often intended to
do. This function allows safe conversion to an unstructured type taking
into account padding, often avoids a copy, and also casts the datatypes
as needed, unlike the view. Code such as:
>>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
>>> b[['x', 'z']].view('f4')
array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
can be made safer by replacing with:
>>> from numpy.lib.recfunctions import structured_to_unstructured
>>> structured_to_unstructured(b[['x', 'z']])
array([[0., 0.],
       [0., 0.],
       [0., 0.]], dtype=float32)
Assignment to an array with a multi-field index modifies the original
array:
>>> a[['a', 'c']] = (2, 3)
>>> a
array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
This obeys the structured array assignment rules described above. For
example, this means that one can swap the values of two fields using
appropriate multi-field indexes:
>>> a[['a', 'c']] = a[['c', 'a']]
Indexing with an Integer to get a Structured Scalar
Indexing a single element of a structured array (with an integer index)
returns a structured scalar:
>>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
>>> scalar = x[0]
>>> scalar
np.void((1, 2.0, 3.0), dtype=[('f0', '<i4'), ('f1', '<f4'), ('f2', '<f4')])
>>> type(scalar)
<class 'numpy.void'>
Unlike other numpy scalars, structured scalars are mutable and act like
views into the original array, such that modifying the scalar will
modify the original array. Structured scalars also support access and
assignment by field name:
>>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> s = x[0]
>>> s['bar'] = 100
>>> x
array([(1, 100.), (3, 4.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
Similarly to tuples, structured scalars can also be indexed with an
integer:
>>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
>>> scalar[0]
1
>>> scalar[1] = 4
Thus, tuples might be thought of as the native Python equivalent to
numpy's structured types, much like native python integers are the
equivalent to numpy's integer types. Structured scalars may be converted
to a tuple by calling `numpy.ndarray.item`:
>>> scalar.item(), type(scalar.item())
((1, 4.0, 3.0), <class 'tuple'>)
Viewing Structured Arrays Containing Objects
In order to prevent clobbering object pointers in fields of object type,
numpy currently does not allow views of structured arrays containing
objects.
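A rough sketch of the restriction (only the exception type is checked
here, since the exact message may differ between numpy versions):
>>> a = np.zeros(2, dtype=[('x', 'i8'), ('y', object)])
>>> try:
...     a.view([('x', 'i8'), ('y', 'i8')])   # reinterpret the object field
... except TypeError:
...     print("view refused")
view refused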
Structure Comparison and Promotion
If the dtypes of two void structured arrays are equal, testing the
equality of the arrays will result in a boolean array with the
dimensions of the original arrays, with elements set to True where all
fields of the corresponding structures are equal:
>>> a = np.array([(1, 1), (2, 2)], dtype=[('a', 'i4'), ('b', 'i4')])
>>> b = np.array([(1, 1), (2, 3)], dtype=[('a', 'i4'), ('b', 'i4')])
>>> a == b
array([True, False])
NumPy will promote individual field datatypes to perform the comparison.
So the following is also valid (note the 'f4' dtype for the 'a' field):
>>> b = np.array([(1.0, 1), (2.5, 2)], dtype=[("a", "f4"), ("b", "i4")])
>>> a == b
array([True, False])
To compare two structured arrays, it must be possible to promote them to
a common dtype as returned by numpy.result_type and numpy.promote_types.
This enforces that the number of fields, the field names, and the field
titles must match precisely. When promotion is not possible, for example
due to mismatching field names, NumPy will raise an error. Promotion
between two structured dtypes results in a canonical dtype that ensures
native byte-order for all fields:
>>> np.result_type(np.dtype("i,>i"))
dtype([('f0', '<i4'), ('f1', '<i4')])
>>> np.result_type(np.dtype("i,>i"), np.dtype("i,i"))
dtype([('f0', '<i4'), ('f1', '<i4')])
The resulting dtype from promotion is also guaranteed to be packed,
meaning that all fields are ordered contiguously and any unnecessary
padding is removed:
>>> dt = np.dtype("i1,V3,i4,V1")[["f0", "f2"]]
>>> dt
dtype({'names':['f0','f2'], 'formats':['i1','<i4'], 'offsets':[0,4], 'itemsize':9})
>>> np.result_type(dt)
dtype([('f0', 'i1'), ('f2', '<i4')])
Note that the result prints without offsets or itemsize indicating no
additional padding. If a structured dtype is created with align=True
ensuring that dtype.isalignedstruct is true, this property is preserved:
>>> dt = np.dtype("i1,V3,i4,V1", align=True)[["f0", "f2"]]
>>> dt
dtype({'names':['f0','f2'], 'formats':['i1','<i4'], 'offsets':[0,4], 'itemsize':12}, align=True)
>>> np.result_type(dt)
dtype([('f0', 'i1'), ('f2', '<i4')], align=True)
>>> np.result_type(dt).isalignedstruct
True
When promoting multiple dtypes, the result is aligned if any of the
inputs is:
>>> np.result_type(np.dtype("i,i"), np.dtype("i,i", align=True))
dtype([('f0', '<i4'), ('f1', '<i4')], align=True)
The < and > operators always return False when comparing void structured
arrays, and arithmetic and bitwise operations are not supported.
Changed in version 1.23
Before NumPy 1.23, a warning was given and False returned when
promotion to a common dtype failed. Further, promotion was much more
restrictive: it would reject the mixed float/integer comparison example
above.
Record Arrays
As an optional convenience numpy provides an ndarray subclass,
numpy.recarray that allows access to fields of structured arrays by
attribute instead of only by index. Record arrays use a special
datatype, numpy.record, that allows field access by attribute on the
structured scalars obtained from the array. The numpy.rec module
provides functions for creating recarrays from various objects.
Additional helper functions for creating and manipulating structured
arrays can be found in numpy.lib.recfunctions.
The simplest way to create a record array is with
numpy.rec.array <numpy.core.records.array>:
>>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3., b'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
b'World'
numpy.rec.array <numpy.core.records.array> can convert a wide variety of
arguments into record arrays, including structured arrays:
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The numpy.rec module provides a number of other convenience functions
for creating record arrays, see record array creation routines
<routines.array-creation.rec>.
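For instance, numpy.rec.fromarrays builds a record array from a sequence
of plain arrays (a small sketch; the integer field width shown is
platform dependent):
>>> np.rec.fromarrays([np.array([1, 2]), np.array([3.0, 4.0])], names='a,b')
rec.array([(1, 3.), (2, 4.)],
dtype=[('a', '<i8'), ('b', '<f8')])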
A record array representation of a structured array can be obtained
using the appropriate view:
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type numpy.recarray will
automatically convert to numpy.record datatype, so the dtype can be left
out of the view:
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset.
The following view does so, taking into account the unusual case that
the recordarr was not a structured type:
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a
record array if the field has a structured type but as a plain ndarray
otherwise. :
>>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<class 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.recarray'>
Note that if a field has the same name as an ndarray attribute, the
ndarray attribute takes precedence. Such fields will be inaccessible by
attribute but will still be accessible by index.
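For example (a minimal sketch; the field name 'shape' is chosen
deliberately so that it collides with an ndarray attribute):
>>> r = np.rec.array([(1, 2)], dtype=[('shape', 'i4'), ('bar', 'i4')])
>>> r.shape                        # the ndarray attribute wins
(1,)
>>> r['shape']                     # the field is still reachable by index
array([1], dtype=int32)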
| """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.core.overrides import array_function_dispatch
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'apply_along_fields', 'assign_fields_by_name',
'drop_fields', 'find_duplicates', 'flatten_descr',
'get_fieldstructure', 'get_names', 'get_names_flat',
'join_by', 'merge_arrays', 'rec_append_fields',
'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
'rename_fields', 'repack_fields', 'require_fields',
'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
]
def _recursive_fill_fields_dispatcher(input, output):
return (input, output)
@array_function_dispatch(_recursive_fill_fields_dispatcher)
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def _get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
Similar to dtype.descr, but the second item of each tuple is a dtype, not a
string. As a result, this handles subarray dtypes
Can be passed to the dtype constructor to reconstruct the dtype, noting that
this (deliberately) discards field offsets.
Examples
--------
>>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
>>> dt.descr
[(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
>>> _get_fieldspec(dt)
[(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
#.descr returns a nameless field, so we should too
return [('', dtype)]
else:
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
(name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple. Input datatype
must have fields otherwise error is raised.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
('A',)
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names is not None:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames)
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Input datatype
must have fields otherwise error is raised.
Nested structures are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
False
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(get_names_flat(current))
return tuple(listnames)
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return (('', ndtype),)
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names is not None:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def _zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
if current.names is not None and len(current.names) == 1:
# special case - dtypes of 1 field are flattened
newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
def _zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
return _zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names is not None:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
yield from _izip_fields_flat(tuple(element))
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, str)):
yield from _izip_fields(element)
elif isinstance(element, np.void) and len(tuple(element)) == 1:
# this statement is the same from the previous expression
yield from _izip_fields(element)
else:
yield element
def _izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to collapse nested fields.
"""
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
usemask=None, asrecarray=None):
return seqarrays
@array_function_dispatch(_merge_arrays_dispatcher)
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
array([( 1, 10.), ( 2, 20.), (-1, 30.)],
dtype=[('f0', '<i8'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
... np.array([10., 20., 30.])), usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i8'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
dtype=[('a', '<i8'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
depending on its corresponding type:
* ``-1`` for integers
* ``-1.0`` for floating point numbers
* ``'-'`` for characters
* ``'-1'`` for strings
* ``True`` for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
if seqdtype.names is None:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = _zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(_izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(_izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
return (base,)
@array_function_dispatch(_drop_fields_dispatcher)
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
.. versionchanged:: 1.18.0
`drop_fields` returns an array with 0 fields if all fields are dropped,
rather than returning ``None`` as it did previously.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
>>> rfn.drop_fields(a, 'a')
array([((2., 3),), ((5., 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)], dtype=[('a', '<i8')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names is not None:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
"""
Return a new array keeping only the fields in `keep_names`,
and preserving the order of those fields.
Parameters
----------
base : array
Input array
keep_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to keep. Order of the names will be preserved.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
"""
newdtype = [(n, base.dtype[n]) for n in keep_names]
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _rec_drop_fields_dispatcher(base, drop_names):
return (base,)
@array_function_dispatch(_rec_drop_fields_dispatcher)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def _rename_fields_dispatcher(base, namemapper):
return (base,)
@array_function_dispatch(_rename_fields_dispatcher)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names is not None:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def _append_fields_dispatcher(base, names, data, dtypes=None,
fill_value=None, usemask=None, asrecarray=None):
yield base
yield from data
@array_function_dispatch(_append_fields_dispatcher)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, str):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(
max(len(base), len(data)),
dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
yield base
yield from data
@array_function_dispatch(_rec_append_fields_dispatcher)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def _repack_fields_dispatcher(a, align=None, recurse=None):
return (a,)
@array_function_dispatch(_repack_fields_dispatcher)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to
`numpy.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
If `align=True`, this method produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
>>> print_offsets(dt)
offsets: [0, 8, 16]
itemsize: 24
>>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 9]
itemsize: 17
"""
if not isinstance(a, np.dtype):
dt = repack_fields(a.dtype, align=align, recurse=recurse)
return a.astype(dt, copy=False)
if a.names is None:
return a
fieldinfo = []
for name in a.names:
tup = a.fields[name]
if recurse:
fmt = repack_fields(tup[0], align=align, recurse=True)
else:
fmt = tup[0]
if len(tup) == 3:
name = (tup[2], name)
fieldinfo.append((name, fmt))
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
def _get_fields_and_offsets(dt, offset=0):
"""
Returns a flat list of (dtype, count, offset) tuples of all the
scalar fields in the dtype "dt", including nested fields, in left
to right order.
"""
# counts up elements in subarrays, including nested subarrays, and returns
# base dtype and count
def count_elem(dt):
count = 1
while dt.shape != ():
for size in dt.shape:
count *= size
dt = dt.base
return dt, count
fields = []
for name in dt.names:
field = dt.fields[name]
f_dt, f_offset = field[0], field[1]
f_dt, n = count_elem(f_dt)
if f_dt.names is None:
fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
else:
subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
size = f_dt.itemsize
for i in range(n):
if i == 0:
# optimization: avoid list comprehension if no subarray
fields.extend(subfields)
else:
fields.extend([(d, c, o + i*size) for d, c, o in subfields])
return fields
def _common_stride(offsets, counts, itemsize):
"""
Returns the stride between the fields, or None if the stride is not
constant. The values in "counts" designate the lengths of
subarrays. Subarrays are treated as many contiguous fields, with
always positive stride.
"""
if len(offsets) <= 1:
return itemsize
negative = offsets[1] < offsets[0] # negative stride
if negative:
# reverse, so offsets will be ascending
it = zip(reversed(offsets), reversed(counts))
else:
it = zip(offsets, counts)
prev_offset = None
stride = None
for offset, count in it:
if count != 1: # subarray: always c-contiguous
if negative:
return None # subarrays can never have a negative stride
if stride is None:
stride = itemsize
if stride != itemsize:
return None
end_offset = offset + (count - 1) * itemsize
else:
end_offset = offset
if prev_offset is not None:
new_stride = offset - prev_offset
if stride is None:
stride = new_stride
if stride != new_stride:
return None
prev_offset = end_offset
if negative:
return -stride
return stride
def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
casting=None):
return (arr,)
@array_function_dispatch(_structured_to_unstructured_dispatcher)
def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
"""
Converts an n-D structured array into an (n+1)-D unstructured array.
The new array will have a new last dimension equal in size to the
number of field-elements of the input array. If not supplied, the output
datatype is determined from the numpy type promotion rules applied to all
the field datatypes.
Nested fields, as well as each element of any subarray fields, all count
as a single field-element.
Parameters
----------
arr : ndarray
Structured array or dtype to convert. Cannot contain object datatype.
dtype : dtype, optional
The dtype of the output unstructured array.
copy : bool, optional
If true, always return a copy. If false, a view is returned if
possible, such as when the `dtype` and strides of the fields are
suitable and the array subtype is one of `numpy.ndarray`,
`numpy.recarray` or `numpy.memmap`.
.. versionchanged:: 1.25.0
A view can now be returned if the fields are separated by a
uniform stride.
casting : {'no', 'equiv','safe','same_kind', 'unsafe'}, optional
See casting argument of `numpy.ndarray.astype`. Controls what kind of
data casting may occur.
Returns
-------
unstructured : ndarray
Unstructured array with one more dimension.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a
array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
>>> rfn.structured_to_unstructured(a)
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
>>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
array([ 3., 5.5, 9., 11. ])
"""
if arr.dtype.names is None:
raise ValueError('arr must be a structured array')
fields = _get_fields_and_offsets(arr.dtype)
n_fields = len(fields)
if n_fields == 0 and dtype is None:
raise ValueError("arr has no fields. Unable to guess dtype")
elif n_fields == 0:
# too many bugs elsewhere for this to work now
raise NotImplementedError("arr with no fields is not supported")
dts, counts, offsets = zip(*fields)
names = ['f{}'.format(n) for n in range(n_fields)]
if dtype is None:
out_dtype = np.result_type(*[dt.base for dt in dts])
else:
out_dtype = np.dtype(dtype)
# Use a series of views and casts to convert to an unstructured array:
# first view using flattened fields (doesn't work for object arrays)
# Note: dts may include a shape for subarrays
flattened_fields = np.dtype({'names': names,
'formats': dts,
'offsets': offsets,
'itemsize': arr.dtype.itemsize})
arr = arr.view(flattened_fields)
# we only allow a few types to be unstructured by manipulating the
# strides, because we know it won't work with, for example, np.matrix nor
# np.ma.MaskedArray.
can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
# all elements have the right dtype already; if they have a common
# stride, we can just return a view
common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
if common_stride is not None:
wrap = arr.__array_wrap__
new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
new_strides = arr.strides + (abs(common_stride), 1)
arr = arr[..., np.newaxis].view(np.uint8) # view as bytes
arr = arr[..., min(offsets):] # remove the leading unused data
arr = np.lib.stride_tricks.as_strided(arr,
new_shape,
new_strides,
subok=True)
# cast and drop the last dimension again
arr = arr.view(out_dtype)[..., 0]
if common_stride < 0:
arr = arr[..., ::-1] # reverse, if the stride was negative
if type(arr) is not type(wrap.__self__):
# Some types (e.g. recarray) turn into an ndarray along the
# way, so we have to wrap it again in order to match the
# behavior with copy=True.
arr = wrap(arr)
return arr
# next cast to a packed format with all fields converted to new dtype
packed_fields = np.dtype({'names': names,
'formats': [(out_dtype, dt.shape) for dt in dts]})
arr = arr.astype(packed_fields, copy=copy, casting=casting)
# finally is it safe to view the packed fields as the unstructured type
return arr.view((out_dtype, (sum(counts),)))
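# Illustrative usage sketch (toy array; whether a view is returned depends on the
# NumPy version and on the field layout): with copy=False and homogeneous,
# uniformly strided fields, the result may be a view, so writes propagate back
# to the structured array.
#
#     a = np.zeros(3, dtype=[('x', 'f8'), ('y', 'f8')])
#     v = structured_to_unstructured(a, copy=False)
#     v[:] = 1.0   # also sets a['x'] and a['y'] when a view was returned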
def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
align=None, copy=None, casting=None):
return (arr,)
@array_function_dispatch(_unstructured_to_structured_dispatcher)
def unstructured_to_structured(arr, dtype=None, names=None, align=False,
copy=False, casting='unsafe'):
"""
Converts an n-D unstructured array into an (n-1)-D structured array.
The last dimension of the input array is converted into a structure, with
number of field-elements equal to the size of the last dimension of the
input array. By default all output fields have the input array's dtype, but
an output structured dtype with an equal number of field-elements can be
supplied instead.
Nested fields, as well as each element of any subarray fields, all count
towards the number of field-elements.
Parameters
----------
arr : ndarray
Unstructured array or dtype to convert.
dtype : dtype, optional
The structured dtype of the output array
names : list of strings, optional
If dtype is not supplied, this specifies the field names for the output
dtype, in order. The field dtypes will be the same as the input array.
align : boolean, optional
Whether to create an aligned memory layout.
copy : bool, optional
See copy argument to `numpy.ndarray.astype`. If true, always return a
copy. If false, and `dtype` requirements are satisfied, a view is
returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
See casting argument of `numpy.ndarray.astype`. Controls what kind of
data casting may occur.
Returns
-------
structured : ndarray
Structured array with fewer dimensions.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a = np.arange(20).reshape((4,5))
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
>>> rfn.unstructured_to_structured(a, dt)
array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
"""
if arr.shape == ():
raise ValueError('arr must have at least one dimension')
n_elem = arr.shape[-1]
if n_elem == 0:
# too many bugs elsewhere for this to work now
raise NotImplementedError("last axis with size 0 is not supported")
if dtype is None:
if names is None:
names = ['f{}'.format(n) for n in range(n_elem)]
out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
fields = _get_fields_and_offsets(out_dtype)
dts, counts, offsets = zip(*fields)
else:
if names is not None:
raise ValueError("don't supply both dtype and names")
# if dtype is the args of np.dtype, construct it
dtype = np.dtype(dtype)
# sanity check of the input dtype
fields = _get_fields_and_offsets(dtype)
if len(fields) == 0:
dts, counts, offsets = [], [], []
else:
dts, counts, offsets = zip(*fields)
if n_elem != sum(counts):
raise ValueError('The length of the last dimension of arr must '
'be equal to the number of fields in dtype')
out_dtype = dtype
if align and not out_dtype.isalignedstruct:
raise ValueError("align was True but dtype is not aligned")
names = ['f{}'.format(n) for n in range(len(fields))]
# Use a series of views and casts to convert to a structured array:
# first view as a packed structured array of one dtype
packed_fields = np.dtype({'names': names,
'formats': [(arr.dtype, dt.shape) for dt in dts]})
arr = np.ascontiguousarray(arr).view(packed_fields)
# next cast to an unpacked but flattened format with varied dtypes
flattened_fields = np.dtype({'names': names,
'formats': dts,
'offsets': offsets,
'itemsize': out_dtype.itemsize})
arr = arr.astype(flattened_fields, copy=copy, casting=casting)
# finally view as the final nested dtype and remove the last axis
return arr.view(out_dtype)[..., 0]
def _apply_along_fields_dispatcher(func, arr):
return (arr,)
@array_function_dispatch(_apply_along_fields_dispatcher)
def apply_along_fields(func, arr):
"""
Apply function 'func' as a reduction across fields of a structured array.
This is similar to `numpy.apply_along_axis`, but treats the fields of a
structured array as an extra axis. The fields are all first cast to a
common type following the type-promotion rules from `numpy.result_type`
applied to the field's dtypes.
Parameters
----------
func : function
Function to apply on the "field" dimension. This function must
support an `axis` argument, like `numpy.mean`, `numpy.sum`, etc.
arr : ndarray
Structured array for which to apply func.
Returns
-------
out : ndarray
Result of the reduction operation
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
>>> rfn.apply_along_fields(np.mean, b)
array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
>>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
array([ 3., 5.5, 9., 11. ])
"""
if arr.dtype.names is None:
raise ValueError('arr must be a structured array')
uarr = structured_to_unstructured(arr)
return func(uarr, axis=-1)
# works and avoids axis requirement, but very, very slow:
#return np.apply_along_axis(func, -1, uarr)
def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
return dst, src
@array_function_dispatch(_assign_fields_by_name_dispatcher)
def assign_fields_by_name(dst, src, zero_unassigned=True):
"""
Assigns values from one structured array to another by field name.
Normally in numpy >= 1.14, assignment of one structured array to another
copies fields "by position", meaning that the first field from the src is
copied to the first field of the dst, and so on, regardless of field name.
This function instead copies "by field name", such that fields in the dst
are assigned from the identically named field in the src. This applies
recursively for nested structures. This is how structure assignment worked
in numpy >= 1.6 to <= 1.13.
Parameters
----------
dst : ndarray
src : ndarray
The source and destination arrays during assignment.
zero_unassigned : bool, optional
If True, fields in the dst for which there was no matching
field in the src are filled with the value 0 (zero). This
was the behavior of numpy <= 1.13. If False, those fields
are not modified.
"""
if dst.dtype.names is None:
dst[...] = src
return
for name in dst.dtype.names:
if name not in src.dtype.names:
if zero_unassigned:
dst[name] = 0
else:
assign_fields_by_name(dst[name], src[name],
zero_unassigned)
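# Illustrative usage sketch (toy arrays for demonstration):
#
#     a = np.ones(3, dtype=[('x', 'i4'), ('y', 'f8')])
#     b = np.array([(2.0, 5)] * 3, dtype=[('y', 'f8'), ('z', 'i4')])
#     assign_fields_by_name(a, b)
#     # a['y'] is copied from b['y']; a['x'] has no match in b, so it is zeroed
#     # because zero_unassigned defaults to True.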
def _require_fields_dispatcher(array, required_dtype):
return (array,)
@array_function_dispatch(_require_fields_dispatcher)
def require_fields(array, required_dtype):
"""
Casts a structured array to a new dtype using assignment by field-name.
This function assigns from the old to the new array by name, so the
value of a field in the output array is the value of the field with the
same name in the source array. This has the effect of creating a new
ndarray containing only the fields "required" by the required_dtype.
If a field name in the required_dtype does not exist in the
input array, that field is created and set to 0 in the output array.
Parameters
----------
array : ndarray
array to cast
required_dtype : dtype
datatype for output array
Returns
-------
out : ndarray
array with the new dtype, with field values copied from the fields in
the input array with the same name
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
>>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
array([(1., 1), (1., 1), (1., 1), (1., 1)],
dtype=[('b', '<f4'), ('c', 'u1')])
>>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
array([(1., 0), (1., 0), (1., 0), (1., 0)],
dtype=[('b', '<f4'), ('newf', 'u1')])
"""
out = np.empty(array.shape, dtype=required_dtype)
assign_fields_by_name(out, array)
return out
def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
asrecarray=None, autoconvert=None):
return arrays
@array_function_dispatch(_stack_arrays_dispatcher)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
(b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
mask=[(False, False, True), (False, False, True),
(False, False, False), (False, False, False),
(False, False, False)],
fill_value=(b'N/A', 1e+20, 1e+20),
dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = _get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
for fname, fdtype in _get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
else:
nameidx = names.index(fname)
_, cdtype = newdescr[nameidx]
if autoconvert:
newdescr[nameidx] = (fname, max(fdtype, cdtype))
elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
(cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def _find_duplicates_dispatcher(
a, key=None, ignoremask=None, return_index=None):
return (a,)
@array_function_dispatch(_find_duplicates_dispatcher)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def _join_by_dispatcher(
key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
defaults=None, usemask=None, asrecarray=None):
return (r1, r2)
@array_function_dispatch(_join_by_dispatcher)
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of strings corresponding
to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
in r2 but absent of the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
in r1 but absent of the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, str):
key = (key,)
# Check the keys
if len(set(key)) != len(key):
dup = next(x for n,x in enumerate(key) if x in key[n+1:])
raise ValueError("duplicate join key %r" % dup)
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %r' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %r' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
collisions = (set(r1names) & set(r2names)) - set(key)
if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
# (use order of keys in `r1` for back-compatibility)
key1 = [ n for n in r1names if n in key ]
r1k = _keep_fields(r1, key1)
r2k = _keep_fields(r2, key1)
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array.......
# Start with the key fields
ndtype = _get_fieldspec(r1k.dtype)
# Add the fields from r1
for fname, fdtype in _get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
for fname, fdtype in _get_fieldspec(r2.dtype):
# Have we seen the current name already?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
try:
nameidx = names.index(fname)
except ValueError:
# ... we haven't: just add the description to the current list
ndtype.append((fname, fdtype))
else:
# collision
_, cdtype = ndtype[nameidx]
if fname in key:
# The current field is part of the key: take the largest dtype
ndtype[nameidx] = (fname, max(fdtype, cdtype))
else:
# The current field is not part of the key: add the suffixes,
# and place the new field adjacent to the old one
ndtype[nameidx:nameidx + 1] = [
(fname + r1postfix, cdtype),
(fname + r2postfix, fdtype)
]
# Rebuild a dtype from the new fields
ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
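# Illustrative usage sketch (toy arrays for demonstration):
#
#     r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', 'i4'), ('a', 'f8')])
#     r2 = np.array([(1, 100.), (3, 300.)], dtype=[('key', 'i4'), ('b', 'f8')])
#     join_by('key', r1, r2, jointype='inner')
#     # -> one masked record with key == 1 carrying both 'a' and 'b'; with
#     #    jointype='outer' the unmatched keys 2 and 3 are also kept, with the
#     #    missing fields masked.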
def _rec_join_dispatcher(
key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
defaults=None):
return (r1, r2)
@array_function_dispatch(_rec_join_dispatcher)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs) |
mosaicml__composer | scale_schedule.md | Module doc | Generate documentation for this module | Apache License 2.0 | mosaicml__composer/docs/source/method_cards/scale_schedule.md | [
"mosaicml__composer/composer/optim/scheduler.py"
] | # Scale Schedule
Scale Schedule changes the number of training steps by a dilation factor
and dilating learning rate changes accordingly. Doing so varies the
training budget, making it possible to explore tradeoffs between cost
(measured in time or money) and the quality of the final model.
## How to Use
### Implementation Details
Scale schedule is implemented as part of the {class}`~.Trainer` via the
`scale_schedule_ratio` argument. The trainer will scale the `max_duration`
by the `scale_schedule_ratio`, and also adjust non-warmup milestones for
the learning rate schedulers.
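For example, a minimal sketch (our own illustration; `model`, `train_dataloader`, and `eval_dataloader` are assumed to be defined elsewhere):

```python
from composer import Trainer

# With scale_schedule_ratio=0.5, a max_duration of 90 epochs is scaled down to
# 45 epochs, and any non-warmup learning rate milestones are scaled to match.
trainer = Trainer(
    model=model,
    train_dataloader=train_dataloader,
    eval_dataloader=eval_dataloader,
    max_duration="90ep",
    scale_schedule_ratio=0.5,
)
trainer.fit()
```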
## Suggested Hyperparameters
The default scale schedule ratio is 1.0. For a standard maximum number
of epochs (these will differ depending on the task), scaling down the
learning rate schedule will lead to a monotonic decrease in accuracy.
Increasing the scale schedule ratio will often improve the accuracy up
to a plateau, although this leads to longer training time and added
cost.
## Technical Details
Changing the length of training will affect the final accuracy of the
model. For example, training ResNet-50 on ImageNet for the standard
schedule in the composer library leads to final validation accuracy of
76.6%, while using scale schedule with a ratio of 0.5 leads to final
validation accuracy of 75.6%. Training for longer can lead to
diminishing returns or even overfitting and worse validation accuracy.
In general, the cost of training is proportional to the length of
training when using scale schedule (assuming all other techniques, such
as progressive resizing, have their schedules scaled accordingly).
```{note}
The warmup periods of schedulers are not scaled by the scale schedule ratio.
```
> As a general rule, scale schedule can be applied in conjunction with
> any method. If other methods also perform actions according to a
> schedule, it is important to modify their schedules to coincide with the
> altered number of epochs.
## Attribution
The number of training steps to perform is an important hyperparameter
to tune when developing a model. This technique appears implicitly
throughout the deep learning literature. One example of a systematic
study of this approach is the scan-SGD technique in [_How Important is
Importance Sampling for Deep Budgeted
Training_](https://openreview.net/forum?id=TqQ0oOzJlai) by Eric Arazo,
Diego Ortega, Paul Albert, Noel O'Connor, and Kevin McGuinness. Posted
to OpenReview in 2020.
## API Reference
Trainer attribute: `scale_schedule_ratio` in {class}`composer.Trainer`
| # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Stateless learning rate schedulers.
Stateless schedulers solve some of the problems associated with PyTorch's built-in schedulers provided in
:mod:`torch.optim.lr_scheduler`. The primary design goal of the schedulers provided in this module is to allow
schedulers to interface directly with Composer's :mod:`~composer.core.time` abstraction. This means that schedulers can
be configured using arbitrary but explicit time units.
See :class:`~.ComposerScheduler` for more information on stateless schedulers.
"""
import inspect
import logging
import math
import textwrap
import warnings
from typing import TYPE_CHECKING, List, Union
from torch.optim.lr_scheduler import LambdaLR
from composer.core import PyTorchScheduler, State, Time, TimeUnit
if TYPE_CHECKING:
from typing import Protocol
else:
# subclasses of Protocol cannot be instantiated in Python 3.8
Protocol = object
log = logging.getLogger(__name__)
__all__ = [
'ComposerScheduler', 'compile_composer_scheduler', 'StepScheduler', 'MultiStepScheduler', 'ConstantScheduler',
'LinearScheduler', 'ExponentialScheduler', 'CosineAnnealingScheduler', 'CosineAnnealingWarmRestartsScheduler',
'PolynomialScheduler', 'MultiStepWithWarmupScheduler', 'ConstantWithWarmupScheduler', 'LinearWithWarmupScheduler',
'CosineAnnealingWithWarmupScheduler', 'PolynomialWithWarmupScheduler'
]
class ComposerScheduler(Protocol):
r"""Specification for a stateless scheduler function.
While this specification is provided as a Python class, an ordinary function can implement this interface as long
as it matches the signature of this interface's :meth:`~.ComposerScheduler.__call__` method.
For example, a scheduler that halves the learning rate after 10 epochs could be written as:
.. code:: python
def ten_epoch_decay_scheduler(state: State) -> float:
if state.timestamp.epoch < 10:
return 1.0
return 0.5
# ten_epoch_decay_scheduler is a valid ComposerScheduler
trainer = Trainer(
schedulers=[ten_epoch_decay_scheduler],
...
)
In order to allow schedulers to be configured, schedulers may also written as callable classes:
.. code:: python
class VariableEpochDecayScheduler(ComposerScheduler):
def __init__(num_epochs: int):
self.num_epochs = num_epochs
def __call__(state: State) -> float:
if state.time.epoch < self.num_epochs:
return 1.0
return 0.5
ten_epoch_decay_scheduler = VariableEpochDecayScheduler(num_epochs=10)
# ten_epoch_decay_scheduler is also a valid ComposerScheduler
trainer = Trainer(
schedulers=[ten_epoch_decay_scheduler],
...
)
The constructions of ``ten_epoch_decay_scheduler`` in each of the examples above are equivalent. Note that neither
scheduler uses the ``scale_schedule_ratio`` parameter. As long as this parameter is not used when initializing
:class:`.Trainer`, it is not required that any schedulers implement that parameter.
.. automethod:: __call__
"""
def __call__(self, state: State, ssr: float = 1.0) -> float:
r"""Calculate the current learning rate multiplier :math:`\alpha`.
A scheduler function should be a pure function that returns a multiplier to apply to the optimizer's provided
learning rate, given the current trainer state, and optionally a "scale schedule ratio" (SSR). A typical
implementation will read ``state.timestamp``, and possibly other fields like ``state.max_duration``, to determine
the trainer's latest temporal progress.
.. note::
All instances of :class:`~.ComposerScheduler` output a `multiplier` for the learning rate, rather than the
learning rate directly. By convention, we use the symbol :math:`\alpha` to refer to this multiplier. This
means that the learning rate :math:`\eta` at time :math:`t` can be represented as
:math:`\eta(t) = \eta_i \times \alpha(t)`, where :math:`\eta_i` represents the learning rate used to
initialize the optimizer.
.. note::
It is possible to use multiple schedulers, in which case their effects will stack multiplicatively.
The ``ssr`` param indicates that the schedule should be "stretched" accordingly. In symbolic terms, where
:math:`\alpha_\sigma(t)` represents the scheduler output at time :math:`t` using scale schedule ratio
:math:`\sigma`:
.. math::
\alpha_{\sigma}(t) = \alpha(t / \sigma)
Args:
state (State): The current Composer Trainer state.
ssr (float): The scale schedule ratio. In general, the learning rate computed by this
scheduler at time :math:`t` with an SSR of 1.0 should be the same as that computed by
this scheduler at time :math:`t \times s` with an SSR of :math:`s`. Default = ``1.0``.
Returns:
alpha (float): A multiplier to apply to the optimizer's provided learning rate.
"""
raise NotImplementedError
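# A minimal sketch (illustrative, not part of the library): a stateless scheduler
# that honors the scale schedule ratio by stretching its decay point accordingly.
#
#     def halve_after_ten_epochs(state: State, ssr: float = 1.0) -> float:
#         cutoff = Time(int(10 * ssr), TimeUnit.EPOCH)
#         return 1.0 if state.timestamp < cutoff else 0.5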
def _convert_time(time: Union[str, Time[int], Time[float]], state: State, ssr: float = 1.0) -> Time[int]:
if isinstance(time, str):
time = Time.from_timestring(time)
assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
if time.unit == TimeUnit.DURATION:
if state.max_duration.unit == TimeUnit.EPOCH:
if state.dataloader_len is None:
raise RuntimeError('Cannot convert time, as state.dataloader_len is None.')
return Time(int(time.value * int(state.dataloader_len) * state.max_duration.value), TimeUnit.BATCH)
return Time(int(time.value * state.max_duration.value), state.max_duration.unit)
elif time.unit == TimeUnit.EPOCH:
# Epochs do not provide sufficient granularity for SSR scaling
# e.g. if max_duration = 1ep, then any SSR would result in a new duration of 0.
# so, convert the time into batches
if state.dataloader_len is None:
raise RuntimeError('Cannot convert time, as state.dataloader_len is None.')
time = Time(value=time.value * int(state.dataloader_len), unit=TimeUnit.BATCH)
return Time(value=int(time.value * ssr), unit=time.unit)
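# Worked example (illustrative, assuming max_duration='10ep' and a dataloader of
# 100 batches per epoch):
#     _convert_time('0.5dur', state)        -> Time(500, TimeUnit.BATCH)
#     _convert_time('4ep', state, ssr=0.5)  -> Time(200, TimeUnit.BATCH)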
def compile_composer_scheduler(scheduler: ComposerScheduler, state: State, ssr: float = 1.0) -> PyTorchScheduler:
"""Converts a stateless scheduler into a PyTorch scheduler object.
While the resulting scheduler provides a ``.step()`` interface similar to other PyTorch schedulers, the scheduler is
also given a bound reference to the current :class:`~composer.core.State`. This means that any internal state updated
by ``.step()`` can be ignored, and the scheduler can instead simply use the bound state to recalculate the current
learning rate.
Args:
scheduler (ComposerScheduler): A stateless scheduler, provided as a :class:`~.ComposerScheduler` object.
state (State): The Composer Trainer's state.
Returns:
compiled_scheduler (PyTorchScheduler): The scheduler, in a form compatible with PyTorch scheduler interfaces.
"""
optimizers = state.optimizers
if len(optimizers) != 1:
raise NotImplementedError('Providing functional schedulers is unsupported with multiple optimizers.')
optimizer = optimizers[0]
scheduler_sig = inspect.signature(scheduler)
def scheduler_fn(epoch: int) -> float:
del epoch # unused. Provided by the pytorch LambdaLR
# if the ssr is 1.0, don't pass it to the scheduler. This allows users to pass in lambdas that only take
# one parameter -- the state
if len(scheduler_sig.parameters) == 1:
if ssr == 1.0:
return scheduler(state)
else:
raise ValueError(
textwrap.dedent(f"""\
Scheduler {scheduler} does not support `scale_schedule_ratio`.
To use `scale_schedule_ratio`, the scheduler must take two arguments (state, ssr)"""))
return scheduler(state, ssr)
lambda_scheduler = LambdaLR(optimizer, scheduler_fn)
return lambda_scheduler
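# Illustrative usage sketch (assumes a ``state`` whose single optimizer has
# already been constructed):
#
#     pytorch_scheduler = compile_composer_scheduler(CosineAnnealingScheduler(), state)
#     pytorch_scheduler.step()  # recomputes the multiplier from the bound state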
class StepScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed intervals.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.StepLR` from PyTorch.
Decays the learning rate by a factor of ``gamma`` periodically, with a frequency determined by ``step_size``.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ {\text{floor}(t / \rho)}
Where :math:`\rho` represents the time between changes to the learning rate (the step size), and
:math:`\gamma` represents the multiplicative decay factor.
Args:
step_size (str | Time): Time between changes to the learning rate.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
"""
def __init__(self, step_size: Union[str, Time], gamma: float = 0.1):
self.step_size = step_size
self.gamma = gamma
def __call__(self, state: State, ssr: float = 1.0):
step_size = _convert_time(self.step_size, state, ssr=ssr)
current_time = state.timestamp.get(step_size.unit)
steps = int(current_time / step_size)
return self.gamma**steps
class MultiStepScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed milestones.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.MultiStepLR` from PyTorch.
Decays the learning rate by a factor of ``gamma`` whenever a time milestone in ``milestones`` is reached.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ x
Where :math:`x` represents the amount of milestones that have been reached, and :math:`\gamma` represents the
multiplicative decay factor.
Args:
milestones (List[str | Time]): Times at which the learning rate should change.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
"""
def __init__(self, milestones: List[Union[str, Time]], gamma: float = 0.1):
self.milestones = milestones
self.gamma = gamma
def __call__(self, state: State, ssr: float = 1.0):
milestones = [_convert_time(milestone, state, ssr=ssr) for milestone in self.milestones]
factor = 1.0
for milestone in milestones:
if state.timestamp >= milestone:
factor *= self.gamma
return factor
class ConstantScheduler(ComposerScheduler):
r"""Maintains a fixed learning rate.
This scheduler is based on :class:`~torch.optim.lr_scheduler.ConstantLR` from PyTorch.
The default settings for this scheduler simply maintain a learning rate factor of 1 for the entire training
duration. However, both the factor and the duration of this scheduler can be configured.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases} \alpha, & \text{if } t < t_{max} \\ 1.0 & \text{otherwise} \end{cases}
Where :math:`\alpha` represents the learning rate multiplier to maintain while this scheduler is active, and
:math:`t_{max}` represents the duration of this scheduler.
Args:
alpha (float): Learning rate multiplier to maintain while this scheduler is active. Default = ``1.0``.
t_max (str | Time): Duration of this scheduler. Default = ``"1dur"``.
"""
def __init__(self, alpha: float = 1.0, t_max: Union[str, Time] = '1dur') -> None:
self.alpha = alpha
self.t_max = t_max
def __call__(self, state: State, ssr: float = 1.0) -> float:
t_max = _convert_time(self.t_max, state, ssr=ssr)
if state.timestamp < t_max:
return self.alpha
return 1.0
class LinearScheduler(ComposerScheduler):
r"""Adjusts the learning rate linearly.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.LinearLR` from PyTorch.
.. warning::
Note that the defaults for this scheduler differ from the defaults for
:class:`~torch.optim.lr_scheduler.LinearLR`. The PyTorch scheduler, by default, linearly increases the learning
rate multiplier from 1.0 / 3 to 1.0, whereas this implementation, by default, linearly decreases the multiplier
from 1.0 to 0.0.
Linearly adjusts the learning rate multiplier from ``alpha_i`` to ``alpha_f`` over ``t_{max}`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_i + (\alpha_f - \alpha_i) \times \tau
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`\alpha_i` represents the initial learning rate multiplier, :math:`\alpha_f` represents
the learning rate multiplier to decay to, and :math:`t_{max}` represents the duration of this scheduler.
Args:
alpha_i (float): Initial learning rate multiplier. Default = ``1.0``.
alpha_f (float): Final learning rate multiplier. Default = ``0.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
"""
def __init__(self, alpha_i: float = 1.0, alpha_f: float = 0.0, t_max: Union[str, Time] = '1dur'):
self.alpha_i = alpha_i
self.alpha_f = alpha_f
self.t_max = Time.from_timestring(t_max) if isinstance(t_max, str) else t_max
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = min(1.0, (current_time / t_max).value)
current_factor = self.alpha_i + frac_of_total * (self.alpha_f - self.alpha_i)
return current_factor
class ExponentialScheduler(ComposerScheduler):
r"""Decays the learning rate exponentially.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.ExponentialLR` from PyTorch.
Exponentially decays the learning rate such that it decays by a factor of ``gamma`` every ``decay_period`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ {t / \rho}
Where :math:`\rho` represents the decay period, and :math:`\gamma` represents the multiplicative decay factor.
Args:
decay_period (str | Time): Decay period. Default = ``"1ep"``.
gamma (float): Multiplicative decay factor.
"""
def __init__(self, gamma: float, decay_period: Union[str, Time] = '1ep'):
self.gamma = gamma
self.decay_period = decay_period
def __call__(self, state: State, ssr: float = 1.0):
decay_period = _convert_time(self.decay_period, state, ssr)
current_time_in_decay_units = state.timestamp.get(decay_period.unit)
return self.gamma**float(current_time_in_decay_units / decay_period)
def _cosine_anneal(x: float, min_y: float = 0.0, max_y: float = 1.0) -> float:
"""Implements a cosine decay curve.
Curve is cos(x) on domain [0, pi], stretched to the domain [0, 1] and range [min_y, max_y]. Additionally, param x is
clipped to the interval [0, 1]
"""
x = min(max(x, 0.0), 1.0)
return min_y + (max_y - min_y) * (1 + math.cos(x * math.pi)) / 2
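# Worked example (with the default min_y=0.0 and max_y=1.0):
#     _cosine_anneal(0.0) -> 1.0,  _cosine_anneal(0.5) -> 0.5,  _cosine_anneal(1.0) -> 0.0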
class CosineAnnealingScheduler(ComposerScheduler):
r"""Decays the learning rate according to the decreasing part of a cosine curve.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.CosineAnnealingLR` from PyTorch.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times \frac{1}{2} (1 + \cos(\pi \times \tau))
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`t_{max}`
represents the duration of this scheduler, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, t_max: Union[str, Time] = '1dur', alpha_f: float = 0.0):
self.t_max = t_max
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = (current_time / t_max).value
return _cosine_anneal(x=frac_of_total, min_y=self.alpha_f)
class CosineAnnealingWarmRestartsScheduler(ComposerScheduler):
r"""Cyclically decays the learning rate according to the decreasing part of a cosine curve.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.CosineAnnealingWarmRestarts` from PyTorch.
This scheduler resembles a regular cosine annealing curve, as seen in :class:`~.CosineAnnealingScheduler`, except
that after the curve first completes ``t_0`` time, the curve resets to the start. The durations of subsequent cycles
are each multiplied by ``t_mult``.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times \frac{1}{2}(1 + \cos(\pi \times \tau_i))
Given :math:`\tau_i`, the fraction of time elapsed through the :math:`i^\text{th}` cycle, as:
.. math::
\tau_i = (t - \sum_{j=0}^{i-1} t_0 t_{mult}^j) / (t_0 t_{mult}^i)
Where :math:`t_0`
represents the period of the first cycle, :math:`t_{mult}` represents the multiplier for the duration of successive
cycles, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
t_0 (str | Time): The period of the first cycle.
t_mult (float): The multiplier for the duration of successive cycles. Default = ``1.0``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, t_0: Union[str, Time], t_mult: float = 1.0, alpha_f: float = 0.0):
self.t_0 = t_0
self.t_mult = t_mult
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_0 = _convert_time(self.t_0, state, ssr=ssr)
current_interval_len = t_0
current_interval_end = t_0
while current_interval_end <= state.timestamp.get(current_interval_end.unit):
if current_interval_len.value == 0:
raise ValueError(
'Interval between restarts for cosine annealing/warm restarts scheduler has decayed to 0.')
current_interval_len = Time(value=int(self.t_mult * current_interval_len.value),
unit=current_interval_len.unit)
current_interval_end += current_interval_len
current_interval_start = current_interval_end - current_interval_len
frac_of_current_interval = ((state.timestamp.get(t_0.unit) - current_interval_start) /
current_interval_len).value
return _cosine_anneal(x=frac_of_current_interval, min_y=self.alpha_f)
class PolynomialScheduler(ComposerScheduler):
r"""Sets the learning rate to be proportional to a power of the fraction of training time left.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times (1 - \tau) ^ {\kappa}
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`\kappa`
represents the exponent to be used for the proportionality relationship, :math:`t_{max}` represents the duration of
this scheduler, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
power (float): The exponent to be used for the proportionality relationship.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, power: float, t_max: Union[str, Time] = '1dur', alpha_f: float = 0.0):
self.t_max = t_max
self.power = power
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = (current_time / t_max).value
coeff = (1 - frac_of_total)**self.power
current_factor = self.alpha_f + coeff * (1.0 - self.alpha_f)
return current_factor
def _raise_if_warmup_and_max_duration_incompatible(t_warmup: Union[str, Time], t_max: Union[str, Time]):
if isinstance(t_warmup, str):
t_warmup = Time.from_timestring(t_warmup)
if isinstance(t_max, str):
t_max = Time.from_timestring(t_max)
units_same = t_warmup.unit == t_max.unit
warmup_is_dur = t_warmup.unit == TimeUnit('dur')
batches_vs_epochs = (t_warmup.unit == TimeUnit('ba') and
t_max.unit == TimeUnit('ep')) or (t_warmup.unit == TimeUnit('ep') and
t_max.unit == TimeUnit('ba'))
if not units_same and not warmup_is_dur and not batches_vs_epochs:
raise ValueError(f'Cannot use warmup scheduler with max_duration {t_max} and warmup {t_warmup}. '
't_warmup units must be the same as max_duration units, warmup must be in units "dur", '
'max_duration must be "ba" and t_warmup "ep", or max_duration must be "ep" and t_warmup "ba".')
class MultiStepWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed milestones, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.MultiStepScheduler`, with an added warmup.
Starts with a linear warmup over ``t_warmup`` time, then decays the learning rate by a factor of ``gamma``
whenever a time milestone in ``milestones`` is reached.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\gamma ^ x & \text{otherwise}
\end{cases}
Where :math:`t_{warmup}` represents the warmup time, :math:`x` represents the amount of milestones that have been
reached, and :math:`\gamma` represents the multiplicative decay factor.
.. warning::
All milestones should be greater than ``t_warmup``; otherwise, they will have no effect on the computed learning
rate multiplier until the warmup has completed.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
milestones (List[str | Time]): Times at which the learning rate should change.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
scale_warmup (bool): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
milestones: List[Union[str, Time]],
gamma: float = 0.1,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.milestones = milestones
self.gamma = gamma
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
self.step_scheduler = MultiStepScheduler(milestones=milestones, gamma=gamma)
def __call__(self, state: State, ssr: float = 1.0):
assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
return self.step_scheduler(state, ssr)
class ConstantWithWarmupScheduler(ComposerScheduler):
r"""Maintains a fixed learning rate, with an initial warmup.
This scheduler is based on :class:`~torch.optim.lr_scheduler.ConstantLR` from PyTorch, with an added warmup.
Starts with a linear warmup over ``t_warmup`` time, then simply maintains a learning rate factor of 1 for the entire training
duration. However, both the factor and the duration of this scheduler can be configured.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha, & \text{if } t < t_{max} \\
1.0 & \text{otherwise} \end{cases}
Where :math:`\alpha` represents the learning rate multiplier to maintain while this scheduler is active, and
:math:`t_{max}` represents the duration of this scheduler.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
alpha (float): Learning rate multiplier to maintain while this scheduler is active. Default = ``1.0``.
t_max (str | Time): Duration of this scheduler. Default = ``"1dur"``.
scale_warmup (bool): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
alpha: float = 1.0,
t_max: Union[str, Time] = '1dur',
scale_warmup: bool = False) -> None:
self.t_warmup = t_warmup
self.alpha = alpha
self.t_max = t_max
self.scale_warmup = scale_warmup
self.scheduler = LinearWithWarmupScheduler(t_warmup=t_warmup,
alpha_i=alpha,
alpha_f=alpha,
t_max=t_max,
scale_warmup=scale_warmup)
def __call__(self, state: State, ssr: float = 1.0) -> float:
return self.scheduler(state, ssr)
class LinearWithWarmupScheduler(ComposerScheduler):
r"""Adjusts the learning rate linearly, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.LinearScheduler`, with an added warmup.
Linearly adjusts the learning rate multiplier from ``alpha_i`` to ``alpha_f`` over ``t_{max}`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_i + (\alpha_f - \alpha_i) \times \tau_w & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / t_{max}
Where :math:`t_{warmup}` represents the warmup time, :math:`\alpha_i` represents the initial learning rate multiplier,
and :math:`\alpha_f` represents the learning rate multiplier to decay to, and :math:`t_{max}` represents the duration
of this scheduler.
.. warning::
By default, the initial warmup time is **not** scaled according to any provided scale schedule ratio! However, the duration of
the scheduler is still scaled accordingly. To achieve this, after warmup, the scheduler's "slope" will be
slightly distorted from what would otherwise be expected. To scale the entire schedule, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
alpha_i (float): Initial learning rate multiplier. Default = ``1.0``.
alpha_f (float): Final learning rate multiplier. Default = ``0.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
scale_warmup (bool): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
alpha_i: float = 1.0,
alpha_f: float = 0.0,
t_max: Union[str, Time] = '1dur',
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.alpha_i = alpha_i
self.alpha_f = alpha_f
self.t_max = t_max
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=alpha_i, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
current_factor = self.alpha_i + frac_of_total * (self.alpha_f - self.alpha_i)
return current_factor
class CosineAnnealingWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate according to the decreasing part of a cosine curve, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.CosineAnnealingScheduler`, with an added warmup.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_f + (1 - \alpha_f) \times \frac{1}{2} (1 + \cos(\pi \times \tau_w)) & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / t_{max}
Where :math:`t_{warmup}` represents the warmup time, :math:`t_{max}` represents the duration of this scheduler, and
:math:`\alpha_f` represents the learning rate multiplier to decay to.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
scale_warmup (bool): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
t_max: Union[str, Time] = '1dur',
alpha_f: float = 0.0,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.t_max = t_max
self.alpha_f = alpha_f
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
return _cosine_anneal(x=frac_of_total, min_y=self.alpha_f)
class PolynomialWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate according to a power of the fraction of training time left, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.PolynomialScheduler`, with an added warmup.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_f + (1 - \alpha_f) \times (1 - \tau_w) ^ {\kappa} & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / t_{max}
Where :math:`\kappa` represents the exponent to be used for the proportionality relationship,
:math:`t_{warmup}` represents the warmup time, :math:`t_{max}` represents the duration of this scheduler, and
:math:`\alpha_f` represents the learning rate multiplier to decay to.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
power (float): The exponent to be used for the proportionality relationship. Default = ``2.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
scale_warmup (bool): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
power: float = 2.0,
t_max: Union[str, Time] = '1dur',
alpha_f: float = 0.0,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.power = power
self.t_max = t_max
self.alpha_f = alpha_f
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
coeff = (1 - frac_of_total)**self.power
current_factor = self.alpha_f + coeff * (1.0 - self.alpha_f)
return current_factor |
|
mosaicml__composer | schedulers.rst | Module doc | Generate documentation for this module | Apache License 2.0 | mosaicml__composer/docs/source/trainer/schedulers.rst | [
"mosaicml__composer/composer/optim/scheduler.py"
] | Schedulers
The .Trainer supports both PyTorch torch.optim.lr_scheduler schedulers
as well as our own schedulers, which take advantage of the .Time
representation.
For PyTorch schedulers, we step every epoch by default. To instead step
every batch, set step_schedulers_every_batch=True:
from composer import Trainer
from torch.optim.lr_scheduler import CosineAnnealingLR

trainer = Trainer(
    ...,
    schedulers=CosineAnnealingLR(optimizer, T_max=2),
    step_schedulers_every_batch=True,
)
Note
If setting step_schedulers_every_batch to True, remember to specify the
arguments to your pytorch scheduler in units of batches, not epochs.
Our experiments have shown better accuracy using stepwise schedulers, so
it is the recommended setting in most cases.
Composer Schedulers
Our schedulers take advantage of our Time</trainer/time> abstraction to
provide easier ways to set time. Time parameters can be provided in
different units: samples ("sp"), tokens ("tok"), batches ("ba"), epochs
("ep"), and duration ("dur"). See Time</trainer/time>.
For example, the below would step the learning rate at 30%, 50%, and 90%
of the way through training:
from composer import Trainer
from composer.optim.scheduler import MultiStepScheduler

trainer = Trainer(
    model=model,
    train_dataloader=train_dataloader,
    max_duration='90ep',
    schedulers=MultiStepScheduler(
        milestones=['0.3dur', '0.5dur', '0.9dur'],
        gamma=0.1,
    ),
)
These schedulers typically read the state.timestamp to determine the
trainer's progress and return a learning rate multiplier. Inside the
Trainer, we convert these to ~torch.optim.lr_scheduler.LambdaLR
schedulers. By default, our schedulers are stepped at every batch.
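For instance, a custom stateless scheduler is just a callable that maps the
trainer state to a learning rate multiplier (a minimal sketch, mirroring the
example in the .ComposerScheduler docstring):

from composer import Trainer
from composer.core import State

def ten_epoch_decay_scheduler(state: State) -> float:
    # Multiplier applied to the optimizer's learning rate.
    return 1.0 if state.timestamp.epoch < 10 else 0.5

trainer = Trainer(
    ..., schedulers=ten_epoch_decay_scheduler,
)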
Below are the supported schedulers found at composer.optim.scheduler:

- StepScheduler
- MultiStepScheduler
- MultiStepWithWarmupScheduler
- ConstantScheduler
- LinearScheduler
- LinearWithWarmupScheduler
- ExponentialScheduler
- CosineAnnealingScheduler
- CosineAnnealingWithWarmupScheduler
- CosineAnnealingWarmRestartsScheduler
- PolynomialScheduler
- PolynomialWithWarmupScheduler
Note
Compared to PyTorch schedulers, .ComposerScheduler need not be provided
an optimizer directly. The trainer will handle binding the optimizer
when it compiles the scheduler later.
Scale Schedule Ratio
The Scale Schedule Ratio (SSR) scales the learning rate schedule by some
factor and is a powerful way to tradeoff training time and quality.
scale_schedule_ratio is an argument to the .Trainer.
Scale Schedule changes the training duration by a scaling factor and
scales the learning rate scheduler accordingly. This serves to vary the
training budget, making it possible to explore tradeoffs between cost
(measured in time or money) and model quality.
For example, the code below will scale the training time by half (to 10
epochs) and also scale the learning rate schedule.
from composer import Trainer
from composer.optim.scheduler import MultiStepScheduler

trainer = Trainer(
    ...,
    max_duration="20ep",
    schedulers=MultiStepScheduler(milestones=["10ep", "16ep"]),
    scale_schedule_ratio=0.5,
)

# or equivalently, with default SSR=1.0:
trainer = Trainer(
    ...,
    max_duration="10ep",
    schedulers=MultiStepScheduler(milestones=["5ep", "8ep"]),
)
Importantly, for our schedulers that have warmup, the warmup period is
not scaled by default. For example, if we apply scale_schedule_ratio=0.5
to:
from composer.optim.scheduler import MultiStepWithWarmupScheduler

scheduler = MultiStepWithWarmupScheduler(
    milestones=["10ep", "20ep"],
    t_warmup="4ep",
)
The resulting scheduler would warmup for 4 epochs and then have step
milestones at 5 epochs and 10 epochs.
To scale the warmup period as well, set scale_warmup=True. For example:
from composer.optim.scheduler import MultiStepWithWarmupScheduler

scheduler = MultiStepWithWarmupScheduler(
    milestones=["10ep", "20ep"],
    t_warmup="4ep",
    scale_warmup=True,
)
With scale_schedule_ratio=0.5, this scheduler will warmup for 2 epochs,
then step on 5 and 10 epochs.
| # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Stateless learning rate schedulers.
Stateless schedulers solve some of the problems associated with PyTorch's built-in schedulers provided in
:mod:`torch.optim.lr_scheduler`. The primary design goal of the schedulers provided in this module is to allow
schedulers to interface directly with Composer's :mod:`~composer.core.time` abstraction. This means that schedulers can
be configured using arbitrary but explicit time units.
See :class:`~.ComposerScheduler` for more information on stateless schedulers.
"""
import inspect
import logging
import math
import textwrap
import warnings
from typing import TYPE_CHECKING, List, Union
from torch.optim.lr_scheduler import LambdaLR
from composer.core import PyTorchScheduler, State, Time, TimeUnit
if TYPE_CHECKING:
from typing import Protocol
else:
# subclasses of Protocol cannot be instantiated in Python 3.8
Protocol = object
log = logging.getLogger(__name__)
__all__ = [
'ComposerScheduler', 'compile_composer_scheduler', 'StepScheduler', 'MultiStepScheduler', 'ConstantScheduler',
'LinearScheduler', 'ExponentialScheduler', 'CosineAnnealingScheduler', 'CosineAnnealingWarmRestartsScheduler',
'PolynomialScheduler', 'MultiStepWithWarmupScheduler', 'ConstantWithWarmupScheduler', 'LinearWithWarmupScheduler',
'CosineAnnealingWithWarmupScheduler', 'PolynomialWithWarmupScheduler'
]
class ComposerScheduler(Protocol):
r"""Specification for a stateless scheduler function.
While this specification is provided as a Python class, an ordinary function can implement this interface as long
as it matches the signature of this interface's :meth:`~.ComposerScheduler.__call__` method.
For example, a scheduler that halves the learning rate after 10 epochs could be written as:
.. code:: python
def ten_epoch_decay_scheduler(state: State) -> float:
if state.timestamp.epoch < 10:
return 1.0
return 0.5
# ten_epoch_decay_scheduler is a valid ComposerScheduler
trainer = Trainer(
schedulers=[ten_epoch_decay_scheduler],
...
)
In order to allow schedulers to be configured, schedulers may also written as callable classes:
.. code:: python
class VariableEpochDecayScheduler(ComposerScheduler):
def __init__(num_epochs: int):
self.num_epochs = num_epochs
def __call__(state: State) -> float:
if state.time.epoch < self.num_epochs:
return 1.0
return 0.5
ten_epoch_decay_scheduler = VariableEpochDecayScheduler(num_epochs=10)
# ten_epoch_decay_scheduler is also a valid ComposerScheduler
trainer = Trainer(
schedulers=[ten_epoch_decay_scheduler],
...
)
The constructions of ``ten_epoch_decay_scheduler`` in each of the examples above are equivalent. Note that neither
scheduler uses the ``scale_schedule_ratio`` parameter. As long as this parameter is not used when initializing
:class:`.Trainer`, it is not required that any schedulers implement that parameter.
.. automethod:: __call__
"""
def __call__(self, state: State, ssr: float = 1.0) -> float:
r"""Calculate the current learning rate multiplier :math:`\alpha`.
A scheduler function should be a pure function that returns a multiplier to apply to the optimizer's provided
learning rate, given the current trainer state, and optionally a "scale schedule ratio" (SSR). A typical
implementation will read ``state.timestamp``, and possibly other fields like ``state.max_duration``, to determine
the trainer's latest temporal progress.
.. note::
All instances of :class:`~.ComposerScheduler` output a `multiplier` for the learning rate, rather than the
learning rate directly. By convention, we use the symbol :math:`\alpha` to refer to this multiplier. This
means that the learning rate :math:`\eta` at time :math:`t` can be represented as
:math:`\eta(t) = \eta_i \times \alpha(t)`, where :math:`\eta_i` represents the learning rate used to
initialize the optimizer.
.. note::
It is possible to use multiple schedulers, in which case their effects will stack multiplicatively.
The ``ssr`` param indicates that the schedule should be "stretched" accordingly. In symbolic terms, where
:math:`\alpha_\sigma(t)` represents the scheduler output at time :math:`t` using scale schedule ratio
:math:`\sigma`:
.. math::
\alpha_{\sigma}(t) = \alpha(t / \sigma)
Args:
state (State): The current Composer Trainer state.
ssr (float): The scale schedule ratio. In general, the learning rate computed by this
scheduler at time :math:`t` with an SSR of 1.0 should be the same as that computed by
this scheduler at time :math:`t \times s` with an SSR of :math:`s`. Default = ``1.0``.
Returns:
alpha (float): A multiplier to apply to the optimizer's provided learning rate.
"""
raise NotImplementedError
def _convert_time(time: Union[str, Time[int], Time[float]], state: State, ssr: float = 1.0) -> Time[int]:
if isinstance(time, str):
time = Time.from_timestring(time)
    assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
if time.unit == TimeUnit.DURATION:
if state.max_duration.unit == TimeUnit.EPOCH:
if state.dataloader_len is None:
raise RuntimeError('Cannot convert time, as state.dataloader_len is None.')
return Time(int(time.value * int(state.dataloader_len) * state.max_duration.value), TimeUnit.BATCH)
return Time(int(time.value * state.max_duration.value), state.max_duration.unit)
elif time.unit == TimeUnit.EPOCH:
# Epochs do not provide sufficient granularity for SSR scaling
# e.g. if max_duration = 1ep, then any SSR would result in a new duration of 0.
# so, convert the time into batches
if state.dataloader_len is None:
raise RuntimeError('Cannot convert time, as state.dataloader_len is None.')
time = Time(value=time.value * int(state.dataloader_len), unit=TimeUnit.BATCH)
return Time(value=int(time.value * ssr), unit=time.unit)
def compile_composer_scheduler(scheduler: ComposerScheduler, state: State, ssr: float = 1.0) -> PyTorchScheduler:
"""Converts a stateless scheduler into a PyTorch scheduler object.
While the resulting scheduler provides a ``.step()`` interface similar to other PyTorch schedulers, the scheduler is
also given a bound reference to the current :class:`~composer.core.State`. This means that any internal state updated
by ``.step()`` can be ignored, and the scheduler can instead simply use the bound state to recalculate the current
learning rate.
Args:
scheduler (ComposerScheduler): A stateless scheduler, provided as a :class:`~.ComposerScheduler` object.
state (State): The Composer Trainer's state.
Returns:
compiled_scheduler (PyTorchScheduler): The scheduler, in a form compatible with PyTorch scheduler interfaces.
"""
optimizers = state.optimizers
    if len(optimizers) != 1:
raise NotImplementedError('Providing functional schedulers is unsupported with multiple optimizers.')
optimizer = optimizers[0]
scheduler_sig = inspect.signature(scheduler)
def scheduler_fn(epoch: int) -> float:
del epoch # unused. Provided by the pytorch LambdaLR
# if the ssr is 1.0, don't pass it to the scheduler. This allows users to pass in lambdas that only take
# one parameter -- the state
if len(scheduler_sig.parameters) == 1:
if ssr == 1.0:
return scheduler(state)
else:
raise ValueError(
textwrap.dedent(f"""\
Scheduler {scheduler} does not support `scale_schedule_ratio`.
To use `scale_schedule_ratio`, the scheduler must take two arguments (state, ssr)"""))
return scheduler(state, ssr)
lambda_scheduler = LambdaLR(optimizer, scheduler_fn)
return lambda_scheduler
class StepScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed intervals.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.StepLR` from PyTorch.
Decays the learning rate by a factor of ``gamma`` periodically, with a frequency determined by ``step_size``.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ {\text{floor}(t / \rho)}
Where :math:`\rho` represents the time between changes to the learning rate (the step size), and
:math:`\gamma` represents the multiplicative decay factor.
Args:
step_size (str | Time): Time between changes to the learning rate.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
"""
def __init__(self, step_size: Union[str, Time], gamma: float = 0.1):
self.step_size = step_size
self.gamma = gamma
def __call__(self, state: State, ssr: float = 1.0):
step_size = _convert_time(self.step_size, state, ssr=ssr)
current_time = state.timestamp.get(step_size.unit)
steps = int(current_time / step_size)
return self.gamma**steps
class MultiStepScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed milestones.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.MultiStepLR` from PyTorch.
Decays the learning rate by a factor of ``gamma`` whenever a time milestone in ``milestones`` is reached.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ x
Where :math:`x` represents the amount of milestones that have been reached, and :math:`\gamma` represents the
multiplicative decay factor.
Args:
milestones (List[str | Time]): Times at which the learning rate should change.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
"""
def __init__(self, milestones: List[Union[str, Time]], gamma: float = 0.1):
self.milestones = milestones
self.gamma = gamma
def __call__(self, state: State, ssr: float = 1.0):
milestones = [_convert_time(milestone, state, ssr=ssr) for milestone in self.milestones]
factor = 1.0
for milestone in milestones:
if state.timestamp >= milestone:
factor *= self.gamma
return factor
class ConstantScheduler(ComposerScheduler):
r"""Maintains a fixed learning rate.
This scheduler is based on :class:`~torch.optim.lr_scheduler.ConstantLR` from PyTorch.
The default settings for this scheduler simply maintain a learning rate factor of 1 for the entire training
duration. However, both the factor and the duration of this scheduler can be configured.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases} \alpha, & \text{if } t < t_{max} \\ 1.0 & \text{otherwise} \end{cases}
Where :math:`\alpha` represents the learning rate multiplier to maintain while this scheduler is active, and
:math:`t_{max}` represents the duration of this scheduler.
Args:
alpha (float): Learning rate multiplier to maintain while this scheduler is active. Default = ``1.0``.
t_max (str | Time): Duration of this scheduler. Default = ``"1dur"``.
"""
def __init__(self, alpha: float = 1.0, t_max: Union[str, Time] = '1dur') -> None:
self.alpha = alpha
self.t_max = t_max
def __call__(self, state: State, ssr: float = 1.0) -> float:
t_max = _convert_time(self.t_max, state, ssr=ssr)
if state.timestamp < t_max:
return self.alpha
return 1.0
class LinearScheduler(ComposerScheduler):
r"""Adjusts the learning rate linearly.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.LinearLR` from PyTorch.
.. warning::
Note that the defaults for this scheduler differ from the defaults for
:class:`~torch.optim.lr_scheduler.LinearLR`. The PyTorch scheduler, by default, linearly increases the learning
rate multiplier from 1.0 / 3 to 1.0, whereas this implementation, by default, linearly decreases the multiplier
        from 1.0 to 0.0.
Linearly adjusts the learning rate multiplier from ``alpha_i`` to ``alpha_f`` over ``t_{max}`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
        \alpha(t) = \alpha_i + (\alpha_f - \alpha_i) \times \tau
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`\alpha_i` represents the initial learning rate multiplier, :math:`\alpha_f` represents
the learning rate multiplier to decay to, and :math:`t_{max}` represents the duration of this scheduler.
Args:
alpha_i (float): Initial learning rate multiplier. Default = ``1.0``.
alpha_f (float): Final learning rate multiplier. Default = ``0.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
"""
def __init__(self, alpha_i: float = 1.0, alpha_f: float = 0.0, t_max: Union[str, Time] = '1dur'):
self.alpha_i = alpha_i
self.alpha_f = alpha_f
self.t_max = Time.from_timestring(t_max) if isinstance(t_max, str) else t_max
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = min(1.0, (current_time / t_max).value)
current_factor = self.alpha_i + frac_of_total * (self.alpha_f - self.alpha_i)
return current_factor
class ExponentialScheduler(ComposerScheduler):
r"""Decays the learning rate exponentially.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.ExponentialLR` from PyTorch.
Exponentially decays the learning rate such that it decays by a factor of ``gamma`` every ``decay_period`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \gamma ^ {t / \rho}
Where :math:`\rho` represents the decay period, and :math:`\gamma` represents the multiplicative decay factor.
Args:
decay_period (str | Time): Decay period. Default = ``"1ep"``.
gamma (float): Multiplicative decay factor.
"""
def __init__(self, gamma: float, decay_period: Union[str, Time] = '1ep'):
self.gamma = gamma
self.decay_period = decay_period
def __call__(self, state: State, ssr: float = 1.0):
decay_period = _convert_time(self.decay_period, state, ssr)
current_time_in_decay_units = state.timestamp.get(decay_period.unit)
return self.gamma**float(current_time_in_decay_units / decay_period)
def _cosine_anneal(x: float, min_y: float = 0.0, max_y: float = 1.0) -> float:
"""Implements a cosine decay curve.
Curve is cos(x) on domain [0, pi], stretched to the domain [0, 1] and range [min_y, max_y]. Additionally, param x is
clipped to the interval [0, 1]
"""
x = min(max(x, 0.0), 1.0)
return min_y + (max_y - min_y) * (1 + math.cos(x * math.pi)) / 2
class CosineAnnealingScheduler(ComposerScheduler):
r"""Decays the learning rate according to the decreasing part of a cosine curve.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.CosineAnnealingLR` from PyTorch.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times \frac{1}{2} (1 + \cos(\pi \times \tau))
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`t_{max}`
represents the duration of this scheduler, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, t_max: Union[str, Time] = '1dur', alpha_f: float = 0.0):
self.t_max = t_max
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = (current_time / t_max).value
return _cosine_anneal(x=frac_of_total, min_y=self.alpha_f)
class CosineAnnealingWarmRestartsScheduler(ComposerScheduler):
r"""Cyclically decays the learning rate according to the decreasing part of a cosine curve.
.. seealso::
This scheduler is based on :class:`~torch.optim.lr_scheduler.CosineAnnealingWarmRestarts` from PyTorch.
This scheduler resembles a regular cosine annealing curve, as seen in :class:`~.CosineAnnealingScheduler`, except
that after the curve first completes ``t_0`` time, the curve resets to the start. The durations of subsequent cycles
are each multiplied by ``t_mult``.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times \frac{1}{2}(1 + \cos(\pi \times \tau_i))
Given :math:`\tau_i`, the fraction of time elapsed through the :math:`i^\text{th}` cycle, as:
.. math::
\tau_i = (t - \sum_{j=0}^{i-1} t_0 t_{mult}^j) / (t_0 t_{mult}^i)
Where :math:`t_0`
represents the period of the first cycle, :math:`t_{mult}` represents the multiplier for the duration of successive
cycles, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
t_0 (str | Time): The period of the first cycle.
t_mult (float): The multiplier for the duration of successive cycles. Default = ``1.0``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, t_0: Union[str, Time], t_mult: float = 1.0, alpha_f: float = 0.0):
self.t_0 = t_0
self.t_mult = t_mult
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_0 = _convert_time(self.t_0, state, ssr=ssr)
current_interval_len = t_0
current_interval_end = t_0
while current_interval_end <= state.timestamp.get(current_interval_end.unit):
if current_interval_len.value == 0:
raise ValueError(
'Interval between restarts for cosine annealing/warm restarts scheduler has decayed to 0.')
current_interval_len = Time(value=int(self.t_mult * current_interval_len.value),
unit=current_interval_len.unit)
current_interval_end += current_interval_len
current_interval_start = current_interval_end - current_interval_len
frac_of_current_interval = ((state.timestamp.get(t_0.unit) - current_interval_start) /
current_interval_len).value
return _cosine_anneal(x=frac_of_current_interval, min_y=self.alpha_f)
class PolynomialScheduler(ComposerScheduler):
r"""Sets the learning rate to be proportional to a power of the fraction of training time left.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \alpha_f + (1 - \alpha_f) \times (1 - \tau) ^ {\kappa}
Given :math:`\tau`, the fraction of time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau = t / t_{max}
Where :math:`\kappa`
represents the exponent to be used for the proportionality relationship, :math:`t_{max}` represents the duration of
this scheduler, and :math:`\alpha_f` represents the learning rate multiplier to decay to.
Args:
power (float): The exponent to be used for the proportionality relationship.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
"""
def __init__(self, power: float, t_max: Union[str, Time] = '1dur', alpha_f: float = 0.0):
self.t_max = t_max
self.power = power
self.alpha_f = alpha_f
def __call__(self, state: State, ssr: float = 1.0):
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_max.unit)
frac_of_total = (current_time / t_max).value
coeff = (1 - frac_of_total)**self.power
current_factor = self.alpha_f + coeff * (1.0 - self.alpha_f)
return current_factor
def _raise_if_warmup_and_max_duration_incompatible(t_warmup: Union[str, Time], t_max: Union[str, Time]):
if isinstance(t_warmup, str):
t_warmup = Time.from_timestring(t_warmup)
if isinstance(t_max, str):
t_max = Time.from_timestring(t_max)
units_same = t_warmup.unit == t_max.unit
warmup_is_dur = t_warmup.unit == TimeUnit('dur')
batches_vs_epochs = (t_warmup.unit == TimeUnit('ba') and
t_max.unit == TimeUnit('ep')) or (t_warmup.unit == TimeUnit('ep') and
t_max.unit == TimeUnit('ba'))
if not units_same and not warmup_is_dur and not batches_vs_epochs:
raise ValueError(f'Cannot use warmup scheduler with max_duration {t_max} and warmup {t_warmup}. '
't_warmup units must be the same as max_duration units, warmup must be in units "dur", '
'max_duration must be "ba" and t_warmup "ep", or max_duration must be "ep" and t_warmup "ba".')
class MultiStepWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate discretely at fixed milestones, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.MultiStepScheduler`, with an added warmup.
Starts with a linear warmup over ``t_warmup`` time, then decays the learning rate by a factor of ``gamma``
whenever a time milestone in ``milestones`` is reached.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\gamma ^ x & \text{otherwise}
\end{cases}
Where :math:`t_{warmup}` represents the warmup time, :math:`x` represents the amount of milestones that have been
reached, and :math:`\gamma` represents the multiplicative decay factor.
.. warning::
All milestones should be greater than ``t_warmup``; otherwise, they will have no effect on the computed learning
rate multiplier until the warmup has completed.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
milestones (List[str | Time]): Times at which the learning rate should change.
gamma (float): Multiplicative decay factor. Default = ``0.1``.
scale_warmup (float): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
milestones: List[Union[str, Time]],
gamma: float = 0.1,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.milestones = milestones
self.gamma = gamma
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
self.step_scheduler = MultiStepScheduler(milestones=milestones, gamma=gamma)
def __call__(self, state: State, ssr: float = 1.0):
        assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
return self.step_scheduler(state, ssr)
class ConstantWithWarmupScheduler(ComposerScheduler):
r"""Maintains a fixed learning rate, with an initial warmup.
This scheduler is based on :class:`~torch.optim.lr_scheduler.ConstantLR` from PyTorch, with an added warmup.
Starts with a linear warmup over ``t_warmup`` time, then simply maintains a learning rate factor of 1 for the entire training
duration. However, both the factor and the duration of this scheduler can be configured.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha, & \text{if } t < t_{max} \\
1.0 & \text{otherwise} \end{cases}
Where :math:`\alpha` represents the learning rate multiplier to maintain while this scheduler is active, and
:math:`t_{max}` represents the duration of this scheduler.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
alpha (float): Learning rate multiplier to maintain while this scheduler is active. Default = ``1.0``.
t_max (str | Time): Duration of this scheduler. Default = ``"1dur"``.
scale_warmup (float): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
alpha: float = 1.0,
t_max: Union[str, Time] = '1dur',
scale_warmup: bool = False) -> None:
self.t_warmup = t_warmup
self.alpha = alpha
self.t_max = t_max
self.scale_warmup = scale_warmup
self.scheduler = LinearWithWarmupScheduler(t_warmup=t_warmup,
alpha_i=alpha,
alpha_f=alpha,
t_max=t_max,
scale_warmup=scale_warmup)
def __call__(self, state: State, ssr: float = 1.0) -> float:
return self.scheduler(state, ssr)
class LinearWithWarmupScheduler(ComposerScheduler):
r"""Adjusts the learning rate linearly, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.LinearScheduler`, with an added warmup.
Linearly adjusts the learning rate multiplier from ``alpha_i`` to ``alpha_f`` over ``t_{max}`` time.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
            \alpha_i + (\alpha_f - \alpha_i) \times \tau_w & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / t_{max}
Where :math:`t_{warmup}` represents the warmup time, :math:`\alpha_i` represents the initial learning rate multiplier,
and :math:`\alpha_f` represents the learning rate multiplier to decay to, and :math:`t_{max}` represents the duration
of this scheduler.
.. warning::
By default, the initial warmup time is **not** scaled according to any provided scale schedule ratio! However, the duration of
the scheduler is still scaled accordingly. To achieve this, after warmup, the scheduler's "slope" will be
slightly distorted from what would otherwise be expected. To scale the entire schedule, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
alpha_i (float): Initial learning rate multiplier. Default = ``1.0``.
alpha_f (float): Final learning rate multiplier. Default = ``0.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
scale_warmup (float): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
alpha_i: float = 1.0,
alpha_f: float = 0.0,
t_max: Union[str, Time] = '1dur',
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.alpha_i = alpha_i
self.alpha_f = alpha_f
self.t_max = t_max
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=alpha_i, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
        assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
current_factor = self.alpha_i + frac_of_total * (self.alpha_f - self.alpha_i)
return current_factor
class CosineAnnealingWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate according to the decreasing part of a cosine curve, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.CosineAnnealingScheduler`, with an added warmup.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_f + (1 - \alpha_f) \times \frac{1}{2} (1 + \cos(\pi \times \tau_w)) & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / t_{max}
Where :math:`t_{warmup}` represents the warmup time, :math:`t_{max}` represents the duration of this scheduler, and
:math:`\alpha_f` represents the learning rate multiplier to decay to.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
scale_warmup (float): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
t_max: Union[str, Time] = '1dur',
alpha_f: float = 0.0,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.t_max = t_max
self.alpha_f = alpha_f
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
        assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
return _cosine_anneal(x=frac_of_total, min_y=self.alpha_f)
class PolynomialWithWarmupScheduler(ComposerScheduler):
r"""Decays the learning rate according to a power of the fraction of training time left, with an initial warmup.
.. seealso::
This scheduler is based on :class:`~.PolynomialScheduler`, with an added warmup.
Specifically, the learning rate multiplier :math:`\alpha` can be expressed as:
.. math::
\alpha(t) = \begin{cases}
t / t_{warmup}, & \text{if } t < t_{warmup} \\
\alpha_f + (1 - \alpha_f) \times (1 - \tau_w) ^ {\kappa} & \text{otherwise}
\end{cases}
Given :math:`\tau_w`, the fraction of post-warmup time elapsed (clipped to the interval :math:`[0, 1]`), as:
.. math::
\tau_w = (t - t_{warmup}) / t_{max}
Where :math:`\kappa` represents the exponent to be used for the proportionality relationship,
:math:`t_{warmup}` represents the warmup time, :math:`t_{max}` represents the duration of this scheduler, and
:math:`\alpha_f` represents the learning rate multiplier to decay to.
.. warning::
By default, initial warmup time is **not** scaled according to any provided scale schedule ratio.
To change this behavior, set ``scale_warmup=True``.
Args:
t_warmup (str | Time): Warmup time.
power (float): The exponent to be used for the proportionality relationship. Default = ``2.0``.
t_max (str | Time): The duration of this scheduler. Default = ``"1dur"``.
alpha_f (float): Learning rate multiplier to decay to. Default = ``0.0``.
scale_warmup (float): SSR also scales the warmup period. Default = ``False``.
"""
def __init__(self,
t_warmup: Union[str, Time],
power: float = 2.0,
t_max: Union[str, Time] = '1dur',
alpha_f: float = 0.0,
scale_warmup: bool = False):
self.t_warmup = t_warmup
self.power = power
self.t_max = t_max
self.alpha_f = alpha_f
self.scale_warmup = scale_warmup
self.warmup_scheduler = LinearScheduler(alpha_i=0.0, alpha_f=1.0, t_max=t_warmup)
def __call__(self, state: State, ssr: float = 1.0):
        assert state.max_duration is not None, 'max_duration should be set whenever schedulers are invoked'
_raise_if_warmup_and_max_duration_incompatible(self.t_warmup, state.max_duration)
t_warmup = _convert_time(self.t_warmup, state)
if t_warmup.value == 0:
warnings.warn(
textwrap.dedent("""\
The warmup duration is 0. If you specified warmup as a fraction of total
training duration, take note that the warmup duration is calculated in the
same unit as the trainer's max_duration parameter."""))
if state.timestamp < t_warmup:
if self.scale_warmup:
return self.warmup_scheduler(state, ssr)
return self.warmup_scheduler(state)
t_max = _convert_time(self.t_max, state, ssr=ssr)
current_time = state.timestamp.get(t_warmup.unit)
frac_of_total = ((current_time - t_warmup) / (t_max - t_warmup)).value if (t_max > t_warmup) else 0.0
frac_of_total = min(1.0, frac_of_total)
coeff = (1 - frac_of_total)**self.power
current_factor = self.alpha_f + coeff * (1.0 - self.alpha_f)
return current_factor |
|
mitogen-hq__mitogen | getting_started.rst | Tutorial | Generate getting started tutorial | BSD 3-Clause New or Revised License | mitogen-hq__mitogen/docs/getting_started.rst | [
"mitogen-hq__mitogen/mitogen/parent.py",
"mitogen-hq__mitogen/mitogen/core.py"
] | Getting Started
Warning
This section is incomplete.
Liability Waiver
Before proceeding, it is critical you understand what you're involving
yourself and possibly your team and its successors with:
[image]
- Constructing the most fundamental class, Broker
<mitogen.master.Broker>, causes a new thread to be spawned, exposing
a huge class of difficult to analyse behaviours that Python software
generally does not suffer from.
While every effort is made to hide this complexity, you should
expect threading-related encounters during development, and
crucially, years after your program reached production. See
troubleshooting for more information.
- While high-level abstractions are provided, they are only a
convenience, you must still understand
how Mitogen works <howitworks> before depending on it. Mitogen
interacts with many aspects of the operating system, threading, SSH,
sudo, sockets, TTYs, shell, Python runtime, and timing and ordering
uncertainty introduced through interaction with the network, GIL and
OS scheduling.
Knowledge of this domain is typically attained through painful years
of failed attempts hacking system-level programs, and learning
through continual suffering how to debug the atrocities left behind.
If you feel you lack resources or willpower to diagnose problems
independently, Mitogen is not appropriate, prefer a higher level
solution instead.
First Principles
Before starting, take a moment to reflect on writing a program that will
operate across machines and privilege domains:
- As with multithreaded programming, writing a program that spans
multiple hosts is exposed to many asynchrony issues. Unlike
multithreaded programming, the margin for unexpected failures is
much higher, even between only two peers, as communication may fail at
any moment, since that communication depends on the reliability of an
external network.
- Since a multi-host program always spans trust and privilege domains,
trust must be taken into consideration in your design from the
outset. Mitogen attempts to protect the consuming application by
default where possible, however it is paramount that trust
considerations are always in mind when exposing any privileged
functionality to a potentially untrusted network of peers.
A parent must always assume data received from a child is suspect,
and must not base privileged control decisions on that data. As a
small example, a parent should not form a command to execute in a
subprocess using strings received from a child; a short sketch after
this list illustrates the point.
- As the program spans multiple hosts, its design will benefit from a
strict separation of program and data. This entails avoiding some
common Python idioms that rely on its ability to manipulate
functions and closures as if they were data, such as passing a
lambda closed over some program state as a callback parameter.
In the general case this is both difficult and unsafe to support in
a distributed program, and so (for now at least) it should be
assumed this functionality is unlikely to appear in future.
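Returning to the trust point above, a minimal sketch of treating a
child-supplied string as data rather than as part of a command line; the
tar invocation and the names used here are purely illustrative:

import subprocess

def archive_path_from_child(untrusted_path):
    # Unsafe: the child would control part of a shell command line.
    #     subprocess.check_call('tar zcf backup.tgz %s' % untrusted_path, shell=True)
    # Better: pass the untrusted value as a single argv element, and still
    # validate it against whatever policy the parent enforces.
    subprocess.check_call(['tar', 'zcf', 'backup.tgz', untrusted_path])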
Broker And Router
[image]
mitogen.core
Execution starts when your program constructs a Broker and associated
Router. The broker is responsible for multiplexing IO to children from a
private thread, while in children, it is additionally responsible for
ensuring robust destruction if communication with the master is lost.
Router is responsible for receiving messages and dispatching them to a
callback from the broker thread (registered by add_handler()
<mitogen.core.Router.add_handler>), or forwarding them to a Stream
<mitogen.core.Stream>. See routing for an in-depth description. Router
also doubles as the entry point to Mitogen's public API:
>>> import mitogen.master
>>> broker = mitogen.master.Broker()
>>> router = mitogen.master.Router(broker)
>>> try:
...     # Your code here.
...     pass
... finally:
...     broker.shutdown()
As Python will not stop if threads still exist after the main thread
exits, Broker.shutdown must be called reliably at exit. Helpers are
provided by mitogen.utils to ensure Broker is reliably destroyed:
def do_mitogen_stuff(router):
    # Your code here.
    pass

mitogen.utils.run_with_router(do_mitogen_stuff)
If your program cannot live beneath mitogen.utils.run_with_router on the
stack, you must arrange for Broker.shutdown to be called anywhere the
main thread may exit.
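One possible arrangement (a sketch only; an equivalent try/finally works
just as well) is to register the shutdown with atexit immediately after
constructing the broker:

import atexit

import mitogen.master

broker = mitogen.master.Broker()
router = mitogen.master.Router(broker)

# Stop the broker thread however the main thread ends up exiting.
atexit.register(broker.shutdown)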
Enable Logging
Mitogen makes heavy use of the logging package, both for child stdio
redirection, and soft errors and warnings that may be generated.
You should always configure the logging package in any program that
integrates Mitogen. If your program does not otherwise use the logging
package, a basic configuration can be performed by calling
mitogen.utils.log_to_file:
>>> import mitogen.utils
# Errors, warnings, and child stdio will be written to stderr.
>>> mitogen.utils.log_to_file()
Additionally, if your program has logging.DEBUG as the default logging
level, you may wish to update its configuration to restrict the mitogen
logger to logging.INFO, otherwise vast amounts of output will be
generated by default.
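For example, a small sketch using the standard logging API:

import logging

# Keep the application at DEBUG, but quieten Mitogen's own loggers.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('mitogen').setLevel(logging.INFO)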
Logging Environment Variables
MITOGEN_LOG_LEVEL
Overrides the logging package log level set by any call to
mitogen.utils.log_to_file. Defaults to INFO.
If set to IO, equivalent to DEBUG but additionally enables IO
logging for any call to mitogen.utils.log_to_file. IO logging
produces verbose records of any IO interaction, which is useful for
debugging hangs and deadlocks.
Logging Records
Messages received from a child context via mitogen.master.LogForwarder
receive extra attributes:
- `mitogen_context`: mitogen.parent.Context referring to the message
source.
- `mitogen_name`: original logger name in the source context.
- `mitogen_msg`: original message in the source context.
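These attributes are only present on forwarded records, so any formatter
or filter should read them defensively; a small illustrative sketch (the
class name is hypothetical):

import logging

class ForwardedPrefixFormatter(logging.Formatter):
    # Prefix messages that originated in a child context with that context.
    def format(self, record):
        text = logging.Formatter.format(self, record)
        ctx = getattr(record, 'mitogen_context', None)
        if ctx is not None:
            text = '[%s] %s' % (ctx, text)
        return text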
Creating A Context
Contexts are simply external Python programs over which your program has
control, and can execute code within. They can be created as
subprocesses on the local machine, in another user account via sudo, on
a remote machine via ssh, or any recursive combination of the above.
Now a Router exists, our first contexts <Context> can be created. To
demonstrate basic functionality, we will start with some
local() <Router.local> contexts created as subprocesses:
>>> local = router.local()
>>> local_with_name = router.local(remote_name='i-have-a-name')
Examination of the system process list with the pstree utility reveals
the resulting process hierarchy:
| | \-+= 27660 dmw python
| | |--- 27661 dmw mitogen:[email protected]:27660
| | \--- 27663 dmw mitogen:i-have-a-name
Both contexts are visible as subprocesses of the interactive Python
interpreter, with their argv[0] including a description of their
identity. To aid systems administrators in identifying errant software
running on their machines, the default remote_name includes the location
of the program that started the context, however as shown, this can be
overridden.
Note
Presently contexts are constructed in a blocking manner on the thread
that invoked the context factory <context-factories>. In a future
release, the factory will instead return immediately, and construction
will happen asynchronously on the broker thread.
Calling A Function
mitogen.parent
Now that some contexts exist, it is time to execute code in them. Any
regular function, static method, or class method reachable directly from
module scope may be used, including built-in functions such as
time.time.
The Context.call method is used to execute a function and block the
caller until the return value is available or an exception is raised:
>>> import time
>>> import os
>>> # Returns the current time.
>>> print('Time in remote context:', local.call(time.time))
>>> try:
...     # Raises OSError.
...     local.call(os.chdir, '/nonexistent')
... except mitogen.core.CallError as e:
...     print('Call failed:', str(e))
It is a simple wrapper around the more flexible Context.call_async,
which immediately returns a Receiver <mitogen.core.Receiver> wired up to
receive the return value instead. A receiver may simply be discarded,
kept around indefinitely without ever reading its result, or used to
wait on the results from several calls. Here
get() <mitogen.core.Receiver.get> is called to block the thread until
the result arrives:
>>> call = local.call_async(time.time)
>>> msg = call.get()
>>> print(msg.unpickle())
1507292737.75547
Running User Functions
So far we have used the interactive interpreter to call some standard
library functions, but since the source code typed at the interpreter
cannot be recovered, Mitogen is unable to execute functions defined in
this way.
We must therefore continue by writing our code as a script:
# first-script.py
import mitogen.utils

def my_first_function():
    print('Hello from remote context!')
    return 123

def main(router):
    local = router.local()
    print(local.call(my_first_function))

if __name__ == '__main__':
    mitogen.utils.log_to_file("mitogen.log")
    mitogen.utils.run_with_router(main)
Let's try running it:
$ python first-script.py
19:11:32 I mitogen.ctx.local.32466: stdout: Hello from remote context!
123
Waiting On Multiple Calls
Using Context.call_async it is possible to start multiple function calls
then sleep waiting for responses as they are available. This makes it
trivial to run tasks in parallel across processes (including remote
processes) without the need for writing asynchronous code:
import mitogen.select

hostnames = ['host1', 'host2', 'host3', 'host4']
contexts = [router.ssh(hostname=hn) for hn in hostnames]
calls = [context.call_async(my_func) for context in contexts]

for msg in mitogen.select.Select(calls):
    print('Reply from context %d: %s' % (msg.src_id, msg.unpickle()))
Running Code That May Hang
When executing code that may hang due to, for example, talking to
network peers that may become unavailable, it is desirable to be able to
recover control in the case a remote call has hung.
By specifying the timeout parameter to Receiver.get on the receiver
returned by Context.call_async, it becomes possible to wait for a
function to complete, but time out if its result does not become
available.
When a context has become hung like this, it is still possible to
gracefully terminate it using the Context.shutdown method. This method
sends a shutdown message to the target process, where its IO multiplexer
thread can still process it independently of the hung function running
on the target's main thread.
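A sketch combining both ideas; the function name and the 30 second
timeout are illustrative:

import mitogen.core

recv = context.call_async(function_that_may_hang)
try:
    print(recv.get(timeout=30.0).unpickle())
except mitogen.core.TimeoutError:
    # Stop waiting and ask the stuck context to exit gracefully.
    context.shutdown()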
Recovering Mitogen Object References In Children
@mitogen.core.takes_econtext
def func1(a, b, econtext):
    ...

@mitogen.core.takes_router
def func2(a, b, router):
    ...
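When a function decorated like this is invoked via Context.call, the
extra keyword argument is supplied automatically in the child:
takes_econtext passes the child's ExternalContext, and takes_router
passes the child's Router, allowing the function to make further Mitogen
calls from inside the child.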
Recursion
Let's try something a little more complex:
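The original text stops here; as a sketch of the idea (hostnames are
illustrative), connection methods accept a via= parameter, so a new
connection can be made from an existing context rather than directly
from the master:

import getpass

bastion = router.ssh(hostname='bastion')
app1 = router.ssh(hostname='app1', via=bastion)
root = router.sudo(via=app1)

# Runs as root on app1, reached through bastion.
print(root.call(getpass.getuser))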
RPC Serialization Rules
The following built-in types may be used as parameters or return values
in remote procedure calls:
- bool
- bytes (str on Python 2.x)
- dict
- int
- list
- long
- tuple
- unicode (str on Python 3.x)
User-defined types may not be used, except for:
- mitogen.core.Blob
- mitogen.core.Secret
- mitogen.core.CallError
- mitogen.core.Context
- mitogen.core.Sender
Subclasses of built-in types must be undecorated using
mitogen.utils.cast.
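For example, a sketch of stripping a dict subclass before use in a call
(Settings and some_function are hypothetical names):

import mitogen.utils

class Settings(dict):
    pass

settings = Settings(path='/tmp/build')
# cast() reduces the subclass to a plain built-in type before serialization.
context.call(some_function, mitogen.utils.cast(settings))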
Test Your Design
tc qdisc add dev eth0 root netem delay 250ms
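This command (run as root on a Linux machine) uses the netem queueing
discipline to add 250 ms of artificial latency to eth0, which helps
expose ordering and timeout assumptions in a design before it meets a
genuinely slow or unreliable network.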
Troubleshooting
Warning
This section is incomplete.
A typical example is a hang due to your application's main thread
exiting, perhaps because of an unhandled exception, without first
arranging for any Broker <mitogen.master.Broker> to be shut down
gracefully.
Another example would be your main thread hanging indefinitely because a
bug in Mitogen fails to notice that an event (such as RPC completion)
your thread is waiting for will never complete. Solving this kind of
hang is a work in progress.
router.enable_debug()
| # Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!mitogen: minify_safe
"""
This module defines functionality common to master and parent processes. It is
sent to any child context that is due to become a parent, due to recursive
connection.
"""
import codecs
import errno
import fcntl
import getpass
import heapq
import inspect
import logging
import os
import re
import signal
import socket
import struct
import subprocess
import sys
import termios
import textwrap
import threading
import zlib
# Absolute imports for <2.5.
select = __import__('select')
try:
import thread
except ImportError:
import threading as thread
import mitogen.core
from mitogen.core import b
from mitogen.core import bytes_partition
from mitogen.core import IOLOG
LOG = logging.getLogger(__name__)
# #410: we must avoid the use of socketpairs if SELinux is enabled.
try:
fp = open('/sys/fs/selinux/enforce', 'rb')
try:
SELINUX_ENABLED = bool(int(fp.read()))
finally:
fp.close()
except IOError:
SELINUX_ENABLED = False
try:
next
except NameError:
# Python 2.4/2.5
from mitogen.core import next
itervalues = getattr(dict, 'itervalues', dict.values)
if mitogen.core.PY3:
xrange = range
closure_attr = '__closure__'
IM_SELF_ATTR = '__self__'
else:
closure_attr = 'func_closure'
IM_SELF_ATTR = 'im_self'
try:
SC_OPEN_MAX = os.sysconf('SC_OPEN_MAX')
except ValueError:
SC_OPEN_MAX = 1024
BROKER_SHUTDOWN_MSG = (
'Connection cancelled because the associated Broker began to shut down.'
)
OPENPTY_MSG = (
"Failed to create a PTY: %s. It is likely the maximum number of PTYs has "
"been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS "
"X, the 'kernel.pty.max' sysctl on Linux, or modifying your configuration "
"to avoid PTY use."
)
SYS_EXECUTABLE_MSG = (
"The Python sys.executable variable is unset, indicating Python was "
"unable to determine its original program name. Unless explicitly "
"configured otherwise, child contexts will be started using "
"'/usr/bin/python'"
)
_sys_executable_warning_logged = False
def _ioctl_cast(n):
"""
Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is
signed. Until 2.5 Python exclusively implemented the BSD behaviour,
preventing use of large unsigned int requests like the TTY layer uses
below. So on 2.4, we cast our unsigned to look like signed for Python.
"""
if sys.version_info < (2, 5):
n, = struct.unpack('i', struct.pack('I', n))
return n
# If not :data:`None`, called prior to exec() of any new child process. Used by
# :func:`mitogen.utils.reset_affinity` to allow the child to be freely
# scheduled.
_preexec_hook = None
# Get PTY number; asm-generic/ioctls.h
LINUX_TIOCGPTN = _ioctl_cast(2147767344)
# Lock/unlock PTY; asm-generic/ioctls.h
LINUX_TIOCSPTLCK = _ioctl_cast(1074025521)
IS_LINUX = os.uname()[0] == 'Linux'
SIGNAL_BY_NUM = dict(
(getattr(signal, name), name)
for name in sorted(vars(signal), reverse=True)
if name.startswith('SIG') and not name.startswith('SIG_')
)
_core_source_lock = threading.Lock()
_core_source_partial = None
def get_log_level():
return (LOG.getEffectiveLevel() or logging.INFO)
def get_sys_executable():
"""
Return :data:`sys.executable` if it is set, otherwise return
``"/usr/bin/python"`` and log a warning.
"""
if sys.executable:
return sys.executable
global _sys_executable_warning_logged
if not _sys_executable_warning_logged:
LOG.warn(SYS_EXECUTABLE_MSG)
_sys_executable_warning_logged = True
return '/usr/bin/python'
def _get_core_source():
"""
In non-masters, simply fetch the cached mitogen.core source code via the
import mechanism. In masters, this function is replaced with a version that
performs minification directly.
"""
return inspect.getsource(mitogen.core)
def get_core_source_partial():
"""
_get_core_source() is expensive, even with @lru_cache in minify.py, threads
can enter it simultaneously causing severe slowdowns.
"""
global _core_source_partial
if _core_source_partial is None:
_core_source_lock.acquire()
try:
if _core_source_partial is None:
_core_source_partial = PartialZlib(
_get_core_source().encode('utf-8')
)
finally:
_core_source_lock.release()
return _core_source_partial
def get_default_remote_name():
"""
Return the default name appearing in argv[0] of remote machines.
"""
s = u'%s@%s:%d'
s %= (getpass.getuser(), socket.gethostname(), os.getpid())
# In mixed UNIX/Windows environments, the username may contain slashes.
return s.translate({
ord(u'\\'): ord(u'_'),
ord(u'/'): ord(u'_')
})
def is_immediate_child(msg, stream):
"""
Handler policy that requires messages to arrive only from immediately
connected children.
"""
return msg.src_id == stream.protocol.remote_id
def flags(names):
"""
Return the result of ORing a set of (space separated) :py:mod:`termios`
module constants together.
"""
return sum(getattr(termios, name, 0)
for name in names.split())
def cfmakeraw(tflags):
"""
Given a list returned by :py:func:`termios.tcgetattr`, return a list
modified in a manner similar to the `cfmakeraw()` C library function, but
additionally disabling local echo.
"""
# BSD: github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162
# Linux: github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags
iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK '
'ISTRIP INLCR ICRNL IXON IGNPAR')
iflag &= ~flags('IGNBRK BRKINT PARMRK')
oflag &= ~flags('OPOST')
lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG '
'IEXTEN NOFLSH TOSTOP PENDIN')
cflag &= ~flags('CSIZE PARENB')
cflag |= flags('CS8 CREAD')
return [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
def disable_echo(fd):
old = termios.tcgetattr(fd)
new = cfmakeraw(old)
flags = getattr(termios, 'TCSASOFT', 0)
if not mitogen.core.IS_WSL:
# issue #319: Windows Subsystem for Linux as of July 2018 throws EINVAL
# if TCSAFLUSH is specified.
flags |= termios.TCSAFLUSH
termios.tcsetattr(fd, flags, new)
def create_socketpair(size=None):
"""
Create a :func:`socket.socketpair` for use as a child's UNIX stdio
channels. As socketpairs are bidirectional, they are economical on file
descriptor usage as one descriptor can be used for ``stdin`` and
``stdout``. As they are sockets their buffers are tunable, allowing large
buffers to improve file transfer throughput and reduce IO loop iterations.
"""
if size is None:
size = mitogen.core.CHUNK_SIZE
parentfp, childfp = socket.socketpair()
for fp in parentfp, childfp:
fp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size)
return parentfp, childfp
def create_best_pipe(escalates_privilege=False):
"""
By default we prefer to communicate with children over a UNIX socket, as a
single file descriptor can represent bidirectional communication, and a
cross-platform API exists to align buffer sizes with the needs of the
library.
SELinux prevents us setting up a privileged process to inherit an AF_UNIX
socket, a facility explicitly designed as a better replacement for pipes,
because at some point in the mid 90s it might have been commonly possible
for AF_INET sockets to end up undesirably connected to a privileged
process, so let's make up arbitrary rules breaking all sockets instead.
If SELinux is detected, fall back to using pipes.
:param bool escalates_privilege:
If :data:`True`, the target program may escalate privileges, causing
SELinux to disconnect AF_UNIX sockets, so avoid those.
:returns:
`(parent_rfp, child_wfp, child_rfp, parent_wfp)`
"""
if (not escalates_privilege) or (not SELINUX_ENABLED):
parentfp, childfp = create_socketpair()
return parentfp, childfp, childfp, parentfp
parent_rfp, child_wfp = mitogen.core.pipe()
try:
child_rfp, parent_wfp = mitogen.core.pipe()
return parent_rfp, child_wfp, child_rfp, parent_wfp
except:
parent_rfp.close()
child_wfp.close()
raise
def popen(**kwargs):
"""
Wrap :class:`subprocess.Popen` to ensure any global :data:`_preexec_hook`
is invoked in the child.
"""
real_preexec_fn = kwargs.pop('preexec_fn', None)
def preexec_fn():
if _preexec_hook:
_preexec_hook()
if real_preexec_fn:
real_preexec_fn()
return subprocess.Popen(preexec_fn=preexec_fn, **kwargs)
def create_child(args, merge_stdio=False, stderr_pipe=False,
escalates_privilege=False, preexec_fn=None):
"""
Create a child process whose stdin/stdout is connected to a socket.
:param list args:
Program argument vector.
:param bool merge_stdio:
If :data:`True`, arrange for `stderr` to be connected to the `stdout`
socketpair, rather than inherited from the parent process. This may be
necessary to ensure that no TTY is connected to any stdio handle, for
instance when using LXC.
:param bool stderr_pipe:
If :data:`True` and `merge_stdio` is :data:`False`, arrange for
`stderr` to be connected to a separate pipe, to allow any ongoing debug
logs generated by e.g. SSH to be output as the session progresses,
without interfering with `stdout`.
:param bool escalates_privilege:
If :data:`True`, the target program may escalate privileges, causing
SELinux to disconnect AF_UNIX sockets, so avoid those.
:param function preexec_fn:
If not :data:`None`, a function to run within the post-fork child
before executing the target program.
:returns:
:class:`Process` instance.
"""
parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe(
escalates_privilege=escalates_privilege
)
stderr = None
stderr_r = None
if merge_stdio:
stderr = child_wfp
elif stderr_pipe:
stderr_r, stderr = mitogen.core.pipe()
mitogen.core.set_cloexec(stderr_r.fileno())
try:
proc = popen(
args=args,
stdin=child_rfp,
stdout=child_wfp,
stderr=stderr,
close_fds=True,
preexec_fn=preexec_fn,
)
except:
child_rfp.close()
child_wfp.close()
parent_rfp.close()
parent_wfp.close()
if stderr_pipe:
stderr.close()
stderr_r.close()
raise
child_rfp.close()
child_wfp.close()
if stderr_pipe:
stderr.close()
return PopenProcess(
proc=proc,
stdin=parent_wfp,
stdout=parent_rfp,
stderr=stderr_r,
)
def _acquire_controlling_tty():
os.setsid()
if sys.platform in ('linux', 'linux2'):
# On Linux, the controlling tty becomes the first tty opened by a
# process lacking any prior tty.
os.close(os.open(os.ttyname(2), os.O_RDWR))
if hasattr(termios, 'TIOCSCTTY') and not mitogen.core.IS_WSL:
# #550: prehistoric WSL does not like TIOCSCTTY.
# On BSD an explicit ioctl is required. For some inexplicable reason,
# Python 2.6 on Travis also requires it.
fcntl.ioctl(2, termios.TIOCSCTTY)
def _linux_broken_devpts_openpty():
"""
#462: On broken Linux hosts with mismatched configuration (e.g. old
/etc/fstab template installed), /dev/pts may be mounted without the gid=
mount option, causing new slave devices to be created with the group ID of
the calling process. This upsets glibc, whose openpty() is required by
specification to produce a slave owned by a special group ID (which is
always the 'tty' group).
Glibc attempts to use "pt_chown" to fix ownership. If that fails, it
chown()s the PTY directly, which fails due to non-root, causing openpty()
to fail with EPERM ("Operation not permitted"). Since we don't need the
magical TTY group to run sudo and su, open the PTY ourselves in this case.
"""
master_fd = None
try:
# Opening /dev/ptmx causes a PTY pair to be allocated, and the
# corresponding slave /dev/pts/* device to be created, owned by UID/GID
# matching this process.
master_fd = os.open('/dev/ptmx', os.O_RDWR)
# Clear the lock bit from the PTY. This is a prehistoric feature from a
# time when slave device files were persistent.
fcntl.ioctl(master_fd, LINUX_TIOCSPTLCK, struct.pack('i', 0))
# Since v4.13 TIOCGPTPEER exists to open the slave in one step, but we
# must support older kernels. Ask for the PTY number.
pty_num_s = fcntl.ioctl(master_fd, LINUX_TIOCGPTN,
struct.pack('i', 0))
pty_num, = struct.unpack('i', pty_num_s)
pty_name = '/dev/pts/%d' % (pty_num,)
# Now open it with O_NOCTTY to ensure it doesn't change our controlling
# TTY. Otherwise when we close the FD we get killed by the kernel, and
# the child we spawn that should really attach to it will get EPERM
# during _acquire_controlling_tty().
slave_fd = os.open(pty_name, os.O_RDWR|os.O_NOCTTY)
return master_fd, slave_fd
except OSError:
if master_fd is not None:
os.close(master_fd)
e = sys.exc_info()[1]
raise mitogen.core.StreamError(OPENPTY_MSG, e)
def openpty():
"""
Call :func:`os.openpty`, raising a descriptive error if the call fails.
:raises mitogen.core.StreamError:
Creating a PTY failed.
:returns:
`(master_fp, slave_fp)` file-like objects.
"""
try:
master_fd, slave_fd = os.openpty()
except OSError:
e = sys.exc_info()[1]
if not (IS_LINUX and e.args[0] == errno.EPERM):
raise mitogen.core.StreamError(OPENPTY_MSG, e)
master_fd, slave_fd = _linux_broken_devpts_openpty()
master_fp = os.fdopen(master_fd, 'r+b', 0)
slave_fp = os.fdopen(slave_fd, 'r+b', 0)
disable_echo(master_fd)
disable_echo(slave_fd)
mitogen.core.set_block(slave_fd)
return master_fp, slave_fp
def tty_create_child(args):
"""
Create a child process whose stdin/stdout/stderr are connected to the slave
end of a new pseudo-terminal, and return a :class:`Process` whose stdio is
attached to the master end.
The child is created such that the pseudo-terminal becomes its controlling
TTY, ensuring access to /dev/tty returns a new file descriptor open on the
slave end.
:param list args:
Program argument vector.
:returns:
:class:`Process` instance.
"""
master_fp, slave_fp = openpty()
try:
proc = popen(
args=args,
stdin=slave_fp,
stdout=slave_fp,
stderr=slave_fp,
preexec_fn=_acquire_controlling_tty,
close_fds=True,
)
except:
master_fp.close()
slave_fp.close()
raise
slave_fp.close()
return PopenProcess(
proc=proc,
stdin=master_fp,
stdout=master_fp,
)
def hybrid_tty_create_child(args, escalates_privilege=False):
"""
Like :func:`tty_create_child`, except attach stdin/stdout to a socketpair
like :func:`create_child`, but leave stderr and the controlling TTY
attached to a TTY.
This permits high throughput communication with programs that are reached
via some program that requires a TTY for password input, like many
configurations of sudo. The UNIX TTY layer tends to have tiny (no more than
14KiB) buffers, forcing many IO loop iterations when transferring bulk
data, causing significant performance loss.
:param bool escalates_privilege:
If :data:`True`, the target program may escalate privileges, causing
SELinux to disconnect AF_UNIX sockets, so avoid those.
:param list args:
Program argument vector.
:returns:
:class:`Process` instance.
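A sketch of typical use (illustrative only; privilege-escalation connection
methods such as ``sudo`` normally drive this internally)::

    proc = hybrid_tty_create_child(['sudo', '-k', 'true'])
    # Password prompts arrive via proc.stderr (the PTY master), while
    # proc.stdin / proc.stdout carry bulk data over the socketpair/pipe.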
"""
master_fp, slave_fp = openpty()
try:
parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe(
escalates_privilege=escalates_privilege,
)
try:
mitogen.core.set_block(child_rfp)
mitogen.core.set_block(child_wfp)
proc = popen(
args=args,
stdin=child_rfp,
stdout=child_wfp,
stderr=slave_fp,
preexec_fn=_acquire_controlling_tty,
close_fds=True,
)
except:
parent_rfp.close()
child_wfp.close()
parent_wfp.close()
child_rfp.close()
raise
except:
master_fp.close()
slave_fp.close()
raise
slave_fp.close()
child_rfp.close()
child_wfp.close()
return PopenProcess(
proc=proc,
stdin=parent_wfp,
stdout=parent_rfp,
stderr=master_fp,
)
class Timer(object):
"""
Represents a future event.
"""
#: Set to :data:`False` if :meth:`cancel` has been called, or immediately
#: prior to being executed by :meth:`TimerList.expire`.
active = True
def __init__(self, when, func):
self.when = when
self.func = func
def __repr__(self):
return 'Timer(%r, %r)' % (self.when, self.func)
def __eq__(self, other):
return self.when == other.when
def __lt__(self, other):
return self.when < other.when
def __le__(self, other):
return self.when <= other.when
def cancel(self):
"""
Cancel this event. If it has not yet executed, it will not execute
during any subsequent :meth:`TimerList.expire` call.
"""
self.active = False
class TimerList(object):
"""
Efficiently manage a list of cancellable future events relative to wall
clock time. An instance of this class is installed as
:attr:`mitogen.master.Broker.timers` by default, and as
:attr:`mitogen.core.Broker.timers` in children after a call to
:func:`mitogen.parent.upgrade_router`.
You can use :class:`TimerList` to cause the broker to wake at arbitrary
future moments, useful for implementing timeouts and polling in an
asynchronous context.
:class:`TimerList` methods can only be called from asynchronous context,
for example via :meth:`mitogen.core.Broker.defer`.
The broker automatically adjusts its sleep delay according to the installed
timer list, and arranges for timers to expire via automatic calls to
:meth:`expire`. The main user interface to :class:`TimerList` is
:meth:`schedule`.
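A sketch of typical use from a non-broker thread (assumes an existing
``router`` whose broker has already been upgraded)::

    def timed_out():
        LOG.info('gave up waiting')

    broker = router.broker
    broker.defer(
        lambda: broker.timers.schedule(mitogen.core.now() + 30.0, timed_out)
    )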
"""
_now = mitogen.core.now
def __init__(self):
self._lst = []
def get_timeout(self):
"""
Return the floating point seconds until the next event is due.
:returns:
Floating point delay, or 0.0, or :data:`None` if no events are
scheduled.
"""
while self._lst and not self._lst[0].active:
heapq.heappop(self._lst)
if self._lst:
return max(0, self._lst[0].when - self._now())
def schedule(self, when, func):
"""
Schedule a future event.
:param float when:
UNIX time in seconds when event should occur.
:param callable func:
Callable to invoke on expiry.
:returns:
A :class:`Timer` instance, exposing :meth:`Timer.cancel`, which may
be used to cancel the future invocation.
"""
timer = Timer(when, func)
heapq.heappush(self._lst, timer)
return timer
def expire(self):
"""
Invoke callbacks for any events in the past.
"""
now = self._now()
while self._lst and self._lst[0].when <= now:
timer = heapq.heappop(self._lst)
if timer.active:
timer.active = False
timer.func()
class PartialZlib(object):
"""
Because the mitogen.core source has a line appended to it during bootstrap,
it must be recompressed for each connection. This is not a problem for a
small number of connections, but it amounts to 30 seconds CPU time by the
time 500 targets are in use.
For that reason, build a compressor containing mitogen.core and flush as
much of it as possible into an initial buffer. Then to append the custom
line, clone the compressor and compress just that line.
A full compression costs ~6ms on a modern machine; this method costs ~35
usec.
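A usage sketch (``core_src`` stands in for the mitogen.core source bytes,
and ``send_preamble()`` is a hypothetical helper)::

    pz = PartialZlib(core_src)
    for config in configs:
        line = ('ExternalContext(%r).main()\n' % (config,)).encode()
        send_preamble(pz.append(line))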
"""
def __init__(self, s):
self.s = s
if sys.version_info > (2, 5):
self._compressor = zlib.compressobj(9)
self._out = self._compressor.compress(s)
self._out += self._compressor.flush(zlib.Z_SYNC_FLUSH)
else:
self._compressor = None
def append(self, s):
"""
Append the bytestring `s` to the compressor state and return the
final compressed output.
"""
if self._compressor is None:
return zlib.compress(self.s + s, 9)
else:
compressor = self._compressor.copy()
out = self._out
out += compressor.compress(s)
return out + compressor.flush()
def _upgrade_broker(broker):
"""
Extract the poller state from Broker and replace it with the industrial
strength poller for this OS. Must run on the Broker thread.
"""
# This function is deadly! The act of calling start_receive() generates log
# messages which must be silenced as the upgrade progresses, otherwise the
# poller state will change as it is copied, resulting in write fds that are
# lost. (Due to LogHandler->Router->Stream->Protocol->Broker->Poller, where
# Stream only calls start_transmit() when transitioning from empty to
# non-empty buffer. If the start_transmit() is lost, writes from the child
# hang permanently).
root = logging.getLogger()
old_level = root.level
root.setLevel(logging.CRITICAL)
try:
old = broker.poller
new = PREFERRED_POLLER()
for fd, data in old.readers:
new.start_receive(fd, data)
for fd, data in old.writers:
new.start_transmit(fd, data)
old.close()
broker.poller = new
finally:
root.setLevel(old_level)
broker.timers = TimerList()
LOG.debug('upgraded %r with %r (new: %d readers, %d writers; '
'old: %d readers, %d writers)', old, new,
len(new.readers), len(new.writers),
len(old.readers), len(old.writers))
@mitogen.core.takes_econtext
def upgrade_router(econtext):
if not isinstance(econtext.router, Router): # TODO
econtext.broker.defer(_upgrade_broker, econtext.broker)
econtext.router.__class__ = Router # TODO
econtext.router.upgrade(
importer=econtext.importer,
parent=econtext.parent,
)
def get_connection_class(name):
"""
Given the name of a Mitogen connection method, import its implementation
module and return its :class:`Connection` subclass.
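For example (a sketch; the corresponding module must be importable)::

    klass = get_connection_class(u'ssh')    # -> mitogen.ssh.Connection
    klass = get_connection_class(u'local')  # -> mitogen.parent.Connection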
"""
if name == u'local':
name = u'parent'
module = mitogen.core.import_module(u'mitogen.' + name)
return module.Connection
@mitogen.core.takes_econtext
def _proxy_connect(name, method_name, kwargs, econtext):
"""
Implements the target portion of Router._proxy_connect() by upgrading the
local process to a parent if it was not already, then calling back into
Router._connect() using the arguments passed to the parent's
Router.connect().
:returns:
Dict containing:
* ``id``: :data:`None`, or integer new context ID.
* ``name``: :data:`None`, or string name attribute of new Context.
* ``msg``: :data:`None`, or StreamError exception text.
"""
upgrade_router(econtext)
try:
context = econtext.router._connect(
klass=get_connection_class(method_name),
name=name,
**kwargs
)
except mitogen.core.StreamError:
return {
u'id': None,
u'name': None,
u'msg': 'error occurred on host %s: %s' % (
socket.gethostname(),
sys.exc_info()[1],
),
}
return {
u'id': context.context_id,
u'name': context.name,
u'msg': None,
}
def returncode_to_str(n):
"""
Parse and format a :func:`os.waitpid` exit status.
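For example::

    returncode_to_str(0)    # 'exited with return code 0'
    returncode_to_str(-15)  # e.g. 'exited due to signal 15 (SIGTERM)'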
"""
if n < 0:
return 'exited due to signal %d (%s)' % (-n, SIGNAL_BY_NUM.get(-n))
return 'exited with return code %d' % (n,)
class EofError(mitogen.core.StreamError):
"""
Raised by :class:`Connection` when an empty read is detected from the
remote process before bootstrap completes.
"""
# inherits from StreamError to maintain compatibility.
pass
class CancelledError(mitogen.core.StreamError):
"""
Raised by :class:`Connection` when :meth:`mitogen.core.Broker.shutdown` is
called before bootstrap completes.
"""
pass
class Argv(object):
"""
Wrapper to defer argv formatting when debug logging is disabled.
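For example::

    LOG.debug('running: %s', Argv(['echo', 'hello world']))
    # -> running: echo "hello world"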
"""
def __init__(self, argv):
self.argv = argv
must_escape = frozenset('\\$"`!')
must_escape_or_space = must_escape | frozenset(' ')
def escape(self, x):
if not self.must_escape_or_space.intersection(x):
return x
s = '"'
for c in x:
if c in self.must_escape:
s += '\\'
s += c
s += '"'
return s
def __str__(self):
return ' '.join(map(self.escape, self.argv))
class CallSpec(object):
"""
Wrapper to defer call argument formatting when debug logging is disabled.
"""
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def _get_name(self):
bits = [self.func.__module__]
if inspect.ismethod(self.func):
im_self = getattr(self.func, IM_SELF_ATTR)
bits.append(getattr(im_self, '__name__', None) or
getattr(type(im_self), '__name__', None))
bits.append(self.func.__name__)
return u'.'.join(bits)
def _get_args(self):
return u', '.join(repr(a) for a in self.args)
def _get_kwargs(self):
s = u''
if self.kwargs:
s = u', '.join('%s=%r' % (k, v) for k, v in self.kwargs.items())
if self.args:
s = u', ' + s
return s
def __repr__(self):
return '%s(%s%s)' % (
self._get_name(),
self._get_args(),
self._get_kwargs(),
)
class PollPoller(mitogen.core.Poller):
"""
Poller based on the POSIX :linux:man2:`poll` interface. Not available on
some versions of OS X, otherwise it is the preferred poller for small FD
counts, as there is no setup/teardown/configuration system call overhead.
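A minimal sketch of the generic poller interface it implements (``sock`` is
assumed to be an existing connected socket)::

    poller = PollPoller()
    try:
        poller.start_receive(sock.fileno(), data=sock)
        for obj in poller.poll(timeout=1.0):
            print('readable:', obj)
    finally:
        poller.close()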
"""
SUPPORTED = hasattr(select, 'poll')
_repr = 'PollPoller()'
def __init__(self):
super(PollPoller, self).__init__()
self._pollobj = select.poll()
# TODO: no proof we don't need writemask too
_readmask = (
getattr(select, 'POLLIN', 0) |
getattr(select, 'POLLHUP', 0)
)
def _update(self, fd):
mask = (((fd in self._rfds) and self._readmask) |
((fd in self._wfds) and select.POLLOUT))
if mask:
self._pollobj.register(fd, mask)
else:
try:
self._pollobj.unregister(fd)
except KeyError:
pass
def _poll(self, timeout):
if timeout:
timeout *= 1000
events, _ = mitogen.core.io_op(self._pollobj.poll, timeout)
for fd, event in events:
if event & self._readmask:
IOLOG.debug('%r: POLLIN|POLLHUP for %r', self, fd)
data, gen = self._rfds.get(fd, (None, None))
if gen and gen < self._generation:
yield data
if event & select.POLLOUT:
IOLOG.debug('%r: POLLOUT for %r', self, fd)
data, gen = self._wfds.get(fd, (None, None))
if gen and gen < self._generation:
yield data
class KqueuePoller(mitogen.core.Poller):
"""
Poller based on the FreeBSD/Darwin :freebsd:man2:`kqueue` interface.
"""
SUPPORTED = hasattr(select, 'kqueue')
_repr = 'KqueuePoller()'
def __init__(self):
super(KqueuePoller, self).__init__()
self._kqueue = select.kqueue()
self._changelist = []
def close(self):
super(KqueuePoller, self).close()
self._kqueue.close()
def _control(self, fd, filters, flags):
mitogen.core._vv and IOLOG.debug(
'%r._control(%r, %r, %r)', self, fd, filters, flags)
# TODO: at shutdown it is currently possible for KQ_EV_ADD/KQ_EV_DEL
# pairs to be pending after the associated file descriptor has already
# been closed. Fixing this requires maintaining extra state, or perhaps
# making fd closure the poller's responsibility. In the meantime,
# simply apply changes immediately.
# self._changelist.append(select.kevent(fd, filters, flags))
changelist = [select.kevent(fd, filters, flags)]
events, _ = mitogen.core.io_op(self._kqueue.control, changelist, 0, 0)
assert not events
def start_receive(self, fd, data=None):
mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
self, fd, data)
if fd not in self._rfds:
self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
self._rfds[fd] = (data or fd, self._generation)
def stop_receive(self, fd):
mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
if fd in self._rfds:
self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
del self._rfds[fd]
def start_transmit(self, fd, data=None):
mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
self, fd, data)
if fd not in self._wfds:
self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
self._wfds[fd] = (data or fd, self._generation)
def stop_transmit(self, fd):
mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
if fd in self._wfds:
self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
del self._wfds[fd]
def _poll(self, timeout):
changelist = self._changelist
self._changelist = []
events, _ = mitogen.core.io_op(self._kqueue.control,
changelist, 32, timeout)
for event in events:
fd = event.ident
if event.flags & select.KQ_EV_ERROR:
LOG.debug('ignoring stale event for fd %r: errno=%d: %s',
fd, event.data, errno.errorcode.get(event.data))
elif event.filter == select.KQ_FILTER_READ:
data, gen = self._rfds.get(fd, (None, None))
# Events can still be read for an already-discarded fd.
if gen and gen < self._generation:
mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
yield data
elif event.filter == select.KQ_FILTER_WRITE and fd in self._wfds:
data, gen = self._wfds.get(fd, (None, None))
if gen and gen < self._generation:
mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
yield data
class EpollPoller(mitogen.core.Poller):
"""
Poller based on the Linux :linux:man2:`epoll` interface.
"""
SUPPORTED = hasattr(select, 'epoll')
_repr = 'EpollPoller()'
def __init__(self):
super(EpollPoller, self).__init__()
self._epoll = select.epoll(32)
self._registered_fds = set()
def close(self):
super(EpollPoller, self).close()
self._epoll.close()
def _control(self, fd):
mitogen.core._vv and IOLOG.debug('%r._control(%r)', self, fd)
mask = (((fd in self._rfds) and select.EPOLLIN) |
((fd in self._wfds) and select.EPOLLOUT))
if mask:
if fd in self._registered_fds:
self._epoll.modify(fd, mask)
else:
self._epoll.register(fd, mask)
self._registered_fds.add(fd)
elif fd in self._registered_fds:
self._epoll.unregister(fd)
self._registered_fds.remove(fd)
def start_receive(self, fd, data=None):
mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
self, fd, data)
self._rfds[fd] = (data or fd, self._generation)
self._control(fd)
def stop_receive(self, fd):
mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
self._rfds.pop(fd, None)
self._control(fd)
def start_transmit(self, fd, data=None):
mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
self, fd, data)
self._wfds[fd] = (data or fd, self._generation)
self._control(fd)
def stop_transmit(self, fd):
mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
self._wfds.pop(fd, None)
self._control(fd)
_inmask = (getattr(select, 'EPOLLIN', 0) |
getattr(select, 'EPOLLHUP', 0))
def _poll(self, timeout):
the_timeout = -1
if timeout is not None:
the_timeout = timeout
events, _ = mitogen.core.io_op(self._epoll.poll, the_timeout, 32)
for fd, event in events:
if event & self._inmask:
data, gen = self._rfds.get(fd, (None, None))
if gen and gen < self._generation:
# Events can still be read for an already-discarded fd.
mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
yield data
if event & select.EPOLLOUT:
data, gen = self._wfds.get(fd, (None, None))
if gen and gen < self._generation:
mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
yield data
# 2.4 and 2.5 only had select.select() and select.poll().
for _klass in mitogen.core.Poller, PollPoller, KqueuePoller, EpollPoller:
if _klass.SUPPORTED:
PREFERRED_POLLER = _klass
# For processes that start many threads or connections, it's possible Latch
# will also get high-numbered FDs, and so select() becomes useless there too.
# So swap in our favourite poller.
if PollPoller.SUPPORTED:
mitogen.core.Latch.poller_class = PollPoller
else:
mitogen.core.Latch.poller_class = PREFERRED_POLLER
class LineLoggingProtocolMixin(object):
def __init__(self, **kwargs):
super(LineLoggingProtocolMixin, self).__init__(**kwargs)
self.logged_lines = []
self.logged_partial = None
def on_line_received(self, line):
self.logged_partial = None
self.logged_lines.append((mitogen.core.now(), line))
self.logged_lines[:] = self.logged_lines[-100:]
return super(LineLoggingProtocolMixin, self).on_line_received(line)
def on_partial_line_received(self, line):
self.logged_partial = line
return super(LineLoggingProtocolMixin, self).on_partial_line_received(line)
def on_disconnect(self, broker):
if self.logged_partial:
self.logged_lines.append((mitogen.core.now(), self.logged_partial))
self.logged_partial = None
super(LineLoggingProtocolMixin, self).on_disconnect(broker)
def get_history(streams):
history = []
for stream in streams:
if stream:
history.extend(getattr(stream.protocol, 'logged_lines', []))
history.sort()
s = b('\n').join(h[1] for h in history)
return mitogen.core.to_text(s)
class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol):
"""
Implement a delimited protocol where messages matching a set of regular
expressions are dispatched to individual handler methods. Input is
dispatched using :attr:`PATTERNS` and :attr:`PARTIAL_PATTERNS`, before
falling back to :meth:`on_unrecognized_line_received` and
:meth:`on_unrecognized_partial_line_received`.
"""
#: A sequence of 2-tuples of the form `(compiled pattern, method)` for
#: patterns that should be matched against complete (delimited) messages,
#: i.e. full lines.
PATTERNS = []
#: Like :attr:`PATTERNS`, but patterns that are matched against incomplete
#: lines.
PARTIAL_PATTERNS = []
def on_line_received(self, line):
super(RegexProtocol, self).on_line_received(line)
for pattern, func in self.PATTERNS:
match = pattern.search(line)
if match is not None:
return func(self, line, match)
return self.on_unrecognized_line_received(line)
def on_unrecognized_line_received(self, line):
LOG.debug('%s: (unrecognized): %s',
self.stream.name, line.decode('utf-8','replace'))
def on_partial_line_received(self, line):
super(RegexProtocol, self).on_partial_line_received(line)
LOG.debug('%s: (partial): %s',
self.stream.name, line.decode('utf-8','replace'))
for pattern, func in self.PARTIAL_PATTERNS:
match = pattern.search(line)
if match is not None:
return func(self, line, match)
return self.on_unrecognized_partial_line_received(line)
def on_unrecognized_partial_line_received(self, line):
LOG.debug('%s: (unrecognized partial): %s',
self.stream.name, line.decode('utf-8','replace'))
class BootstrapProtocol(RegexProtocol):
"""
Respond to stdout of a child during bootstrap. Wait for :attr:`EC0_MARKER`
to be written by the first stage to indicate it can receive the bootstrap,
then await :attr:`EC1_MARKER` to indicate success, and
:class:`MitogenProtocol` can be enabled.
"""
#: Sentinel value emitted by the first stage to indicate it is ready to
#: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have
#: length of at least `max(len('password'), len('debug1:'))`
EC0_MARKER = b('MITO000')
EC1_MARKER = b('MITO001')
EC2_MARKER = b('MITO002')
def __init__(self, broker):
super(BootstrapProtocol, self).__init__()
self._writer = mitogen.core.BufferedWriter(broker, self)
def on_transmit(self, broker):
self._writer.on_transmit(broker)
def _on_ec0_received(self, line, match):
LOG.debug('%r: first stage started successfully', self)
self._writer.write(self.stream.conn.get_preamble())
def _on_ec1_received(self, line, match):
LOG.debug('%r: first stage received mitogen.core source', self)
def _on_ec2_received(self, line, match):
LOG.debug('%r: new child booted successfully', self)
self.stream.conn._complete_connection()
return False
def on_unrecognized_line_received(self, line):
LOG.debug('%s: stdout: %s', self.stream.name,
line.decode('utf-8','replace'))
PATTERNS = [
(re.compile(EC0_MARKER), _on_ec0_received),
(re.compile(EC1_MARKER), _on_ec1_received),
(re.compile(EC2_MARKER), _on_ec2_received),
]
class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol):
"""
For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master
FD exists that cannot be closed, and to which SSH or sudo may continue
writing log messages.
The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to
processes whose controlling TTY is the slave whose master side was closed.
LogProtocol takes over this FD and creates log messages for anything
written to it.
"""
def on_line_received(self, line):
"""
Read a line, decode it as UTF-8, and log it.
"""
super(LogProtocol, self).on_line_received(line)
LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8','replace'))
class MitogenProtocol(mitogen.core.MitogenProtocol):
"""
Extend core.MitogenProtocol to cause SHUTDOWN to be sent to the child
during graceful shutdown.
"""
def on_shutdown(self, broker):
"""
Respond to the broker's request for the stream to shut down by sending
SHUTDOWN to the child.
"""
LOG.debug('%r: requesting child shutdown', self)
self._send(
mitogen.core.Message(
src_id=mitogen.context_id,
dst_id=self.remote_id,
handle=mitogen.core.SHUTDOWN,
)
)
class Options(object):
name = None
#: The path to the remote Python interpreter.
python_path = get_sys_executable()
#: Maximum time to wait for a connection attempt.
connect_timeout = 30.0
#: True to cause context to write verbose /tmp/mitogen.<pid>.log.
debug = False
#: True to cause context to write /tmp/mitogen.stats.<pid>.<thread>.log.
profiling = False
#: True if unidirectional routing is enabled in the new child.
unidirectional = False
#: Passed via Router wrapper methods, must eventually be passed to
#: ExternalContext.main().
max_message_size = None
#: Remote name.
remote_name = None
#: Derived from :py:attr:`connect_timeout`; absolute floating point
#: UNIX timestamp after which the connection attempt should be abandoned.
connect_deadline = None
def __init__(self, max_message_size, name=None, remote_name=None,
python_path=None, debug=False, connect_timeout=None,
profiling=False, unidirectional=False, old_router=None):
self.name = name
self.max_message_size = max_message_size
if python_path:
self.python_path = python_path
if connect_timeout:
self.connect_timeout = connect_timeout
if remote_name is None:
remote_name = get_default_remote_name()
if '/' in remote_name or '\\' in remote_name:
raise ValueError('remote_name= cannot contain slashes')
if remote_name:
self.remote_name = mitogen.core.to_text(remote_name)
self.debug = debug
self.profiling = profiling
self.unidirectional = unidirectional
self.max_message_size = max_message_size
self.connect_deadline = mitogen.core.now() + self.connect_timeout
class Connection(object):
"""
Manage the lifetime of a set of :class:`Streams <Stream>` connecting to a
remote Python interpreter, including bootstrap, disconnection, and external
tool integration.
Base for streams capable of starting children.
"""
options_class = Options
#: The protocol attached to stdio of the child.
stream_protocol_class = BootstrapProtocol
#: The protocol attached to stderr of the child.
diag_protocol_class = LogProtocol
#: :class:`Process`
proc = None
#: :class:`mitogen.core.Stream` with sides connected to stdin/stdout.
stdio_stream = None
#: If `proc.stderr` is set, referencing either a plain pipe or the
#: controlling TTY, this references the corresponding
#: :class:`LogProtocol`'s stream, allowing it to be disconnected when this
#: stream is disconnected.
stderr_stream = None
#: Function with the semantics of :func:`create_child` used to create the
#: child process.
create_child = staticmethod(create_child)
#: Dictionary of extra kwargs passed to :attr:`create_child`.
create_child_args = {}
#: :data:`True` if the remote has indicated that it intends to detach, and
#: should not be killed on disconnect.
detached = False
#: If :data:`True`, indicates the child should not be killed during
#: graceful detachment, as it is the actual process implementing the child
#: context. In all other cases, the subprocess is SSH, sudo, or a similar
#: tool that should be reminded to quit during disconnection.
child_is_immediate_subprocess = True
#: Prefix given to default names generated by :meth:`connect`.
name_prefix = u'local'
#: :class:`Timer` that runs :meth:`_on_timer_expired` when connection
#: timeout occurs.
_timer = None
#: When disconnection completes, instance of :class:`Reaper` used to wait
#: on the exit status of the subprocess.
_reaper = None
#: On failure, the exception object that should be propagated back to the
#: user.
exception = None
#: Extra text appended to :class:`EofError` if that exception is raised on
#: a failed connection attempt. May be used in subclasses to hint at common
#: problems with a particular connection method.
eof_error_hint = None
def __init__(self, options, router):
#: :class:`Options`
self.options = options
self._router = router
def __repr__(self):
return 'Connection(%r)' % (self.stdio_stream,)
# Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups
# file descriptor 0 as 100, creates a pipe, then execs a new interpreter
# with a custom argv.
# * Optimized for minimum byte count after minification & compression.
# * 'CONTEXT_NAME' and 'PREAMBLE_COMPRESSED_LEN' are substituted with
# their respective values.
# * CONTEXT_NAME must be prefixed with the name of the Python binary in
# order to allow virtualenvs to detect their install prefix.
# * macOS <= 10.14 (Darwin <= 18) installs an unreliable Python version
# switcher as /usr/bin/python, which introspects argv0. To workaround
# it we redirect attempts to call /usr/bin/python with an explicit
# call to /usr/bin/python2.7. macOS 10.15 (Darwin 19) removed it.
# * macOS 11.x (Darwin 20, Big Sur) and macOS 12.x (Darwin 21, Monterey)
# do something slightly different. The Python executable is patched to
# perform an extra execvp(). I don't fully understand the details, but
# setting PYTHON_LAUNCHED_FROM_WRAPPER=1 avoids it.
# * macOS 13.x (Darwin 22?) may remove python 2.x entirely.
#
# Locals:
# R: read side of interpreter stdin.
# W: write side of interpreter stdin.
# r: read side of core_src FD.
# w: write side of core_src FD.
# C: the decompressed core source.
# Final os.close(2) to avoid --py-debug build from corrupting stream with
# "[1234 refs]" during exit.
@staticmethod
def _first_stage():
R,W=os.pipe()
r,w=os.pipe()
if os.fork():
os.dup2(0,100)
os.dup2(R,0)
os.dup2(r,101)
os.close(R)
os.close(r)
os.close(W)
os.close(w)
if os.uname()[0]=='Darwin'and os.uname()[2][:2]<'19'and sys.executable=='/usr/bin/python':sys.executable='/usr/bin/python2.7'
if os.uname()[0]=='Darwin'and os.uname()[2][:2]in'2021'and sys.version[:3]=='2.7':os.environ['PYTHON_LAUNCHED_FROM_WRAPPER']='1'
os.environ['ARGV0']=sys.executable
os.execl(sys.executable,sys.executable+'(mitogen:CONTEXT_NAME)')
os.write(1,'MITO000\n'.encode())
C=_(os.fdopen(0,'rb').read(PREAMBLE_COMPRESSED_LEN),'zip')
fp=os.fdopen(W,'wb',0)
fp.write(C)
fp.close()
fp=os.fdopen(w,'wb',0)
fp.write(C)
fp.close()
os.write(1,'MITO001\n'.encode())
os.close(2)
def get_python_argv(self):
"""
Return the initial argument vector elements necessary to invoke Python,
by returning a 1-element list containing :attr:`python_path` if it is a
string, or simply returning it if it is already a list.
This allows emulation of existing tools where the Python invocation may
be set to e.g. `['/usr/bin/env', 'python']`.
"""
if isinstance(self.options.python_path, list):
return self.options.python_path
return [self.options.python_path]
def get_boot_command(self):
source = inspect.getsource(self._first_stage)
source = textwrap.dedent('\n'.join(source.strip().split('\n')[2:]))
source = source.replace('    ', '')  # strip the remaining 4-space indent
source = source.replace('CONTEXT_NAME', self.options.remote_name)
preamble_compressed = self.get_preamble()
source = source.replace('PREAMBLE_COMPRESSED_LEN',
str(len(preamble_compressed)))
compressed = zlib.compress(source.encode(), 9)
encoded = codecs.encode(compressed, 'base64').replace(b('\n'), b(''))
# We can't use bytes.decode() in 3.x since it was restricted to always
# return unicode, so codecs.decode() is used instead. In 3.x
# codecs.decode() requires a bytes object. Since we must be compatible
# with 2.4 (no bytes literal), an extra .encode() either returns the
# same str (2.x) or an equivalent bytes (3.x).
return self.get_python_argv() + [
'-c',
'import codecs,os,sys;_=codecs.decode;'
'exec(_(_("%s".encode(),"base64"),"zip"))' % (encoded.decode(),)
]
def get_econtext_config(self):
assert self.options.max_message_size is not None
parent_ids = mitogen.parent_ids[:]
parent_ids.insert(0, mitogen.context_id)
return {
'parent_ids': parent_ids,
'context_id': self.context.context_id,
'debug': self.options.debug,
'profiling': self.options.profiling,
'unidirectional': self.options.unidirectional,
'log_level': get_log_level(),
'whitelist': self._router.get_module_whitelist(),
'blacklist': self._router.get_module_blacklist(),
'max_message_size': self.options.max_message_size,
'version': mitogen.__version__,
}
def get_preamble(self):
suffix = (
'\nExternalContext(%r).main()\n' %
(self.get_econtext_config(),)
)
partial = get_core_source_partial()
return partial.append(suffix.encode('utf-8'))
def _get_name(self):
"""
Called by :meth:`connect` after :attr:`pid` is known. Subclasses can
override it to specify a default stream name, or set
:attr:`name_prefix` to generate a default format.
"""
return u'%s.%s' % (self.name_prefix, self.proc.pid)
def start_child(self):
args = self.get_boot_command()
LOG.debug('command line for %r: %s', self, Argv(args))
try:
return self.create_child(args=args, **self.create_child_args)
except OSError:
e = sys.exc_info()[1]
msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args))
raise mitogen.core.StreamError(msg)
def _adorn_eof_error(self, e):
"""
Subclasses may provide additional information in the case of a failed
connection.
"""
if self.eof_error_hint:
e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),)
def _complete_connection(self):
self._timer.cancel()
if not self.exception:
mitogen.core.unlisten(self._router.broker,'shutdown',
self._on_broker_shutdown)
self._router.register(self.context, self.stdio_stream)
self.stdio_stream.set_protocol(
MitogenProtocol(
router=self._router,
remote_id=self.context.context_id,
)
)
self._router.route_monitor.notice_stream(self.stdio_stream)
self.latch.put()
def _fail_connection(self, exc):
"""
Fail the connection attempt.
"""
LOG.debug('failing connection %s due to %r',
self.stdio_stream and self.stdio_stream.name, exc)
if self.exception is None:
self._adorn_eof_error(exc)
self.exception = exc
mitogen.core.unlisten(self._router.broker,'shutdown',
self._on_broker_shutdown)
for stream in self.stdio_stream, self.stderr_stream:
if stream and not stream.receive_side.closed:
stream.on_disconnect(self._router.broker)
self._complete_connection()
eof_error_msg = 'EOF on stream; last 100 lines received:\n'
def on_stdio_disconnect(self):
"""
Handle stdio stream disconnection by failing the Connection if the
stderr stream has already been closed. Otherwise, wait for it to close
(or timeout), to allow buffered diagnostic logs to be consumed.
It is normal that when a subprocess aborts, stdio has nothing buffered
when it is closed, thus signalling readability, causing an empty read
(interpreted as indicating disconnection) on the next loop iteration,
even if its stderr pipe has lots of diagnostic logs still buffered in
the kernel. Therefore we must wait for both pipes to indicate they are
empty before triggering connection failure.
"""
stderr = self.stderr_stream
if stderr is None or stderr.receive_side.closed:
self._on_streams_disconnected()
def on_stderr_disconnect(self):
"""
Inverse of :func:`on_stdio_disconnect`.
"""
if self.stdio_stream.receive_side.closed:
self._on_streams_disconnected()
def _on_streams_disconnected(self):
"""
When disconnection has been detected for both streams, cancel the
connection timer, mark the connection failed, and reap the child
process. Do nothing if the timer has already been cancelled, indicating
some existing failure has already been noticed.
"""
if self._timer.active:
self._timer.cancel()
self._fail_connection(EofError(
self.eof_error_msg + get_history(
[self.stdio_stream, self.stderr_stream]
)
))
if self._reaper:
return
self._reaper = Reaper(
broker=self._router.broker,
proc=self.proc,
kill=not (
(self.detached and self.child_is_immediate_subprocess) or
# Avoid killing so child has chance to write cProfile data
self._router.profiling
),
# Don't delay shutdown waiting for a detached child, since the
# detached child may expect to live indefinitely after its parent
# exited.
wait_on_shutdown=(not self.detached),
)
self._reaper.reap()
def _on_broker_shutdown(self):
"""
Respond to broker.shutdown() being called by failing the connection
attempt.
"""
self._fail_connection(CancelledError(BROKER_SHUTDOWN_MSG))
def stream_factory(self):
return self.stream_protocol_class.build_stream(
broker=self._router.broker,
)
def stderr_stream_factory(self):
return self.diag_protocol_class.build_stream()
def _setup_stdio_stream(self):
stream = self.stream_factory()
stream.conn = self
stream.name = self.options.name or self._get_name()
stream.accept(self.proc.stdout, self.proc.stdin)
mitogen.core.listen(stream, 'disconnect', self.on_stdio_disconnect)
self._router.broker.start_receive(stream)
return stream
def _setup_stderr_stream(self):
stream = self.stderr_stream_factory()
stream.conn = self
stream.name = self.options.name or self._get_name()
stream.accept(self.proc.stderr, self.proc.stderr)
mitogen.core.listen(stream, 'disconnect', self.on_stderr_disconnect)
self._router.broker.start_receive(stream)
return stream
def _on_timer_expired(self):
self._fail_connection(
mitogen.core.TimeoutError(
'Failed to setup connection after %.2f seconds',
self.options.connect_timeout,
)
)
def _async_connect(self):
LOG.debug('creating connection to context %d using %s',
self.context.context_id, self.__class__.__module__)
mitogen.core.listen(self._router.broker,'shutdown',
self._on_broker_shutdown)
self._timer = self._router.broker.timers.schedule(
when=self.options.connect_deadline,
func=self._on_timer_expired,
)
try:
self.proc = self.start_child()
except Exception:
LOG.debug('failed to start child', exc_info=True)
self._fail_connection(sys.exc_info()[1])
return
LOG.debug('child for %r started: pid:%r stdin:%r stdout:%r stderr:%r',
self, self.proc.pid,
self.proc.stdin.fileno(),
self.proc.stdout.fileno(),
self.proc.stderr and self.proc.stderr.fileno())
self.stdio_stream = self._setup_stdio_stream()
if self.context.name is None:
self.context.name = self.stdio_stream.name
self.proc.name = self.stdio_stream.name
if self.proc.stderr:
self.stderr_stream = self._setup_stderr_stream()
def connect(self, context):
self.context = context
self.latch = mitogen.core.Latch()
self._router.broker.defer(self._async_connect)
self.latch.get()
if self.exception:
raise self.exception
class ChildIdAllocator(object):
"""
Allocate new context IDs from a block of unique context IDs allocated by
the master process.
"""
def __init__(self, router):
self.router = router
self.lock = threading.Lock()
self.it = iter(xrange(0))
def allocate(self):
"""
Allocate an ID, requesting a fresh block from the master if the
existing block is exhausted.
:returns:
The new context ID.
.. warning::
This method is not safe to call from the :class:`Broker` thread, as
it may block on IO of its own.
"""
self.lock.acquire()
try:
for id_ in self.it:
return id_
master = self.router.context_by_id(0)
start, end = master.send_await(
mitogen.core.Message(dst_id=0, handle=mitogen.core.ALLOCATE_ID)
)
self.it = iter(xrange(start, end))
finally:
self.lock.release()
return self.allocate()
class CallChain(object):
"""
Deliver :data:`mitogen.core.CALL_FUNCTION` messages to a target context,
optionally threading related calls so an exception in an earlier call
cancels subsequent calls.
:param mitogen.core.Context context:
Target context.
:param bool pipelined:
Enable pipelining.
:meth:`call`, :meth:`call_no_reply` and :meth:`call_async`
normally issue calls and produce responses with no memory of prior
exceptions. If a call made with :meth:`call_no_reply` fails, the exception
is logged to the target context's logging framework.
**Pipelining**
When pipelining is enabled, if an exception occurs during a call,
subsequent calls made by the same :class:`CallChain` fail with the same
exception, including those already in-flight on the network, and no further
calls execute until :meth:`reset` is invoked.
No exception is logged for calls made with :meth:`call_no_reply`, instead
the exception is saved and reported as the result of subsequent
:meth:`call` or :meth:`call_async` calls.
Sequences of asynchronous calls can be made without wasting network
round-trips to discover if prior calls succeed, and chains originating from
multiple unrelated source contexts may overlap concurrently at a target
context without interference.
In this example, 4 calls complete in one round-trip::
chain = mitogen.parent.CallChain(context, pipelined=True)
chain.call_no_reply(os.mkdir, '/tmp/foo')
# If previous mkdir() failed, this never runs:
chain.call_no_reply(os.mkdir, '/tmp/foo/bar')
# If either mkdir() failed, this never runs, and the exception is
# asynchronously delivered to the receiver.
recv = chain.call_async(subprocess.check_output, '/tmp/foo')
# If anything so far failed, this never runs, and raises the exception.
chain.call(do_something)
# If this code was executed, the exception would also be raised.
if recv.get().unpickle() == 'baz':
pass
When pipelining is enabled, :meth:`reset` must be invoked to ensure any
exception is discarded, otherwise unbounded memory usage is possible in
long-running programs. The context manager protocol is supported to ensure
:meth:`reset` is always invoked::
with mitogen.parent.CallChain(context, pipelined=True) as chain:
chain.call_no_reply(...)
chain.call_no_reply(...)
chain.call_no_reply(...)
chain.call(...)
# chain.reset() automatically invoked.
"""
def __init__(self, context, pipelined=False):
self.context = context
if pipelined:
self.chain_id = self.make_chain_id()
else:
self.chain_id = None
@classmethod
def make_chain_id(cls):
return '%s-%s-%x-%x' % (
socket.gethostname(),
os.getpid(),
thread.get_ident(),
int(1e6 * mitogen.core.now()),
)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.context)
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.reset()
def reset(self):
"""
Instruct the target to forget any related exception.
"""
if not self.chain_id:
return
saved, self.chain_id = self.chain_id, None
try:
self.call_no_reply(mitogen.core.Dispatcher.forget_chain, saved)
finally:
self.chain_id = saved
closures_msg = (
'Mitogen cannot invoke closures, as doing so would require '
'serializing arbitrary program state, and no universal '
'method exists to recover a reference to them.'
)
lambda_msg = (
'Mitogen cannot invoke anonymous functions, as no universal method '
'exists to recover a reference to an anonymous function.'
)
method_msg = (
'Mitogen cannot invoke instance methods, as doing so would require '
'serializing arbitrary program state.'
)
def make_msg(self, fn, *args, **kwargs):
if getattr(fn, closure_attr, None) is not None:
raise TypeError(self.closures_msg)
if fn.__name__ == '<lambda>':
raise TypeError(self.lambda_msg)
if inspect.ismethod(fn):
im_self = getattr(fn, IM_SELF_ATTR)
if not inspect.isclass(im_self):
raise TypeError(self.method_msg)
klass = mitogen.core.to_text(im_self.__name__)
else:
klass = None
tup = (
self.chain_id,
mitogen.core.to_text(fn.__module__),
klass,
mitogen.core.to_text(fn.__name__),
args,
mitogen.core.Kwargs(kwargs)
)
return mitogen.core.Message.pickled(tup,
handle=mitogen.core.CALL_FUNCTION)
def call_no_reply(self, fn, *args, **kwargs):
"""
Like :meth:`call_async`, but do not wait for a return value, and inform
the target context no reply is expected. If the call fails and
pipelining is disabled, the exception will be logged to the target
context's logging framework.
"""
LOG.debug('starting no-reply function call to %r: %r',
self.context.name or self.context.context_id,
CallSpec(fn, args, kwargs))
self.context.send(self.make_msg(fn, *args, **kwargs))
def call_async(self, fn, *args, **kwargs):
"""
Arrange for `fn(*args, **kwargs)` to be invoked on the context's main
thread.
:param fn:
A free function in module scope or a class method of a class
directly reachable from module scope:
.. code-block:: python
# mymodule.py
def my_func():
'''A free function reachable as mymodule.my_func'''
class MyClass:
@classmethod
def my_classmethod(cls):
'''Reachable as mymodule.MyClass.my_classmethod'''
def my_instancemethod(self):
'''Unreachable: requires a class instance!'''
class MyEmbeddedClass:
@classmethod
def my_classmethod(cls):
'''Not directly reachable from module scope!'''
:param tuple args:
Function arguments, if any. See :ref:`serialization-rules` for
permitted types.
:param dict kwargs:
Function keyword arguments, if any. See :ref:`serialization-rules`
for permitted types.
:returns:
:class:`mitogen.core.Receiver` configured to receive the result of
the invocation:
.. code-block:: python
recv = context.call_async(subprocess.check_output, ['ls', '/tmp/'])
try:
# Prints output once it is received.
msg = recv.get()
print(msg.unpickle())
except mitogen.core.CallError, e:
print('Call failed:', str(e))
Asynchronous calls may be dispatched in parallel to multiple
contexts and consumed as they complete using
:class:`mitogen.select.Select`.
"""
LOG.debug('starting function call to %s: %r',
self.context.name or self.context.context_id,
CallSpec(fn, args, kwargs))
return self.context.send_async(self.make_msg(fn, *args, **kwargs))
def call(self, fn, *args, **kwargs):
"""
Like :meth:`call_async`, but block until the return value is available.
Equivalent to::
call_async(fn, *args, **kwargs).get().unpickle()
:returns:
The function's return value.
:raises mitogen.core.CallError:
An exception was raised in the remote context during execution.
"""
receiver = self.call_async(fn, *args, **kwargs)
return receiver.get().unpickle(throw_dead=False)
class Context(mitogen.core.Context):
"""
Extend :class:`mitogen.core.Context` with functionality useful to masters,
and child contexts who later become parents. Currently when this class is
required, the target context's router is upgraded at runtime.
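Instances are returned by :meth:`Router.connect` and its helpers, and may
be used like (a sketch)::

    context = router.connect(u'local')
    print(context.call(os.getpid))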
"""
#: A :class:`CallChain` instance constructed by default, with pipelining
#: disabled. :meth:`call`, :meth:`call_async` and :meth:`call_no_reply` use
#: this instance.
call_chain_class = CallChain
via = None
def __init__(self, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
self.default_call_chain = self.call_chain_class(self)
def __ne__(self, other):
return not (self == other)
def __eq__(self, other):
return (
isinstance(other, mitogen.core.Context) and
(other.context_id == self.context_id) and
(other.router == self.router)
)
def __hash__(self):
return hash((self.router, self.context_id))
def call_async(self, fn, *args, **kwargs):
"""
See :meth:`CallChain.call_async`.
"""
return self.default_call_chain.call_async(fn, *args, **kwargs)
def call(self, fn, *args, **kwargs):
"""
See :meth:`CallChain.call`.
"""
return self.default_call_chain.call(fn, *args, **kwargs)
def call_no_reply(self, fn, *args, **kwargs):
"""
See :meth:`CallChain.call_no_reply`.
"""
self.default_call_chain.call_no_reply(fn, *args, **kwargs)
def shutdown(self, wait=False):
"""
Arrange for the context to receive a ``SHUTDOWN`` message, triggering
graceful shutdown.
Due to a lack of support for timers, no attempt is made yet to force
terminate a hung context using this method. This will be fixed shortly.
:param bool wait:
If :data:`True`, block the calling thread until the context has
completely terminated.
:returns:
If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch`
whose :meth:`get() <mitogen.core.Latch.get>` method returns
:data:`None` when shutdown completes. The `timeout` parameter may
be used to implement graceful timeouts.
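For example (a sketch)::

    latch = context.shutdown(wait=False)
    try:
        latch.get(timeout=10.0)       # returns None once disconnected
    except mitogen.core.TimeoutError:
        LOG.warning('%r is taking too long to shut down', context)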
"""
LOG.debug('%r.shutdown() sending SHUTDOWN', self)
latch = mitogen.core.Latch()
mitogen.core.listen(self, 'disconnect', lambda: latch.put(None))
self.send(
mitogen.core.Message(
handle=mitogen.core.SHUTDOWN,
)
)
if wait:
latch.get()
else:
return latch
class RouteMonitor(object):
"""
Generate and respond to :data:`mitogen.core.ADD_ROUTE` and
:data:`mitogen.core.DEL_ROUTE` messages sent to the local context by
maintaining a table of available routes, and propagating messages towards
parents and siblings as appropriate.
:class:`RouteMonitor` is responsible for generating routing messages for
directly attached children. It learns of new children via
:meth:`notice_stream` called by :class:`Router`, and subscribes to their
``disconnect`` event to learn when they disappear.
In children, constructing this class overwrites the stub
:data:`mitogen.core.DEL_ROUTE` handler installed by
:class:`mitogen.core.ExternalContext`, which is expected behaviour when a
child is being upgraded in preparation to become a parent of children of
its own.
By virtue of only being active while responding to messages from a handler,
RouteMonitor lives entirely on the broker thread, so its data requires no
locking.
:param mitogen.master.Router router:
Router to install handlers on.
:param mitogen.core.Context parent:
:data:`None` in the master process, or reference to the parent context
we should propagate route updates towards.
"""
def __init__(self, router, parent=None):
self.router = router
self.parent = parent
self._log = logging.getLogger('mitogen.route_monitor')
#: Mapping of Stream instance to integer context IDs reachable via the
#: stream; used to cleanup routes during disconnection.
self._routes_by_stream = {}
self.router.add_handler(
fn=self._on_add_route,
handle=mitogen.core.ADD_ROUTE,
persist=True,
policy=is_immediate_child,
overwrite=True,
)
self.router.add_handler(
fn=self._on_del_route,
handle=mitogen.core.DEL_ROUTE,
persist=True,
policy=is_immediate_child,
overwrite=True,
)
def __repr__(self):
return 'RouteMonitor()'
def _send_one(self, stream, handle, target_id, name):
"""
Compose and send an update message on a stream.
:param mitogen.core.Stream stream:
Stream to send it on.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
Context name or :data:`None`.
"""
if not stream:
# We may not have a stream during shutdown.
return
data = str(target_id)
if name:
data = '%s:%s' % (target_id, name)
stream.protocol.send(
mitogen.core.Message(
handle=handle,
data=data.encode('utf-8'),
dst_id=stream.protocol.remote_id,
)
)
def _propagate_up(self, handle, target_id, name=None):
"""
In a non-master context, propagate an update towards the master.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
:param str name:
For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
assigned by its parent. This is used by parents to assign the
:attr:`mitogen.core.Context.name` attribute.
"""
if self.parent:
stream = self.router.stream_by_id(self.parent.context_id)
self._send_one(stream, handle, target_id, name)
def _propagate_down(self, handle, target_id):
"""
For DEL_ROUTE, we additionally want to broadcast the message to any
stream that has ever communicated with the disconnecting ID, so
core.py's :meth:`mitogen.core.Router._on_del_route` can turn the
message into a disconnect event.
:param int handle:
:data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
:param int target_id:
ID of the connecting or disconnecting context.
"""
for stream in self.router.get_streams():
if target_id in stream.protocol.egress_ids and (
(self.parent is None) or
(self.parent.context_id!= stream.protocol.remote_id)
):
self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None)
def notice_stream(self, stream):
"""
When this parent is responsible for a new directly connected child
stream, we're also responsible for broadcasting
:data:`mitogen.core.DEL_ROUTE` upstream when that child disconnects.
"""
self._routes_by_stream[stream] = set([stream.protocol.remote_id])
self._propagate_up(mitogen.core.ADD_ROUTE, stream.protocol.remote_id,
stream.name)
mitogen.core.listen(
obj=stream,
name='disconnect',
func=lambda: self._on_stream_disconnect(stream),
)
def get_routes(self, stream):
"""
Return the set of context IDs reachable on a stream.
:param mitogen.core.Stream stream:
:returns: set([int])
"""
return self._routes_by_stream.get(stream) or set()
def _on_stream_disconnect(self, stream):
"""
Respond to disconnection of a local stream by propagating DEL_ROUTE for
any contexts we know were attached to it.
"""
# During a stream crash it is possible for the disconnect signal to fire
# twice, in which case ignore the second instance.
routes = self._routes_by_stream.pop(stream, None)
if routes is None:
return
self._log.debug('stream %s is gone; propagating DEL_ROUTE for %r',
stream.name, routes)
for target_id in routes:
self.router.del_route(target_id)
self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
context = self.router.context_by_id(target_id, create=False)
if context:
mitogen.core.fire(context, 'disconnect')
def _on_add_route(self, msg):
"""
Respond to :data:`mitogen.core.ADD_ROUTE` by validating the source of
the message, updating the local table, and propagating the message
upwards.
"""
if msg.is_dead:
return
target_id_s, _, target_name = bytes_partition(msg.data, b(':'))
target_name = target_name.decode()
target_id = int(target_id_s)
self.router.context_by_id(target_id).name = target_name
stream = self.router.stream_by_id(msg.src_id)
current = self.router.stream_by_id(target_id)
if current and current.protocol.remote_id!= mitogen.parent_id:
self._log.error('Cannot add duplicate route to %r via %r, '
'already have existing route via %r',
target_id, stream, current)
return
self._log.debug('Adding route to %d via %r', target_id, stream)
self._routes_by_stream[stream].add(target_id)
self.router.add_route(target_id, stream)
self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name)
def _on_del_route(self, msg):
"""
Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
the message, updating the local table, propagating the message
upwards, and downwards towards any stream that ever had a message
forwarded from it towards the disconnecting context.
"""
if msg.is_dead:
return
target_id = int(msg.data)
registered_stream = self.router.stream_by_id(target_id)
if registered_stream is None:
return
stream = self.router.stream_by_id(msg.src_id)
if registered_stream!= stream:
self._log.error('received DEL_ROUTE for %d from %r, expected %r',
target_id, stream, registered_stream)
return
context = self.router.context_by_id(target_id, create=False)
if context:
self._log.debug('firing local disconnect signal for %r', context)
mitogen.core.fire(context, 'disconnect')
self._log.debug('deleting route to %d via %r', target_id, stream)
routes = self._routes_by_stream.get(stream)
if routes:
routes.discard(target_id)
self.router.del_route(target_id)
if stream.protocol.remote_id!= mitogen.parent_id:
self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
class Router(mitogen.core.Router):
context_class = Context
debug = False
profiling = False
id_allocator = None
responder = None
log_forwarder = None
route_monitor = None
def upgrade(self, importer, parent):
LOG.debug('upgrading %r with capabilities to start new children', self)
self.id_allocator = ChildIdAllocator(router=self)
self.responder = ModuleForwarder(
router=self,
parent_context=parent,
importer=importer,
)
self.route_monitor = RouteMonitor(self, parent)
self.add_handler(
fn=self._on_detaching,
handle=mitogen.core.DETACHING,
persist=True,
)
def _on_detaching(self, msg):
if msg.is_dead:
return
stream = self.stream_by_id(msg.src_id)
if stream.protocol.remote_id!= msg.src_id or stream.conn.detached:
LOG.warning('bad DETACHING received on %r: %r', stream, msg)
return
LOG.debug('%r: marking as detached', stream)
stream.conn.detached = True
msg.reply(None)
def get_streams(self):
"""
Return an atomic snapshot of all streams in existence at time of call.
This is safe to call from any thread.
"""
self._write_lock.acquire()
try:
return itervalues(self._stream_by_id)
finally:
self._write_lock.release()
def disconnect(self, context):
"""
Disconnect a context and forget its stream, assuming the context is
directly connected.
"""
stream = self.stream_by_id(context)
if stream is None or stream.protocol.remote_id!= context.context_id:
return
l = mitogen.core.Latch()
mitogen.core.listen(stream, 'disconnect', l.put)
def disconnect():
LOG.debug('Starting disconnect of %r', stream)
stream.on_disconnect(self.broker)
self.broker.defer(disconnect)
l.get()
def add_route(self, target_id, stream):
"""
Arrange for messages whose `dst_id` is `target_id` to be forwarded on a
directly connected :class:`Stream`. Safe to call from any thread.
This is called automatically by :class:`RouteMonitor` in response to
:data:`mitogen.core.ADD_ROUTE` messages, but remains public while the
design has not yet settled, and situations may arise where routing is
not fully automatic.
:param int target_id:
Target context ID to add a route for.
:param mitogen.core.Stream stream:
Stream over which messages to the target should be routed.
"""
LOG.debug('%r: adding route to context %r via %r',
self, target_id, stream)
assert isinstance(target_id, int)
assert isinstance(stream, mitogen.core.Stream)
self._write_lock.acquire()
try:
self._stream_by_id[target_id] = stream
finally:
self._write_lock.release()
def del_route(self, target_id):
"""
Delete any route that exists for `target_id`. It is not an error to
delete a route that does not currently exist. Safe to call from any
thread.
This is called automatically by :class:`RouteMonitor` in response to
:data:`mitogen.core.DEL_ROUTE` messages, but remains public while the
design has not yet settled, and situations may arise where routing is
not fully automatic.
:param int target_id:
Target context ID to delete route for.
"""
LOG.debug('%r: deleting route to %r', self, target_id)
# DEL_ROUTE may be sent by a parent if it knows this context sent
# messages to a peer that has now disconnected, to let us raise
# 'disconnect' event on the appropriate Context instance. In that case,
# we won't have a matching _stream_by_id entry for the disappearing route,
# so don't raise an error for a missing key here.
self._write_lock.acquire()
try:
self._stream_by_id.pop(target_id, None)
finally:
self._write_lock.release()
def get_module_blacklist(self):
if mitogen.context_id == 0:
return self.responder.blacklist
return self.importer.master_blacklist
def get_module_whitelist(self):
if mitogen.context_id == 0:
return self.responder.whitelist
return self.importer.master_whitelist
def allocate_id(self):
return self.id_allocator.allocate()
connection_timeout_msg = u"Connection timed out."
def _connect(self, klass, **kwargs):
context_id = self.allocate_id()
context = self.context_class(self, context_id)
context.name = kwargs.get('name')
kwargs['old_router'] = self
kwargs['max_message_size'] = self.max_message_size
conn = klass(klass.options_class(**kwargs), self)
try:
conn.connect(context=context)
except mitogen.core.TimeoutError:
raise mitogen.core.StreamError(self.connection_timeout_msg)
return context
def connect(self, method_name, name=None, **kwargs):
if name:
name = mitogen.core.to_text(name)
klass = get_connection_class(method_name)
kwargs.setdefault(u'debug', self.debug)
kwargs.setdefault(u'profiling', self.profiling)
kwargs.setdefault(u'unidirectional', self.unidirectional)
kwargs.setdefault(u'name', name)
via = kwargs.pop(u'via', None)
if via is not None:
return self.proxy_connect(via, method_name,
**mitogen.core.Kwargs(kwargs))
return self._connect(klass, **mitogen.core.Kwargs(kwargs))
def proxy_connect(self, via_context, method_name, name=None, **kwargs):
resp = via_context.call(_proxy_connect,
name=name,
method_name=method_name,
kwargs=mitogen.core.Kwargs(kwargs),
)
if resp['msg'] is not None:
raise mitogen.core.StreamError(resp['msg'])
name = u'%s.%s' % (via_context.name, resp['name'])
context = self.context_class(self, resp['id'], name=name)
context.via = via_context
self._write_lock.acquire()
try:
self._context_by_id[context.context_id] = context
finally:
self._write_lock.release()
return context
def buildah(self, **kwargs):
return self.connect(u'buildah', **kwargs)
def doas(self, **kwargs):
return self.connect(u'doas', **kwargs)
def docker(self, **kwargs):
return self.connect(u'docker', **kwargs)
def kubectl(self, **kwargs):
return self.connect(u'kubectl', **kwargs)
def fork(self, **kwargs):
return self.connect(u'fork', **kwargs)
def jail(self, **kwargs):
return self.connect(u'jail', **kwargs)
def local(self, **kwargs):
return self.connect(u'local', **kwargs)
def lxc(self, **kwargs):
return self.connect(u'lxc', **kwargs)
def lxd(self, **kwargs):
return self.connect(u'lxd', **kwargs)
def setns(self, **kwargs):
return self.connect(u'setns', **kwargs)
def su(self, **kwargs):
return self.connect(u'su', **kwargs)
def sudo(self, **kwargs):
return self.connect(u'sudo', **kwargs)
def ssh(self, **kwargs):
return self.connect(u'ssh', **kwargs)
def podman(self, **kwargs):
return self.connect(u'podman', **kwargs)
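# Illustrative sketch (not part of the original module): each connection
# helper above is a thin wrapper around connect(), so the first two calls
# below are equivalent, and the `via=` keyword chains a new connection
# through an already established context. Hostname and username values are
# hypothetical.
#
#   remote = router.ssh(hostname='mainframe')
#   remote = router.connect(u'ssh', hostname='mainframe')
#   admin = router.sudo(username='root', via=remote)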
class Reaper(object):
"""
Asynchronous logic for reaping :class:`Process` objects. This is necessary
to prevent uncontrolled buildup of zombie processes in long-lived parents
that will eventually reach an OS limit, preventing creation of new threads
and processes, and to log the exit status of the child in the case of an
error.
To avoid modifying process-global state such as with
:func:`signal.set_wakeup_fd` or installing a :data:`signal.SIGCHLD` handler
that might interfere with the user's ability to use those facilities,
Reaper polls for exit with backoff using timers installed on an associated
:class:`Broker`.
:param mitogen.core.Broker broker:
The :class:`Broker` on which to install timers
:param mitogen.parent.Process proc:
The process to reap.
:param bool kill:
If :data:`True`, send ``SIGTERM`` and ``SIGKILL`` to the process.
:param bool wait_on_shutdown:
If :data:`True`, delay :class:`Broker` shutdown if child has not yet
exited. If :data:`False`, simply forget the child.
"""
#: :class:`Timer` that invokes :meth:`reap` after some polling delay.
_timer = None
def __init__(self, broker, proc, kill, wait_on_shutdown):
self.broker = broker
self.proc = proc
self.kill = kill
self.wait_on_shutdown = wait_on_shutdown
self._tries = 0
def _signal_child(self, signum):
# For processes like sudo we cannot actually send sudo a signal,
# because it is setuid, so this is best-effort only.
LOG.debug('%r: sending %s', self.proc, SIGNAL_BY_NUM[signum])
try:
os.kill(self.proc.pid, signum)
except OSError:
e = sys.exc_info()[1]
if e.args[0] != errno.EPERM:
raise
def _calc_delay(self, count):
"""
Calculate a poll delay given `count` attempts have already been made.
These constants have no principle, they just produce rapid but still
relatively conservative retries.
"""
delay = 0.05
for _ in xrange(count):
delay *= 1.72
return delay
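# Worked example (illustrative, not part of the original module): the poll
# delay grows geometrically from 50ms by a factor of 1.72 per attempt:
#
#   count:  0      1      2      3      4      5
#   delay:  0.050  0.086  0.148  0.254  0.438  0.753   (seconds, approximate)
#
# Combined with the signalling in reap() below, a child that survives the
# early SIGTERM is eventually sent SIGKILL after a few more retries.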
def _on_broker_shutdown(self):
"""
Respond to :class:`Broker` shutdown by cancelling the reap timer if
:attr:`Router.await_children_at_shutdown` is disabled. Otherwise
shutdown is delayed for up to :attr:`Broker.shutdown_timeout`, since
subprocesses may have no intention of exiting any time soon.
"""
if not self.wait_on_shutdown:
self._timer.cancel()
def _install_timer(self, delay):
new = self._timer is None
self._timer = self.broker.timers.schedule(
when=mitogen.core.now() + delay,
func=self.reap,
)
if new:
mitogen.core.listen(self.broker, 'shutdown',
self._on_broker_shutdown)
def _remove_timer(self):
if self._timer and self._timer.active:
self._timer.cancel()
mitogen.core.unlisten(self.broker, 'shutdown',
self._on_broker_shutdown)
def reap(self):
"""
Reap the child process during disconnection.
"""
status = self.proc.poll()
if status is not None:
LOG.debug('%r: %s', self.proc, returncode_to_str(status))
mitogen.core.fire(self.proc, 'exit')
self._remove_timer()
return
self._tries += 1
if self._tries > 20:
LOG.warning('%r: child will not exit, giving up', self)
self._remove_timer()
return
delay = self._calc_delay(self._tries - 1)
LOG.debug('%r still running after IO disconnect, recheck in %.03fs',
self.proc, delay)
self._install_timer(delay)
if not self.kill:
pass
elif self._tries == 2:
self._signal_child(signal.SIGTERM)
elif self._tries == 6: # roughly 4 seconds
self._signal_child(signal.SIGKILL)
class Process(object):
"""
Process objects provide a uniform interface to the :mod:`subprocess` and
:mod:`mitogen.fork`. This class is extended by :class:`PopenProcess` and
:class:`mitogen.fork.Process`.
:param int pid:
The process ID.
:param file stdin:
File object attached to standard input.
:param file stdout:
File object attached to standard output.
:param file stderr:
File object attached to standard error, or :data:`None`.
"""
#: Name of the process used in logs. Set to the stream/context name by
#: :class:`Connection`.
name = None
def __init__(self, pid, stdin, stdout, stderr=None):
#: The process ID.
self.pid = pid
#: File object attached to standard input.
self.stdin = stdin
#: File object attached to standard output.
self.stdout = stdout
#: File object attached to standard error.
self.stderr = stderr
def __repr__(self):
return '%s %s pid %d' % (
type(self).__name__,
self.name,
self.pid,
)
def poll(self):
"""
Fetch the child process exit status, or :data:`None` if it is still
running. This should be overridden by subclasses.
:returns:
Exit status in the style of the :attr:`subprocess.Popen.returncode`
attribute, i.e. with signals represented by a negative integer.
"""
raise NotImplementedError()
class PopenProcess(Process):
"""
:class:`Process` subclass wrapping a :class:`subprocess.Popen` object.
:param subprocess.Popen proc:
The subprocess.
"""
def __init__(self, proc, stdin, stdout, stderr=None):
super(PopenProcess, self).__init__(proc.pid, stdin, stdout, stderr)
#: The subprocess.
self.proc = proc
def poll(self):
return self.proc.poll()
class ModuleForwarder(object):
"""
Respond to :data:`mitogen.core.GET_MODULE` requests in a child by
forwarding the request to our parent context, or satisfying the request
from our local Importer cache.
"""
def __init__(self, router, parent_context, importer):
self.router = router
self.parent_context = parent_context
self.importer = importer
router.add_handler(
fn=self._on_forward_module,
handle=mitogen.core.FORWARD_MODULE,
persist=True,
policy=mitogen.core.has_parent_authority,
)
router.add_handler(
fn=self._on_get_module,
handle=mitogen.core.GET_MODULE,
persist=True,
policy=is_immediate_child,
)
def __repr__(self):
return 'ModuleForwarder'
def _on_forward_module(self, msg):
if msg.is_dead:
return
context_id_s, _, fullname = bytes_partition(msg.data, b('\x00'))
fullname = mitogen.core.to_text(fullname)
context_id = int(context_id_s)
stream = self.router.stream_by_id(context_id)
if stream.protocol.remote_id == mitogen.parent_id:
LOG.error('%r: dropping FORWARD_MODULE(%d, %r): no route to child',
self, context_id, fullname)
return
if fullname in stream.protocol.sent_modules:
return
LOG.debug('%r._on_forward_module() sending %r to %r via %r',
self, fullname, context_id, stream.protocol.remote_id)
self._send_module_and_related(stream, fullname)
if stream.protocol.remote_id != context_id:
stream.protocol._send(
mitogen.core.Message(
data=msg.data,
handle=mitogen.core.FORWARD_MODULE,
dst_id=stream.protocol.remote_id,
)
)
def _on_get_module(self, msg):
if msg.is_dead:
return
fullname = msg.data.decode('utf-8')
LOG.debug('%r: %s requested by context %d', self, fullname, msg.src_id)
callback = lambda: self._on_cache_callback(msg, fullname)
self.importer._request_module(fullname, callback)
def _on_cache_callback(self, msg, fullname):
stream = self.router.stream_by_id(msg.src_id)
LOG.debug('%r: sending %s to %r', self, fullname, stream)
self._send_module_and_related(stream, fullname)
def _send_module_and_related(self, stream, fullname):
tup = self.importer._cache[fullname]
for related in tup[4]:
rtup = self.importer._cache.get(related)
if rtup:
self._send_one_module(stream, rtup)
else:
LOG.debug('%r: %s not in cache (for %s)',
self, related, fullname)
self._send_one_module(stream, tup)
def _send_one_module(self, stream, tup):
if tup[0] not in stream.protocol.sent_modules:
stream.protocol.sent_modules.add(tup[0])
self.router._async_route(
mitogen.core.Message.pickled(
tup,
dst_id=stream.protocol.remote_id,
handle=mitogen.core.LOAD_MODULE,
)
)
# Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!mitogen: minify_safe
"""
This module implements most package functionality, but remains separate from
non-essential code in order to reduce its size, since it also serves as the
bootstrap implementation sent to every new slave context.
"""
import binascii
import collections
import encodings.latin_1
import encodings.utf_8
import errno
import fcntl
import itertools
import linecache
import logging
import os
import pickle as py_pickle
import pstats
import signal
import socket
import struct
import sys
import syslog
import threading
import time
import traceback
import warnings
import weakref
import zlib
# Python >3.7 deprecated the imp module.
warnings.filterwarnings('ignore', message='the imp module is deprecated')
import imp
# Absolute imports for <2.5.
select = __import__('select')
try:
import cProfile
except ImportError:
cProfile = None
try:
import thread
except ImportError:
import threading as thread
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
try:
BaseException
except NameError:
BaseException = Exception
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
# TODO: usage of 'import' after setting __name__, but before fixing up
# sys.modules generates a warning. This happens when profiling = True.
warnings.filterwarnings('ignore',
"Parent module'mitogen' not found while handling absolute import")
LOG = logging.getLogger('mitogen')
IOLOG = logging.getLogger('mitogen.io')
IOLOG.setLevel(logging.INFO)
# str.encode() may take import lock. Deadlock possible if broker calls
# .encode() on behalf of thread currently waiting for module.
LATIN1_CODEC = encodings.latin_1.Codec()
_v = False
_vv = False
GET_MODULE = 100
CALL_FUNCTION = 101
FORWARD_LOG = 102
ADD_ROUTE = 103
DEL_ROUTE = 104
ALLOCATE_ID = 105
SHUTDOWN = 106
LOAD_MODULE = 107
FORWARD_MODULE = 108
DETACHING = 109
CALL_SERVICE = 110
STUB_CALL_SERVICE = 111
#: Special value used to signal disconnection or the inability to route a
#: message, when it appears in the `reply_to` field. Usually causes
#: :class:`mitogen.core.ChannelError` to be raised when it is received.
#:
#: It indicates the sender did not know how to process the message, or wishes
#: no further messages to be delivered to it. It is used when:
#:
#: * a remote receiver is disconnected or explicitly closed.
#: * a related message could not be delivered due to no route existing for it.
#: * a router is being torn down, as a sentinel value to notify
#: :meth:`mitogen.core.Router.add_handler` callbacks to clean up.
IS_DEAD = 999
try:
BaseException
except NameError:
BaseException = Exception
PY24 = sys.version_info < (2, 5)
PY3 = sys.version_info > (3,)
if PY3:
b = str.encode
BytesType = bytes
UnicodeType = str
FsPathTypes = (str,)
BufferType = lambda buf, start: memoryview(buf)[start:]
long = int
else:
b = str
BytesType = str
FsPathTypes = (str, unicode)
BufferType = buffer
UnicodeType = unicode
AnyTextType = (BytesType, UnicodeType)
try:
next
except NameError:
next = lambda it: it.next()
# #550: prehistoric WSL did not advertise itself in uname output.
try:
fp = open('/proc/sys/kernel/osrelease')
IS_WSL = 'Microsoft' in fp.read()
fp.close()
except IOError:
IS_WSL = False
#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the
#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This
#: value has many performance implications, 128KiB seems to be a sweet spot.
#:
#: * When set low, large messages cause many :class:`Broker` IO loop
#: iterations, burning CPU and reducing throughput.
#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x
#: per child), and an identically sized temporary userspace buffer is
#: allocated on each read that requires zeroing, and over a particular size
#: may require two system calls to allocate/deallocate.
#:
#: Care must be taken to ensure the underlying kernel object and receiving
#: program support the desired size. For example,
#:
#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable
#: for efficient IO.
#: * Different UNIXes have varying presets for pipes, which may not be
#: configurable. On recent Linux the default pipe buffer size is 64KiB, but
#: under memory pressure may be as low as 4KiB for unprivileged processes.
#: * When communication is via an intermediary process, its internal buffers
#: affect the speed at which OS buffers drain. For example OpenSSH uses 64KiB
#: reads.
#:
#: An ideal :class:`Message` has a size that is a multiple of
#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations
#: writing small trailer chunks.
CHUNK_SIZE = 131072
_tls = threading.local()
if __name__ == 'mitogen.core':
# When loaded using import mechanism, ExternalContext.main() will not have
# a chance to set the synthetic mitogen global, so just import it here.
import mitogen
else:
# When loaded as __main__, ensure classes and functions gain a __module__
# attribute consistent with the host process, so that pickling succeeds.
__name__ = 'mitogen.core'
class Error(Exception):
"""
Base for all exceptions raised by Mitogen.
:param str fmt:
Exception text, or format string if `args` is non-empty.
:param tuple args:
Format string arguments.
"""
def __init__(self, fmt=None, *args):
if args:
fmt %= args
if fmt and not isinstance(fmt, UnicodeType):
fmt = fmt.decode('utf-8')
Exception.__init__(self, fmt)
class LatchError(Error):
"""
Raised when an attempt is made to use a :class:`mitogen.core.Latch` that
has been marked closed.
"""
pass
class Blob(BytesType):
"""
A serializable bytes subclass whose content is summarized in repr() output,
making it suitable for logging binary data.
"""
def __repr__(self):
return '[blob: %d bytes]' % len(self)
def __reduce__(self):
return (Blob, (BytesType(self),))
class Secret(UnicodeType):
"""
A serializable unicode subclass whose content is masked in repr() output,
making it suitable for logging passwords.
"""
def __repr__(self):
return '[secret]'
if not PY3:
# TODO: what is this needed for in 2.x?
def __str__(self):
return UnicodeType(self)
def __reduce__(self):
return (Secret, (UnicodeType(self),))
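# Illustrative sketch (not part of the original module): both wrappers exist
# so that large or sensitive values remain readable when they appear in logs.
#
#   repr(Blob(b('\x00') * 4096))   # -> '[blob: 4096 bytes]'
#   repr(Secret(u'hunter2'))       # -> '[secret]'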
class Kwargs(dict):
"""
A serializable dict subclass that indicates its keys should be coerced to
Unicode on Python 3 and bytes on Python<2.6.
Python 2 produces keyword argument dicts whose keys are bytes, requiring a
helper to ensure compatibility with Python 3 where Unicode is required,
whereas Python 3 produces keyword argument dicts whose keys are Unicode,
requiring a helper for Python 2.4/2.5, where bytes are required.
"""
if PY3:
def __init__(self, dct):
for k, v in dct.items():
if type(k) is bytes:
self[k.decode()] = v
else:
self[k] = v
elif sys.version_info < (2, 6, 5):
def __init__(self, dct):
for k, v in dct.iteritems():
if type(k) is unicode:
k, _ = encodings.utf_8.encode(k)
self[k] = v
def __repr__(self):
return 'Kwargs(%s)' % (dict.__repr__(self),)
def __reduce__(self):
return (Kwargs, (dict(self),))
class CallError(Error):
"""
Serializable :class:`Error` subclass raised when :meth:`Context.call()
<mitogen.parent.Context.call>` fails. A copy of the traceback from the
external context is appended to the exception message.
"""
def __init__(self, fmt=None, *args):
if not isinstance(fmt, BaseException):
Error.__init__(self, fmt, *args)
else:
e = fmt
cls = e.__class__
fmt = '%s.%s: %s' % (cls.__module__, cls.__name__, e)
tb = sys.exc_info()[2]
if tb:
fmt += '\n'
fmt += ''.join(traceback.format_tb(tb))
Error.__init__(self, fmt)
def __reduce__(self):
return (_unpickle_call_error, (self.args[0],))
def _unpickle_call_error(s):
if not (type(s) is UnicodeType and len(s) < 10000):
raise TypeError('cannot unpickle CallError: bad input')
return CallError(s)
class ChannelError(Error):
"""
Raised when a channel dies or has been closed.
"""
remote_msg = 'Channel closed by remote end.'
local_msg = 'Channel closed by local end.'
class StreamError(Error):
"""
Raised when a stream cannot be established.
"""
pass
class TimeoutError(Error):
"""
Raised when a timeout occurs on a stream.
"""
pass
def to_text(o):
"""
Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of
:class:`bytes`, otherwise pass it to the :class:`str` constructor. The
returned object is always a plain :class:`str`, any subclass is removed.
"""
if isinstance(o, BytesType):
return o.decode('utf-8')
return UnicodeType(o)
# Documented in api.rst to work around Sphinx limitation.
now = getattr(time, 'monotonic', time.time)
# Python 2.4
try:
any
except NameError:
def any(it):
for elem in it:
if elem:
return True
def _partition(s, sep, find):
"""
(str|unicode).(partition|rpartition) for Python 2.4/2.5.
"""
idx = find(sep)
if idx != -1:
left = s[0:idx]
return left, sep, s[len(left)+len(sep):]
def threading__current_thread():
try:
return threading.current_thread() # Added in Python 2.6+
except AttributeError:
return threading.currentThread() # Deprecated in Python 3.10+
def threading__thread_name(thread):
try:
return thread.name # Added in Python 2.6+
except AttributeError:
return thread.getName() # Deprecated in Python 3.10+
if hasattr(UnicodeType, 'rpartition'):
str_partition = UnicodeType.partition
str_rpartition = UnicodeType.rpartition
bytes_partition = BytesType.partition
else:
def str_partition(s, sep):
return _partition(s, sep, s.find) or (s, u'', u'')
def str_rpartition(s, sep):
return _partition(s, sep, s.rfind) or (u'', u'', s)
def bytes_partition(s, sep):
return _partition(s, sep, s.find) or (s, '', '')
def _has_parent_authority(context_id):
return (
(context_id == mitogen.context_id) or
(context_id in mitogen.parent_ids)
)
def has_parent_authority(msg, _stream=None):
"""
Policy function for use with :class:`Receiver` and
:meth:`Router.add_handler` that requires incoming messages to originate
from a parent context, or on a :class:`Stream` whose :attr:`auth_id
<Stream.auth_id>` has been set to that of a parent context or the current
context.
"""
return _has_parent_authority(msg.auth_id)
def _signals(obj, signal):
return (
obj.__dict__
.setdefault('_signals', {})
.setdefault(signal, [])
)
def listen(obj, name, func):
"""
Arrange for `func()` to be invoked when signal `name` is fired on `obj`.
"""
_signals(obj, name).append(func)
def unlisten(obj, name, func):
"""
Remove `func()` from the list of functions invoked when signal `name` is
fired by `obj`.
:raises ValueError:
`func()` was not on the list.
"""
_signals(obj, name).remove(func)
def fire(obj, name, *args, **kwargs):
"""
Arrange for `func(*args, **kwargs)` to be invoked for every function
registered for signal `name` on `obj`.
"""
for func in _signals(obj, name):
func(*args, **kwargs)
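# Illustrative sketch (not part of the original module): listen(), unlisten()
# and fire() implement a minimal observer pattern keyed on the target
# object's __dict__, as used elsewhere in Mitogen to signal stream
# disconnection.
#
#   latch = Latch()
#   listen(stream, 'disconnect', latch.put)    # register a callback
#   fire(stream, 'disconnect')                 # invoke every registered callback
#   unlisten(stream, 'disconnect', latch.put)  # deregister it again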
def takes_econtext(func):
"""
Decorator that marks a function or class method to automatically receive a
kwarg named `econtext`, referencing the
:class:`mitogen.core.ExternalContext` active in the context in which the
function is being invoked. The decorator is only meaningful when the
function is invoked via :data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>`.
When the function is invoked directly, `econtext` must still be passed to
it explicitly.
"""
func.mitogen_takes_econtext = True
return func
def takes_router(func):
"""
Decorator that marks a function or class method to automatically receive a
kwarg named `router`, referencing the :class:`mitogen.core.Router` active
in the context in which the function is being invoked in. The decorator is
only meaningful when the function is invoked via :data:`CALL_FUNCTION
<mitogen.core.CALL_FUNCTION>`.
When the function is invoked directly, `router` must still be passed to it
explicitly.
"""
func.mitogen_takes_router = True
return func
def is_blacklisted_import(importer, fullname):
"""
Return :data:`True` if `fullname` is part of a blacklisted package, or if
any packages have been whitelisted and `fullname` is not part of one.
NB:
- If a package is on both lists, then it is treated as blacklisted.
- If any package is whitelisted, then all non-whitelisted packages are
treated as blacklisted.
"""
return ((not any(fullname.startswith(s) for s in importer.whitelist)) or
(any(fullname.startswith(s) for s in importer.blacklist)))
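# Illustrative sketch (not part of the original module), applying the NB
# rules above to a hypothetical importer configured as:
#
#   importer.whitelist = ['django']           # only django* may be requested
#   importer.blacklist = ['django.contrib']   # ...except this subtree
#
#   is_blacklisted_import(importer, 'django.db')       # -> False (allowed)
#   is_blacklisted_import(importer, 'django.contrib')  # -> True  (blacklisted)
#   is_blacklisted_import(importer, 'requests')        # -> True  (not whitelisted)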
def set_cloexec(fd):
"""
Set the file descriptor `fd` to automatically close on :func:`os.execve`.
This has no effect on file descriptors inherited across :func:`os.fork`,
they must be explicitly closed through some other means, such as
:func:`mitogen.fork.on_fork`.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert fd > 2, 'fd %r <= 2' % (fd,)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def set_nonblock(fd):
"""
Set the file descriptor `fd` to non-blocking mode. For most underlying file
types, this causes :func:`os.read` or :func:`os.write` to raise
:class:`OSError` with :data:`errno.EAGAIN` rather than block the thread
when the underlying kernel buffer is exhausted.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def set_block(fd):
"""
Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread when
the underlying kernel buffer is exhausted.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
def io_op(func, *args):
"""
Wrap `func(*args)` that may raise :class:`select.error`, :class:`IOError`,
or :class:`OSError`, trapping UNIX error codes relating to disconnection
and retry events in various subsystems:
* When a signal is delivered to the process on Python 2, system call retry
is signalled through :data:`errno.EINTR`. The invocation is automatically
restarted.
* When performing IO against a TTY, disconnection of the remote end is
signalled by :data:`errno.EIO`.
* When performing IO against a socket, disconnection of the remote end is
signalled by :data:`errno.ECONNRESET`.
* When performing IO against a pipe, disconnection of the remote end is
signalled by :data:`errno.EPIPE`.
:returns:
Tuple of `(return_value, disconnect_reason)`, where `return_value` is
the return value of `func(*args)`, and `disconnect_reason` is an exception
instance when disconnection was detected, otherwise :data:`None`.
"""
while True:
try:
return func(*args), None
except (select.error, OSError, IOError):
e = sys.exc_info()[1]
_vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e)
if e.args[0] == errno.EINTR:
continue
if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE):
return None, e
raise
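# Illustrative sketch (not part of the original module): callers treat the
# second element of the returned tuple as "the peer went away". The stream,
# broker and protocol names here are hypothetical.
#
#   buf, disconnect_reason = io_op(os.read, fd, CHUNK_SIZE)
#   if disconnect_reason or not buf:
#       stream.on_disconnect(broker)
#   else:
#       protocol.on_receive(broker, buf)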
class PidfulStreamHandler(logging.StreamHandler):
"""
A :class:`logging.StreamHandler` subclass used when
:meth:`Router.enable_debug() <mitogen.master.Router.enable_debug>` has been
called, or the `debug` parameter was specified during context construction.
Verifies the process ID has not changed on each call to :meth:`emit`,
reopening the associated log file when a change is detected.
This ensures logging to the per-process output files happens correctly even
when uncooperative third party components call :func:`os.fork`.
"""
#: PID that last opened the log file.
open_pid = None
#: Output path template.
template = '/tmp/mitogen.%s.%s.log'
def _reopen(self):
self.acquire()
try:
if self.open_pid == os.getpid():
return
ts = time.strftime('%Y%m%d_%H%M%S')
path = self.template % (os.getpid(), ts)
self.stream = open(path, 'w', 1)
set_cloexec(self.stream.fileno())
self.stream.write('Parent PID: %s\n' % (os.getppid(),))
self.stream.write('Created by:\n\n%s\n' % (
''.join(traceback.format_stack()),
))
self.open_pid = os.getpid()
finally:
self.release()
def emit(self, record):
if self.open_pid != os.getpid():
self._reopen()
logging.StreamHandler.emit(self, record)
def enable_debug_logging():
global _v, _vv
_v = True
_vv = True
root = logging.getLogger()
root.setLevel(logging.DEBUG)
IOLOG.setLevel(logging.DEBUG)
handler = PidfulStreamHandler()
handler.formatter = logging.Formatter(
'%(asctime)s %(levelname).1s %(name)s: %(message)s',
'%H:%M:%S'
)
root.handlers.insert(0, handler)
_profile_hook = lambda name, func, *args: func(*args)
_profile_fmt = os.environ.get(
'MITOGEN_PROFILE_FMT',
'/tmp/mitogen.stats.%(pid)s.%(identity)s.%(now)s.%(ext)s',
)
def _profile_hook(name, func, *args):
"""
Call `func(*args)` and return its result. This function is replaced by
:func:`_real_profile_hook` when :func:`enable_profiling` is called. This
interface is obsolete and will be replaced by a signals-based integration
later on.
"""
return func(*args)
def _real_profile_hook(name, func, *args):
profiler = cProfile.Profile()
profiler.enable()
try:
return func(*args)
finally:
path = _profile_fmt % {
'now': int(1e6 * now()),
'identity': name,
'pid': os.getpid(),
'ext': '%s'
}
profiler.dump_stats(path % ('pstats',))
profiler.create_stats()
fp = open(path % ('log',), 'w')
try:
stats = pstats.Stats(profiler, stream=fp)
stats.sort_stats('cumulative')
stats.print_stats()
finally:
fp.close()
def enable_profiling(econtext=None):
global _profile_hook
_profile_hook = _real_profile_hook
def import_module(modname):
"""
Import and return the module named `modname`.
"""
return __import__(modname, None, None, [''])
def pipe():
"""
Create a UNIX pipe pair using :func:`os.pipe`, wrapping the returned
descriptors in Python file objects in order to manage their lifetime and
ensure they are closed when their last reference is discarded and they have
not been closed explicitly.
"""
rfd, wfd = os.pipe()
return (
os.fdopen(rfd, 'rb', 0),
os.fdopen(wfd, 'wb', 0)
)
def iter_split(buf, delim, func):
"""
Invoke `func(s)` for each `delim`-delimited chunk in the potentially large
`buf`, avoiding intermediate lists and quadratic string operations. Return
the trailing undelimited portion of `buf`, or any unprocessed portion of
`buf` after `func(s)` returned :data:`False`.
:returns:
`(trailer, cont)`, where `cont` is :data:`False` if the last call to
`func(s)` returned :data:`False`.
"""
dlen = len(delim)
start = 0
cont = True
while cont:
nl = buf.find(delim, start)
if nl == -1:
break
cont = not func(buf[start:nl]) is False
start = nl + dlen
return buf[start:], cont
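# Illustrative sketch (not part of the original module): splitting a buffer of
# newline-delimited records. The callback returns None, so iteration
# continues, and the undelimited tail is handed back for the next read.
#
#   records = []
#   trailer, cont = iter_split(b('a\nb\npartial'), b('\n'), records.append)
#   # records == [b('a'), b('b')], trailer == b('partial'), cont == True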
class Py24Pickler(py_pickle.Pickler):
"""
Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle
offers little control over how a classic instance is pickled. Therefore 2.4
uses a pure-Python pickler, so CallError can be made to look as it does on
newer Pythons.
This mess will go away once proper serialization exists.
"""
@classmethod
def dumps(cls, obj, protocol):
bio = BytesIO()
self = cls(bio, protocol=protocol)
self.dump(obj)
return bio.getvalue()
def save_exc_inst(self, obj):
if isinstance(obj, CallError):
func, args = obj.__reduce__()
self.save(func)
self.save(args)
self.write(py_pickle.REDUCE)
else:
py_pickle.Pickler.save_inst(self, obj)
if PY24:
dispatch = py_pickle.Pickler.dispatch.copy()
dispatch[py_pickle.InstanceType] = save_exc_inst
if PY3:
# In 3.x Unpickler is a class exposing find_class as an overridable, but it
# cannot be overridden without subclassing.
class _Unpickler(pickle.Unpickler):
def find_class(self, module, func):
return self.find_global(module, func)
pickle__dumps = pickle.dumps
elif PY24:
# On Python 2.4, we must use a pure-Python pickler.
pickle__dumps = Py24Pickler.dumps
_Unpickler = pickle.Unpickler
else:
pickle__dumps = pickle.dumps
# In 2.x Unpickler is a function exposing a writeable find_global
# attribute.
_Unpickler = pickle.Unpickler
class Message(object):
"""
Messages are the fundamental unit of communication, comprising fields from
the :ref:`stream-protocol` header, an optional reference to the receiving
:class:`mitogen.core.Router` for ingress messages, and helper methods for
deserialization and generating replies.
"""
#: Integer target context ID. :class:`Router` delivers messages locally
#: when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise
#: they are routed up or downstream.
dst_id = None
#: Integer source context ID. Used as the target of replies if any are
#: generated.
src_id = None
#: Context ID under whose authority the message is acting. See
#: :ref:`source-verification`.
auth_id = None
#: Integer target handle in the destination context. This is one of the
#: :ref:`standard-handles`, or a dynamically generated handle used to
#: receive a one-time reply, such as the return value of a function call.
handle = None
#: Integer target handle to direct any reply to this message. Used to
#: receive a one-time reply, such as the return value of a function call.
#: :data:`IS_DEAD` has a special meaning when it appears in this field.
reply_to = None
#: Raw message data bytes.
data = b('')
_unpickled = object()
#: The :class:`Router` responsible for routing the message. This is
#: :data:`None` for locally originated messages.
router = None
#: The :class:`Receiver` over which the message was last received. Part of
#: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`.
receiver = None
HEADER_FMT = '>hLLLLLL'
HEADER_LEN = struct.calcsize(HEADER_FMT)
HEADER_MAGIC = 0x4d49 # 'MI'
def __init__(self, **kwargs):
"""
Construct a message from the supplied `kwargs`. :attr:`src_id` and
:attr:`auth_id` are always set to :data:`mitogen.context_id`.
"""
self.src_id = mitogen.context_id
self.auth_id = mitogen.context_id
vars(self).update(kwargs)
assert isinstance(self.data, BytesType), 'Message data is not Bytes'
def pack(self):
return (
struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, self.dst_id,
self.src_id, self.auth_id, self.handle,
self.reply_to or 0, len(self.data))
+ self.data
)
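# Illustrative note (not part of the original module): pack() above emits the
# fixed-size header described by HEADER_FMT ('>hLLLLLL'), i.e. big-endian
#
#   magic (0x4d49), dst_id, src_id, auth_id, handle, reply_to or 0, len(data)
#
# immediately followed by exactly len(data) bytes of payload.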
def _unpickle_context(self, context_id, name):
return _unpickle_context(context_id, name, router=self.router)
def _unpickle_sender(self, context_id, dst_handle):
return _unpickle_sender(self.router, context_id, dst_handle)
def _unpickle_bytes(self, s, encoding):
s, n = LATIN1_CODEC.encode(s)
return s
def _find_global(self, module, func):
"""
Return the class implementing `module_name.class_name` or raise
`StreamError` if the module is not whitelisted.
"""
if module == __name__:
if func == '_unpickle_call_error' or func == 'CallError':
return _unpickle_call_error
elif func == '_unpickle_sender':
return self._unpickle_sender
elif func == '_unpickle_context':
return self._unpickle_context
elif func == 'Blob':
return Blob
elif func == 'Secret':
return Secret
elif func == 'Kwargs':
return Kwargs
elif module == '_codecs' and func == 'encode':
return self._unpickle_bytes
elif module == '__builtin__' and func == 'bytes':
return BytesType
raise StreamError('cannot unpickle %r/%r', module, func)
@property
def is_dead(self):
"""
:data:`True` if :attr:`reply_to` is set to the magic value
:data:`IS_DEAD`, indicating the sender considers the channel dead. Dead
messages can be raised in a variety of circumstances, see
:data:`IS_DEAD` for more information.
"""
return self.reply_to == IS_DEAD
@classmethod
def dead(cls, reason=None, **kwargs):
"""
Syntax helper to construct a dead message.
"""
kwargs['data'], _ = encodings.utf_8.encode(reason or u'')
return cls(reply_to=IS_DEAD, **kwargs)
@classmethod
def pickled(cls, obj, **kwargs):
"""
Construct a pickled message, setting :attr:`data` to the serialization
of `obj`, and setting remaining fields using `kwargs`.
:returns:
The new message.
"""
self = cls(**kwargs)
try:
self.data = pickle__dumps(obj, protocol=2)
except pickle.PicklingError:
e = sys.exc_info()[1]
self.data = pickle__dumps(CallError(e), protocol=2)
return self
def reply(self, msg, router=None, **kwargs):
"""
Compose a reply to this message and send it using :attr:`router`, or
`router` if :attr:`router` is :data:`None`.
:param msg:
Either a :class:`Message`, or an object to be serialized in order
to construct a new message.
:param router:
Optional router to use if :attr:`router` is :data:`None`.
:param kwargs:
Optional keyword parameters overriding message fields in the reply.
"""
if not isinstance(msg, Message):
msg = Message.pickled(msg)
msg.dst_id = self.src_id
msg.handle = self.reply_to
vars(msg).update(kwargs)
if msg.handle:
(self.router or router).route(msg)
else:
LOG.debug('dropping reply to message with no return address: %r',
msg)
if PY3:
UNPICKLER_KWARGS = {'encoding': 'bytes'}
else:
UNPICKLER_KWARGS = {}
def _throw_dead(self):
if len(self.data):
raise ChannelError(self.data.decode('utf-8','replace'))
elif self.src_id == mitogen.context_id:
raise ChannelError(ChannelError.local_msg)
else:
raise ChannelError(ChannelError.remote_msg)
def unpickle(self, throw=True, throw_dead=True):
"""
Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set.
"""
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
self._throw_dead()
obj = self._unpickled
if obj is Message._unpickled:
fp = BytesIO(self.data)
unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
unpickler.find_global = self._find_global
try:
# Must occur off the broker thread.
try:
obj = unpickler.load()
except:
LOG.error('raw pickle was: %r', self.data)
raise
self._unpickled = obj
except (TypeError, ValueError):
e = sys.exc_info()[1]
raise StreamError('invalid message: %s', e)
if throw:
if isinstance(obj, CallError):
raise obj
return obj
def __repr__(self):
return 'Message(%r, %r, %r, %r, %r, %r..%d)' % (
self.dst_id, self.src_id, self.auth_id, self.handle,
self.reply_to, (self.data or '')[:50], len(self.data)
)
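# Illustrative sketch (not part of the original module): a typical handler
# registered with Router.add_handler() answers a request by replying to the
# message it received. The handler name and payload are hypothetical.
#
#   def _on_ping(msg):
#       if msg.is_dead:
#           return
#       msg.reply({'pong': msg.unpickle()})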
class Sender(object):
"""
Senders are used to send pickled messages to a handle in another context,
it is the inverse of :class:`mitogen.core.Receiver`.
Senders may be serialized, making them convenient to wire up data flows.
See :meth:`mitogen.core.Receiver.to_sender` for more information.
:param mitogen.core.Context context:
Context to send messages to.
:param int dst_handle:
Destination handle to send messages to.
"""
def __init__(self, context, dst_handle):
self.context = context
self.dst_handle = dst_handle
def send(self, data):
"""
Send `data` to the remote end.
"""
_vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
self.context.send(Message.pickled(data, handle=self.dst_handle))
explicit_close_msg = 'Sender was explicitly closed'
def close(self):
"""
Send a dead message to the remote, causing :meth:`ChannelError` to be
raised in any waiting thread.
"""
_vv and IOLOG.debug('%r.close()', self)
self.context.send(
Message.dead(
reason=self.explicit_close_msg,
handle=self.dst_handle
)
)
def __repr__(self):
return 'Sender(%r, %r)' % (self.context, self.dst_handle)
def __reduce__(self):
return _unpickle_sender, (self.context.context_id, self.dst_handle)
def _unpickle_sender(router, context_id, dst_handle):
if not (isinstance(router, Router) and
isinstance(context_id, (int, long)) and context_id >= 0 and
isinstance(dst_handle, (int, long)) and dst_handle > 0):
raise TypeError('cannot unpickle Sender: bad input or missing router')
return Sender(Context(router, context_id), dst_handle)
class Receiver(object):
"""
Receivers maintain a thread-safe queue of messages sent to a handle of this
context from another context.
:param mitogen.core.Router router:
Router to register the handler on.
:param int handle:
If not :data:`None`, an explicit handle to register, otherwise an
unused handle is chosen.
:param bool persist:
If :data:`False`, unregister the handler after one message is received.
Single-message receivers are intended for RPC-like transactions, such
as in the case of :meth:`mitogen.parent.Context.call_async`.
:param mitogen.core.Context respondent:
Context this receiver is receiving from. If not :data:`None`, arranges
for the receiver to receive a dead message if messages can no longer be
routed to the context due to disconnection, and ignores messages that
did not originate from the respondent context.
"""
#: If not :data:`None`, a function invoked as `notify(receiver)` after a
#: message has been received. The function is invoked on :class:`Broker`
#: thread, therefore it must not block. Used by
#: :class:`mitogen.select.Select` to efficiently implement waiting on
#: multiple event sources.
notify = None
raise_channelerror = True
def __init__(self, router, handle=None, persist=True,
respondent=None, policy=None, overwrite=False):
self.router = router
#: The handle.
self.handle = handle # Avoid __repr__ crash in add_handler()
self._latch = Latch() # Must exist prior to .add_handler()
self.handle = router.add_handler(
fn=self._on_receive,
handle=handle,
policy=policy,
persist=persist,
respondent=respondent,
overwrite=overwrite,
)
def __repr__(self):
return 'Receiver(%r, %r)' % (self.router, self.handle)
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.close()
def to_sender(self):
"""
Return a :class:`Sender` configured to deliver messages to this
receiver. As senders are serializable, this makes it convenient to pass
`(context_id, handle)` pairs around::
def deliver_monthly_report(sender):
for line in open('monthly_report.txt'):
sender.send(line)
sender.close()
@mitogen.main()
def main(router):
remote = router.ssh(hostname='mainframe')
recv = mitogen.core.Receiver(router)
remote.call(deliver_monthly_report, recv.to_sender())
for msg in recv:
print(msg)
"""
return Sender(self.router.myself(), self.handle)
def _on_receive(self, msg):
"""
Callback registered for the handle with :class:`Router`; appends data
to the internal queue.
"""
_vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
self._latch.put(msg)
if self.notify:
self.notify(self)
closed_msg = 'the Receiver has been closed'
def close(self):
"""
Unregister the receiver's handle from its associated router, and cause
:class:`ChannelError` to be raised in any thread waiting in :meth:`get`
on this receiver.
"""
if self.handle:
self.router.del_handler(self.handle)
self.handle = None
self._latch.close()
def size(self):
"""
Return the number of items currently buffered.
As with :class:`Queue.Queue`, `0` may be returned even though a
subsequent call to :meth:`get` will succeed, since a message may be
posted at any moment between :meth:`size` and :meth:`get`.
As with :class:`Queue.Queue`, `>0` may be returned even though a
subsequent call to :meth:`get` will block, since another waiting thread
may be woken at any moment between :meth:`size` and :meth:`get`.
:raises LatchError:
The underlying latch has already been marked closed.
"""
return self._latch.size()
def empty(self):
"""
Return `size() == 0`.
.. deprecated:: 0.2.8
Use :meth:`size` instead.
:raises LatchError:
The latch has already been marked closed.
"""
return self._latch.empty()
def get(self, timeout=None, block=True, throw_dead=True):
"""
Sleep waiting for a message to arrive on this receiver.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:raises mitogen.core.ChannelError:
The remote end indicated the channel should be closed,
communication with it was lost, or :meth:`close` was called in the
local process.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
:class:`Message` that was received.
"""
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
try:
msg = self._latch.get(timeout=timeout, block=block)
except LatchError:
raise ChannelError(self.closed_msg)
if msg.is_dead and throw_dead:
msg._throw_dead()
return msg
def __iter__(self):
"""
Yield consecutive :class:`Message` instances delivered to this receiver
until :class:`ChannelError` is raised.
"""
while True:
try:
msg = self.get()
except ChannelError:
return
yield msg
class Channel(Sender, Receiver):
"""
A channel inherits from :class:`mitogen.core.Sender` and
`mitogen.core.Receiver` to provide bidirectional functionality.
.. deprecated:: 0.2.0
This class is incomplete and obsolete, it will be removed in Mitogen
0.3.
Channels were an early attempt at syntax sugar. It is always easier to pass
around unidirectional pairs of senders/receivers, even though the syntax is
baroque:
.. literalinclude:: ../examples/ping_pong.py
Since all handles aren't known until after both ends are constructed, for
both ends to communicate through a channel, it is necessary for one end to
retrieve the handle allocated to the other and reconfigure its own channel
to match. Currently this is a manual task.
"""
def __init__(self, router, context, dst_handle, handle=None):
Sender.__init__(self, context, dst_handle)
Receiver.__init__(self, router, handle)
def close(self):
Receiver.close(self)
Sender.close(self)
def __repr__(self):
return 'Channel(%s, %s)' % (
Sender.__repr__(self),
Receiver.__repr__(self)
)
class Importer(object):
"""
Import protocol implementation that fetches modules from the parent
process.
:param context: Context to communicate via.
"""
# The Mitogen package is handled specially, since the child context must
# construct it manually during startup.
MITOGEN_PKG_CONTENT = [
'buildah',
'compat',
'debug',
'doas',
'docker',
'kubectl',
'fakessh',
'fork',
'jail',
'lxc',
'lxd',
'master',
'minify',
'os_fork',
'parent',
'podman',
'select',
'service',
'setns',
'ssh',
'su',
'sudo',
'utils',
]
ALWAYS_BLACKLIST = [
# 2.x generates needless imports for 'builtins', while 3.x does the
# same for '__builtin__'. The correct one is built-in, the other always
# a negative round-trip.
'builtins',
'__builtin__',
# On some Python releases (e.g. 3.8, 3.9) the subprocess module tries
# to import this Windows-only builtin module.
'msvcrt',
# Python 2.x module that was renamed to _thread in 3.x.
# This entry avoids a roundtrip on 2.x -> 3.x.
'thread',
# org.python.core imported by copy, pickle, xml.sax; breaks Jython, but
# very unlikely to trigger a bug report.
'org',
]
if PY3:
ALWAYS_BLACKLIST += ['cStringIO']
def __init__(self, router, context, core_src, whitelist=(), blacklist=()):
self._log = logging.getLogger('mitogen.importer')
self._context = context
self._present = {'mitogen': self.MITOGEN_PKG_CONTENT}
self._lock = threading.Lock()
self.whitelist = list(whitelist) or ['']
self.blacklist = list(blacklist) + self.ALWAYS_BLACKLIST
# Preserve copies of the original server-supplied whitelist/blacklist
# for later use by children.
self.master_whitelist = self.whitelist[:]
self.master_blacklist = self.blacklist[:]
# Presence of an entry in this map indicates in-flight GET_MODULE.
self._callbacks = {}
self._cache = {}
if core_src:
self._update_linecache('x/mitogen/core.py', core_src)
self._cache['mitogen.core'] = (
'mitogen.core',
None,
'x/mitogen/core.py',
zlib.compress(core_src, 9),
[],
)
self._install_handler(router)
def _update_linecache(self, path, data):
"""
The Python 2.4 linecache module, used to fetch source code for
tracebacks and :func:`inspect.getsource`, does not support PEP-302,
meaning it needs extra help for Mitogen-loaded modules. Directly
populate its cache if a loaded module belongs to the Mitogen package.
"""
if PY24 and 'mitogen' in path:
linecache.cache[path] = (
len(data),
0.0,
[line+'\n' for line in data.splitlines()],
path,
)
def _install_handler(self, router):
router.add_handler(
fn=self._on_load_module,
handle=LOAD_MODULE,
policy=has_parent_authority,
)
def __repr__(self):
return 'Importer'
def builtin_find_module(self, fullname):
# imp.find_module() will always succeed for __main__, because it is a
# built-in module. That means it exists on a special linked list deep
# within the bowels of the interpreter. We must special case it.
if fullname == '__main__':
raise ModuleNotFoundError()
parent, _, modname = str_rpartition(fullname, '.')
if parent:
path = sys.modules[parent].__path__
else:
path = None
fp, pathname, description = imp.find_module(modname, path)
if fp:
fp.close()
def find_module(self, fullname, path=None):
"""
Return a loader (ourself) or None, for the module with fullname.
Implements importlib.abc.MetaPathFinder.find_module().
Deprecated in Python 3.4+, replaced by find_spec().
Raises ImportWarning in Python 3.10+.
fullname A (fully qualified?) module name, e.g. "os.path".
path __path__ of parent package. None for a top level module.
"""
if hasattr(_tls, 'running'):
return None
_tls.running = True
try:
#_v and self._log.debug('Python requested %r', fullname)
fullname = to_text(fullname)
pkgname, dot, _ = str_rpartition(fullname, '.')
pkg = sys.modules.get(pkgname)
if pkgname and getattr(pkg, '__loader__', None) is not self:
self._log.debug('%s is submodule of a locally loaded package',
fullname)
return None
suffix = fullname[len(pkgname+dot):]
if pkgname and suffix not in self._present.get(pkgname, ()):
self._log.debug('%s has no submodule %s', pkgname, suffix)
return None
# #114: explicitly whitelisted prefixes override any
# system-installed package.
if self.whitelist != ['']:
if any(fullname.startswith(s) for s in self.whitelist):
return self
try:
self.builtin_find_module(fullname)
_vv and self._log.debug('%r is available locally', fullname)
except ImportError:
_vv and self._log.debug('we will try to load %r', fullname)
return self
finally:
del _tls.running
blacklisted_msg = (
'%r is present in the Mitogen importer blacklist, therefore this '
'context will not attempt to request it from the master, as the '
'request will always be refused.'
)
pkg_resources_msg = (
'pkg_resources is prohibited from importing __main__, as it causes '
'problems in applications whose main module is not designed to be '
're-imported by children.'
)
absent_msg = (
'The Mitogen master process was unable to serve %r. It may be a '
'native Python extension, or it may be missing entirely. Check the '
'importer debug logs on the master for more information.'
)
def _refuse_imports(self, fullname):
if is_blacklisted_import(self, fullname):
raise ModuleNotFoundError(self.blacklisted_msg % (fullname,))
f = sys._getframe(2)
requestee = f.f_globals['__name__']
if fullname == '__main__' and requestee == 'pkg_resources':
# Anything that imports pkg_resources will eventually cause
# pkg_resources to try and scan __main__ for its __requires__
# attribute (pkg_resources/__init__.py::_build_master()). This
# breaks any app that is not expecting its __main__ to suddenly be
# sucked over a network and injected into a remote process, like
# py.test.
raise ModuleNotFoundError(self.pkg_resources_msg)
if fullname == 'pbr':
# It claims to use pkg_resources to read version information, which
# would result in PEP-302 being used, but it actually does direct
# filesystem access. So instead smodge the environment to override
# any version that was defined. This will probably break something
# later.
os.environ['PBR_VERSION'] = '0.0.0'
def _on_load_module(self, msg):
if msg.is_dead:
return
tup = msg.unpickle()
fullname = tup[0]
_v and self._log.debug('received %s', fullname)
self._lock.acquire()
try:
self._cache[fullname] = tup
if tup[2] is not None and PY24:
self._update_linecache(
path='master:' + tup[2],
data=zlib.decompress(tup[3])
)
callbacks = self._callbacks.pop(fullname, [])
finally:
self._lock.release()
for callback in callbacks:
callback()
def _request_module(self, fullname, callback):
self._lock.acquire()
try:
present = fullname in self._cache
if not present:
funcs = self._callbacks.get(fullname)
if funcs is not None:
_v and self._log.debug('existing request for %s in flight',
fullname)
funcs.append(callback)
else:
_v and self._log.debug('sending new %s request to parent',
fullname)
self._callbacks[fullname] = [callback]
self._context.send(
Message(data=b(fullname), handle=GET_MODULE)
)
finally:
self._lock.release()
if present:
callback()
def load_module(self, fullname):
"""
Return the loaded module specified by fullname.
Implements importlib.abc.Loader.load_module().
Deprecated in Python 3.4+, replaced by create_module() & exec_module().
"""
fullname = to_text(fullname)
_v and self._log.debug('requesting %s', fullname)
self._refuse_imports(fullname)
event = threading.Event()
self._request_module(fullname, event.set)
event.wait()
ret = self._cache[fullname]
if ret[2] is None:
raise ModuleNotFoundError(self.absent_msg % (fullname,))
pkg_present = ret[1]
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self.get_filename(fullname)
mod.__loader__ = self
if pkg_present is not None: # it's a package.
mod.__path__ = []
mod.__package__ = fullname
self._present[fullname] = pkg_present
else:
mod.__package__ = str_rpartition(fullname, '.')[0] or None
if mod.__package__ and not PY3:
# 2.x requires __package__ to be exactly a string.
mod.__package__, _ = encodings.utf_8.encode(mod.__package__)
source = self.get_source(fullname)
try:
code = compile(source, mod.__file__, 'exec', 0, 1)
except SyntaxError:
LOG.exception('while importing %r', fullname)
raise
if PY3:
exec(code, vars(mod))
else:
exec('exec code in vars(mod)')
# #590: if a module replaces itself in sys.modules during import, below
# is necessary. This matches PyImport_ExecCodeModuleEx()
return sys.modules.get(fullname, mod)
def get_filename(self, fullname):
if fullname in self._cache:
path = self._cache[fullname][2]
if path is None:
# If find_loader() returns self but a subsequent master RPC
# reveals the module can't be loaded, and so load_module()
# throws ImportError, on Python 3.x it is still possible for
# the loader to be called to fetch metadata.
raise ModuleNotFoundError(self.absent_msg % (fullname,))
return u'master:' + self._cache[fullname][2]
def get_source(self, fullname):
if fullname in self._cache:
compressed = self._cache[fullname][3]
if compressed is None:
raise ModuleNotFoundError(self.absent_msg % (fullname,))
source = zlib.decompress(self._cache[fullname][3])
if PY3:
return to_text(source)
return source
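# Illustrative note (not part of the original module): entries in
# Importer._cache are the LOAD_MODULE tuples used throughout this class,
# for example the bootstrap entry created in __init__():
#
#   (fullname, pkg_present, path, compressed_source, related)
#   ('mitogen.core', None, 'x/mitogen/core.py', zlib.compress(core_src, 9), [])
#
# pkg_present is a list of submodule names when the module is a package,
# path and compressed_source are None when the master could not serve the
# module, and related lists the modules forwarded alongside it.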
class LogHandler(logging.Handler):
"""
A :class:`logging.Handler` subclass that arranges for :data:`FORWARD_LOG`
messages to be sent to a parent context in response to logging messages
generated by the current context. This is installed by default in child
contexts during bootstrap, so that :mod:`logging` events can be viewed and
managed centrally in the master process.
The handler is initially *corked* after construction, such that it buffers
messages until :meth:`uncork` is called. This allows logging to be
installed prior to communication with the target being available, and
avoids any possible race where early log messages might be dropped.
:param mitogen.core.Context context:
The context to send log messages towards. At present this is always
the master process.
"""
def __init__(self, context):
logging.Handler.__init__(self)
self.context = context
self.local = threading.local()
self._buffer = []
# Private synchronization is needed while corked, to ensure no
# concurrent call to _send() exists during uncork().
self._buffer_lock = threading.Lock()
def uncork(self):
"""
#305: during startup :class:`LogHandler` may be installed before it is
possible to route messages, therefore messages are buffered until
:meth:`uncork` is called by :class:`ExternalContext`.
"""
self._buffer_lock.acquire()
try:
self._send = self.context.send
for msg in self._buffer:
self._send(msg)
self._buffer = None
finally:
self._buffer_lock.release()
def _send(self, msg):
self._buffer_lock.acquire()
try:
if self._buffer is None:
# uncork() may run concurrent to _send()
self._send(msg)
else:
self._buffer.append(msg)
finally:
self._buffer_lock.release()
def emit(self, rec):
"""
Send a :data:`FORWARD_LOG` message towards the target context.
"""
if rec.name == 'mitogen.io' or \
getattr(self.local, 'in_emit', False):
return
self.local.in_emit = True
try:
msg = self.format(rec)
encoded = '%s\x00%s\x00%s' % (rec.name, rec.levelno, msg)
if isinstance(encoded, UnicodeType):
# Logging package emits both :(
encoded = encoded.encode('utf-8')
self._send(Message(data=encoded, handle=FORWARD_LOG))
finally:
self.local.in_emit = False
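# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Demonstrates the cork/uncork behaviour documented above using a stand-in
# context object. FakeContext and its send() method are assumptions made only
# for this example; in real use the context is the master process.
def _example_loghandler_corking():
    import logging

    class FakeContext(object):
        def __init__(self):
            self.sent = []
        def send(self, msg):
            self.sent.append(msg)

    ctx = FakeContext()
    handler = LogHandler(ctx)
    log = logging.getLogger('loghandler-example')
    log.propagate = False
    log.addHandler(handler)
    try:
        log.error('buffered until uncork() is called')
        assert ctx.sent == []        # still corked: nothing forwarded yet
        handler.uncork()             # flush the buffer through ctx.send()
        assert len(ctx.sent) == 1    # one FORWARD_LOG message delivered
    finally:
        log.removeHandler(handler)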
class Stream(object):
"""
A :class:`Stream` is one readable and optionally one writeable file
descriptor (represented by :class:`Side`) aggregated alongside an
associated :class:`Protocol` that knows how to respond to IO readiness
events for those descriptors.
Streams are registered with :class:`Broker`, and callbacks are invoked on
the broker thread in response to IO activity. When registered using
:meth:`Broker.start_receive` or :meth:`Broker._start_transmit`, the broker
may call any of :meth:`on_receive`, :meth:`on_transmit`,
:meth:`on_shutdown` or :meth:`on_disconnect`.
It is expected that the :class:`Protocol` associated with a stream will
change over its life. For example during connection setup, the initial
protocol may be :class:`mitogen.parent.BootstrapProtocol` that knows how to
enter SSH and sudo passwords and transmit the :mod:`mitogen.core` source to
the target, before handing off to :class:`MitogenProtocol` when the target
process is initialized.
Streams connecting to children are in turn aggregated by
:class:`mitogen.parent.Connection`, which contains additional logic for
managing any child process, and a reference to any separate ``stderr``
:class:`Stream` connected to that process.
"""
#: A :class:`Side` representing the stream's receive file descriptor.
receive_side = None
#: A :class:`Side` representing the stream's transmit file descriptor.
transmit_side = None
#: A :class:`Protocol` representing the protocol active on the stream.
protocol = None
#: In parents, the :class:`mitogen.parent.Connection` instance.
conn = None
#: The stream name. This is used in the :meth:`__repr__` output in any log
#: messages; it may be any descriptive string.
name = u'default'
def set_protocol(self, protocol):
"""
Bind a :class:`Protocol` to this stream, by updating
:attr:`Protocol.stream` to refer to this stream, and updating this
stream's :attr:`Stream.protocol` to refer to the protocol. Any
prior protocol's :attr:`Protocol.stream` is set to :data:`None`.
"""
if self.protocol:
self.protocol.stream = None
self.protocol = protocol
self.protocol.stream = self
def accept(self, rfp, wfp):
"""
Attach a pair of file objects to :attr:`receive_side` and
:attr:`transmit_side`, after wrapping them in :class:`Side` instances.
:class:`Side` will call :func:`set_nonblock` and :func:`set_cloexec`
on the underlying file descriptors during construction.
The same file object may be used for both sides. The default
:meth:`on_disconnect` implementation handles the possibility that only one
descriptor may need to be closed.
:param file rfp:
The file object to receive from.
:param file wfp:
The file object to transmit to.
"""
self.receive_side = Side(self, rfp)
self.transmit_side = Side(self, wfp)
def __repr__(self):
return "<Stream %s #%04x>" % (self.name, id(self) & 0xffff,)
def on_receive(self, broker):
"""
Invoked by :class:`Broker` when the stream's :attr:`receive_side` has
been marked readable using :meth:`Broker.start_receive` and the broker
has detected the associated file descriptor is ready for reading.
Subclasses must implement this if they are registered using
:meth:`Broker.start_receive`, and the method must invoke
:meth:`on_disconnect` if reading produces an empty string.
The default implementation reads :attr:`Protocol.read_size` bytes and
passes the resulting bytestring to :meth:`Protocol.on_receive`. If the
bytestring is 0 bytes, invokes :meth:`on_disconnect` instead.
"""
buf = self.receive_side.read(self.protocol.read_size)
if not buf:
LOG.debug('%r: empty read, disconnecting', self.receive_side)
return self.on_disconnect(broker)
self.protocol.on_receive(broker, buf)
def on_transmit(self, broker):
"""
Invoked by :class:`Broker` when the stream's :attr:`transmit_side` has
been marked writeable using :meth:`Broker._start_transmit` and the
broker has detected the associated file descriptor is ready for
writing.
Subclasses must implement this if they are ever registered with
:meth:`Broker._start_transmit`.
The default implementation invokes :meth:`Protocol.on_transmit`.
"""
self.protocol.on_transmit(broker)
def on_shutdown(self, broker):
"""
Invoked by :meth:`Broker.shutdown` to allow the stream time to
gracefully shutdown.
The default implementation emits a ``shutdown`` signal before
invoking :meth:`on_disconnect`.
"""
fire(self, 'shutdown')
self.protocol.on_shutdown(broker)
def on_disconnect(self, broker):
"""
Invoked by :class:`Broker` to force disconnect the stream during
shutdown, invoked by the default :meth:`on_shutdown` implementation,
and usually invoked by any subclass :meth:`on_receive` implementation
in response to a 0-byte read.
The base implementation fires a ``disconnect`` event, then closes
:attr:`receive_side` and :attr:`transmit_side` after unregistering the
stream from the broker.
"""
fire(self, 'disconnect')
self.protocol.on_disconnect(broker)
class Protocol(object):
"""
Implement the program behaviour associated with activity on a
:class:`Stream`. The protocol in use may vary over a stream's life, for
example to allow :class:`mitogen.parent.BootstrapProtocol` to initialize
the connected child before handing it off to :class:`MitogenProtocol`. A
stream's active protocol is tracked in the :attr:`Stream.protocol`
attribute, and modified via :meth:`Stream.set_protocol`.
Protocols do not handle IO, they are entirely reliant on the interface
provided by :class:`Stream` and :class:`Side`, allowing the underlying IO
implementation to be replaced without modifying behavioural logic.
"""
stream_class = Stream
#: The :class:`Stream` this protocol is currently bound to, or
#: :data:`None`.
stream = None
#: The size of the read buffer used by :class:`Stream` when this is the
#: active protocol for the stream.
read_size = CHUNK_SIZE
@classmethod
def build_stream(cls, *args, **kwargs):
stream = cls.stream_class()
stream.set_protocol(cls(*args, **kwargs))
return stream
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
self.stream and self.stream.name,
)
def on_shutdown(self, broker):
_v and LOG.debug('%r: shutting down', self)
self.stream.on_disconnect(broker)
def on_disconnect(self, broker):
# Normally both sides share an FD, so it is important that transmit_side is
# deregistered from the Poller before closing the receive side, as pollers
# like epoll and kqueue unregister all events on FD close, causing a
# subsequent attempt to unregister the transmit side to fail.
LOG.debug('%r: disconnecting', self)
broker.stop_receive(self.stream)
if self.stream.transmit_side:
broker._stop_transmit(self.stream)
self.stream.receive_side.close()
if self.stream.transmit_side:
self.stream.transmit_side.close()
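# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Shows the cross-linking performed by build_stream()/set_protocol() described
# above: the stream and its active protocol always refer to each other, and a
# replaced protocol is unlinked.
def _example_protocol_binding():
    stream = Protocol.build_stream()
    first = stream.protocol
    assert first.stream is stream            # set_protocol() linked both ways
    replacement = Protocol()
    stream.set_protocol(replacement)
    assert stream.protocol is replacement
    assert replacement.stream is stream
    assert first.stream is None              # prior protocol was unlinked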
class DelimitedProtocol(Protocol):
"""
Provide a :meth:`Protocol.on_receive` implementation for protocols that are
delimited by a fixed string, like text based protocols. Each message is
passed to :meth:`on_line_received` as it arrives, with incomplete messages
passed to :meth:`on_partial_line_received`.
When emulating user input it is often necessary to respond to incomplete
lines, such as when a "Password: " prompt is sent.
:meth:`on_partial_line_received` may be called repeatedly with an
increasingly complete message. When a complete message is finally received,
:meth:`on_line_received` will be called once for it before the buffer is
discarded.
If :func:`on_line_received` returns :data:`False`, remaining data is passed
unprocessed to the stream's current protocol's :meth:`on_receive`. This
allows switching from line-oriented to binary while the input buffer
contains both kinds of data.
"""
#: The delimiter. Defaults to newline.
delimiter = b('\n')
_trailer = b('')
def on_receive(self, broker, buf):
_vv and IOLOG.debug('%r.on_receive()', self)
stream = self.stream
self._trailer, cont = mitogen.core.iter_split(
buf=self._trailer + buf,
delim=self.delimiter,
func=self.on_line_received,
)
if self._trailer:
if cont:
self.on_partial_line_received(self._trailer)
else:
assert stream.protocol is not self, \
'stream protocol is no longer %r' % (self,)
stream.protocol.on_receive(broker, self._trailer)
def on_line_received(self, line):
"""
Receive a line from the stream.
:param bytes line:
The encoded line, excluding the delimiter.
:returns:
:data:`False` to indicate this invocation modified the stream's
active protocol, and any remaining buffered data should be passed
to the new protocol's :meth:`on_receive` method.
Any other return value is ignored.
"""
pass
def on_partial_line_received(self, line):
"""
Receive a trailing unterminated partial line from the stream.
:param bytes line:
The encoded partial line.
"""
pass
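# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Feeds a DelimitedProtocol subclass by calling on_receive() directly, showing
# how complete lines and the trailing partial line are reported. No broker is
# needed because this protocol never switches itself out.
def _example_delimited_protocol():
    class LineCollector(DelimitedProtocol):
        def __init__(self):
            self.lines = []
            self.partial = None
        def on_line_received(self, line):
            self.lines.append(line)
        def on_partial_line_received(self, line):
            self.partial = line

    stream = LineCollector.build_stream()
    proto = stream.protocol
    proto.on_receive(None, b'one\ntwo\nthr')
    assert proto.lines == [b'one', b'two']
    assert proto.partial == b'thr'           # incomplete trailing line
    proto.on_receive(None, b'ee\n')
    assert proto.lines == [b'one', b'two', b'three']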
class BufferedWriter(object):
"""
Implement buffered output while avoiding quadratic string operations. This
is currently constructed by each protocol; in future it may become fixed
for each stream instead.
"""
def __init__(self, broker, protocol):
self._broker = broker
self._protocol = protocol
self._buf = collections.deque()
self._len = 0
def write(self, s):
"""
Transmit `s` immediately, falling back to enqueuing it and marking the
stream writeable if no OS buffer space is available.
"""
if not self._len:
# Modifying epoll/Kqueue state is expensive, as are needless broker
# loops. Rather than wait for writeability, just write immediately,
# and fall back to the broker loop on error or full buffer.
try:
n = self._protocol.stream.transmit_side.write(s)
if n:
if n == len(s):
return
s = s[n:]
except OSError:
pass
self._broker._start_transmit(self._protocol.stream)
self._buf.append(s)
self._len += len(s)
def on_transmit(self, broker):
"""
Respond to stream writeability by retrying previously buffered
:meth:`write` calls.
"""
if self._buf:
buf = self._buf.popleft()
written = self._protocol.stream.transmit_side.write(buf)
if not written:
_v and LOG.debug('disconnected during write to %r', self)
self._protocol.stream.on_disconnect(broker)
return
elif written != len(buf):
self._buf.appendleft(BufferType(buf, written))
_vv and IOLOG.debug('transmitted %d bytes to %r', written, self)
self._len -= written
if not self._buf:
broker._stop_transmit(self._protocol.stream)
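# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Exercises BufferedWriter's fast path over an OS pipe: when the kernel buffer
# can absorb the write, nothing is queued and the broker is never involved.
# FakeBroker only records calls; a real Broker would poll for writeability.
def _example_buffered_writer_fast_path():
    import os

    class FakeBroker(object):
        def __init__(self):
            self.transmitting = []
        def _start_transmit(self, stream):
            self.transmitting.append(stream)
        def _stop_transmit(self, stream):
            self.transmitting.remove(stream)

    rfd, wfd = os.pipe()
    stream = Protocol.build_stream()
    stream.accept(os.fdopen(rfd, 'rb', 0), os.fdopen(wfd, 'wb', 0))
    try:
        writer = BufferedWriter(FakeBroker(), stream.protocol)
        writer.write(b'hello')                    # written immediately
        assert writer._len == 0                   # nothing left buffered
        assert stream.receive_side.read() == b'hello'
    finally:
        stream.receive_side.close()
        stream.transmit_side.close()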
class Side(object):
"""
Represent one side of a :class:`Stream`. This allows unidirectional (e.g.
pipe) and bidirectional (e.g. socket) streams to operate identically.
Sides are also responsible for tracking the open/closed state of the
underlying FD, preventing erroneous duplicate calls to :func:`os.close` due
to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk
silently succeeding by closing an unrelated descriptor. For this reason, it
is crucial only one file object exists per unique descriptor.
:param mitogen.core.Stream stream:
The stream this side is associated with.
:param object fp:
The file or socket object managing the underlying file descriptor. Any
object may be used that supports `fileno()` and `close()` methods.
:param bool cloexec:
If :data:`True`, the descriptor has its :data:`fcntl.FD_CLOEXEC` flag
enabled using :func:`fcntl.fcntl`.
:param bool keep_alive:
If :data:`True`, the continued existence of this side will extend the
shutdown grace period until it has been unregistered from the broker.
:param bool blocking:
If :data:`False`, the descriptor has its :data:`os.O_NONBLOCK` flag
enabled using :func:`fcntl.fcntl`.
"""
_fork_refs = weakref.WeakValueDictionary()
closed = False
def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False):
#: The :class:`Stream` for which this is a read or write side.
self.stream = stream
# File or socket object responsible for the lifetime of its underlying
# file descriptor.
self.fp = fp
#: Integer file descriptor to perform IO on, or :data:`None` if
#: :meth:`close` has been called. This is saved separately from the
#: file object, since :meth:`file.fileno` cannot be called on it after
#: it has been closed.
self.fd = fp.fileno()
#: If :data:`True`, causes presence of this side in
#: :class:`Broker`'s active reader set to defer shutdown until the
#: side is disconnected.
self.keep_alive = keep_alive
self._fork_refs[id(self)] = self
if cloexec:
set_cloexec(self.fd)
if not blocking:
set_nonblock(self.fd)
def __repr__(self):
return '<Side of %s fd %s>' % (
self.stream.name or repr(self.stream),
self.fd
)
@classmethod
def _on_fork(cls):
while cls._fork_refs:
_, side = cls._fork_refs.popitem()
_vv and IOLOG.debug('Side._on_fork() closing %r', side)
side.close()
def close(self):
"""
Call :meth:`file.close` on :attr:`fp` if it is not :data:`None`,
then set it to :data:`None`.
"""
_vv and IOLOG.debug('%r.close()', self)
if not self.closed:
self.closed = True
self.fp.close()
def read(self, n=CHUNK_SIZE):
"""
Read up to `n` bytes from the file descriptor, wrapping the underlying
:func:`os.read` call with :func:`io_op` to trap common disconnection
conditions.
:meth:`read` always behaves as if it is reading from a regular UNIX
file; socket, pipe, and TTY disconnection errors are masked and result
in a 0-sized read like a regular file.
:returns:
Bytes read, or the empty string to indicate disconnection was
detected.
"""
if self.closed:
# Refuse to touch the handle after closed, it may have been reused
# by another thread. TODO: synchronize read()/write()/close().
return b('')
s, disconnected = io_op(os.read, self.fd, n)
if disconnected:
LOG.debug('%r: disconnected during read: %s', self, disconnected)
return b('')
return s
def write(self, s):
"""
Write as much of the bytes from `s` as possible to the file descriptor,
wrapping the underlying :func:`os.write` call with :func:`io_op` to
trap common disconnection conditions.
:returns:
Number of bytes written, or :data:`None` if disconnection was
detected.
"""
if self.closed:
# Don't touch the handle after close, it may be reused elsewhere.
return None
written, disconnected = io_op(os.write, self.fd, s)
if disconnected:
LOG.debug('%r: disconnected during write: %s', self, disconnected)
return None
return written
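# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Shows the disconnection convention documented above: once the peer (here,
# the write end of a pipe) is gone, Side.read() returns b'' rather than
# raising, which is what Stream.on_receive() treats as a disconnect.
def _example_side_read_disconnect():
    import os
    rfd, wfd = os.pipe()
    stream = Stream()
    stream.accept(os.fdopen(rfd, 'rb', 0), os.fdopen(wfd, 'wb', 0))
    try:
        assert stream.transmit_side.write(b'ping') == 4
        assert stream.receive_side.read() == b'ping'
        stream.transmit_side.close()                  # peer goes away
        assert stream.receive_side.read() == b''      # 0-byte read = EOF
    finally:
        stream.receive_side.close()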
class MitogenProtocol(Protocol):
"""
:class:`Protocol` implementing mitogen's :ref:`stream protocol
<stream-protocol>`.
"""
#: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
#: its value is the same as :data:`mitogen.context_id` or appears in
#: :data:`mitogen.parent_ids`.
is_privileged = False
#: Invoked as `on_message(stream, msg)` each message received from the
#: peer.
on_message = None
def __init__(self, router, remote_id, auth_id=None,
local_id=None, parent_ids=None):
self._router = router
self.remote_id = remote_id
#: If not :data:`None`, :class:`Router` stamps this into
#: :attr:`Message.auth_id` of every message received on this stream.
self.auth_id = auth_id
if parent_ids is None:
parent_ids = mitogen.parent_ids
if local_id is None:
local_id = mitogen.context_id
self.is_privileged = (
(remote_id in parent_ids) or
auth_id in ([local_id] + parent_ids)
)
self.sent_modules = set(['mitogen', 'mitogen.core'])
self._input_buf = collections.deque()
self._input_buf_len = 0
self._writer = BufferedWriter(router.broker, self)
#: Routing records the dst_id of every message arriving from this
#: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID.
self.egress_ids = set()
def on_receive(self, broker, buf):
"""
Handle the next complete message on the stream. Raise
:class:`StreamError` on failure.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
if self._input_buf and self._input_buf_len < 128:
self._input_buf[0] += buf
else:
self._input_buf.append(buf)
self._input_buf_len += len(buf)
while self._receive_one(broker):
pass
corrupt_msg = (
'%s: Corruption detected: frame signature incorrect. This likely means '
'some external process is interfering with the connection. Received:'
'\n\n'
'%r'
)
def _receive_one(self, broker):
if self._input_buf_len < Message.HEADER_LEN:
return False
msg = Message()
msg.router = self._router
(magic, msg.dst_id, msg.src_id, msg.auth_id,
msg.handle, msg.reply_to, msg_len) = struct.unpack(
Message.HEADER_FMT,
self._input_buf[0][:Message.HEADER_LEN],
)
if magic != Message.HEADER_MAGIC:
LOG.error(self.corrupt_msg, self.stream.name, self._input_buf[0][:2048])
self.stream.on_disconnect(broker)
return False
if msg_len > self._router.max_message_size:
LOG.error('%r: Maximum message size exceeded (got %d, max %d)',
self, msg_len, self._router.max_message_size)
self.stream.on_disconnect(broker)
return False
total_len = msg_len + Message.HEADER_LEN
if self._input_buf_len < total_len:
_vv and IOLOG.debug(
'%r: Input too short (want %d, got %d)',
self, msg_len, self._input_buf_len - Message.HEADER_LEN
)
return False
start = Message.HEADER_LEN
prev_start = start
remain = total_len
bits = []
while remain:
buf = self._input_buf.popleft()
bit = buf[start:remain]
bits.append(bit)
remain -= len(bit) + start
prev_start = start
start = 0
msg.data = b('').join(bits)
self._input_buf.appendleft(buf[prev_start+len(bit):])
self._input_buf_len -= total_len
self._router._async_route(msg, self.stream)
return True
def pending_bytes(self):
"""
Return the number of bytes queued for transmission on this stream. This
can be used to limit the amount of data buffered in RAM by an otherwise
unlimited consumer.
For an accurate result, this method should be called from the Broker
thread, for example by using :meth:`Broker.defer_sync`.
"""
return self._writer._len
def on_transmit(self, broker):
"""
Transmit buffered messages.
"""
_vv and IOLOG.debug('%r.on_transmit()', self)
self._writer.on_transmit(broker)
def _send(self, msg):
_vv and IOLOG.debug('%r._send(%r)', self, msg)
self._writer.write(msg.pack())
def send(self, msg):
"""
Send `data` to `handle`, and tell the broker we have output. May be
called from any thread.
"""
self._router.broker.defer(self._send, msg)
def on_shutdown(self, broker):
"""
Disable :class:`Protocol` immediate disconnect behaviour.
"""
_v and LOG.debug('%r: shutting down', self)
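# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Demonstrates the frame layout consumed by _receive_one() above by packing a
# Message and unpacking its header with the same HEADER_FMT. The handle value
# 100 is arbitrary and used only for illustration; src_id/auth_id default to
# mitogen.context_id, which is 0 in a master process.
def _example_message_framing():
    import struct
    msg = Message(dst_id=0, handle=100, data=b'payload')
    frame = msg.pack()
    (magic, dst_id, src_id, auth_id,
     handle, reply_to, msg_len) = struct.unpack(
        Message.HEADER_FMT,
        frame[:Message.HEADER_LEN],
    )
    assert magic == Message.HEADER_MAGIC
    assert handle == 100 and dst_id == 0
    assert msg_len == len(b'payload')
    assert frame[Message.HEADER_LEN:] == b'payload'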
class Context(object):
"""
Represent a remote context regardless of the underlying connection method.
Context objects are simple facades that emit messages through an
associated router, and have :ref:`signals` raised against them in response
to various events relating to the context.
**Note:** This is the somewhat limited core version, used by child
contexts. The master subclass is documented below this one.
Contexts maintain no internal state and are thread-safe.
Prefer :meth:`Router.context_by_id` over constructing context objects
explicitly, as that method is deduplicating, and returns the only context
instance :ref:`signals` will be raised on.
:param mitogen.core.Router router:
Router to emit messages through.
:param int context_id:
Context ID.
:param str name:
Context name.
"""
name = None
remote_name = None
def __init__(self, router, context_id, name=None):
self.router = router
self.context_id = context_id
if name:
self.name = to_text(name)
def __reduce__(self):
return _unpickle_context, (self.context_id, self.name)
def on_disconnect(self):
_v and LOG.debug('%r: disconnecting', self)
fire(self, 'disconnect')
def send_async(self, msg, persist=False):
"""
Arrange for `msg` to be delivered to this context, with replies
directed to a newly constructed receiver. :attr:`dst_id
<Message.dst_id>` is set to the target context ID, and :attr:`reply_to
<Message.reply_to>` is set to the newly constructed receiver's handle.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param mitogen.core.Message msg:
The message.
:returns:
:class:`Receiver` configured to receive any replies sent to the
message's `reply_to` handle.
"""
receiver = Receiver(self.router, persist=persist, respondent=self)
msg.dst_id = self.context_id
msg.reply_to = receiver.handle
_v and LOG.debug('sending message to %r: %r', self, msg)
self.send(msg)
return receiver
def call_service_async(self, service_name, method_name, **kwargs):
if isinstance(service_name, BytesType):
service_name = service_name.encode('utf-8')
elif not isinstance(service_name, UnicodeType):
service_name = service_name.name() # Service.name()
_v and LOG.debug('calling service %s.%s of %r, args: %r',
service_name, method_name, self, kwargs)
tup = (service_name, to_text(method_name), Kwargs(kwargs))
msg = Message.pickled(tup, handle=CALL_SERVICE)
return self.send_async(msg)
def send(self, msg):
"""
Arrange for `msg` to be delivered to this context. :attr:`dst_id
<Message.dst_id>` is set to the target context ID.
:param Message msg:
Message.
"""
msg.dst_id = self.context_id
self.router.route(msg)
def call_service(self, service_name, method_name, **kwargs):
recv = self.call_service_async(service_name, method_name, **kwargs)
return recv.get().unpickle()
def send_await(self, msg, deadline=None):
"""
Like :meth:`send_async`, but expect a single reply (`persist=False`)
delivered within `deadline` seconds.
:param mitogen.core.Message msg:
The message.
:param float deadline:
If not :data:`None`, seconds before timing out waiting for a reply.
:returns:
Deserialized reply.
:raises TimeoutError:
No message was received and `deadline` passed.
"""
receiver = self.send_async(msg)
response = receiver.get(deadline)
data = response.unpickle()
_vv and IOLOG.debug('%r._send_await() -> %r', self, data)
return data
def __repr__(self):
return 'Context(%s, %r)' % (self.context_id, self.name)
def _unpickle_context(context_id, name, router=None):
if not (isinstance(context_id, (int, long)) and context_id >= 0 and (
(name is None) or
(isinstance(name, UnicodeType) and len(name) < 100))
):
raise TypeError('cannot unpickle Context: bad input')
if isinstance(router, Router):
return router.context_by_id(context_id, name=name)
return Context(None, context_id, name) # For plain Jane pickle.
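# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Contexts serialize to (context_id, name) via __reduce__() and are rebuilt by
# _unpickle_context(); a round trip therefore preserves both fields but not
# the router reference, which must be re-bound by the receiving side.
def _example_context_pickle_roundtrip():
    import pickle
    ctx = Context(None, 42, name=u'target')
    clone = pickle.loads(pickle.dumps(ctx))
    assert clone.context_id == 42
    assert clone.name == u'target'
    assert clone.router is None          # plain unpickle has no router bound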
class Poller(object):
"""
A poller manages OS file descriptors the user is waiting to become
available for IO. The :meth:`poll` method blocks the calling thread
until one or more become ready. The default implementation is based on
:func:`select.poll`.
Each descriptor has an associated `data` element, which is unique for each
readiness type, and defaults to being the same as the file descriptor. The
:meth:`poll` method yields the data associated with a descriptor, rather
than the descriptor itself, allowing concise loops like::
p = Poller()
p.start_receive(conn.fd, data=conn.on_read)
p.start_transmit(conn.fd, data=conn.on_write)
for callback in p.poll():
callback() # invoke appropriate bound instance method
Pollers may be modified while :meth:`poll` is yielding results. Removals
are processed immediately, causing pending events for the descriptor to be
discarded.
The :meth:`close` method must be called when a poller is discarded to avoid
a resource leak.
Pollers may only be used by one thread at a time.
"""
SUPPORTED = True
# This changed from select() to poll() in Mitogen 0.2.4. Since poll() has
# no upper FD limit, it is suitable for use with Latch, which must handle
# FDs larger than select's limit during many-host runs. We want this
# because poll() requires no setup and teardown: just a single system call,
# which is important because Latch.get() creates a Poller on each
# invocation. In a microbenchmark, poll() vs. epoll_ctl() is 30% faster in
# this scenario. If select() ever has to be reinstated in future, it is
# important that Latch.poller_class is set from parent.py to point to the
# industrial strength poller for the OS, otherwise Latch will fail randomly.
#: Increments on every poll(). Used to version _rfds and _wfds.
_generation = 1
def __init__(self):
self._rfds = {}
self._wfds = {}
def __repr__(self):
return '%s' % (type(self).__name__,)
def _update(self, fd):
"""
Required by PollPoller subclass.
"""
pass
@property
def readers(self):
"""
Return a list of `(fd, data)` tuples for every FD registered for
receive readiness.
"""
return list((fd, data) for fd, (data, gen) in self._rfds.items())
@property
def writers(self):
"""
Return a list of `(fd, data)` tuples for every FD registered for
transmit readiness.
"""
return list((fd, data) for fd, (data, gen) in self._wfds.items())
def close(self):
"""
Close any underlying OS resource used by the poller.
"""
pass
def start_receive(self, fd, data=None):
"""
Cause :meth:`poll` to yield `data` when `fd` is readable.
"""
self._rfds[fd] = (data or fd, self._generation)
self._update(fd)
def stop_receive(self, fd):
"""
Stop yielding readability events for `fd`.
Redundant calls to :meth:`stop_receive` are silently ignored; this may
change in future.
"""
self._rfds.pop(fd, None)
self._update(fd)
def start_transmit(self, fd, data=None):
"""
Cause :meth:`poll` to yield `data` when `fd` is writeable.
"""
self._wfds[fd] = (data or fd, self._generation)
self._update(fd)
def stop_transmit(self, fd):
"""
Stop yielding writeability events for `fd`.
Redundant calls to :meth:`stop_transmit` are silently ignored; this may
change in future.
"""
self._wfds.pop(fd, None)
self._update(fd)
def _poll(self, timeout):
(rfds, wfds, _), _ = io_op(select.select,
self._rfds,
self._wfds,
(), timeout
)
for fd in rfds:
_vv and IOLOG.debug('%r: POLLIN for %r', self, fd)
data, gen = self._rfds.get(fd, (None, None))
if gen and gen < self._generation:
yield data
for fd in wfds:
_vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
data, gen = self._wfds.get(fd, (None, None))
if gen and gen < self._generation:
yield data
def poll(self, timeout=None):
"""
Block the calling thread until one or more FDs are ready for IO.
:param float timeout:
If not :data:`None`, seconds to wait without an event before
returning an empty iterable.
:returns:
Iterable of `data` elements associated with ready FDs.
"""
_vv and IOLOG.debug('%r.poll(%r)', self, timeout)
self._generation += 1
return self._poll(timeout)
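# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Minimal use of the Poller interface described above, driven by an OS pipe.
# The 'readable' string stands in for the per-descriptor data element, which
# in real use is typically a (side, callback) tuple.
def _example_poller_pipe():
    import os
    rfd, wfd = os.pipe()
    poller = Poller()
    try:
        poller.start_receive(rfd, data='readable')
        assert list(poller.poll(timeout=0)) == []      # nothing to read yet
        os.write(wfd, b'x')
        assert list(poller.poll(timeout=0)) == ['readable']
        poller.stop_receive(rfd)
        assert list(poller.poll(timeout=0)) == []
    finally:
        poller.close()
        os.close(rfd)
        os.close(wfd)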
class Latch(object):
"""
A latch is a :class:`Queue.Queue`-like object that supports mutation and
waiting from multiple threads, however unlike :class:`Queue.Queue`,
waiting threads always remain interruptible, so CTRL+C always succeeds, and
waits where a timeout is set experience no wake up latency. These
properties are not possible in combination using the built-in threading
primitives available in Python 2.x.
Latches implement queues using the UNIX self-pipe trick, and a per-thread
:func:`socket.socketpair` that is lazily created the first time any
latch attempts to sleep on a thread, and dynamically associated with the
waiting Latch only for the duration of the wait.
See :ref:`waking-sleeping-threads` for further discussion.
"""
#: The :class:`Poller` implementation to use for waiting. Since the poller
#: will be very short-lived, we prefer :class:`mitogen.parent.PollPoller`
#: if it is available, or :class:`mitogen.core.Poller` otherwise, since
#: these implementations require no system calls to create, configure or
#: destroy.
poller_class = Poller
#: If not :data:`None`, a function invoked as `notify(latch)` after a
#: successful call to :meth:`put`. The function is invoked on the
#: :meth:`put` caller's thread, which may be the :class:`Broker` thread,
#: therefore it must not block. Used by :class:`mitogen.select.Select` to
#: efficiently implement waiting on multiple event sources.
notify = None
# The _cls_ prefixes here are to make it crystal clear in the code which
# state mutation isn't covered by :attr:`_lock`.
#: List of reusable :func:`socket.socketpair` tuples. The list is mutated
#: from multiple threads, the only safe operations are `append()` and
#: `pop()`.
_cls_idle_socketpairs = []
#: List of every socket object that must be closed by :meth:`_on_fork`.
#: Inherited descriptors cannot be reused, as the duplicated handles
#: reference the same underlying kernel object in use by the parent.
_cls_all_sockets = []
def __init__(self):
self.closed = False
self._lock = threading.Lock()
#: List of unconsumed enqueued items.
self._queue = []
#: List of `(wsock, cookie)` awaiting an element, where `wsock` is the
#: socketpair's write side, and `cookie` is the string to write.
self._sleeping = []
#: Number of elements of :attr:`_sleeping` that have already been
#: woken, and have a corresponding element index from :attr:`_queue`
#: assigned to them.
self._waking = 0
@classmethod
def _on_fork(cls):
"""
Clean up any files belonging to the parent process after a fork.
"""
cls._cls_idle_socketpairs = []
while cls._cls_all_sockets:
cls._cls_all_sockets.pop().close()
def close(self):
"""
Mark the latch as closed, and cause every sleeping thread to be woken,
with :class:`mitogen.core.LatchError` raised in each thread.
"""
self._lock.acquire()
try:
self.closed = True
while self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._wake(wsock, cookie)
self._waking += 1
finally:
self._lock.release()
def size(self):
"""
Return the number of items currently buffered.
As with :class:`Queue.Queue`, `0` may be returned even though a
subsequent call to :meth:`get` will succeed, since a message may be
posted at any moment between :meth:`size` and :meth:`get`.
As with :class:`Queue.Queue`, `>0` may be returned even though a
subsequent call to :meth:`get` will block, since another waiting thread
may be woken at any moment between :meth:`size` and :meth:`get`.
:raises LatchError:
The latch has already been marked closed.
"""
self._lock.acquire()
try:
if self.closed:
raise LatchError()
return len(self._queue)
finally:
self._lock.release()
def empty(self):
"""
Return `size() == 0`.
.. deprecated:: 0.2.8
Use :meth:`size` instead.
:raises LatchError:
The latch has already been marked closed.
"""
return self.size() == 0
def _get_socketpair(self):
"""
Return an unused socketpair, creating one if none exist.
"""
try:
return self._cls_idle_socketpairs.pop() # pop() must be atomic
except IndexError:
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
set_cloexec(rsock.fileno())
set_cloexec(wsock.fileno())
self._cls_all_sockets.extend((rsock, wsock))
return rsock, wsock
COOKIE_MAGIC, = struct.unpack('L', b('LTCH') * (struct.calcsize('L')//4))
COOKIE_FMT = '>Qqqq' # #545: id() and get_ident() may exceed long on armhfp.
COOKIE_SIZE = struct.calcsize(COOKIE_FMT)
def _make_cookie(self):
"""
Return a string encoding the ID of the process, instance and thread.
This disambiguates legitimate wake-ups, accidental writes to the FD,
and buggy internal FD sharing.
"""
return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
os.getpid(), id(self), thread.get_ident())
def get(self, timeout=None, block=True):
"""
Return the next enqueued object, or sleep waiting for one.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
The de-queued object.
"""
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)',
self, timeout, block)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
i = len(self._sleeping)
if len(self._queue) > i:
_vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
return self._queue.pop(i)
if not block:
raise TimeoutError()
rsock, wsock = self._get_socketpair()
cookie = self._make_cookie()
self._sleeping.append((wsock, cookie))
finally:
self._lock.release()
poller = self.poller_class()
poller.start_receive(rsock.fileno())
try:
return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
finally:
poller.close()
def _get_sleep(self, poller, timeout, block, rsock, wsock, cookie):
"""
When a result is not immediately available, sleep waiting for
:meth:`put` to write a byte to our socket pair.
"""
_vv and IOLOG.debug(
'%r._get_sleep(timeout=%r, block=%r, fd=%d/%d)',
self, timeout, block, rsock.fileno(), wsock.fileno()
)
e = None
try:
list(poller.poll(timeout))
except Exception:
e = sys.exc_info()[1]
self._lock.acquire()
try:
i = self._sleeping.index((wsock, cookie))
del self._sleeping[i]
try:
got_cookie = rsock.recv(self.COOKIE_SIZE)
except socket.error:
e2 = sys.exc_info()[1]
if e2.args[0] == errno.EAGAIN:
e = TimeoutError()
else:
e = e2
self._cls_idle_socketpairs.append((rsock, wsock))
if e:
raise e
assert cookie == got_cookie, (
"Cookie incorrect; got %r, expected %r"
% (binascii.hexlify(got_cookie),
binascii.hexlify(cookie))
)
assert i < self._waking, (
"Cookie correct, but no queue element assigned."
)
self._waking -= 1
if self.closed:
raise LatchError()
_vv and IOLOG.debug('%r.get() wake -> %r', self, self._queue[i])
return self._queue.pop(i)
finally:
self._lock.release()
def put(self, obj=None):
"""
Enqueue an object, waking the first thread waiting for a result, if one
exists.
:param obj:
Object to enqueue. Defaults to :data:`None` as a convenience when
using :class:`Latch` only for synchronization.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
"""
_vv and IOLOG.debug('%r.put(%r)', self, obj)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
self._queue.append(obj)
wsock = None
if self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._waking += 1
_vv and IOLOG.debug('%r.put() -> waking wfd=%r',
self, wsock.fileno())
elif self.notify:
self.notify(self)
finally:
self._lock.release()
if wsock:
self._wake(wsock, cookie)
def _wake(self, wsock, cookie):
written, disconnected = io_op(os.write, wsock.fileno(), cookie)
assert written == len(cookie) and not disconnected
def __repr__(self):
return 'Latch(%#x, size=%d, t=%r)' % (
id(self),
len(self._queue),
threading__thread_name(threading__current_thread()),
)
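# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# A single producer/consumer hand-off using Latch. The consumer thread sleeps
# inside get() on its per-thread socketpair until put() wakes it; the 5 second
# timeout is only a safety net for the example.
def _example_latch_handoff():
    import threading
    latch = Latch()
    results = []

    def consumer():
        results.append(latch.get(timeout=5.0))

    t = threading.Thread(target=consumer)
    t.start()
    latch.put('work item')
    t.join()
    assert results == ['work item']
    latch.close()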
class Waker(Protocol):
"""
:class:`Protocol` implementing the `UNIX self-pipe trick`_. Used to wake
:class:`Broker` when another thread needs to modify its state, by enqueing
a function call to run on the :class:`Broker` thread.
.. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html
"""
read_size = 1
broker_ident = None
@classmethod
def build_stream(cls, broker):
stream = super(Waker, cls).build_stream(broker)
stream.accept(*pipe())
return stream
def __init__(self, broker):
self._broker = broker
self._deferred = collections.deque()
def __repr__(self):
return 'Waker(fd=%r/%r)' % (
self.stream.receive_side and self.stream.receive_side.fd,
self.stream.transmit_side and self.stream.transmit_side.fd,
)
@property
def keep_alive(self):
"""
Prevent immediate Broker shutdown while deferred functions remain.
"""
return len(self._deferred)
def on_receive(self, broker, buf):
"""
Drain the pipe and fire callbacks. Since :attr:`_deferred` is
synchronized, :meth:`defer` and :meth:`on_receive` can conspire to
ensure only one byte needs to be pending regardless of queue length.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
while True:
try:
func, args, kwargs = self._deferred.popleft()
except IndexError:
return
try:
func(*args, **kwargs)
except Exception:
LOG.exception('defer() crashed: %r(*%r, **%r)',
func, args, kwargs)
broker.shutdown()
def _wake(self):
"""
Wake the multiplexer by writing a byte. If Broker is midway through
teardown, the FD may already be closed, so ignore EBADF.
"""
try:
self.stream.transmit_side.write(b(' '))
except OSError:
e = sys.exc_info()[1]
if e.args[0] not in (errno.EBADF, errno.EWOULDBLOCK):
raise
broker_shutdown_msg = (
"An attempt was made to enqueue a message with a Broker that has "
"already exitted. It is likely your program called Broker.shutdown() "
"too early."
)
def defer(self, func, *args, **kwargs):
"""
Arrange for `func()` to execute on the broker thread. This function
returns immediately without waiting for the result of `func()`. Use
:meth:`defer_sync` to block until a result is available.
:raises mitogen.core.Error:
:meth:`defer` was called after :class:`Broker` has begun shutdown.
"""
if thread.get_ident() == self.broker_ident:
_vv and IOLOG.debug('%r.defer() [immediate]', self)
return func(*args, **kwargs)
if self._broker._exitted:
raise Error(self.broker_shutdown_msg)
_vv and IOLOG.debug('%r.defer() [fd=%r]', self,
self.stream.transmit_side.fd)
self._deferred.append((func, args, kwargs))
self._wake()
class IoLoggerProtocol(DelimitedProtocol):
"""
Attached to one end of a socket pair whose other end overwrites one of the
standard ``stdout`` or ``stderr`` file descriptors in a child context.
Received data is split up into lines, decoded as UTF-8 and logged to the
:mod:`logging` package as either the ``stdout`` or ``stderr`` logger.
Logging in child contexts is in turn forwarded to the master process using
:class:`LogHandler`.
"""
@classmethod
def build_stream(cls, name, dest_fd):
"""
Even though the file descriptor `dest_fd` will hold the opposite end of
the socket open, we must keep a separate dup() of it (i.e. wsock) in
case some code decides to overwrite `dest_fd` later, which would
otherwise prevent :meth:`on_shutdown` from calling :meth:`shutdown()
<socket.socket.shutdown>` on it.
"""
rsock, wsock = socket.socketpair()
os.dup2(wsock.fileno(), dest_fd)
stream = super(IoLoggerProtocol, cls).build_stream(name)
stream.name = name
stream.accept(rsock, wsock)
return stream
def __init__(self, name):
self._log = logging.getLogger(name)
# #453: prevent accidental log initialization in a child creating a
# feedback loop.
self._log.propagate = False
self._log.handlers = logging.getLogger().handlers[:]
def on_shutdown(self, broker):
"""
Shut down the write end of the socket, preventing any further writes to
it by this process, or subprocess that inherited it. This allows any
remaining kernel-buffered data to be drained during graceful shutdown
without the buffer continuously refilling due to some out of control
child process.
"""
_v and LOG.debug('%r: shutting down', self)
if not IS_WSL:
# #333: WSL generates invalid readiness indication on shutdown().
# This modifies the *kernel object* inherited by children, causing
# EPIPE on subsequent writes to any dupped FD in any process. The
# read side can then drain completely of prior buffered data.
self.stream.transmit_side.fp.shutdown(socket.SHUT_WR)
self.stream.transmit_side.close()
def on_line_received(self, line):
"""
Decode the received line as UTF-8 and pass it to the logging framework.
"""
self._log.info('%s', line.decode('utf-8', 'replace'))
class Router(object):
"""
Route messages between contexts, and invoke local handlers for messages
addressed to this context. :meth:`Router.route() <route>` straddles the
:class:`Broker` thread and user threads, it is safe to call anywhere.
**Note:** This is the somewhat limited core version of the Router class
used by child contexts. The master subclass is documented below this one.
"""
#: The :class:`mitogen.core.Context` subclass to use when constructing new
#: :class:`Context` objects in :meth:`myself` and :meth:`context_by_id`.
#: Permits :class:`Router` subclasses to extend the :class:`Context`
#: interface, as done in :class:`mitogen.parent.Router`.
context_class = Context
max_message_size = 128 * 1048576
#: When :data:`True`, permit children to only communicate with the current
#: context or a parent of the current context. Routing between siblings or
#: children of parents is prohibited, ensuring no communication is possible
#: between intentionally partitioned networks, such as when a program
#: simultaneously manipulates hosts spread across a corporate and a
#: production network, or production networks that are otherwise
#: air-gapped.
#:
#: Sending a prohibited message causes an error to be logged and a dead
#: message to be sent in reply to the errant message, if that message has
#: ``reply_to`` set.
#:
#: The value of :data:`unidirectional` becomes the default for the
#: :meth:`local() <mitogen.master.Router.local>` `unidirectional`
#: parameter.
unidirectional = False
duplicate_handle_msg = 'cannot register a handle that already exists'
refused_msg = 'refused by policy'
invalid_handle_msg = 'invalid handle'
too_large_msg = 'message too large (max %d bytes)'
respondent_disconnect_msg = 'the respondent Context has disconnected'
broker_exit_msg = 'Broker has exited'
no_route_msg = 'no route to %r, my ID is %r'
unidirectional_msg = (
'routing mode prevents forward of message from context %d to '
'context %d via context %d'
)
def __init__(self, broker):
self.broker = broker
listen(broker, 'exit', self._on_broker_exit)
self._setup_logging()
self._write_lock = threading.Lock()
#: context ID -> Stream; must hold _write_lock to edit or iterate
self._stream_by_id = {}
#: List of contexts to notify of shutdown; must hold _write_lock
self._context_by_id = {}
self._last_handle = itertools.count(1000)
#: handle -> (persistent?, func(msg))
self._handle_map = {}
#: Context -> set { handle,.. }
self._handles_by_respondent = {}
self.add_handler(self._on_del_route, DEL_ROUTE)
def __repr__(self):
return 'Router(%r)' % (self.broker,)
def _setup_logging(self):
"""
This is done in the :class:`Router` constructor for historical reasons.
It must be called before ExternalContext logs its first messages, but
after logging has been setup. It must also be called when any router is
constructed for a consumer app.
"""
# Here seems as good a place as any.
global _v, _vv
_v = logging.getLogger().level <= logging.DEBUG
_vv = IOLOG.level <= logging.DEBUG
def _on_del_route(self, msg):
"""
Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the
corresponding :attr:`_context_by_id` member. This is replaced by
:class:`mitogen.parent.RouteMonitor` in an upgraded context.
"""
if msg.is_dead:
return
target_id_s, _, name = bytes_partition(msg.data, b(':'))
target_id = int(target_id_s, 10)
LOG.error('%r: deleting route to %s (%d)',
self, to_text(name), target_id)
context = self._context_by_id.get(target_id)
if context:
fire(context, 'disconnect')
else:
LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg)
def _on_stream_disconnect(self, stream):
notify = []
self._write_lock.acquire()
try:
for context in list(self._context_by_id.values()):
stream_ = self._stream_by_id.get(context.context_id)
if stream_ is stream:
del self._stream_by_id[context.context_id]
notify.append(context)
finally:
self._write_lock.release()
# Happens outside lock as e.g. RouteMonitor wants the same lock.
for context in notify:
context.on_disconnect()
def _on_broker_exit(self):
"""
Called prior to broker exit, informs callbacks registered with
:meth:`add_handler` the connection is dead.
"""
_v and LOG.debug('%r: broker has exited', self)
while self._handle_map:
_, (_, func, _, _) = self._handle_map.popitem()
func(Message.dead(self.broker_exit_msg))
def myself(self):
"""
Return a :class:`Context` referring to the current process. Since
:class:`Context` is serializable, this is convenient to use in remote
function call parameter lists.
"""
return self.context_class(
router=self,
context_id=mitogen.context_id,
name='self',
)
def context_by_id(self, context_id, via_id=None, create=True, name=None):
"""
Return or construct a :class:`Context` given its ID. An internal
mapping of ID to the canonical :class:`Context` representing that ID is
maintained, so that :ref:`signals` can be raised on it.
This may be called from any thread, lookup and construction are atomic.
:param int context_id:
The context ID to look up.
:param int via_id:
If the :class:`Context` does not already exist, set its
:attr:`Context.via` to the :class:`Context` matching this ID.
:param bool create:
If the :class:`Context` does not already exist, create it.
:param str name:
If the :class:`Context` does not already exist, set its name.
:returns:
:class:`Context`, or return :data:`None` if `create` is
:data:`False` and no :class:`Context` previously existed.
"""
context = self._context_by_id.get(context_id)
if context:
return context
if create and via_id is not None:
via = self.context_by_id(via_id)
else:
via = None
self._write_lock.acquire()
try:
context = self._context_by_id.get(context_id)
if create and not context:
context = self.context_class(self, context_id, name=name)
context.via = via
self._context_by_id[context_id] = context
finally:
self._write_lock.release()
return context
def register(self, context, stream):
"""
Register a newly constructed context and its associated stream, and add
the stream's receive side to the I/O multiplexer. This method remains
public while the design has not yet settled.
"""
_v and LOG.debug('%s: registering %r to stream %r',
self, context, stream)
self._write_lock.acquire()
try:
self._stream_by_id[context.context_id] = stream
self._context_by_id[context.context_id] = context
finally:
self._write_lock.release()
self.broker.start_receive(stream)
listen(stream, 'disconnect', lambda: self._on_stream_disconnect(stream))
def stream_by_id(self, dst_id):
"""
Return the :class:`Stream` that should be used to communicate with
`dst_id`. If a specific route for `dst_id` is not known, a reference to
the parent context's stream is returned. If the parent is disconnected,
or when running in the master context, return :data:`None` instead.
This can be used from any thread, but its output is only meaningful
from the context of the :class:`Broker` thread, as disconnection or
replacement could happen in parallel on the broker thread at any
moment.
"""
return (
self._stream_by_id.get(dst_id) or
self._stream_by_id.get(mitogen.parent_id)
)
def del_handler(self, handle):
"""
Remove the handle registered for `handle`
:raises KeyError:
The handle wasn't registered.
"""
_, _, _, respondent = self._handle_map.pop(handle)
if respondent:
self._handles_by_respondent[respondent].discard(handle)
def add_handler(self, fn, handle=None, persist=True,
policy=None, respondent=None,
overwrite=False):
"""
Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
`handle` from this context. Unregister after one invocation if
`persist` is :data:`False`. If `handle` is :data:`None`, a new handle
is allocated and returned.
:param int handle:
If not :data:`None`, an explicit handle to register, usually one of
the ``mitogen.core.*`` constants. If unspecified, a new unused
handle will be allocated.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param mitogen.core.Context respondent:
Context that messages to this handle are expected to be sent from.
If specified, arranges for a dead message to be delivered to `fn`
when disconnection of the context is detected.
In future `respondent` will likely also be used to prevent other
contexts from sending messages to the handle.
:param function policy:
Function invoked as `policy(msg, stream)` where `msg` is a
:class:`mitogen.core.Message` about to be delivered, and `stream`
is the :class:`mitogen.core.Stream` on which it was received. The
function must return :data:`True`, otherwise an error is logged and
delivery is refused.
Two built-in policy functions exist:
* :func:`has_parent_authority`: requires the message arrived from a
parent context, or a context acting with a parent context's
authority (``auth_id``).
* :func:`mitogen.parent.is_immediate_child`: requires the
message arrived from an immediately connected child, for use in
messaging patterns where permitting indirect upstream communication
would be buggy or insecure.
In case of refusal, and the message's ``reply_to`` field is
nonzero, a :class:`mitogen.core.CallError` is delivered to the
sender indicating refusal occurred.
:param bool overwrite:
If :data:`True`, allow existing handles to be silently overwritten.
:return:
`handle`, or if `handle` was :data:`None`, the newly allocated
handle.
:raises Error:
Attempt to register a handle that was already registered.
"""
handle = handle or next(self._last_handle)
_vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
if handle in self._handle_map and not overwrite:
raise Error(self.duplicate_handle_msg)
self._handle_map[handle] = persist, fn, policy, respondent
if respondent:
if respondent not in self._handles_by_respondent:
self._handles_by_respondent[respondent] = set()
listen(respondent, 'disconnect',
lambda: self._on_respondent_disconnect(respondent))
self._handles_by_respondent[respondent].add(handle)
return handle
def _on_respondent_disconnect(self, context):
for handle in self._handles_by_respondent.pop(context, ()):
_, fn, _, _ = self._handle_map[handle]
fn(Message.dead(self.respondent_disconnect_msg))
del self._handle_map[handle]
def _maybe_send_dead(self, unreachable, msg, reason, *args):
"""
Send a dead message to either the original sender or the intended
recipient of `msg`, if the original sender was expecting a reply
(because its `reply_to` was set), otherwise assume the message is a
reply of some sort, and send the dead message to the original
destination.
:param bool unreachable:
If :data:`True`, the recipient is known to be dead or routing
failed due to a security precaution, so don't attempt to fallback
to sending the dead message to the recipient if the original sender
did not include a reply address.
:param mitogen.core.Message msg:
Message that triggered the dead message.
:param str reason:
Human-readable error reason.
:param tuple args:
Elements to interpolate with `reason`.
"""
if args:
reason %= args
LOG.debug('%r: %r is dead: %r', self, msg, reason)
if msg.reply_to and not msg.is_dead:
msg.reply(Message.dead(reason=reason), router=self)
elif not unreachable:
self._async_route(
Message.dead(
dst_id=msg.dst_id,
handle=msg.handle,
reason=reason,
)
)
def _invoke(self, msg, stream):
# IOLOG.debug('%r._invoke(%r)', self, msg)
try:
persist, fn, policy, respondent = self._handle_map[msg.handle]
except KeyError:
self._maybe_send_dead(True, msg, reason=self.invalid_handle_msg)
return
if respondent and not (msg.is_dead or
msg.src_id == respondent.context_id):
self._maybe_send_dead(True, msg, 'reply from unexpected context')
return
if policy and not policy(msg, stream):
self._maybe_send_dead(True, msg, self.refused_msg)
return
if not persist:
self.del_handler(msg.handle)
try:
fn(msg)
except Exception:
LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn)
def _async_route(self, msg, in_stream=None):
"""
Arrange for `msg` to be forwarded towards its destination. If its
destination is the local context, then arrange for it to be dispatched
using the local handlers.
This is a lower overhead version of :meth:`route` that may only be
called from the :class:`Broker` thread.
:param Stream in_stream:
If not :data:`None`, the stream the message arrived on. Used for
performing source route verification, to ensure sensitive messages
such as ``CALL_FUNCTION`` arrive only from trusted contexts.
"""
_vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
if len(msg.data) > self.max_message_size:
self._maybe_send_dead(False, msg, self.too_large_msg % (
self.max_message_size,
))
return
parent_stream = self._stream_by_id.get(mitogen.parent_id)
src_stream = self._stream_by_id.get(msg.src_id, parent_stream)
# When the ingress stream is known, verify the message was received on
# the same stream we would expect to receive messages from the
# src_id and auth_id. This is like Reverse Path Filtering in IP, and
# ensures messages from a privileged context cannot be spoofed by a
# child.
if in_stream:
auth_stream = self._stream_by_id.get(msg.auth_id, parent_stream)
if in_stream != auth_stream:
LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
self, msg.auth_id, in_stream, auth_stream, msg)
return
if msg.src_id != msg.auth_id and in_stream != src_stream:
LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
self, msg.src_id, in_stream, src_stream, msg)
return
# If the stream's MitogenProtocol has auth_id set, copy it to the
# message. This allows subtrees to become privileged by stamping a
# parent's context ID. It is used by mitogen.unix to mark client
# streams (like Ansible WorkerProcess) as having the same rights as
# the parent.
if in_stream.protocol.auth_id is not None:
msg.auth_id = in_stream.protocol.auth_id
if in_stream.protocol.on_message is not None:
in_stream.protocol.on_message(in_stream, msg)
# Record the IDs the source ever communicated with.
in_stream.protocol.egress_ids.add(msg.dst_id)
if msg.dst_id == mitogen.context_id:
return self._invoke(msg, in_stream)
out_stream = self._stream_by_id.get(msg.dst_id)
if (not out_stream) and (parent_stream != src_stream or not in_stream):
# No downstream route exists. The message could be from a child or
# ourselves for a parent, in which case we must forward it
# upstream, or it could be from a parent for a dead child, in which
# case its src_id/auth_id would fail verification if returned to
# the parent, so in that case reply with a dead message instead.
out_stream = parent_stream
if out_stream is None:
self._maybe_send_dead(True, msg, self.no_route_msg,
msg.dst_id, mitogen.context_id)
return
if in_stream and self.unidirectional and not \
(in_stream.protocol.is_privileged or
out_stream.protocol.is_privileged):
self._maybe_send_dead(True, msg, self.unidirectional_msg,
in_stream.protocol.remote_id,
out_stream.protocol.remote_id,
mitogen.context_id)
return
out_stream.protocol._send(msg)
def route(self, msg):
"""
Arrange for the :class:`Message` `msg` to be delivered to its
destination using any relevant downstream context, or if none is found,
by forwarding the message upstream towards the master context. If `msg`
is destined for the local context, it is dispatched using the handles
registered with :meth:`add_handler`.
This may be called from any thread.
"""
self.broker.defer(self._async_route, msg)
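# --- Illustrative sketch (editorial addition, not part of mitogen) ----------
# Local dispatch through a real Router and Broker: a handler registered with
# add_handler() receives a message routed to our own context ID. A Latch is
# used to hand the message back to the calling thread.
def _example_router_local_dispatch():
    broker = Broker()
    try:
        router = Router(broker)
        latch = Latch()
        handle = router.add_handler(latch.put)
        router.route(Message(dst_id=mitogen.context_id,
                             handle=handle, data=b'hello'))
        msg = latch.get(timeout=5.0)
        assert msg.data == b'hello'
    finally:
        broker.shutdown()
        broker.join()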
class NullTimerList(object):
def get_timeout(self):
return None
class Broker(object):
"""
Responsible for handling I/O multiplexing in a private thread.
**Note:** This somewhat limited core version is used by children. The
master subclass is documented below.
"""
poller_class = Poller
_waker = None
_thread = None
# :func:`mitogen.parent._upgrade_broker` replaces this with
# :class:`mitogen.parent.TimerList` during upgrade.
timers = NullTimerList()
#: Seconds grace to allow :class:`streams <Stream>` to shutdown gracefully
#: before force-disconnecting them during :meth:`shutdown`.
shutdown_timeout = 3.0
def __init__(self, poller_class=None, activate_compat=True):
self._alive = True
self._exitted = False
self._waker = Waker.build_stream(self)
#: Arrange for `func(\*args, \**kwargs)` to be executed on the broker
#: thread, or immediately if the current thread is the broker thread.
#: Safe to call from any thread.
self.defer = self._waker.protocol.defer
self.poller = self.poller_class()
self.poller.start_receive(
self._waker.receive_side.fd,
(self._waker.receive_side, self._waker.on_receive)
)
self._thread = threading.Thread(
target=self._broker_main,
name='mitogen.broker'
)
self._thread.start()
if activate_compat:
self._py24_25_compat()
def _py24_25_compat(self):
"""
Python 2.4/2.5 have grave difficulties with threads/fork. We
mandatorily quiesce all running threads during fork using a
monkey-patch there.
"""
if sys.version_info < (2, 6):
# import_module() is used to avoid dep scanner.
os_fork = import_module('mitogen.os_fork')
os_fork._notice_broker_or_pool(self)
def start_receive(self, stream):
"""
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
ready for reading. Safe to call from any thread. When the associated
file descriptor becomes ready for reading,
:meth:`Stream.on_receive` will be called.
"""
_vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
side = stream.receive_side
assert side and not side.closed
self.defer(self.poller.start_receive,
side.fd, (side, stream.on_receive))
def stop_receive(self, stream):
"""
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as not
ready for reading. Safe to call from any thread.
"""
_vv and IOLOG.debug('%r.stop_receive(%r)', self, stream)
self.defer(self.poller.stop_receive, stream.receive_side.fd)
def _start_transmit(self, stream):
"""
Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as
ready for writing. Must only be called from the Broker thread. When the
associated file descriptor becomes ready for writing,
:meth:`Stream.on_transmit` will be called.
"""
_vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
side = stream.transmit_side
assert side and not side.closed
self.poller.start_transmit(side.fd, (side, stream.on_transmit))
def _stop_transmit(self, stream):
"""
Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as not
ready for writing.
"""
_vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream)
self.poller.stop_transmit(stream.transmit_side.fd)
def keep_alive(self):
"""
Return :data:`True` if any reader's :attr:`Side.keep_alive` attribute
is :data:`True`, or any :class:`Context` is still registered that is
not the master. Used to delay shutdown while some important work is in
progress (e.g. log draining).
"""
it = (side.keep_alive for (_, (side, _)) in self.poller.readers)
return sum(it, 0) > 0 or self.timers.get_timeout() is not None
def defer_sync(self, func):
"""
Arrange for `func()` to execute on :class:`Broker` thread, blocking the
current thread until a result or exception is available.
:returns:
Return value of `func()`.
"""
latch = Latch()
def wrapper():
try:
latch.put(func())
except Exception:
latch.put(sys.exc_info()[1])
self.defer(wrapper)
res = latch.get()
if isinstance(res, Exception):
raise res
return res
def _call(self, stream, func):
"""
Call `func(self)`, catching any exception that might occur, logging it,
and force-disconnecting the related `stream`.
"""
try:
func(self)
except Exception:
LOG.exception('%r crashed', stream)
stream.on_disconnect(self)
def _loop_once(self, timeout=None):
"""
Execute a single :class:`Poller` wait, dispatching any IO events that
caused the wait to complete.
:param float timeout:
If not :data:`None`, maximum time in seconds to wait for events.
"""
_vv and IOLOG.debug('%r._loop_once(%r, %r)',
self, timeout, self.poller)
timer_to = self.timers.get_timeout()
if timeout is None:
timeout = timer_to
elif timer_to is not None and timer_to < timeout:
timeout = timer_to
#IOLOG.debug('readers =\n%s', pformat(self.poller.readers))
#IOLOG.debug('writers =\n%s', pformat(self.poller.writers))
for side, func in self.poller.poll(timeout):
self._call(side.stream, func)
if timer_to is not None:
self.timers.expire()
def _broker_exit(self):
"""
Forcefully call :meth:`Stream.on_disconnect` on any streams that failed
to shut down gracefully, then discard the :class:`Poller`.
"""
for _, (side, _) in self.poller.readers + self.poller.writers:
LOG.debug('%r: force disconnecting %r', self, side)
side.stream.on_disconnect(self)
self.poller.close()
def _broker_shutdown(self):
"""
Invoke :meth:`Stream.on_shutdown` for every active stream, then allow
up to :attr:`shutdown_timeout` seconds for the streams to unregister
themselves, logging an error if any did not unregister during the grace
period.
"""
for _, (side, _) in self.poller.readers + self.poller.writers:
self._call(side.stream, side.stream.on_shutdown)
deadline = now() + self.shutdown_timeout
while self.keep_alive() and now() < deadline:
self._loop_once(max(0, deadline - now()))
if self.keep_alive():
LOG.error('%r: pending work still existed %d seconds after '
'shutdown began. This may be due to a timer that is yet '
'to expire, or a child connection that did not fully '
'shut down.', self, self.shutdown_timeout)
def _do_broker_main(self):
"""
Broker thread main function. Dispatches IO events until
:meth:`shutdown` is called.
"""
# For Python 2.4, no way to retrieve ident except on thread.
self._waker.protocol.broker_ident = thread.get_ident()
try:
while self._alive:
self._loop_once()
fire(self, 'before_shutdown')
fire(self, 'shutdown')
self._broker_shutdown()
except Exception:
e = sys.exc_info()[1]
LOG.exception('broker crashed')
syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,))
syslog.closelog() # prevent test 'fd leak'.
self._alive = False # Ensure _alive is consistent on crash.
self._exitted = True
self._broker_exit()
def _broker_main(self):
try:
_profile_hook('mitogen.broker', self._do_broker_main)
finally:
# 'finally' to ensure _on_broker_exit() can always SIGTERM.
fire(self, 'exit')
def shutdown(self):
"""
Request broker gracefully disconnect streams and stop. Safe to call
from any thread.
"""
_v and LOG.debug('%r: shutting down', self)
def _shutdown():
self._alive = False
if self._alive and not self._exitted:
self.defer(_shutdown)
def join(self):
"""
Wait for the broker to stop, expected to be called after
:meth:`shutdown`.
"""
self._thread.join()
def __repr__(self):
return 'Broker(%04x)' % (id(self) & 0xffff,)
class Dispatcher(object):
"""
Implementation of the :data:`CALL_FUNCTION` handle for a child context.
Listens on the child's main thread for messages sent by
:class:`mitogen.parent.CallChain` and dispatches the function calls they
describe.
If a :class:`mitogen.parent.CallChain` sending a message is in pipelined
mode, any exception that occurs is recorded, and causes all subsequent
calls with the same `chain_id` to fail with the same exception.
"""
_service_recv = None
def __repr__(self):
return 'Dispatcher'
def __init__(self, econtext):
self.econtext = econtext
#: Chain ID -> CallError if prior call failed.
self._error_by_chain_id = {}
self.recv = Receiver(
router=econtext.router,
handle=CALL_FUNCTION,
policy=has_parent_authority,
)
#: The :data:`CALL_SERVICE` :class:`Receiver` that will eventually be
#: reused by :class:`mitogen.service.Pool`, should it ever be loaded.
#: This is necessary for race-free reception of all service requests
#: delivered regardless of whether the stub or real service pool are
#: loaded. See #547 for related sorrows.
Dispatcher._service_recv = Receiver(
router=econtext.router,
handle=CALL_SERVICE,
policy=has_parent_authority,
)
self._service_recv.notify = self._on_call_service
listen(econtext.broker, 'shutdown', self._on_broker_shutdown)
def _on_broker_shutdown(self):
if self._service_recv.notify == self._on_call_service:
self._service_recv.notify = None
self.recv.close()
@classmethod
@takes_econtext
def forget_chain(cls, chain_id, econtext):
econtext.dispatcher._error_by_chain_id.pop(chain_id, None)
def _parse_request(self, msg):
data = msg.unpickle(throw=False)
_v and LOG.debug('%r: dispatching %r', self, data)
chain_id, modname, klass, func, args, kwargs = data
obj = import_module(modname)
if klass:
obj = getattr(obj, klass)
fn = getattr(obj, func)
if getattr(fn, 'mitogen_takes_econtext', None):
kwargs.setdefault('econtext', self.econtext)
if getattr(fn, 'mitogen_takes_router', None):
kwargs.setdefault('router', self.econtext.router)
return chain_id, fn, args, kwargs
def _dispatch_one(self, msg):
try:
chain_id, fn, args, kwargs = self._parse_request(msg)
except Exception:
return None, CallError(sys.exc_info()[1])
if chain_id in self._error_by_chain_id:
return chain_id, self._error_by_chain_id[chain_id]
try:
return chain_id, fn(*args, **kwargs)
except Exception:
e = CallError(sys.exc_info()[1])
if chain_id is not None:
self._error_by_chain_id[chain_id] = e
return chain_id, e
def _on_call_service(self, recv):
"""
Notifier for the :data:`CALL_SERVICE` receiver. This is called on the
:class:`Broker` thread for any service messages arriving at this
context, for as long as no real service pool implementation is loaded.
In order to safely bootstrap the service pool implementation a sentinel
message is enqueued on the :data:`CALL_FUNCTION` receiver in order to
wake the main thread, where the importer can run without any
possibility of suffering deadlock due to concurrent uses of the
importer.
Should the main thread be blocked indefinitely, preventing the import
from ever running, if it is blocked waiting on a service call, then it
means :mod:`mitogen.service` has already been imported and
:func:`mitogen.service.get_or_create_pool` has already run, meaning the
service pool is already active and the duplicate initialization was not
needed anyway.
#547: This trickery is needed to avoid the alternate option of spinning
a temporary thread to import the service pool, which could deadlock if
a custom import hook executing on the main thread (under the importer
lock) would block waiting for some data that was in turn received by a
service. Main thread import lock can't be released until service is
running, service cannot satisfy request until import lock is released.
"""
self.recv._on_receive(Message(handle=STUB_CALL_SERVICE))
def _init_service_pool(self):
import mitogen.service
mitogen.service.get_or_create_pool(router=self.econtext.router)
def _dispatch_calls(self):
for msg in self.recv:
if msg.handle == STUB_CALL_SERVICE:
if msg.src_id == mitogen.context_id:
self._init_service_pool()
continue
chain_id, ret = self._dispatch_one(msg)
_v and LOG.debug('%r: %r -> %r', self, msg, ret)
if msg.reply_to:
msg.reply(ret)
elif isinstance(ret, CallError) and chain_id is None:
LOG.error('No-reply function call failed: %s', ret)
def run(self):
if self.econtext.config.get('on_start'):
self.econtext.config['on_start'](self.econtext)
_profile_hook('mitogen.child_main', self._dispatch_calls)
class ExternalContext(object):
"""
External context implementation.
This class contains the main program implementation for new children. It is
responsible for setting up everything about the process environment, import
hooks, standard IO redirection, logging, configuring a :class:`Router` and
:class:`Broker`, and finally arranging for :class:`Dispatcher` to take over
the main thread after initialization is complete.
.. attribute:: broker
The :class:`mitogen.core.Broker` instance.
.. attribute:: context
The :class:`mitogen.core.Context` instance.
.. attribute:: channel
The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION`
requests are received.
.. attribute:: importer
The :class:`mitogen.core.Importer` instance.
.. attribute:: stdout_log
The :class:`IoLogger` connected to :data:`sys.stdout`.
.. attribute:: stderr_log
The :class:`IoLogger` connected to :data:`sys.stderr`.
"""
detached = False
def __init__(self, config):
self.config = config
def _on_broker_exit(self):
if not self.config['profiling']:
os.kill(os.getpid(), signal.SIGTERM)
def _on_shutdown_msg(self, msg):
if not msg.is_dead:
_v and LOG.debug('shutdown request from context %d', msg.src_id)
self.broker.shutdown()
def _on_parent_disconnect(self):
if self.detached:
mitogen.parent_ids = []
mitogen.parent_id = None
LOG.info('Detachment complete')
else:
_v and LOG.debug('parent stream is gone, dying.')
self.broker.shutdown()
def detach(self):
self.detached = True
stream = self.router.stream_by_id(mitogen.parent_id)
if stream: # not double-detach()'d
os.setsid()
self.parent.send_await(Message(handle=DETACHING))
LOG.info('Detaching from %r; parent is %s', stream, self.parent)
for x in range(20):
pending = self.broker.defer_sync(stream.protocol.pending_bytes)
if not pending:
break
time.sleep(0.05)
if pending:
LOG.error('Stream had %d bytes after 2000ms', pending)
self.broker.defer(stream.on_disconnect, self.broker)
def _setup_master(self):
Router.max_message_size = self.config['max_message_size']
if self.config['profiling']:
enable_profiling()
self.broker = Broker(activate_compat=False)
self.router = Router(self.broker)
self.router.debug = self.config.get('debug', False)
self.router.unidirectional = self.config['unidirectional']
self.router.add_handler(
fn=self._on_shutdown_msg,
handle=SHUTDOWN,
policy=has_parent_authority,
)
self.master = Context(self.router, 0, 'master')
parent_id = self.config['parent_ids'][0]
if parent_id == 0:
self.parent = self.master
else:
self.parent = Context(self.router, parent_id, 'parent')
in_fd = self.config.get('in_fd', 100)
in_fp = os.fdopen(os.dup(in_fd), 'rb', 0)
os.close(in_fd)
out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0)
self.stream = MitogenProtocol.build_stream(
self.router,
parent_id,
local_id=self.config['context_id'],
parent_ids=self.config['parent_ids']
)
self.stream.accept(in_fp, out_fp)
self.stream.name = 'parent'
self.stream.receive_side.keep_alive = False
listen(self.stream, 'disconnect', self._on_parent_disconnect)
listen(self.broker, 'exit', self._on_broker_exit)
def _reap_first_stage(self):
try:
os.wait() # Reap first stage.
except OSError:
pass # No first stage exists (e.g. fakessh)
def _setup_logging(self):
self.log_handler = LogHandler(self.master)
root = logging.getLogger()
root.setLevel(self.config['log_level'])
root.handlers = [self.log_handler]
if self.config['debug']:
enable_debug_logging()
def _setup_importer(self):
importer = self.config.get('importer')
if importer:
importer._install_handler(self.router)
importer._context = self.parent
else:
core_src_fd = self.config.get('core_src_fd', 101)
if core_src_fd:
fp = os.fdopen(core_src_fd, 'rb', 0)
try:
core_src = fp.read()
# Strip "ExternalContext.main()" call from last line.
core_src = b('\n').join(core_src.splitlines()[:-1])
finally:
fp.close()
else:
core_src = None
importer = Importer(
self.router,
self.parent,
core_src,
self.config.get('whitelist', ()),
self.config.get('blacklist', ()),
)
self.importer = importer
self.router.importer = importer
sys.meta_path.insert(0, self.importer)
def _setup_package(self):
global mitogen
mitogen = imp.new_module('mitogen')
mitogen.__package__ = 'mitogen'
mitogen.__path__ = []
mitogen.__loader__ = self.importer
mitogen.main = lambda *args, **kwargs: (lambda func: None)
mitogen.core = sys.modules['__main__']
mitogen.core.__file__ = 'x/mitogen/core.py' # For inspect.getsource()
mitogen.core.__loader__ = self.importer
sys.modules['mitogen'] = mitogen
sys.modules['mitogen.core'] = mitogen.core
del sys.modules['__main__']
def _setup_globals(self):
mitogen.is_master = False
mitogen.__version__ = self.config['version']
mitogen.context_id = self.config['context_id']
mitogen.parent_ids = self.config['parent_ids'][:]
mitogen.parent_id = mitogen.parent_ids[0]
def _nullify_stdio(self):
"""
Open /dev/null to replace stdio temporarily. In case of odd startup,
assume we may be allocated a standard handle.
"""
for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)):
fd = os.open('/dev/null', mode)
if fd != stdfd:
os.dup2(fd, stdfd)
os.close(fd)
def _preserve_tty_fp(self):
"""
#481: when stderr is a TTY due to being started via tty_create_child()
or hybrid_tty_create_child(), and some privilege escalation tool like
prehistoric versions of sudo exec this process over the top of itself,
there is nothing left to keep the slave PTY open after we replace our
stdio. Therefore if stderr is a TTY, keep around a permanent dup() to
avoid receiving SIGHUP.
"""
try:
if os.isatty(2):
self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0)
set_cloexec(self.reserve_tty_fp.fileno())
except OSError:
pass
def _setup_stdio(self):
self._preserve_tty_fp()
# When sys.stdout was opened by the runtime, overwriting it will not
# close FD 1. However when forking from a child that previously used
# fdopen(), overwriting it /will/ close FD 1. So we must swallow the
# close before IoLogger overwrites FD 1, otherwise its new FD 1 will be
# clobbered. Additionally, stdout must be replaced with /dev/null prior
# to stdout.close(), since if block buffering was active in the parent,
# any pre-fork buffered data will be flushed on close(), corrupting the
# connection to the parent.
self._nullify_stdio()
sys.stdout.close()
self._nullify_stdio()
self.loggers = []
for name, fd in (('stdout', 1), ('stderr', 2)):
log = IoLoggerProtocol.build_stream(name, fd)
self.broker.start_receive(log)
self.loggers.append(log)
# Reopen with line buffering.
sys.stdout = os.fdopen(1, 'w', 1)
def main(self):
self._setup_master()
try:
try:
self._setup_logging()
self._setup_importer()
self._reap_first_stage()
if self.config.get('setup_package', True):
self._setup_package()
self._setup_globals()
if self.config.get('setup_stdio', True):
self._setup_stdio()
self.dispatcher = Dispatcher(self)
self.router.register(self.parent, self.stream)
self.router._setup_logging()
_v and LOG.debug('Python version is %s', sys.version)
_v and LOG.debug('Parent is context %r (%s); my ID is %r',
self.parent.context_id, self.parent.name,
mitogen.context_id)
_v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r',
os.getpid(), os.getppid(), os.geteuid(),
os.getuid(), os.getegid(), os.getgid(),
socket.gethostname())
sys.executable = os.environ.pop('ARGV0', sys.executable)
_v and LOG.debug('Recovered sys.executable: %r', sys.executable)
if self.config.get('send_ec2', True):
self.stream.transmit_side.write(b('MITO002\n'))
self.broker._py24_25_compat()
self.log_handler.uncork()
self.dispatcher.run()
_v and LOG.debug('ExternalContext.main() normal exit')
except KeyboardInterrupt:
LOG.debug('KeyboardInterrupt received, exiting gracefully.')
except BaseException:
LOG.exception('ExternalContext.main() crashed')
raise
finally:
self.broker.shutdown() |
|
mitogen-hq__mitogen | howitworks.rst | Tutorial | Generate how Mitogen works tutorial | BSD 3-Clause New or Revised License | mitogen-hq__mitogen/docs/howitworks.rst | [
"mitogen-hq__mitogen/mitogen/core.py"
] | How Mitogen Works
Some effort is required to accomplish the seemingly magical feat of
bootstrapping a remote Python process without any software installed on
the remote machine. The steps involved are unlikely to be immediately
obvious to the casual reader, and they required several iterations to
discover, so we document them thoroughly below.
The UNIX First Stage
To allow delivery of the bootstrap compressed using zlib, it is
necessary for something on the remote to be prepared to decompress the
payload and feed it to a Python interpreter[1]. Since we would like to
avoid writing an error-prone shell fragment to implement this, and since
we must avoid writing to the remote machine's disk in case it is
read-only, the Python process started on the remote machine by Mitogen
immediately forks in order to implement the decompression.
Python Command Line
The Python command line sent to the host is a zlib-compressed[2] and
base64-encoded copy of the mitogen.master.Stream._first_stage function,
which has been carefully optimized to reduce its size. Prior to
compression and encoding, CONTEXT_NAME is replaced with the desired
context name in the function's source code.
python -c 'exec "xxx".decode("base64").decode("zlib")'
The command-line arranges for the Python interpreter to decode the
base64'd component, decompress it and execute it as Python code. Base64
is used to protect against any special characters that might otherwise be
interpreted by the system shell in use.
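The following sketch shows how such a command line could be assembled on the
master. It is illustrative only; the function name is invented and the real
preparation lives in mitogen.master:

    import base64
    import zlib

    def build_first_stage_command(first_stage_source, context_name):
        # Substitute the desired context name, then compress and encode the
        # first stage so it survives shell quoting intact.
        src = first_stage_source.replace('CONTEXT_NAME', context_name)
        payload = base64.b64encode(zlib.compress(src.encode())).decode()
        # The remote (Python 2) interpreter decodes and exec()s it inline.
        return 'python -c \'exec "%s".decode("base64").decode("zlib")\'' % payload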
Forking The First Stage
The first stage creates a UNIX pipe and saves a copy of the process's
real stdin file descriptor (used for communication with the master) so
that it can be recovered by the bootstrapped process later. It then
forks into a new process.
After fork, the parent half overwrites its stdin with the read end of
the pipe, and the child half writes the string MITO000\n, then begins
reading the zlib-compressed payload supplied on stdin by the master, and
writing the decompressed result to the write-end of the UNIX pipe.
To allow recovery of stdin for reuse by the bootstrapped process for
parent<->child communication, it is necessary for the first stage to
avoid closing stdin or reading from it until EOF. Therefore, the master
sends the zlib-compressed payload prefixed with an integer size,
allowing reading by the first stage of exactly the required bytes.
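The behaviour described above corresponds roughly to the sketch below. It is
not the real mitogen.master.Stream._first_stage; in particular, the assumption
that the payload size arrives as a decimal line on the real stdin is made
purely for illustration:

    import os
    import sys
    import zlib

    def first_stage_sketch():
        saved_stdin = os.dup(0)    # keep the real stdin for later recovery
        r, w = os.pipe()
        if os.fork():
            # Parent half: re-execute Python with stdin reading the
            # decompressed bootstrap source from the pipe.
            os.dup2(r, 0)
            os.close(r)
            os.close(w)
            os.execv(sys.executable, ['CONTEXT_NAME'])
        else:
            # Child half: signal readiness, read exactly `size` compressed
            # bytes from the original stdin, then feed the decompressed
            # source into the pipe and signal completion.
            os.write(1, b'MITO000\n')
            header = b''
            while not header.endswith(b'\n'):    # assumed size encoding
                header += os.read(0, 1)
            size = int(header)
            payload = b''
            while len(payload) < size:
                payload += os.read(0, size - len(payload))
            os.write(w, zlib.decompress(payload))
            os.write(1, b'MITO001\n')
            os._exit(0)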
Configuring argv[0]
Forking provides an excellent opportunity to tidy up the eventual Python
interpreter, in particular, restarting it using a fresh command-line to
get rid of the large base64-encoded first stage parameter, and to
replace argv[0] with something descriptive.
After configuring its stdin to point to the read end of the pipe, the
parent half of the fork re-executes Python, with argv[0] taken from the
CONTEXT_NAME variable earlier substituted into its source code. As no
arguments are provided to this new execution of Python, and since stdin
is connected to a pipe (whose write end is connected to the first
stage), the Python interpreter begins reading source code to execute
from the pipe connected to stdin.
Bootstrap Preparation
Now we have the mechanism in place to send a zlib-compressed script to
the remote Python interpreter, it is time to choose what to send.
The script sent is simply the source code for mitogen.core, with a
single line suffixed to trigger execution of the
mitogen.core.ExternalContext.main function. The encoded arguments to the
main function include some additional details, such as the logging
package level that was active in the parent process, and whether
debugging or profiling are enabled.
After the script source code is prepared, it is passed through
mitogen.master.minimize_source to strip it of docstrings and comments,
while preserving line numbers. This reduces the compressed payload by
around 20%.
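Put together, preparing the bootstrap on the master amounts to something like
the following sketch. The function and parameter names are illustrative; the
real code assembles an ExternalContext configuration rather than a plain
repr():

    import zlib

    def prepare_bootstrap(core_source, config_repr, minimize_source):
        # minimize_source is assumed to behave like mitogen.master.minimize_source:
        # strip docstrings and comments while preserving line numbers.
        src = minimize_source(core_source)
        src += '\nExternalContext(%s).main()\n' % (config_repr,)
        return zlib.compress(src.encode(), 9)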
Preserving The mitogen.core Source
One final trick is implemented in the first stage: after bootstrapping
the new child, it writes a duplicate copy of the mitogen.core source it
just used to bootstrap it back into another pipe connected to the child.
The child's module importer cache is initialized with a copy of the
source, so that subsequent bootstraps of children-of-children do not
require the source to be fetched from the master a second time.
Signalling Success
Once the first stage has signalled MITO000\n, the master knows it is
ready to receive the compressed bootstrap. After decompressing and
writing the bootstrap source to its parent Python interpreter, the first
stage writes the string MITO001\n to stdout before exiting. The master
process waits for this string before considering bootstrap successful
and the child's stdio ready to receive messages.
The signal value is 8 bytes to match the minimum chunk size required to
disambiguate between lines containing an interesting token during SSH
password authentication, a debug message from the SSH client itself, or
a message from the first stage.
ExternalContext.main()
mitogen.core.ExternalContext.main
Generating A Synthetic mitogen Package
Since the bootstrap consists of the mitogen.core source code, and this
code is loaded by Python by way of its main script (__main__ module),
initially the module layout in the child will be incorrect.
The first step taken after bootstrap is to rearrange sys.modules
slightly so that mitogen.core appears in the correct location, and all
classes defined in that module have their __module__ attribute fixed up
such that cPickle correctly serializes instance module names.
Once a synthetic mitogen package and mitogen.core module have been
generated, the bootstrap deletes sys.modules['__main__'], so that any
attempt to import it (by cPickle) will cause the import to be satisfied
by fetching the master's actual __main__ module. This is necessary to
allow master programs to be written as a self-contained Python script.
Reaping The First Stage
After the bootstrap has called os.dup on the copy of the stdin file
descriptor saved by the first stage, it is closed.
Additionally, since the first stage was forked prior to re-executing the
Python interpreter, it will exist as a zombie process until the parent
process reaps it. Therefore the bootstrap must call os.wait soon after
startup.
Setup Logging
The child's logging package root logger is configured to have the same
log level as the root logger in the master, and mitogen.core.LogHandler
is installed to forward logs to the master context's
FORWARD_LOG <mitogen.core.FORWARD_LOG> handle.
The log level is copied into the child to avoid generating a potentially
large amount of network IO forwarding logs that will simply be filtered
away once they reach the master.
The Module Importer
An instance of mitogen.core.Importer is installed in sys.meta_path,
where Python's import statement will execute it before attempting to
find a module locally.
Standard IO Redirection
Two instances of mitogen.core.IoLogger are created, one for stdout and
one for stderr. This class creates a UNIX pipe whose read end is added
to the IO multiplexer, and whose write end is used to overwrite the
handles inherited during process creation.
Even without IO redirection, something must replace stdin and stdout,
otherwise it is possible for the stream used for communication between
parent and child to be accidentally corrupted by subprocesses run by
user code.
The inherited stdin is replaced by a file descriptor pointing to
/dev/null.
Finally Python's sys.stdout is reopened to ensure line buffering is
active, so that print statements and suchlike promptly appear in the
logs.
Function Call Dispatch
mitogen.core
After all initialization is complete, the child's main thread sits in a
loop reading from a Channel <mitogen.core.Channel> connected to the
CALL_FUNCTION <mitogen.core.CALL_FUNCTION> handle. This handle is
written to by call() <mitogen.parent.Context.call> and
call_async() <mitogen.parent.Context.call_async>.
CALL_FUNCTION <mitogen.core.CALL_FUNCTION> only accepts requests from
the context IDs listed in mitogen.parent_ids, forming a chain of trust
between the master and any intermediate context leading to the recipient
of the message. In combination with source-verification, this is a major
contributor to ensuring contexts running on compromised infrastructure
cannot trigger code execution in siblings or any parent.
Shutdown
mitogen.core
When a context receives SHUTDOWN <mitogen.core.SHUTDOWN> from its
immediate parent, it closes its own CALL_FUNCTION
<mitogen.core.CALL_FUNCTION> Channel <mitogen.core.Channel> before
sending SHUTDOWN <mitogen.core.SHUTDOWN> to any directly connected
children. Closing the channel has the effect of causing
ExternalContext._dispatch_calls to exit and begin joining on the broker
thread.
During shutdown, the master waits up to 5 seconds for children to
disconnect gracefully before force disconnecting them, while children
will use that time to call
socket.shutdown(SHUT_WR) <socket.socket.shutdown> on their
IoLogger <mitogen.core.IoLogger> socket's write ends before draining any
remaining data buffered on the read ends, and ensuring any deferred
broker function callbacks have had a chance to complete, necessary to
capture for example forwarding any remaining logging records.
An alternative approach is to wait until the IoLogger socket is
completely closed, with some hard timeout, but this necessitates greater
discipline than is common in infrastructure code (how often have you
forgotten to redirect stderr to /dev/null when starting a daemon
process?), so needless irritating delays would often be experienced
during program termination.
If the main thread (responsible for function call dispatch) fails to
shut down gracefully, because some user function is hanging, it will
still be cleaned up since as the final step in broker shutdown, the
broker sends signal.SIGTERM <signal> to its own process.
Stream Protocol
mitogen.core
Once connected, a basic framing protocol is used to communicate between
parent and child. Integers use big endian in their encoded form.
Field Size Description
---------- ------ ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
magic 2 Integer 0x4d49 (MI), used to detect stream corruption.
dst_id 4 Integer target context ID. Router delivers messages locally when their dst_id matches mitogen.context_id, otherwise they are routed up or downstream.
src_id 4 Integer source context ID. Used as the target of replies if any are generated.
auth_id 4 The context ID under whose authority the message is acting. See source-verification.
handle 4 Integer target handle in the destination context. This is one of the standard-handles, or a dynamically generated handle used to receive a one-time reply, such as the return value of a function call.
reply_to 4 Integer target handle to direct any reply to this message. Used to receive a one-time reply, such as the return value of a function call, or to signal a special condition for the message. See below
<reply_to_values> for special values for this field.
length 4 Length of the data part of the message.
data n/a Message data, which may be raw or pickled.
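Assuming the big-endian fields and sizes listed in the table, a frame can be
packed and unpacked with the struct module as in this sketch (illustrative,
not the exact mitogen.core.Message implementation):

    import struct

    HEADER_FMT = '>HLLLLLL'    # magic, dst_id, src_id, auth_id, handle, reply_to, length
    HEADER_LEN = struct.calcsize(HEADER_FMT)    # 26 bytes
    MAGIC = 0x4d49             # 'MI'

    def pack_frame(dst_id, src_id, auth_id, handle, reply_to, data):
        header = struct.pack(HEADER_FMT, MAGIC, dst_id, src_id, auth_id,
                             handle, reply_to, len(data))
        return header + data

    def unpack_header(buf):
        fields = struct.unpack(HEADER_FMT, buf[:HEADER_LEN])
        if fields[0] != MAGIC:
            raise ValueError('stream corruption detected')
        return fields[1:]      # dst_id, src_id, auth_id, handle, reply_to, length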
Standard Handles
Masters listen on the following handles:
FORWARD_LOG
Receives (logger_name, level, msg) 3-tuples and writes them to the
master's mitogen.ctx.<context_name> logger.
GET_MODULE
Receives the name of a module to load fullname, locates the source code
for fullname, and routes one or more LOAD_MODULE messages back towards
the sender of the GET_MODULE request. If lookup fails, None is sent
instead.
See import-preloading for a deeper discussion of GET_MODULE/LOAD_MODULE.
ALLOCATE_ID
Replies to any message sent to it with a newly allocated range of
context IDs, to allow children to safely start their own contexts.
Presently IDs are allocated in batches of 1000 from a 32 bit range,
allowing up to 4.2 million parent contexts to be created and destroyed
before the associated Router must be recreated.
This is handled by mitogen.master.IdAllocator in the master process, and
messages are sent to it from mitogen.parent.ChildIdAllocator in
children.
Children listen on the following handles:
LOAD_MODULE
Receives (pkg_present, path, compressed, related) tuples, composed of:
- pkg_present: Either None for a plain .py module, or a list of
canonical names of submodules existing within this package. For
example, a LOAD_MODULE for the mitogen package would return a list
like: ["mitogen.core", "mitogen.fakessh", "mitogen.master", ..].
This list is used by children to avoid generating useless
round-trips due to Python 2.x's import statement behavior.
- path: Original filesystem where the module was found on the master.
- compressed: zlib-compressed module source code.
- related: list of canonical module names on which this module appears
to depend. Used by children that have ever started any children of
their own to preload those children with LOAD_MODULE messages in
response to a GET_MODULE request.
CALL_FUNCTION
Receives (chain_id, mod_name, class_name, func_name, args, kwargs)
6-tuples from mitogen.parent.CallChain, imports mod_name, then attempts
to execute class_name.func_name(*args, **kwargs).
- `chain_id`: if not None, an identifier unique to the originating
mitogen.parent.CallChain. When set, if an exception occurs during a
call, future calls with the same ID automatically fail with the same
exception without ever executing, and failed calls with no reply_to
set are not dumped to the logging framework as they otherwise would.
This is used to implement pipelining.
When this channel is closed (by way of receiving a dead message), the
child's main thread begins graceful shutdown of its own Broker and
Router.
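Ignoring chain bookkeeping and error capture, dispatching one such tuple
amounts to the sketch below; the real logic is Dispatcher._parse_request() and
_dispatch_one() in mitogen.core:

    import importlib

    def dispatch_call(chain_id, mod_name, class_name, func_name, args, kwargs):
        obj = importlib.import_module(mod_name)
        if class_name:
            obj = getattr(obj, class_name)
        fn = getattr(obj, func_name)
        return fn(*args, **kwargs)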
SHUTDOWN
When received from a child's immediate parent, causes the broker thread
to enter graceful shutdown, including sending a dead message to the
child's main thread, causing it to join on the exit of the broker
thread.
The final step of a child's broker shutdown process sends
signal.SIGTERM <signal> to itself, ensuring the process dies even if the
main thread was hung executing user code.
Each context is responsible for sending SHUTDOWN to each of its directly
connected children in response to the master sending SHUTDOWN to it, and
arranging for the connection to its parent to be closed shortly
thereafter.
Masters, and children that have ever been used to create a descendent
child also listen on the following handles:
ADD_ROUTE
Receives target_id integer from downstream, describing an ID allocated
to a recently constructed child. The receiver verifies no existing route
exists to target_id before updating its local table to route messages
for target_id via the stream from which the ADD_ROUTE message was
received.
DEL_ROUTE
Receives target_id integer from downstream, verifies a route exists to
target_id via the stream on which the message was received, removes that
route from its local table, triggers the disconnect signal on any
mitogen.core.Context instance in the local process, then propagates the
message upward towards its own parent.
mitogen.core
DETACHING
Sent to inform a parent that user code has invoked
ExternalContext.detach to decouple the lifecycle of a directly connected
context and its subtree from the running program.
A child usually shuts down immediately if it loses its parent
connection, and parents usually terminate any related Python/SSH
subprocess on disconnection. Receiving DETACHING informs the parent the
connection will soon drop, but the process intends to continue life
independently, and to avoid terminating the related subprocess if that
subprocess is the child itself.
Non-master parents also listen on the following handles:
mitogen.core
GET_MODULE
As with master's GET_MODULE, except this implementation
(mitogen.master.ModuleForwarder) serves responses using
mitogen.core.Importer's cache before forwarding the request to its
parent context. The response is cached by each context in turn before
being forwarded on to the child context that originally made the
request. In this way, the master need never re-send a module it has
already sent to a direct descendant.
mitogen.core
FORWARD_MODULE
Receives (context, fullname) tuples from its parent and arranges for a
LOAD_MODULE to be sent towards context for the module fullname and any
related modules. The module must already have been delivered to the
current context by its parent in a prior LOAD_MODULE message.
If the receiver is the immediate parent of context, then only
LOAD_MODULE is sent to the child. Otherwise LOAD_MODULE is sent to the
next closest parent if the module has not previously been sent on that
stream, followed by a copy of the FORWARD_MODULE message.
This message is used to recursively preload indirect children with
modules, ensuring they are cached and deduplicated at each hop in the
chain leading to the target context.
Special values for the reply_to field:
IS_DEAD
Additional handles are created to receive the result of every function
call triggered by call_async() <mitogen.parent.Context.call_async>.
Use of Pickle
The current implementation uses the Python cPickle module, with a
restrictive class whitelist to prevent triggering undesirable code
execution. The primary reason for using cPickle is that it is
computationally efficient, and avoids including a potentially large body
of serialization code in the bootstrap.
The pickler will instantiate only built-in types and one of 3
constructor functions, to support unpickling CallError
<mitogen.core.CallError>, mitogen.core.Sender, and
Context <mitogen.core.Context>.
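A whitelist of this kind can be expressed by overriding Unpickler.find_class,
as in the sketch below. The permitted globals shown here are illustrative, not
mitogen's real whitelist:

    import pickle
    from io import BytesIO

    class RestrictedUnpickler(pickle.Unpickler):
        # Only these (module, name) globals may be resolved while unpickling.
        ALLOWED = {('builtins', 'set'), ('builtins', 'frozenset')}

        def find_class(self, module, name):
            if (module, name) not in self.ALLOWED:
                raise pickle.UnpicklingError('forbidden global %s.%s' % (module, name))
            return super(RestrictedUnpickler, self).find_class(module, name)

    def restricted_loads(data):
        return RestrictedUnpickler(BytesIO(data)).load()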
The choice of Pickle is one area to be revisited later. All accounts
suggest it cannot be used securely, however few of those accounts appear
to be expert, and none mention any additional attacks that would not be
prevented by using a restrictive class whitelist.
The IO Multiplexer
Since we must include our IO multiplexer as part of the bootstrap,
off-the-shelf implementations are for the most part entirely
inappropriate. For example, a minimal copy of Twisted weighs in at
around 440KiB and is composed of approximately 115 files. Even if we
could arrange for an entire Python package to be transferred during
bootstrap, this minimal configuration is massive in comparison to
Mitogen's solution, multiplies quickly in the presence of many machines,
and would require manually splitting up the parts of Twisted that we
would like to use.
Message Routing
Routing assumes it is impossible to construct a tree such that one of a
context's parents will not know the ID of a target the context is
attempting to communicate with.
When mitogen.core.Router receives a message, it checks the IDs
associated with its directly connected streams for a potential route. If
any stream matches, either because it directly connects to the target
ID, or because the master sent an ADD_ROUTE <mitogen.core.ADD_ROUTE>
message associating it, then the message will be forwarded down the tree
using that stream.
If the message does not match any ADD_ROUTE <mitogen.core.ADD_ROUTE>
message or stream, instead it is forwarded upwards to the immediate
parent, and recursively by each parent in turn until one is reached that
knows how to forward the message down the tree.
When a parent establishes a new child, it sends a corresponding
ADD_ROUTE <mitogen.core.ADD_ROUTE> message towards its parent, which
recursively forwards it up towards the root.
Parents keep note of all routes associated with each stream they connect
with, and trigger DEL_ROUTE messages propagated upstream for each route
associated with that stream if the stream is disconnected for any
reason.
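A condensed sketch of this routing decision, using plain dictionaries in place
of the real Router state (the names are illustrative):

    def choose_stream(direct_streams, learned_routes, parent_stream, dst_id):
        # direct_streams: {context_id: stream} for directly connected contexts.
        # learned_routes: {context_id: stream} populated from ADD_ROUTE messages.
        if dst_id in direct_streams:
            return direct_streams[dst_id]
        if dst_id in learned_routes:
            return learned_routes[dst_id]
        return parent_stream    # unknown target: forward towards the root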
Example
[image]
In the diagram, when node12b is creating the sudo:node12b:webapp
context, it must send ADD_ROUTE messages to rack12, which will propagate
it to dc1, and recursively to bastion, and master; node12b does not
require an ADD_ROUTE message since it has a stream directly connected to
the new context.
Since Mitogen streams are strictly ordered, it is never possible for a
parent to receive a message from a newly constructed child before
receiving a corresponding ADD_ROUTE sent by the child's parent,
describing how to reply to it.
When sudo:node12b:webapp wants to send a message to sudo:node22a:webapp,
the message will be routed as follows:
sudo:node12b:webapp -> node12b -> rack12 -> dc1 -> bastion -> dc2 -> rack22 -> node22a -> sudo:node22a:webapp
[image]
Disconnect Propagation
To ensure timely shutdown when a failure occurs, where some context is
awaiting a response from another context that has become disconnected,
mitogen.core.Router additionally records the destination context ID of
every message received on a particular stream.
When DEL_ROUTE is generated locally or received on some other stream,
mitogen.parent.RouteMonitor uses this to find every stream that ever
communicated with the route that is about to go away, and forwards the
message to each found.
The recipient DEL_ROUTE handler in turn uses the message to find any
mitogen.core.Context in the local process corresponding to the
disappearing route, and if found, fires a disconnected event on it.
Any interested party, such as mitogen.core.Receiver, may subscribe to
the event and use it to abort any threads that were asleep waiting for a
reply that will never arrive.
Source Verification
Before forwarding or dispatching a message it has received,
mitogen.core.Router first looks up the corresponding mitogen.core.Stream
it would use to send responses towards the context ID listed in the
auth_id field, and if the looked up stream does not match the stream on
which the message was received, the message is discarded and a warning
is logged.
This creates a trust chain leading up to the root of the tree,
preventing downstream contexts from injecting messages appearing to be
from the master or any more trustworthy parent. In this way, privileged
functionality such as CALL_FUNCTION <mitogen.core.CALL_FUNCTION> can
base trust decisions on the accuracy of auth_id <stream-protocol>.
The auth_id field is separate from src_id in order to support granting
privilege to contexts that do not follow the tree's natural trust chain.
This supports cases where siblings are permitted to execute code on one
another, or where isolated processes can connect to a listener and
communicate with an already established tree, such as where
a mitogen.unix client receives the same privilege as the process it
connects to.
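Stripped of logging and delivery, the check itself reduces to a comparison
like the sketch below, where stream_for_context stands in for the router's
lookup of the stream used to reach a given context ID:

    def verify_source(msg, received_on_stream, stream_for_context):
        # Acceptable only if the message arrived on the same stream that
        # would be used to send replies towards msg.auth_id.
        return stream_for_context(msg.auth_id) is received_on_stream

If the comparison fails, the message is dropped and a warning logged, as
described above.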
Differences Between Master And Child Brokers
The main difference between mitogen.core.Broker and
mitogen.master.Broker is that when the stream connection to the parent
is lost in a child, the broker will trigger its own shutdown.
The Module Importer
mitogen.core.Importer is still a work in progress, as there are a
variety of approaches to implementing it, and the present implementation
is not perfectly efficient in every case.
It operates by intercepting import statements via sys.meta_path, asking
Python if it can satisfy the import by itself, and if not, indicating to
Python that it is capable of loading the module.
In load_module() <mitogen.core.Importer.load_module> an RPC is started
to the parent context, requesting the module source code by way of a
GET_MODULE <mitogen.core.GET_MODULE>. If the parent context does not
have the module available, it recursively forwards the request upstream,
while avoiding duplicate requests for the same module from its own
threads and any child contexts.
Neutralizing __main__
To avoid accidental execution of the __main__ module's code in a slave
context, when serving the source of the main module, Mitogen removes any
code occurring after the first conditional that looks like a standard
__main__ execution guard:
# Code that looks like this is stripped from __main__.
if __name__ == '__main__':
run_some_code()
To further avoid accidental execution, Mitogen will refuse to serve
__main__ to children if no execution guard is found, as it is common
that no guard is present during early script prototyping.
These are hacks, but they are the safest and least annoying found to
solve the problem.
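The guard detection can be pictured as a regular expression scan followed by
truncation, as in this sketch; the pattern and error handling are illustrative
rather than mitogen.master's actual logic:

    import re

    MAIN_GUARD = re.compile(r"^if\s+__name__\s*==\s*['\"]__main__['\"]\s*:", re.M)

    def neutralize_main(source):
        match = MAIN_GUARD.search(source)
        if match is None:
            raise ValueError('refusing to serve __main__: no execution guard found')
        # Keep only the code preceding the guard.
        return source[:match.start()]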
Avoiding Negative Imports
In Python 2.x where relative imports are the default, a large number of
import requests will be made for modules that do not exist. For example:
# mypkg/__init__.py
import sys
import os
In Python 2.x, Python will first try to load mypkg.sys and mypkg.os,
which do not exist, before falling back on sys and os.
These negative imports present a challenge, as they introduce a large
number of pointless network round-trips. Therefore in addition to the
zlib-compressed source, for packages the master sends along a list of
child modules known to exist.
Before indicating it can satisfy an import request,
mitogen.core.Importer first checks to see if the module belongs to a
package it has previously imported, and if so, ignores the request if
the module does not appear in the enumeration of child modules belonging
to the package that was provided by the master.
Import Preloading
mitogen.core
To further avoid round-trips, when a module or package is requested by a
child, its bytecode is scanned in the master to find all the module's
import statements, and of those, which associated modules appear to have
been loaded in the master's sys.modules.
The sys.modules check is necessary to handle various kinds of
conditional execution, for example, when a module's code guards an
import statement based on the active Python runtime version, operating
system, or optional third party dependencies.
Before replying to a child's request for a module with dependencies:
- If the request is for a package, any dependent modules used by the
package that appear within the package itself are known to be
missing from the child, since the child requested the top-level
package module, therefore they are pre-loaded into the child using
LOAD_MODULE messages before sending the LOAD_MODULE message for the
requested package module itself. In this way, the child will already
have dependent modules cached by the time it receives the requested
module, avoiding one round-trip for each dependency.
For example, when a child requests the django package, and the
master determines the django module code in the master has import
statements for django.utils, django.utils.lru_cache, and
django.utils.version, and that execution of the module code on the
master caused those modules to appear in the master's sys.modules,
there is high probability execution of the django module code in the
child will cause the same modules to be loaded. Since all those
modules exist within the django package, and we already know the
child lacks that package, it is safe to assume the child will make
follow-up requests for those modules too.
In the example, 4 round-trips are replaced by 1 round-trip.
For any package module ever requested by a child, the parent keeps a
note of the name of the package for one final optimization:
- If the request is for a sub-module of a package, and it is known the
child loaded the package's implementation from the parent, then any
dependent modules of the requested module at any nesting level
within the package that is known to be missing are sent using
LOAD_MODULE messages before sending the LOAD_MODULE message for the
requested module, avoiding 1 round-trip for each dependency within
the same top-level package.
For example, when a child has previously requested the django
package module, the parent knows the package was completely absent
on the child. Therefore when the child subsequently requests the
django.db package module, it is safe to assume the child will
generate subsequent GET_MODULE requests for the 2 django.conf, 3
django.core, 2 django.db, 3 django.dispatch, and 7 django.utils
indirect dependencies for django.db.
In the example, 17 round-trips are replaced by 1 round-trip.
The method used to detect import statements is similar to the standard
library modulefinder module: rather than analyze module source code,
IMPORT_NAME <python:bytecodes> opcodes are extracted from the module's
bytecode. This is because clean source analysis methods (ast and compiler)
are an order of magnitude slower, and incompatible across major Python
versions.
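As a rough illustration of this opcode scan, the sketch below uses the Python
3 dis module to pull module names out of IMPORT_NAME instructions, recursing
into nested code objects. Mitogen's own scanner predates this API and must
work across Python versions:

    import dis

    def scan_imported_names(code):
        for inst in dis.get_instructions(code):
            if inst.opname == 'IMPORT_NAME':
                yield inst.argval
        for const in code.co_consts:
            if hasattr(const, 'co_code'):
                for name in scan_imported_names(const):
                    yield name

    # Usage: names = set(scan_imported_names(compile(source, path, 'exec')))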
Concurrency
Duplicate requests must never be issued to the parent, either due to a
local import or any GET_MODULE originating from a child. This lets
parents assume a module requested once by a downstream connection need
never be re-sent, for example, if it appears as a preloading dependency
in a subsequent GET_MODULE, or had been requested immediately after
being sent as a preloading dependency for an unrelated request by a
descendent.
Therefore each tree layer must deduplicate GET_MODULE requests, and
synchronize their descendents and local threads on corresponding
LOAD_MODULE responses from the parent.
In each context, pending requests are serialized by a threading.Lock
within mitogen.core.Importer, which may only be held for operations that
cannot block, since ModuleForwarder
<mitogen.master.ModuleForwarder> must acquire it while synchronizing
GET_MODULE requests from children on the IO multiplexer thread.
Requests From Local Threads
When Mitogen begins satisfying an import, it is known the module has
never been imported in the local process.
Importer <mitogen.core.Importer> executes under the runtime importer
lock, ensuring import statements executing in local threads are
serialized.
Note
In Python 2, ImportError is raised when import is attempted while the
runtime import lock is held by another thread, therefore imports must be
serialized by only attempting them from the main (CALL_FUNCTION) thread.
The problem is most likely to manifest in third party libraries that
lazily import optional dependencies at runtime from a non-main thread.
The workaround is to explicitly import those dependencies from the main
thread before initializing the third party library.
This was fixed in Python 3.5, but Python 3.x is not yet supported. See
Python Issue #9260.
While holding its own lock, Importer <mitogen.core.Importer> checks if
the source is not yet cached, determines if an in-flight GET_MODULE
exists for it, starting one if none exists, adds itself to a list of
callbacks fired when a corresponding LOAD_MODULE arrives from the
parent, then sleeps waiting for the callback.
When the source becomes available, the module is constructed on the
calling thread using the best practice documented in PEP 302.
Requests From Children
As with local imports, when GET_MODULE is received from a child, while
holding the Importer <mitogen.core.Importer> lock,
ModuleForwarder <mitogen.master.ModuleForwarder> checks if the source is
not yet cached, determines if an in-flight GET_MODULE toward the parent
exists for it, starting one if none exists, then adds a completion
handler to the list of callbacks fired when a corresponding LOAD_MODULE
arrives from the parent.
When the source becomes available, the completion handler issues
corresponding LOAD_MODULE messages toward the child for the requested
module after any required for dependencies known to be absent from the
child.
Since intermediaries do not know a module's dependencies until the
module's source arrives, it is not possible to preemptively issue
LOAD_MODULE for those dependencies toward a requesting child as they
become available from the parent at the intermediary. This creates
needless network serialization and latency that should be addressed in a
future design.
Child Module Enumeration
Package children are enumerated using pkgutil.iter_modules.
Use Of Threads
The package always runs the IO multiplexer in a thread. This is so the
multiplexer retains control flow in order to shut down gracefully, say,
if the user's code has hung and the master context has disconnected.
While it is possible for the IO multiplexer to recover control of a hung
function call on UNIX using for example signal.SIGALRM <signal>, this
mechanism is not portable to non-UNIX operating systems, and does not
work in every case, for example when Python blocks signals during a
variety of threading package operations.
At some point it is likely Mitogen will be extended to support children
running on Windows. When that happens, it would be nice if the process
model on Windows and UNIX did not differ, and in fact the code used on
both were identical.
Waking Sleeping Threads
Due to fundamental deficiencies in Python 2's threading implementation,
it is not possible to block waiting on synchronization objects sanely.
Two major problems exist:
- Sleeping with no timeout set causes signals to be blocked,
preventing the user from terminating the process using CTRL+C.
- Sleeping with a timeout set internally makes use of polling, with an
exponential backoff that eventually results in the thread sleeping
unconditionally in 50ms increments. This is a huge source of
latency that quickly multiplies.
As the UNIX self-pipe trick must already be employed to wake the broker
thread from its select loop, Mitogen reuses this technique to wake any
thread synchronization primitive exposed by the library, embodied in a
queue-like abstraction called a mitogen.core.Latch.
Unfortunately it is commonplace for hosts to enforce severe per-process
file descriptors limits, so aside from being inefficient, it is
impossible in the usual case to create a pair of descriptors for every
waitable object, which for example includes the result of every single
asynchronous function call.
For this reason self-pipes are created on a per-thread basis, with their
associated socketpairs <socket.socketpair> kept in thread-local storage.
When a latch wishes to sleep its thread, this pair is created on-demand
and temporarily associated with it only for the duration of the sleep.
Python's garbage collector is relied on to clean up by calling the
pair's destructor on thread exit. There does not otherwise seem to be a
robust method to trigger cleanup code on arbitrary threads.
To summarize, file descriptor usage is bounded by the number of threads
rather than the number of waitables, which is a much smaller number,
however it also means that Mitogen requires twice as many file
descriptors as there are user threads, with a minimum of 4 required in
any configuration.
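The underlying wake mechanism is the familiar socketpair trick, illustrated in
the standalone sketch below: one thread sleeps in select() on the read side,
and any other thread wakes it by writing a single byte, which the sleeper must
always consume:

    import select
    import socket
    import threading

    rsock, wsock = socket.socketpair()

    def sleeper(timeout=None):
        rfds, _, _ = select.select([rsock], [], [], timeout)
        if rfds:
            rsock.recv(1)    # always drain the wake byte
            print('woken')

    t = threading.Thread(target=sleeper)
    t.start()
    wsock.send(b'\x00')      # wake the sleeping thread from another thread
    t.join()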
Latch Internals
mitogen.core
Attributes:
- lock – threading.Lock.
- queue – items waiting to be dequeued.
- sleeping – write sides of the socketpairs for each sleeping thread,
and threads in the process of waking from sleep.
- waking – integer number of sleeping threads in the process of waking
up.
- closed – boolean defaulting to False. Every time lock is acquired,
closed must be tested, and if it is True, LatchError must be thrown.
Latch.put()
Latch.put operates by:
1. Acquiring lock.
2. Appending the item on to queue.
3. If waking is less than the length of sleeping, write a byte to the
socket at sleeping[waking] and increment waking.
In this way each thread is woken only once, and receives each element
according to when its socket was placed on sleeping.
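Transcribed directly into code, the steps above look roughly like this;
attribute names follow the description rather than the real class:

    def latch_put_sketch(latch, item):
        latch.lock.acquire()
        try:
            latch.queue.append(item)
            if latch.waking < len(latch.sleeping):
                # Wake exactly one sleeping thread with a single byte.
                latch.sleeping[latch.waking].send(b'\x00')
                latch.waking += 1
        finally:
            latch.lock.release()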
Latch.close()
Latch.close acquires lock, sets closed to True, then writes a byte to
every sleeping[waking] socket, while incrementing waking, until no more
unwoken sockets exist. Per above, on waking from sleep, after removing
itself from sleeping, each sleeping thread tests if closed is True, and
if so throws LatchError.
It is necessary to ensure at most one byte is delivered on each socket,
even if the latch is being torn down, as the sockets outlive the scope
of a single latch, and must never have extraneous data buffered on them,
as this will cause unexpected wakeups if future latches sleep on the
same thread.
Latch.get()
Latch.get is far more intricate, as there are many outcomes to handle.
Queue ordering is strictly first-in first-out, and threads always
receive items in the order they are requested, as they become available.
1. Non-empty, No Waiters, No sleep
On entry lock is taken, and if queue is non-empty, and sleeping is
empty, it is safe to return queue's first item without blocking.
2. Non-empty, Waiters Present, Queue > Waiters, No sleep
When sleeping is non-empty but there are more items than sleeping
threads, it is safe to pop queue[len(sleeping)] without blocking.
3. Non-empty, Waiters Present, Queue <= Waiters
In this case sleeping is non-empty and there are no surplus items.
It is not safe to pop any item even though we are holding lock, as
it would starve waking threads of their position in favour of the
calling thread, since scheduling uncertainty exists between a thread
waking from select.select and re-acquiring lock.
This avoids the need for a retry loop for waking threads, and a
thread being continually re-woken to discover queue drained by a
thread that never slept.
4. Sleep
Since no surplus items existed, the thread adds its socket to
sleeping before releasing lock, and sleeping in select.select
waiting for timeout, or a write from Latch.put or Latch.close.
If select.select throws an exception, the exception must be caught
and re-raised only after some of the wake steps below have
completed.
5. Wake, Non-empty
On wake lock is re-acquired, the socket is removed from sleeping
after noting its index, and TimeoutError is thrown if waking
indicates Latch.put() nor Latch.close have yet to send a wake byte
to that index. The byte is then read off, LatchError is thrown if
closed is True, otherwise the queue item corresponding to the
thread's index is popped and returned.
It is paramount that in every case, if a byte was written to the
socket, that the byte is read away. The socket is reused by
subsequent latches sleeping on the same thread, and unexpected
wakeups are triggered if extraneous data remains buffered on the
socket.
It is also necessary to favour the synchronized waking variable over
the return value of select.select, as scheduling uncertainty
introduces a race between the select timing out, and Latch.put() or
Latch.close writing a wake byte before Latch.get has re-acquired
lock. | # Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!mitogen: minify_safe
"""
This module implements most package functionality, but remains separate from
non-essential code in order to reduce its size, since it also serves as the
bootstrap implementation sent to every new slave context.
"""
import binascii
import collections
import encodings.latin_1
import encodings.utf_8
import errno
import fcntl
import itertools
import linecache
import logging
import os
import pickle as py_pickle
import pstats
import signal
import socket
import struct
import sys
import syslog
import threading
import time
import traceback
import warnings
import weakref
import zlib
# Python >3.7 deprecated the imp module.
warnings.filterwarnings('ignore', message='the imp module is deprecated')
import imp
# Absolute imports for <2.5.
select = __import__('select')
try:
import cProfile
except ImportError:
cProfile = None
try:
import thread
except ImportError:
import threading as thread
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
try:
BaseException
except NameError:
BaseException = Exception
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
# TODO: usage of 'import' after setting __name__, but before fixing up
# sys.modules generates a warning. This happens when profiling = True.
warnings.filterwarnings('ignore',
"Parent module'mitogen' not found while handling absolute import")
LOG = logging.getLogger('mitogen')
IOLOG = logging.getLogger('mitogen.io')
IOLOG.setLevel(logging.INFO)
# str.encode() may take import lock. Deadlock possible if broker calls
# .encode() on behalf of a thread currently waiting for a module.
LATIN1_CODEC = encodings.latin_1.Codec()
_v = False
_vv = False
GET_MODULE = 100
CALL_FUNCTION = 101
FORWARD_LOG = 102
ADD_ROUTE = 103
DEL_ROUTE = 104
ALLOCATE_ID = 105
SHUTDOWN = 106
LOAD_MODULE = 107
FORWARD_MODULE = 108
DETACHING = 109
CALL_SERVICE = 110
STUB_CALL_SERVICE = 111
#: Special value used to signal disconnection or the inability to route a
#: message, when it appears in the `reply_to` field. Usually causes
#: :class:`mitogen.core.ChannelError` to be raised when it is received.
#:
#: It indicates the sender did not know how to process the message, or wishes
#: no further messages to be delivered to it. It is used when:
#:
#: * a remote receiver is disconnected or explicitly closed.
#: * a related message could not be delivered due to no route existing for it.
#: * a router is being torn down, as a sentinel value to notify
#: :meth:`mitogen.core.Router.add_handler` callbacks to clean up.
IS_DEAD = 999
try:
BaseException
except NameError:
BaseException = Exception
PY24 = sys.version_info < (2, 5)
PY3 = sys.version_info > (3,)
if PY3:
b = str.encode
BytesType = bytes
UnicodeType = str
FsPathTypes = (str,)
BufferType = lambda buf, start: memoryview(buf)[start:]
long = int
else:
b = str
BytesType = str
FsPathTypes = (str, unicode)
BufferType = buffer
UnicodeType = unicode
AnyTextType = (BytesType, UnicodeType)
try:
next
except NameError:
next = lambda it: it.next()
# #550: prehistoric WSL did not advertise itself in uname output.
try:
fp = open('/proc/sys/kernel/osrelease')
IS_WSL = 'Microsoft' in fp.read()
fp.close()
except IOError:
IS_WSL = False
#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the
#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This
#: value has many performance implications, 128KiB seems to be a sweet spot.
#:
#: * When set low, large messages cause many :class:`Broker` IO loop
#: iterations, burning CPU and reducing throughput.
#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x
#: per child), and an identically sized temporary userspace buffer is
#: allocated on each read that requires zeroing, and over a particular size
#: may require two system calls to allocate/deallocate.
#:
#: Care must be taken to ensure the underlying kernel object and receiving
#: program support the desired size. For example,
#:
#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable
#: for efficient IO.
#: * Different UNIXes have varying presets for pipes, which may not be
#: configurable. On recent Linux the default pipe buffer size is 64KiB, but
#: under memory pressure may be as low as 4KiB for unprivileged processes.
#: * When communication is via an intermediary process, its internal buffers
#: affect the speed at which OS buffers drain. For example OpenSSH uses 64KiB
#: reads.
#:
#: An ideal :class:`Message` has a size that is a multiple of
#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations
#: writing small trailer chunks.
CHUNK_SIZE = 131072
_tls = threading.local()
if __name__ == 'mitogen.core':
# When loaded using import mechanism, ExternalContext.main() will not have
# a chance to set the synthetic mitogen global, so just import it here.
import mitogen
else:
# When loaded as __main__, ensure classes and functions gain a __module__
# attribute consistent with the host process, so that pickling succeeds.
    __name__ = 'mitogen.core'
class Error(Exception):
"""
Base for all exceptions raised by Mitogen.
:param str fmt:
Exception text, or format string if `args` is non-empty.
:param tuple args:
Format string arguments.
"""
def __init__(self, fmt=None, *args):
if args:
fmt %= args
if fmt and not isinstance(fmt, UnicodeType):
fmt = fmt.decode('utf-8')
Exception.__init__(self, fmt)
class LatchError(Error):
"""
Raised when an attempt is made to use a :class:`mitogen.core.Latch` that
has been marked closed.
"""
pass
class Blob(BytesType):
"""
A serializable bytes subclass whose content is summarized in repr() output,
making it suitable for logging binary data.
"""
def __repr__(self):
return '[blob: %d bytes]' % len(self)
def __reduce__(self):
return (Blob, (BytesType(self),))
class Secret(UnicodeType):
"""
A serializable unicode subclass whose content is masked in repr() output,
making it suitable for logging passwords.
"""
def __repr__(self):
return '[secret]'
if not PY3:
# TODO: what is this needed for in 2.x?
def __str__(self):
return UnicodeType(self)
def __reduce__(self):
return (Secret, (UnicodeType(self),))
class Kwargs(dict):
"""
A serializable dict subclass that indicates its keys should be coerced to
Unicode on Python 3 and bytes on Python<2.6.
Python 2 produces keyword argument dicts whose keys are bytes, requiring a
helper to ensure compatibility with Python 3 where Unicode is required,
whereas Python 3 produces keyword argument dicts whose keys are Unicode,
requiring a helper for Python 2.4/2.5, where bytes are required.
"""
if PY3:
def __init__(self, dct):
for k, v in dct.items():
if type(k) is bytes:
self[k.decode()] = v
else:
self[k] = v
elif sys.version_info < (2, 6, 5):
def __init__(self, dct):
for k, v in dct.iteritems():
if type(k) is unicode:
k, _ = encodings.utf_8.encode(k)
self[k] = v
def __repr__(self):
return 'Kwargs(%s)' % (dict.__repr__(self),)
def __reduce__(self):
return (Kwargs, (dict(self),))
class CallError(Error):
"""
Serializable :class:`Error` subclass raised when :meth:`Context.call()
<mitogen.parent.Context.call>` fails. A copy of the traceback from the
external context is appended to the exception message.
"""
def __init__(self, fmt=None, *args):
if not isinstance(fmt, BaseException):
Error.__init__(self, fmt, *args)
else:
e = fmt
cls = e.__class__
fmt = '%s.%s: %s' % (cls.__module__, cls.__name__, e)
tb = sys.exc_info()[2]
if tb:
fmt += '\n'
fmt += ''.join(traceback.format_tb(tb))
Error.__init__(self, fmt)
def __reduce__(self):
return (_unpickle_call_error, (self.args[0],))
def _unpickle_call_error(s):
if not (type(s) is UnicodeType and len(s) < 10000):
raise TypeError('cannot unpickle CallError: bad input')
return CallError(s)
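# A minimal sketch of wrapping a live exception in CallError so that the type,
# text and traceback travel with the serialized error. _example_wrap_exception
# is a hypothetical helper used only for illustration; it is not mitogen API.
def _example_wrap_exception():
    try:
        int('not a number')
    except ValueError:
        e = sys.exc_info()[1]
        # The resulting message embeds the exception class, its text and the
        # formatted traceback captured inside this except block.
        return CallError(e)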
class ChannelError(Error):
"""
Raised when a channel dies or has been closed.
"""
remote_msg = 'Channel closed by remote end.'
local_msg = 'Channel closed by local end.'
class StreamError(Error):
"""
Raised when a stream cannot be established.
"""
pass
class TimeoutError(Error):
"""
Raised when a timeout occurs on a stream.
"""
pass
def to_text(o):
"""
Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of
:class:`bytes`, otherwise pass it to the :class:`str` constructor. The
returned object is always a plain :class:`str`, any subclass is removed.
"""
if isinstance(o, BytesType):
return o.decode('utf-8')
return UnicodeType(o)
# Documented in api.rst to work around Sphinx limitation.
now = getattr(time, 'monotonic', time.time)
# Python 2.4
try:
any
except NameError:
def any(it):
for elem in it:
if elem:
return True
def _partition(s, sep, find):
"""
(str|unicode).(partition|rpartition) for Python 2.4/2.5.
"""
idx = find(sep)
    if idx != -1:
left = s[0:idx]
return left, sep, s[len(left)+len(sep):]
def threading__current_thread():
try:
return threading.current_thread() # Added in Python 2.6+
except AttributeError:
return threading.currentThread() # Deprecated in Python 3.10+
def threading__thread_name(thread):
try:
return thread.name # Added in Python 2.6+
except AttributeError:
return thread.getName() # Deprecated in Python 3.10+
if hasattr(UnicodeType, 'rpartition'):
str_partition = UnicodeType.partition
str_rpartition = UnicodeType.rpartition
bytes_partition = BytesType.partition
else:
def str_partition(s, sep):
return _partition(s, sep, s.find) or (s, u'', u'')
def str_rpartition(s, sep):
return _partition(s, sep, s.rfind) or (u'', u'', s)
def bytes_partition(s, sep):
return _partition(s, sep, s.find) or (s, '', '')
def _has_parent_authority(context_id):
return (
(context_id == mitogen.context_id) or
(context_id in mitogen.parent_ids)
)
def has_parent_authority(msg, _stream=None):
"""
Policy function for use with :class:`Receiver` and
:meth:`Router.add_handler` that requires incoming messages to originate
from a parent context, or on a :class:`Stream` whose :attr:`auth_id
<Stream.auth_id>` has been set to that of a parent context or the current
context.
"""
return _has_parent_authority(msg.auth_id)
def _signals(obj, signal):
return (
obj.__dict__
.setdefault('_signals', {})
.setdefault(signal, [])
)
def listen(obj, name, func):
"""
Arrange for `func()` to be invoked when signal `name` is fired on `obj`.
"""
_signals(obj, name).append(func)
def unlisten(obj, name, func):
"""
Remove `func()` from the list of functions invoked when signal `name` is
fired by `obj`.
:raises ValueError:
`func()` was not on the list.
"""
_signals(obj, name).remove(func)
def fire(obj, name, *args, **kwargs):
"""
Arrange for `func(*args, **kwargs)` to be invoked for every function
registered for signal `name` on `obj`.
"""
for func in _signals(obj, name):
func(*args, **kwargs)
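# Minimal sketch of the listen()/fire()/unlisten() signal helpers above. The
# Dummy class and _example_signal_usage() are hypothetical names used purely
# for illustration; they are not part of the mitogen API.
def _example_signal_usage():
    class Dummy(object):
        pass
    obj = Dummy()
    seen = []
    def on_disconnect(reason):
        seen.append(reason)
    listen(obj, 'disconnect', on_disconnect)      # register the callback
    fire(obj, 'disconnect', 'remote end closed')  # invokes on_disconnect()
    unlisten(obj, 'disconnect', on_disconnect)    # deregister it again
    return seen                                   # ['remote end closed']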
def takes_econtext(func):
"""
Decorator that marks a function or class method to automatically receive a
kwarg named `econtext`, referencing the
:class:`mitogen.core.ExternalContext` active in the context in which the
function is being invoked in. The decorator is only meaningful when the
function is invoked via :data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>`.
When the function is invoked directly, `econtext` must still be passed to
it explicitly.
"""
func.mitogen_takes_econtext = True
return func
def takes_router(func):
"""
Decorator that marks a function or class method to automatically receive a
kwarg named `router`, referencing the :class:`mitogen.core.Router` active
in the context in which the function is being invoked in. The decorator is
only meaningful when the function is invoked via :data:`CALL_FUNCTION
<mitogen.core.CALL_FUNCTION>`.
When the function is invoked directly, `router` must still be passed to it
explicitly.
"""
func.mitogen_takes_router = True
return func
def is_blacklisted_import(importer, fullname):
"""
Return :data:`True` if `fullname` is part of a blacklisted package, or if
any packages have been whitelisted and `fullname` is not part of one.
NB:
- If a package is on both lists, then it is treated as blacklisted.
- If any package is whitelisted, then all non-whitelisted packages are
treated as blacklisted.
"""
return ((not any(fullname.startswith(s) for s in importer.whitelist)) or
(any(fullname.startswith(s) for s in importer.blacklist)))
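# Sketch of the whitelist/blacklist semantics enforced by
# is_blacklisted_import(). _FakeImporter is a hypothetical stand-in for the
# Importer class defined further below; illustrative only.
def _example_blacklist_check():
    class _FakeImporter(object):
        whitelist = ['']     # the empty prefix whitelists every module
        blacklist = ['org']  # never round-trip for the Jython 'org' package
    importer = _FakeImporter()
    assert not is_blacklisted_import(importer, 'django.db')
    assert is_blacklisted_import(importer, 'org.python.core')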
def set_cloexec(fd):
"""
Set the file descriptor `fd` to automatically close on :func:`os.execve`.
This has no effect on file descriptors inherited across :func:`os.fork`,
they must be explicitly closed through some other means, such as
:func:`mitogen.fork.on_fork`.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert fd > 2, 'fd %r <= 2' % (fd,)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def set_nonblock(fd):
"""
Set the file descriptor `fd` to non-blocking mode. For most underlying file
types, this causes :func:`os.read` or :func:`os.write` to raise
:class:`OSError` with :data:`errno.EAGAIN` rather than block the thread
when the underlying kernel buffer is exhausted.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def set_block(fd):
"""
Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread when
the underlying kernel buffer is exhausted.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
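# Sketch of toggling blocking behaviour on a descriptor using the helpers
# above; `fd` is assumed to come from e.g. os.pipe(). Illustrative only.
def _example_toggle_blocking(fd):
    set_nonblock(fd)  # os.read()/os.write() now raise EAGAIN instead of blocking
    set_block(fd)     # restore the default blocking behaviour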
def io_op(func, *args):
"""
Wrap `func(*args)` that may raise :class:`select.error`, :class:`IOError`,
or :class:`OSError`, trapping UNIX error codes relating to disconnection
and retry events in various subsystems:
* When a signal is delivered to the process on Python 2, system call retry
is signalled through :data:`errno.EINTR`. The invocation is automatically
restarted.
* When performing IO against a TTY, disconnection of the remote end is
signalled by :data:`errno.EIO`.
* When performing IO against a socket, disconnection of the remote end is
signalled by :data:`errno.ECONNRESET`.
* When performing IO against a pipe, disconnection of the remote end is
signalled by :data:`errno.EPIPE`.
:returns:
Tuple of `(return_value, disconnect_reason)`, where `return_value` is
the return value of `func(*args)`, and `disconnected` is an exception
instance when disconnection was detected, otherwise :data:`None`.
"""
while True:
try:
return func(*args), None
except (select.error, OSError, IOError):
e = sys.exc_info()[1]
_vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e)
if e.args[0] == errno.EINTR:
continue
if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE):
return None, e
raise
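# Sketch of wrapping a raw os.read() with io_op() so that EINTR is retried and
# disconnection surfaces as a value rather than an exception. The `fd`
# parameter is assumed to come from e.g. os.pipe(); purely illustrative.
def _example_io_op_read(fd):
    data, disconnected = io_op(os.read, fd, 4096)
    if disconnected:
        LOG.debug('peer disconnected: %s', disconnected)
        return b('')
    return data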
class PidfulStreamHandler(logging.StreamHandler):
"""
A :class:`logging.StreamHandler` subclass used when
:meth:`Router.enable_debug() <mitogen.master.Router.enable_debug>` has been
called, or the `debug` parameter was specified during context construction.
Verifies the process ID has not changed on each call to :meth:`emit`,
reopening the associated log file when a change is detected.
This ensures logging to the per-process output files happens correctly even
when uncooperative third party components call :func:`os.fork`.
"""
#: PID that last opened the log file.
open_pid = None
#: Output path template.
template = '/tmp/mitogen.%s.%s.log'
def _reopen(self):
self.acquire()
try:
if self.open_pid == os.getpid():
return
ts = time.strftime('%Y%m%d_%H%M%S')
path = self.template % (os.getpid(), ts)
self.stream = open(path, 'w', 1)
set_cloexec(self.stream.fileno())
self.stream.write('Parent PID: %s\n' % (os.getppid(),))
self.stream.write('Created by:\n\n%s\n' % (
''.join(traceback.format_stack()),
))
self.open_pid = os.getpid()
finally:
self.release()
def emit(self, record):
        if self.open_pid != os.getpid():
self._reopen()
logging.StreamHandler.emit(self, record)
def enable_debug_logging():
global _v, _vv
_v = True
_vv = True
root = logging.getLogger()
root.setLevel(logging.DEBUG)
IOLOG.setLevel(logging.DEBUG)
handler = PidfulStreamHandler()
handler.formatter = logging.Formatter(
'%(asctime)s %(levelname).1s %(name)s: %(message)s',
'%H:%M:%S'
)
root.handlers.insert(0, handler)
_profile_hook = lambda name, func, *args: func(*args)
_profile_fmt = os.environ.get(
'MITOGEN_PROFILE_FMT',
'/tmp/mitogen.stats.%(pid)s.%(identity)s.%(now)s.%(ext)s',
)
def _profile_hook(name, func, *args):
"""
Call `func(*args)` and return its result. This function is replaced by
:func:`_real_profile_hook` when :func:`enable_profiling` is called. This
interface is obsolete and will be replaced by a signals-based integration
later on.
"""
return func(*args)
def _real_profile_hook(name, func, *args):
profiler = cProfile.Profile()
profiler.enable()
try:
return func(*args)
finally:
path = _profile_fmt % {
'now': int(1e6 * now()),
'identity': name,
'pid': os.getpid(),
'ext': '%s'
}
profiler.dump_stats(path % ('pstats',))
profiler.create_stats()
fp = open(path % ('log',), 'w')
try:
stats = pstats.Stats(profiler, stream=fp)
stats.sort_stats('cumulative')
stats.print_stats()
finally:
fp.close()
def enable_profiling(econtext=None):
global _profile_hook
_profile_hook = _real_profile_hook
def import_module(modname):
"""
    Import and return the module named by `modname`.
"""
return __import__(modname, None, None, [''])
def pipe():
"""
Create a UNIX pipe pair using :func:`os.pipe`, wrapping the returned
descriptors in Python file objects in order to manage their lifetime and
ensure they are closed when their last reference is discarded and they have
not been closed explicitly.
"""
rfd, wfd = os.pipe()
return (
os.fdopen(rfd, 'rb', 0),
os.fdopen(wfd, 'wb', 0)
)
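# Sketch of the pipe() helper: the returned unbuffered file objects can be
# written and read directly, or their fileno() handed to Side below. The
# _example_pipe_roundtrip() helper is hypothetical and illustrative only.
def _example_pipe_roundtrip():
    rfp, wfp = pipe()
    try:
        wfp.write(b('ping'))                 # unbuffered write to the pipe
        return os.read(rfp.fileno(), 4)      # -> b'ping'
    finally:
        rfp.close()
        wfp.close()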
def iter_split(buf, delim, func):
"""
Invoke `func(s)` for each `delim`-delimited chunk in the potentially large
`buf`, avoiding intermediate lists and quadratic string operations. Return
the trailing undelimited portion of `buf`, or any unprocessed portion of
`buf` after `func(s)` returned :data:`False`.
:returns:
`(trailer, cont)`, where `cont` is :data:`False` if the last call to
`func(s)` returned :data:`False`.
"""
dlen = len(delim)
start = 0
cont = True
while cont:
nl = buf.find(delim, start)
if nl == -1:
break
cont = not func(buf[start:nl]) is False
start = nl + dlen
return buf[start:], cont
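# Sketch of iter_split(): deliver each newline-delimited record to a callback
# and keep the trailing partial record for the next read. Illustrative only.
def _example_iter_split():
    records = []
    trailer, cont = iter_split(
        buf=b('alpha\nbravo\ncharl'),
        delim=b('\n'),
        func=records.append,
    )
    # records == [b('alpha'), b('bravo')], trailer == b('charl'), cont is True
    return records, trailer, cont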
class Py24Pickler(py_pickle.Pickler):
"""
Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle
offers little control over how a classic instance is pickled. Therefore 2.4
uses a pure-Python pickler, so CallError can be made to look as it does on
newer Pythons.
This mess will go away once proper serialization exists.
"""
@classmethod
def dumps(cls, obj, protocol):
bio = BytesIO()
self = cls(bio, protocol=protocol)
self.dump(obj)
return bio.getvalue()
def save_exc_inst(self, obj):
if isinstance(obj, CallError):
func, args = obj.__reduce__()
self.save(func)
self.save(args)
self.write(py_pickle.REDUCE)
else:
py_pickle.Pickler.save_inst(self, obj)
if PY24:
dispatch = py_pickle.Pickler.dispatch.copy()
dispatch[py_pickle.InstanceType] = save_exc_inst
if PY3:
# In 3.x Unpickler is a class exposing find_class as an overridable, but it
# cannot be overridden without subclassing.
class _Unpickler(pickle.Unpickler):
def find_class(self, module, func):
return self.find_global(module, func)
pickle__dumps = pickle.dumps
elif PY24:
# On Python 2.4, we must use a pure-Python pickler.
pickle__dumps = Py24Pickler.dumps
_Unpickler = pickle.Unpickler
else:
pickle__dumps = pickle.dumps
# In 2.x Unpickler is a function exposing a writeable find_global
# attribute.
_Unpickler = pickle.Unpickler
class Message(object):
"""
Messages are the fundamental unit of communication, comprising fields from
the :ref:`stream-protocol` header, an optional reference to the receiving
:class:`mitogen.core.Router` for ingress messages, and helper methods for
deserialization and generating replies.
"""
#: Integer target context ID. :class:`Router` delivers messages locally
#: when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise
#: they are routed up or downstream.
dst_id = None
#: Integer source context ID. Used as the target of replies if any are
#: generated.
src_id = None
#: Context ID under whose authority the message is acting. See
#: :ref:`source-verification`.
auth_id = None
#: Integer target handle in the destination context. This is one of the
#: :ref:`standard-handles`, or a dynamically generated handle used to
#: receive a one-time reply, such as the return value of a function call.
handle = None
#: Integer target handle to direct any reply to this message. Used to
#: receive a one-time reply, such as the return value of a function call.
#: :data:`IS_DEAD` has a special meaning when it appears in this field.
reply_to = None
#: Raw message data bytes.
data = b('')
_unpickled = object()
#: The :class:`Router` responsible for routing the message. This is
#: :data:`None` for locally originated messages.
router = None
#: The :class:`Receiver` over which the message was last received. Part of
#: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`.
receiver = None
HEADER_FMT = '>hLLLLLL'
HEADER_LEN = struct.calcsize(HEADER_FMT)
HEADER_MAGIC = 0x4d49 # 'MI'
def __init__(self, **kwargs):
"""
        Construct a message from the supplied `kwargs`. :attr:`src_id` and
:attr:`auth_id` are always set to :data:`mitogen.context_id`.
"""
self.src_id = mitogen.context_id
self.auth_id = mitogen.context_id
vars(self).update(kwargs)
assert isinstance(self.data, BytesType), 'Message data is not Bytes'
def pack(self):
return (
struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, self.dst_id,
self.src_id, self.auth_id, self.handle,
self.reply_to or 0, len(self.data))
+ self.data
)
def _unpickle_context(self, context_id, name):
return _unpickle_context(context_id, name, router=self.router)
def _unpickle_sender(self, context_id, dst_handle):
return _unpickle_sender(self.router, context_id, dst_handle)
def _unpickle_bytes(self, s, encoding):
s, n = LATIN1_CODEC.encode(s)
return s
def _find_global(self, module, func):
"""
Return the class implementing `module_name.class_name` or raise
`StreamError` if the module is not whitelisted.
"""
if module == __name__:
if func == '_unpickle_call_error' or func == 'CallError':
return _unpickle_call_error
elif func == '_unpickle_sender':
return self._unpickle_sender
elif func == '_unpickle_context':
return self._unpickle_context
elif func == 'Blob':
return Blob
elif func == 'Secret':
return Secret
elif func == 'Kwargs':
return Kwargs
elif module == '_codecs' and func == 'encode':
return self._unpickle_bytes
elif module == '__builtin__' and func == 'bytes':
return BytesType
raise StreamError('cannot unpickle %r/%r', module, func)
@property
def is_dead(self):
"""
:data:`True` if :attr:`reply_to` is set to the magic value
:data:`IS_DEAD`, indicating the sender considers the channel dead. Dead
messages can be raised in a variety of circumstances, see
:data:`IS_DEAD` for more information.
"""
return self.reply_to == IS_DEAD
@classmethod
def dead(cls, reason=None, **kwargs):
"""
Syntax helper to construct a dead message.
"""
kwargs['data'], _ = encodings.utf_8.encode(reason or u'')
return cls(reply_to=IS_DEAD, **kwargs)
@classmethod
def pickled(cls, obj, **kwargs):
"""
Construct a pickled message, setting :attr:`data` to the serialization
of `obj`, and setting remaining fields using `kwargs`.
:returns:
The new message.
"""
self = cls(**kwargs)
try:
self.data = pickle__dumps(obj, protocol=2)
except pickle.PicklingError:
e = sys.exc_info()[1]
self.data = pickle__dumps(CallError(e), protocol=2)
return self
def reply(self, msg, router=None, **kwargs):
"""
Compose a reply to this message and send it using :attr:`router`, or
        `router` if :attr:`router` is :data:`None`.
:param obj:
Either a :class:`Message`, or an object to be serialized in order
to construct a new message.
:param router:
Optional router to use if :attr:`router` is :data:`None`.
:param kwargs:
Optional keyword parameters overriding message fields in the reply.
"""
if not isinstance(msg, Message):
msg = Message.pickled(msg)
msg.dst_id = self.src_id
msg.handle = self.reply_to
vars(msg).update(kwargs)
if msg.handle:
(self.router or router).route(msg)
else:
LOG.debug('dropping reply to message with no return address: %r',
msg)
if PY3:
UNPICKLER_KWARGS = {'encoding': 'bytes'}
else:
UNPICKLER_KWARGS = {}
def _throw_dead(self):
if len(self.data):
            raise ChannelError(self.data.decode('utf-8', 'replace'))
elif self.src_id == mitogen.context_id:
raise ChannelError(ChannelError.local_msg)
else:
raise ChannelError(ChannelError.remote_msg)
def unpickle(self, throw=True, throw_dead=True):
"""
Unpickle :attr:`data`, optionally raising any exceptions present.
:param bool throw_dead:
If :data:`True`, raise exceptions, otherwise it is the caller's
responsibility.
:raises CallError:
The serialized data contained CallError exception.
:raises ChannelError:
The `is_dead` field was set.
"""
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
self._throw_dead()
obj = self._unpickled
if obj is Message._unpickled:
fp = BytesIO(self.data)
unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
unpickler.find_global = self._find_global
try:
# Must occur off the broker thread.
try:
obj = unpickler.load()
except:
LOG.error('raw pickle was: %r', self.data)
raise
self._unpickled = obj
except (TypeError, ValueError):
e = sys.exc_info()[1]
raise StreamError('invalid message: %s', e)
if throw:
if isinstance(obj, CallError):
raise obj
return obj
def __repr__(self):
return 'Message(%r, %r, %r, %r, %r, %r..%d)' % (
self.dst_id, self.src_id, self.auth_id, self.handle,
self.reply_to, (self.data or '')[:50], len(self.data)
)
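# Sketch of constructing, packing and unpickling a Message. The dst_id and
# handle values are arbitrary illustration values; in a live process the
# Router and Context code chooses them. _example_message_roundtrip() is a
# hypothetical helper, not part of the mitogen API.
def _example_message_roundtrip():
    msg = Message.pickled(
        {u'op': u'status', u'ok': True},
        dst_id=0,
        handle=CALL_FUNCTION,
    )
    wire = msg.pack()       # fixed-size header followed by the pickled data
    assert len(wire) == Message.HEADER_LEN + len(msg.data)
    return msg.unpickle()   # -> the original dict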
class Sender(object):
"""
    Senders are used to send pickled messages to a handle in another context;
    a sender is the inverse of :class:`mitogen.core.Receiver`.
Senders may be serialized, making them convenient to wire up data flows.
See :meth:`mitogen.core.Receiver.to_sender` for more information.
:param mitogen.core.Context context:
Context to send messages to.
:param int dst_handle:
Destination handle to send messages to.
"""
def __init__(self, context, dst_handle):
self.context = context
self.dst_handle = dst_handle
def send(self, data):
"""
Send `data` to the remote end.
"""
_vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
self.context.send(Message.pickled(data, handle=self.dst_handle))
explicit_close_msg = 'Sender was explicitly closed'
def close(self):
"""
Send a dead message to the remote, causing :meth:`ChannelError` to be
raised in any waiting thread.
"""
_vv and IOLOG.debug('%r.close()', self)
self.context.send(
Message.dead(
reason=self.explicit_close_msg,
handle=self.dst_handle
)
)
def __repr__(self):
return 'Sender(%r, %r)' % (self.context, self.dst_handle)
def __reduce__(self):
return _unpickle_sender, (self.context.context_id, self.dst_handle)
def _unpickle_sender(router, context_id, dst_handle):
if not (isinstance(router, Router) and
isinstance(context_id, (int, long)) and context_id >= 0 and
isinstance(dst_handle, (int, long)) and dst_handle > 0):
raise TypeError('cannot unpickle Sender: bad input or missing router')
return Sender(Context(router, context_id), dst_handle)
class Receiver(object):
"""
Receivers maintain a thread-safe queue of messages sent to a handle of this
context from another context.
:param mitogen.core.Router router:
Router to register the handler on.
:param int handle:
If not :data:`None`, an explicit handle to register, otherwise an
unused handle is chosen.
:param bool persist:
If :data:`False`, unregister the handler after one message is received.
Single-message receivers are intended for RPC-like transactions, such
as in the case of :meth:`mitogen.parent.Context.call_async`.
:param mitogen.core.Context respondent:
Context this receiver is receiving from. If not :data:`None`, arranges
for the receiver to receive a dead message if messages can no longer be
routed to the context due to disconnection, and ignores messages that
did not originate from the respondent context.
"""
#: If not :data:`None`, a function invoked as `notify(receiver)` after a
#: message has been received. The function is invoked on :class:`Broker`
#: thread, therefore it must not block. Used by
#: :class:`mitogen.select.Select` to efficiently implement waiting on
#: multiple event sources.
notify = None
raise_channelerror = True
def __init__(self, router, handle=None, persist=True,
respondent=None, policy=None, overwrite=False):
self.router = router
#: The handle.
self.handle = handle # Avoid __repr__ crash in add_handler()
        self._latch = Latch()  # Must exist prior to .add_handler()
self.handle = router.add_handler(
fn=self._on_receive,
handle=handle,
policy=policy,
persist=persist,
respondent=respondent,
overwrite=overwrite,
)
def __repr__(self):
return 'Receiver(%r, %r)' % (self.router, self.handle)
def __enter__(self):
return self
def __exit__(self, _1, _2, _3):
self.close()
def to_sender(self):
"""
Return a :class:`Sender` configured to deliver messages to this
receiver. As senders are serializable, this makes it convenient to pass
`(context_id, handle)` pairs around::
def deliver_monthly_report(sender):
for line in open('monthly_report.txt'):
sender.send(line)
sender.close()
@mitogen.main()
def main(router):
remote = router.ssh(hostname='mainframe')
recv = mitogen.core.Receiver(router)
remote.call(deliver_monthly_report, recv.to_sender())
for msg in recv:
print(msg)
"""
return Sender(self.router.myself(), self.handle)
def _on_receive(self, msg):
"""
Callback registered for the handle with :class:`Router`; appends data
to the internal queue.
"""
_vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
self._latch.put(msg)
if self.notify:
self.notify(self)
closed_msg = 'the Receiver has been closed'
def close(self):
"""
Unregister the receiver's handle from its associated router, and cause
:class:`ChannelError` to be raised in any thread waiting in :meth:`get`
on this receiver.
"""
if self.handle:
self.router.del_handler(self.handle)
self.handle = None
self._latch.close()
def size(self):
"""
Return the number of items currently buffered.
As with :class:`Queue.Queue`, `0` may be returned even though a
subsequent call to :meth:`get` will succeed, since a message may be
posted at any moment between :meth:`size` and :meth:`get`.
As with :class:`Queue.Queue`, `>0` may be returned even though a
subsequent call to :meth:`get` will block, since another waiting thread
may be woken at any moment between :meth:`size` and :meth:`get`.
:raises LatchError:
The underlying latch has already been marked closed.
"""
return self._latch.size()
def empty(self):
"""
Return `size() == 0`.
.. deprecated:: 0.2.8
Use :meth:`size` instead.
:raises LatchError:
The latch has already been marked closed.
"""
return self._latch.empty()
def get(self, timeout=None, block=True, throw_dead=True):
"""
Sleep waiting for a message to arrive on this receiver.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:raises mitogen.core.ChannelError:
The remote end indicated the channel should be closed,
communication with it was lost, or :meth:`close` was called in the
local process.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
:class:`Message` that was received.
"""
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
try:
msg = self._latch.get(timeout=timeout, block=block)
except LatchError:
raise ChannelError(self.closed_msg)
if msg.is_dead and throw_dead:
msg._throw_dead()
return msg
def __iter__(self):
"""
Yield consecutive :class:`Message` instances delivered to this receiver
until :class:`ChannelError` is raised.
"""
while True:
try:
msg = self.get()
except ChannelError:
return
yield msg
class Channel(Sender, Receiver):
"""
A channel inherits from :class:`mitogen.core.Sender` and
`mitogen.core.Receiver` to provide bidirectional functionality.
.. deprecated:: 0.2.0
This class is incomplete and obsolete, it will be removed in Mitogen
0.3.
Channels were an early attempt at syntax sugar. It is always easier to pass
around unidirectional pairs of senders/receivers, even though the syntax is
baroque:
.. literalinclude::../examples/ping_pong.py
    Since not all handles are known until after both ends are constructed, for
both ends to communicate through a channel, it is necessary for one end to
retrieve the handle allocated to the other and reconfigure its own channel
to match. Currently this is a manual task.
"""
def __init__(self, router, context, dst_handle, handle=None):
Sender.__init__(self, context, dst_handle)
Receiver.__init__(self, router, handle)
def close(self):
Receiver.close(self)
Sender.close(self)
def __repr__(self):
return 'Channel(%s, %s)' % (
Sender.__repr__(self),
Receiver.__repr__(self)
)
class Importer(object):
"""
Import protocol implementation that fetches modules from the parent
process.
:param context: Context to communicate via.
"""
# The Mitogen package is handled specially, since the child context must
# construct it manually during startup.
MITOGEN_PKG_CONTENT = [
'buildah',
'compat',
'debug',
'doas',
'docker',
'kubectl',
'fakessh',
'fork',
'jail',
'lxc',
'lxd',
'master',
'minify',
'os_fork',
'parent',
'podman',
'select',
'service',
'setns',
'ssh',
'su',
'sudo',
'utils',
]
ALWAYS_BLACKLIST = [
# 2.x generates needless imports for 'builtins', while 3.x does the
# same for '__builtin__'. The correct one is built-in, the other always
# a negative round-trip.
'builtins',
'__builtin__',
# On some Python releases (e.g. 3.8, 3.9) the subprocess module tries
        # to import this Windows-only builtin module.
'msvcrt',
# Python 2.x module that was renamed to _thread in 3.x.
# This entry avoids a roundtrip on 2.x -> 3.x.
'thread',
# org.python.core imported by copy, pickle, xml.sax; breaks Jython, but
# very unlikely to trigger a bug report.
'org',
]
if PY3:
ALWAYS_BLACKLIST += ['cStringIO']
def __init__(self, router, context, core_src, whitelist=(), blacklist=()):
self._log = logging.getLogger('mitogen.importer')
self._context = context
self._present = {'mitogen': self.MITOGEN_PKG_CONTENT}
self._lock = threading.Lock()
self.whitelist = list(whitelist) or ['']
self.blacklist = list(blacklist) + self.ALWAYS_BLACKLIST
# Preserve copies of the original server-supplied whitelist/blacklist
# for later use by children.
self.master_whitelist = self.whitelist[:]
self.master_blacklist = self.blacklist[:]
# Presence of an entry in this map indicates in-flight GET_MODULE.
self._callbacks = {}
self._cache = {}
if core_src:
self._update_linecache('x/mitogen/core.py', core_src)
self._cache['mitogen.core'] = (
'mitogen.core',
None,
'x/mitogen/core.py',
zlib.compress(core_src, 9),
[],
)
self._install_handler(router)
def _update_linecache(self, path, data):
"""
The Python 2.4 linecache module, used to fetch source code for
tracebacks and :func:`inspect.getsource`, does not support PEP-302,
        meaning it needs extra help for Mitogen-loaded modules. Directly
populate its cache if a loaded module belongs to the Mitogen package.
"""
        if PY24 and 'mitogen' in path:
linecache.cache[path] = (
len(data),
0.0,
[line+'\n' for line in data.splitlines()],
path,
)
def _install_handler(self, router):
router.add_handler(
fn=self._on_load_module,
handle=LOAD_MODULE,
policy=has_parent_authority,
)
def __repr__(self):
return 'Importer'
def builtin_find_module(self, fullname):
# imp.find_module() will always succeed for __main__, because it is a
# built-in module. That means it exists on a special linked list deep
# within the bowels of the interpreter. We must special case it.
if fullname == '__main__':
raise ModuleNotFoundError()
parent, _, modname = str_rpartition(fullname, '.')
if parent:
path = sys.modules[parent].__path__
else:
path = None
fp, pathname, description = imp.find_module(modname, path)
if fp:
fp.close()
def find_module(self, fullname, path=None):
"""
Return a loader (ourself) or None, for the module with fullname.
Implements importlib.abc.MetaPathFinder.find_module().
        Deprecated in Python 3.4+, replaced by find_spec().
Raises ImportWarning in Python 3.10+.
fullname A (fully qualified?) module name, e.g. "os.path".
        path      __path__ of the parent package. None for a top-level module.
"""
if hasattr(_tls, 'running'):
return None
_tls.running = True
try:
#_v and self._log.debug('Python requested %r', fullname)
fullname = to_text(fullname)
pkgname, dot, _ = str_rpartition(fullname, '.')
pkg = sys.modules.get(pkgname)
if pkgname and getattr(pkg, '__loader__', None) is not self:
self._log.debug('%s is submodule of a locally loaded package',
fullname)
return None
suffix = fullname[len(pkgname+dot):]
if pkgname and suffix not in self._present.get(pkgname, ()):
self._log.debug('%s has no submodule %s', pkgname, suffix)
return None
# #114: explicitly whitelisted prefixes override any
# system-installed package.
            if self.whitelist != ['']:
if any(fullname.startswith(s) for s in self.whitelist):
return self
try:
self.builtin_find_module(fullname)
_vv and self._log.debug('%r is available locally', fullname)
except ImportError:
_vv and self._log.debug('we will try to load %r', fullname)
return self
finally:
del _tls.running
blacklisted_msg = (
'%r is present in the Mitogen importer blacklist, therefore this '
'context will not attempt to request it from the master, as the '
'request will always be refused.'
)
pkg_resources_msg = (
'pkg_resources is prohibited from importing __main__, as it causes '
'problems in applications whose main module is not designed to be '
're-imported by children.'
)
absent_msg = (
'The Mitogen master process was unable to serve %r. It may be a '
'native Python extension, or it may be missing entirely. Check the '
'importer debug logs on the master for more information.'
)
def _refuse_imports(self, fullname):
if is_blacklisted_import(self, fullname):
raise ModuleNotFoundError(self.blacklisted_msg % (fullname,))
f = sys._getframe(2)
requestee = f.f_globals['__name__']
if fullname == '__main__' and requestee == 'pkg_resources':
# Anything that imports pkg_resources will eventually cause
# pkg_resources to try and scan __main__ for its __requires__
# attribute (pkg_resources/__init__.py::_build_master()). This
# breaks any app that is not expecting its __main__ to suddenly be
# sucked over a network and injected into a remote process, like
# py.test.
raise ModuleNotFoundError(self.pkg_resources_msg)
if fullname == 'pbr':
# It claims to use pkg_resources to read version information, which
# would result in PEP-302 being used, but it actually does direct
# filesystem access. So instead smodge the environment to override
# any version that was defined. This will probably break something
# later.
os.environ['PBR_VERSION'] = '0.0.0'
def _on_load_module(self, msg):
if msg.is_dead:
return
tup = msg.unpickle()
fullname = tup[0]
_v and self._log.debug('received %s', fullname)
self._lock.acquire()
try:
self._cache[fullname] = tup
if tup[2] is not None and PY24:
self._update_linecache(
path='master:' + tup[2],
data=zlib.decompress(tup[3])
)
callbacks = self._callbacks.pop(fullname, [])
finally:
self._lock.release()
for callback in callbacks:
callback()
def _request_module(self, fullname, callback):
self._lock.acquire()
try:
present = fullname in self._cache
if not present:
funcs = self._callbacks.get(fullname)
if funcs is not None:
_v and self._log.debug('existing request for %s in flight',
fullname)
funcs.append(callback)
else:
_v and self._log.debug('sending new %s request to parent',
fullname)
self._callbacks[fullname] = [callback]
self._context.send(
Message(data=b(fullname), handle=GET_MODULE)
)
finally:
self._lock.release()
if present:
callback()
def load_module(self, fullname):
"""
Return the loaded module specified by fullname.
Implements importlib.abc.Loader.load_module().
Deprecated in Python 3.4+, replaced by create_module() & exec_module().
"""
fullname = to_text(fullname)
_v and self._log.debug('requesting %s', fullname)
self._refuse_imports(fullname)
event = threading.Event()
self._request_module(fullname, event.set)
event.wait()
ret = self._cache[fullname]
if ret[2] is None:
raise ModuleNotFoundError(self.absent_msg % (fullname,))
pkg_present = ret[1]
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self.get_filename(fullname)
mod.__loader__ = self
if pkg_present is not None: # it's a package.
mod.__path__ = []
mod.__package__ = fullname
self._present[fullname] = pkg_present
else:
mod.__package__ = str_rpartition(fullname, '.')[0] or None
if mod.__package__ and not PY3:
# 2.x requires __package__ to be exactly a string.
mod.__package__, _ = encodings.utf_8.encode(mod.__package__)
source = self.get_source(fullname)
try:
code = compile(source, mod.__file__, 'exec', 0, 1)
except SyntaxError:
LOG.exception('while importing %r', fullname)
raise
if PY3:
exec(code, vars(mod))
else:
exec('exec code in vars(mod)')
# #590: if a module replaces itself in sys.modules during import, below
# is necessary. This matches PyImport_ExecCodeModuleEx()
return sys.modules.get(fullname, mod)
def get_filename(self, fullname):
if fullname in self._cache:
path = self._cache[fullname][2]
if path is None:
# If find_loader() returns self but a subsequent master RPC
# reveals the module can't be loaded, and so load_module()
# throws ImportError, on Python 3.x it is still possible for
# the loader to be called to fetch metadata.
raise ModuleNotFoundError(self.absent_msg % (fullname,))
return u'master:' + self._cache[fullname][2]
def get_source(self, fullname):
if fullname in self._cache:
compressed = self._cache[fullname][3]
if compressed is None:
raise ModuleNotFoundError(self.absent_msg % (fullname,))
source = zlib.decompress(self._cache[fullname][3])
if PY3:
return to_text(source)
return source
class LogHandler(logging.Handler):
"""
A :class:`logging.Handler` subclass that arranges for :data:`FORWARD_LOG`
messages to be sent to a parent context in response to logging messages
generated by the current context. This is installed by default in child
contexts during bootstrap, so that :mod:`logging` events can be viewed and
managed centrally in the master process.
The handler is initially *corked* after construction, such that it buffers
messages until :meth:`uncork` is called. This allows logging to be
installed prior to communication with the target being available, and
avoids any possible race where early log messages might be dropped.
:param mitogen.core.Context context:
The context to send log messages towards. At present this is always
the master process.
"""
def __init__(self, context):
logging.Handler.__init__(self)
self.context = context
self.local = threading.local()
self._buffer = []
# Private synchronization is needed while corked, to ensure no
# concurrent call to _send() exists during uncork().
self._buffer_lock = threading.Lock()
def uncork(self):
"""
#305: during startup :class:`LogHandler` may be installed before it is
possible to route messages, therefore messages are buffered until
:meth:`uncork` is called by :class:`ExternalContext`.
"""
self._buffer_lock.acquire()
try:
self._send = self.context.send
for msg in self._buffer:
self._send(msg)
self._buffer = None
finally:
self._buffer_lock.release()
def _send(self, msg):
self._buffer_lock.acquire()
try:
if self._buffer is None:
# uncork() may run concurrent to _send()
self._send(msg)
else:
self._buffer.append(msg)
finally:
self._buffer_lock.release()
def emit(self, rec):
"""
Send a :data:`FORWARD_LOG` message towards the target context.
"""
        if rec.name == 'mitogen.io' or \
getattr(self.local, 'in_emit', False):
return
self.local.in_emit = True
try:
msg = self.format(rec)
encoded = '%s\x00%s\x00%s' % (rec.name, rec.levelno, msg)
if isinstance(encoded, UnicodeType):
# Logging package emits both :(
encoded = encoded.encode('utf-8')
self._send(Message(data=encoded, handle=FORWARD_LOG))
finally:
self.local.in_emit = False
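# Sketch of wiring LogHandler into the stdlib logging tree for a given parent
# context; ExternalContext performs the equivalent of this during bootstrap.
# The `context` parameter is assumed to be an established Context, and
# _example_install_log_handler() is a hypothetical helper.
def _example_install_log_handler(context):
    handler = LogHandler(context)
    logging.getLogger().addHandler(handler)
    handler.uncork()    # flush any records buffered while corked
    return handler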
class Stream(object):
"""
A :class:`Stream` is one readable and optionally one writeable file
descriptor (represented by :class:`Side`) aggregated alongside an
associated :class:`Protocol` that knows how to respond to IO readiness
events for those descriptors.
Streams are registered with :class:`Broker`, and callbacks are invoked on
the broker thread in response to IO activity. When registered using
:meth:`Broker.start_receive` or :meth:`Broker._start_transmit`, the broker
may call any of :meth:`on_receive`, :meth:`on_transmit`,
:meth:`on_shutdown` or :meth:`on_disconnect`.
It is expected that the :class:`Protocol` associated with a stream will
change over its life. For example during connection setup, the initial
protocol may be :class:`mitogen.parent.BootstrapProtocol` that knows how to
enter SSH and sudo passwords and transmit the :mod:`mitogen.core` source to
the target, before handing off to :class:`MitogenProtocol` when the target
process is initialized.
Streams connecting to children are in turn aggregated by
:class:`mitogen.parent.Connection`, which contains additional logic for
managing any child process, and a reference to any separate ``stderr``
:class:`Stream` connected to that process.
"""
#: A :class:`Side` representing the stream's receive file descriptor.
receive_side = None
#: A :class:`Side` representing the stream's transmit file descriptor.
transmit_side = None
#: A :class:`Protocol` representing the protocol active on the stream.
protocol = None
#: In parents, the :class:`mitogen.parent.Connection` instance.
conn = None
#: The stream name. This is used in the :meth:`__repr__` output in any log
#: messages, it may be any descriptive string.
name = u'default'
def set_protocol(self, protocol):
"""
Bind a :class:`Protocol` to this stream, by updating
:attr:`Protocol.stream` to refer to this stream, and updating this
stream's :attr:`Stream.protocol` to the refer to the protocol. Any
prior protocol's :attr:`Protocol.stream` is set to :data:`None`.
"""
if self.protocol:
self.protocol.stream = None
self.protocol = protocol
self.protocol.stream = self
def accept(self, rfp, wfp):
"""
Attach a pair of file objects to :attr:`receive_side` and
:attr:`transmit_side`, after wrapping them in :class:`Side` instances.
:class:`Side` will call :func:`set_nonblock` and :func:`set_cloexec`
on the underlying file descriptors during construction.
The same file object may be used for both sides. The default
        :meth:`on_disconnect` handles the possibility that only one
descriptor may need to be closed.
:param file rfp:
The file object to receive from.
:param file wfp:
The file object to transmit to.
"""
self.receive_side = Side(self, rfp)
self.transmit_side = Side(self, wfp)
def __repr__(self):
return "<Stream %s #%04x>" % (self.name, id(self) & 0xffff,)
def on_receive(self, broker):
"""
Invoked by :class:`Broker` when the stream's :attr:`receive_side` has
been marked readable using :meth:`Broker.start_receive` and the broker
has detected the associated file descriptor is ready for reading.
Subclasses must implement this if they are registered using
:meth:`Broker.start_receive`, and the method must invoke
:meth:`on_disconnect` if reading produces an empty string.
The default implementation reads :attr:`Protocol.read_size` bytes and
passes the resulting bytestring to :meth:`Protocol.on_receive`. If the
bytestring is 0 bytes, invokes :meth:`on_disconnect` instead.
"""
buf = self.receive_side.read(self.protocol.read_size)
if not buf:
LOG.debug('%r: empty read, disconnecting', self.receive_side)
return self.on_disconnect(broker)
self.protocol.on_receive(broker, buf)
def on_transmit(self, broker):
"""
Invoked by :class:`Broker` when the stream's :attr:`transmit_side` has
been marked writeable using :meth:`Broker._start_transmit` and the
broker has detected the associated file descriptor is ready for
writing.
        Subclasses must implement this if they are ever registered with
:meth:`Broker._start_transmit`.
The default implementation invokes :meth:`Protocol.on_transmit`.
"""
self.protocol.on_transmit(broker)
def on_shutdown(self, broker):
"""
Invoked by :meth:`Broker.shutdown` to allow the stream time to
gracefully shutdown.
The default implementation emits a ``shutdown`` signal before
invoking :meth:`on_disconnect`.
"""
        fire(self, 'shutdown')
self.protocol.on_shutdown(broker)
def on_disconnect(self, broker):
"""
Invoked by :class:`Broker` to force disconnect the stream during
shutdown, invoked by the default :meth:`on_shutdown` implementation,
and usually invoked by any subclass :meth:`on_receive` implementation
in response to a 0-byte read.
The base implementation fires a ``disconnect`` event, then closes
:attr:`receive_side` and :attr:`transmit_side` after unregistering the
stream from the broker.
"""
fire(self, 'disconnect')
self.protocol.on_disconnect(broker)
class Protocol(object):
"""
Implement the program behaviour associated with activity on a
:class:`Stream`. The protocol in use may vary over a stream's life, for
example to allow :class:`mitogen.parent.BootstrapProtocol` to initialize
the connected child before handing it off to :class:`MitogenProtocol`. A
stream's active protocol is tracked in the :attr:`Stream.protocol`
attribute, and modified via :meth:`Stream.set_protocol`.
Protocols do not handle IO, they are entirely reliant on the interface
provided by :class:`Stream` and :class:`Side`, allowing the underlying IO
implementation to be replaced without modifying behavioural logic.
"""
stream_class = Stream
#: The :class:`Stream` this protocol is currently bound to, or
#: :data:`None`.
stream = None
#: The size of the read buffer used by :class:`Stream` when this is the
#: active protocol for the stream.
read_size = CHUNK_SIZE
@classmethod
def build_stream(cls, *args, **kwargs):
stream = cls.stream_class()
stream.set_protocol(cls(*args, **kwargs))
return stream
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
self.stream and self.stream.name,
)
def on_shutdown(self, broker):
_v and LOG.debug('%r: shutting down', self)
self.stream.on_disconnect(broker)
def on_disconnect(self, broker):
        # Normally both sides share an FD, so it is important that transmit_side is
# deregistered from Poller before closing the receive side, as pollers
# like epoll and kqueue unregister all events on FD close, causing
# subsequent attempt to unregister the transmit side to fail.
LOG.debug('%r: disconnecting', self)
broker.stop_receive(self.stream)
if self.stream.transmit_side:
broker._stop_transmit(self.stream)
self.stream.receive_side.close()
if self.stream.transmit_side:
self.stream.transmit_side.close()
class DelimitedProtocol(Protocol):
"""
Provide a :meth:`Protocol.on_receive` implementation for protocols that are
delimited by a fixed string, like text based protocols. Each message is
passed to :meth:`on_line_received` as it arrives, with incomplete messages
passed to :meth:`on_partial_line_received`.
When emulating user input it is often necessary to respond to incomplete
lines, such as when a "Password: " prompt is sent.
:meth:`on_partial_line_received` may be called repeatedly with an
increasingly complete message. When a complete message is finally received,
:meth:`on_line_received` will be called once for it before the buffer is
discarded.
If :func:`on_line_received` returns :data:`False`, remaining data is passed
unprocessed to the stream's current protocol's :meth:`on_receive`. This
allows switching from line-oriented to binary while the input buffer
contains both kinds of data.
"""
#: The delimiter. Defaults to newline.
delimiter = b('\n')
_trailer = b('')
def on_receive(self, broker, buf):
_vv and IOLOG.debug('%r.on_receive()', self)
stream = self.stream
self._trailer, cont = mitogen.core.iter_split(
buf=self._trailer + buf,
delim=self.delimiter,
func=self.on_line_received,
)
if self._trailer:
if cont:
self.on_partial_line_received(self._trailer)
else:
assert stream.protocol is not self, \
'stream protocol is no longer %r' % (self,)
stream.protocol.on_receive(broker, self._trailer)
def on_line_received(self, line):
"""
Receive a line from the stream.
:param bytes line:
The encoded line, excluding the delimiter.
:returns:
:data:`False` to indicate this invocation modified the stream's
active protocol, and any remaining buffered data should be passed
to the new protocol's :meth:`on_receive` method.
Any other return value is ignored.
"""
pass
def on_partial_line_received(self, line):
"""
Receive a trailing unterminated partial line from the stream.
:param bytes line:
The encoded partial line.
"""
pass
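# Sketch of a line-oriented protocol built on DelimitedProtocol: complete
# lines are logged, while partial lines (e.g. an unterminated prompt) are
# reported separately. _ExampleLineLogProtocol is illustrative only and is
# not part of the mitogen API.
class _ExampleLineLogProtocol(DelimitedProtocol):
    def on_line_received(self, line):
        LOG.debug('peer sent line: %r', line)
    def on_partial_line_received(self, line):
        LOG.debug('peer sent partial line: %r', line)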
class BufferedWriter(object):
"""
Implement buffered output while avoiding quadratic string operations. This
is currently constructed by each protocol, in future it may become fixed
for each stream instead.
"""
def __init__(self, broker, protocol):
self._broker = broker
self._protocol = protocol
self._buf = collections.deque()
self._len = 0
def write(self, s):
"""
Transmit `s` immediately, falling back to enqueuing it and marking the
stream writeable if no OS buffer space is available.
"""
if not self._len:
# Modifying epoll/Kqueue state is expensive, as are needless broker
# loops. Rather than wait for writeability, just write immediately,
# and fall back to the broker loop on error or full buffer.
try:
n = self._protocol.stream.transmit_side.write(s)
if n:
if n == len(s):
return
s = s[n:]
except OSError:
pass
self._broker._start_transmit(self._protocol.stream)
self._buf.append(s)
self._len += len(s)
def on_transmit(self, broker):
"""
Respond to stream writeability by retrying previously buffered
:meth:`write` calls.
"""
if self._buf:
buf = self._buf.popleft()
written = self._protocol.stream.transmit_side.write(buf)
if not written:
_v and LOG.debug('disconnected during write to %r', self)
self._protocol.stream.on_disconnect(broker)
return
            elif written != len(buf):
self._buf.appendleft(BufferType(buf, written))
_vv and IOLOG.debug('transmitted %d bytes to %r', written, self)
self._len -= written
if not self._buf:
broker._stop_transmit(self._protocol.stream)
class Side(object):
"""
Represent one side of a :class:`Stream`. This allows unidirectional (e.g.
pipe) and bidirectional (e.g. socket) streams to operate identically.
Sides are also responsible for tracking the open/closed state of the
underlying FD, preventing erroneous duplicate calls to :func:`os.close` due
to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk
silently succeeding by closing an unrelated descriptor. For this reason, it
is crucial only one file object exists per unique descriptor.
:param mitogen.core.Stream stream:
The stream this side is associated with.
:param object fp:
The file or socket object managing the underlying file descriptor. Any
object may be used that supports `fileno()` and `close()` methods.
:param bool cloexec:
If :data:`True`, the descriptor has its :data:`fcntl.FD_CLOEXEC` flag
enabled using :func:`fcntl.fcntl`.
:param bool keep_alive:
If :data:`True`, the continued existence of this side will extend the
shutdown grace period until it has been unregistered from the broker.
:param bool blocking:
If :data:`False`, the descriptor has its :data:`os.O_NONBLOCK` flag
enabled using :func:`fcntl.fcntl`.
"""
_fork_refs = weakref.WeakValueDictionary()
closed = False
def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False):
#: The :class:`Stream` for which this is a read or write side.
self.stream = stream
# File or socket object responsible for the lifetime of its underlying
# file descriptor.
self.fp = fp
#: Integer file descriptor to perform IO on, or :data:`None` if
#: :meth:`close` has been called. This is saved separately from the
#: file object, since :meth:`file.fileno` cannot be called on it after
#: it has been closed.
self.fd = fp.fileno()
#: If :data:`True`, causes presence of this side in
#: :class:`Broker`'s active reader set to defer shutdown until the
#: side is disconnected.
self.keep_alive = keep_alive
self._fork_refs[id(self)] = self
if cloexec:
set_cloexec(self.fd)
if not blocking:
set_nonblock(self.fd)
def __repr__(self):
return '<Side of %s fd %s>' % (
self.stream.name or repr(self.stream),
self.fd
)
@classmethod
def _on_fork(cls):
while cls._fork_refs:
_, side = cls._fork_refs.popitem()
_vv and IOLOG.debug('Side._on_fork() closing %r', side)
side.close()
def close(self):
"""
Call :meth:`file.close` on :attr:`fp` if it is not :data:`None`,
then set it to :data:`None`.
"""
_vv and IOLOG.debug('%r.close()', self)
if not self.closed:
self.closed = True
self.fp.close()
def read(self, n=CHUNK_SIZE):
"""
Read up to `n` bytes from the file descriptor, wrapping the underlying
:func:`os.read` call with :func:`io_op` to trap common disconnection
conditions.
:meth:`read` always behaves as if it is reading from a regular UNIX
file; socket, pipe, and TTY disconnection errors are masked and result
in a 0-sized read like a regular file.
:returns:
Bytes read, or the empty string to indicate disconnection was
detected.
"""
if self.closed:
# Refuse to touch the handle after closed, it may have been reused
# by another thread. TODO: synchronize read()/write()/close().
return b('')
s, disconnected = io_op(os.read, self.fd, n)
if disconnected:
LOG.debug('%r: disconnected during read: %s', self, disconnected)
return b('')
return s
def write(self, s):
"""
Write as much of the bytes from `s` as possible to the file descriptor,
wrapping the underlying :func:`os.write` call with :func:`io_op` to
trap common disconnection conditions.
:returns:
Number of bytes written, or :data:`None` if disconnection was
detected.
"""
if self.closed:
# Don't touch the handle after close, it may be reused elsewhere.
return None
written, disconnected = io_op(os.write, self.fd, s)
if disconnected:
LOG.debug('%r: disconnected during write: %s', self, disconnected)
return None
return written
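# Sketch tying Stream, Side and the pipe() helper together: wrap both ends of
# a pipe, write via the transmit side and read the bytes back via the receive
# side. Broker normally owns this wiring; _example_stream_side_roundtrip() is
# a hypothetical helper for illustration only.
def _example_stream_side_roundtrip():
    stream = Stream()
    rfp, wfp = pipe()
    stream.accept(rfp, wfp)          # wraps both file objects in Side
    try:
        stream.transmit_side.write(b('hello'))
        return stream.receive_side.read()     # -> b'hello'
    finally:
        stream.receive_side.close()
        stream.transmit_side.close()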
class MitogenProtocol(Protocol):
"""
:class:`Protocol` implementing mitogen's :ref:`stream protocol
<stream-protocol>`.
"""
#: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
#: its value is the same as :data:`mitogen.context_id` or appears in
#: :data:`mitogen.parent_ids`.
is_privileged = False
#: Invoked as `on_message(stream, msg)` each message received from the
#: peer.
on_message = None
def __init__(self, router, remote_id, auth_id=None,
local_id=None, parent_ids=None):
self._router = router
self.remote_id = remote_id
#: If not :data:`None`, :class:`Router` stamps this into
#: :attr:`Message.auth_id` of every message received on this stream.
self.auth_id = auth_id
if parent_ids is None:
parent_ids = mitogen.parent_ids
if local_id is None:
local_id = mitogen.context_id
self.is_privileged = (
(remote_id in parent_ids) or
auth_id in ([local_id] + parent_ids)
)
self.sent_modules = set(['mitogen','mitogen.core'])
self._input_buf = collections.deque()
self._input_buf_len = 0
self._writer = BufferedWriter(router.broker, self)
#: Routing records the dst_id of every message arriving from this
#: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID.
self.egress_ids = set()
def on_receive(self, broker, buf):
"""
Handle the next complete message on the stream. Raise
:class:`StreamError` on failure.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
if self._input_buf and self._input_buf_len < 128:
self._input_buf[0] += buf
else:
self._input_buf.append(buf)
self._input_buf_len += len(buf)
while self._receive_one(broker):
pass
corrupt_msg = (
'%s: Corruption detected: frame signature incorrect. This likely means'
' some external process is interfering with the connection. Received:'
'\n\n'
'%r'
)
def _receive_one(self, broker):
if self._input_buf_len < Message.HEADER_LEN:
return False
msg = Message()
msg.router = self._router
(magic, msg.dst_id, msg.src_id, msg.auth_id,
msg.handle, msg.reply_to, msg_len) = struct.unpack(
Message.HEADER_FMT,
self._input_buf[0][:Message.HEADER_LEN],
)
if magic!= Message.HEADER_MAGIC:
LOG.error(self.corrupt_msg, self.stream.name, self._input_buf[0][:2048])
self.stream.on_disconnect(broker)
return False
if msg_len > self._router.max_message_size:
LOG.error('%r: Maximum message size exceeded (got %d, max %d)',
self, msg_len, self._router.max_message_size)
self.stream.on_disconnect(broker)
return False
total_len = msg_len + Message.HEADER_LEN
if self._input_buf_len < total_len:
_vv and IOLOG.debug(
'%r: Input too short (want %d, got %d)',
self, msg_len, self._input_buf_len - Message.HEADER_LEN
)
return False
start = Message.HEADER_LEN
prev_start = start
remain = total_len
bits = []
while remain:
buf = self._input_buf.popleft()
bit = buf[start:remain]
bits.append(bit)
remain -= len(bit) + start
prev_start = start
start = 0
msg.data = b('').join(bits)
self._input_buf.appendleft(buf[prev_start+len(bit):])
self._input_buf_len -= total_len
self._router._async_route(msg, self.stream)
return True
def pending_bytes(self):
"""
Return the number of bytes queued for transmission on this stream. This
can be used to limit the amount of data buffered in RAM by an otherwise
unlimited consumer.
For an accurate result, this method should be called from the Broker
thread, for example by using :meth:`Broker.defer_sync`.
"""
return self._writer._len
def on_transmit(self, broker):
"""
Transmit buffered messages.
"""
_vv and IOLOG.debug('%r.on_transmit()', self)
self._writer.on_transmit(broker)
def _send(self, msg):
_vv and IOLOG.debug('%r._send(%r)', self, msg)
self._writer.write(msg.pack())
def send(self, msg):
"""
Send `data` to `handle`, and tell the broker we have output. May be
called from any thread.
"""
self._router.broker.defer(self._send, msg)
def on_shutdown(self, broker):
"""
Disable :class:`Protocol` immediate disconnect behaviour.
"""
_v and LOG.debug('%r: shutting down', self)
class Context(object):
"""
Represent a remote context regardless of the underlying connection method.
Context objects are simple facades that emit messages through an
associated router, and have :ref:`signals` raised against them in response
to various events relating to the context.
**Note:** This is the somewhat limited core version, used by child
contexts. The master subclass is documented below this one.
Contexts maintain no internal state and are thread-safe.
Prefer :meth:`Router.context_by_id` over constructing context objects
explicitly, as that method is deduplicating, and returns the only context
instance :ref:`signals` will be raised on.
:param mitogen.core.Router router:
Router to emit messages through.
:param int context_id:
Context ID.
:param str name:
Context name.
"""
name = None
remote_name = None
def __init__(self, router, context_id, name=None):
self.router = router
self.context_id = context_id
if name:
self.name = to_text(name)
def __reduce__(self):
return _unpickle_context, (self.context_id, self.name)
def on_disconnect(self):
_v and LOG.debug('%r: disconnecting', self)
fire(self, 'disconnect')
def send_async(self, msg, persist=False):
"""
Arrange for `msg` to be delivered to this context, with replies
directed to a newly constructed receiver. :attr:`dst_id
<Message.dst_id>` is set to the target context ID, and :attr:`reply_to
<Message.reply_to>` is set to the newly constructed receiver's handle.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param mitogen.core.Message msg:
The message.
:returns:
:class:`Receiver` configured to receive any replies sent to the
message's `reply_to` handle.
"""
receiver = Receiver(self.router, persist=persist, respondent=self)
msg.dst_id = self.context_id
msg.reply_to = receiver.handle
_v and LOG.debug('sending message to %r: %r', self, msg)
self.send(msg)
return receiver
def call_service_async(self, service_name, method_name, **kwargs):
if isinstance(service_name, BytesType):
service_name = service_name.encode('utf-8')
elif not isinstance(service_name, UnicodeType):
service_name = service_name.name() # Service.name()
_v and LOG.debug('calling service %s.%s of %r, args: %r',
service_name, method_name, self, kwargs)
tup = (service_name, to_text(method_name), Kwargs(kwargs))
msg = Message.pickled(tup, handle=CALL_SERVICE)
return self.send_async(msg)
def send(self, msg):
"""
Arrange for `msg` to be delivered to this context. :attr:`dst_id
<Message.dst_id>` is set to the target context ID.
:param Message msg:
Message.
"""
msg.dst_id = self.context_id
self.router.route(msg)
def call_service(self, service_name, method_name, **kwargs):
recv = self.call_service_async(service_name, method_name, **kwargs)
return recv.get().unpickle()
def send_await(self, msg, deadline=None):
"""
Like :meth:`send_async`, but expect a single reply (`persist=False`)
delivered within `deadline` seconds.
:param mitogen.core.Message msg:
The message.
:param float deadline:
If not :data:`None`, seconds before timing out waiting for a reply.
:returns:
Deserialized reply.
:raises TimeoutError:
No message was received and `deadline` passed.
"""
receiver = self.send_async(msg)
response = receiver.get(deadline)
data = response.unpickle()
_vv and IOLOG.debug('%r._send_await() -> %r', self, data)
return data
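# Example: a sketch of the request/reply pattern built on send_async() and
# send_await(). SOME_HANDLE is illustrative; the peer context must have a
# handler registered for it:
#
#     reply = context.send_await(
#         Message.pickled(('ping',), handle=SOME_HANDLE),
#         deadline=10.0,
#     )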
def __repr__(self):
return 'Context(%s, %r)' % (self.context_id, self.name)
def _unpickle_context(context_id, name, router=None):
if not (isinstance(context_id, (int, long)) and context_id >= 0 and (
(name is None) or
(isinstance(name, UnicodeType) and len(name) < 100))
):
raise TypeError('cannot unpickle Context: bad input')
if isinstance(router, Router):
return router.context_by_id(context_id, name=name)
return Context(None, context_id, name) # For plain Jane pickle.
class Poller(object):
"""
A poller manages OS file descriptors the user is waiting to become
available for IO. The :meth:`poll` method blocks the calling thread
until one or more become ready. The default implementation is based on
:func:`select.poll`.
Each descriptor has an associated `data` element, which is unique for each
readiness type, and defaults to being the same as the file descriptor. The
:meth:`poll` method yields the data associated with a descriptor, rather
than the descriptor itself, allowing concise loops like::
p = Poller()
p.start_receive(conn.fd, data=conn.on_read)
p.start_transmit(conn.fd, data=conn.on_write)
for callback in p.poll():
callback() # invoke appropriate bound instance method
Pollers may be modified while :meth:`poll` is yielding results. Removals
are processed immediately, causing pending events for the descriptor to be
discarded.
The :meth:`close` method must be called when a poller is discarded to avoid
a resource leak.
Pollers may only be used by one thread at a time.
"""
SUPPORTED = True
# This changed from select() to poll() in Mitogen 0.2.4. Since poll() has
# no upper FD limit, it is suitable for use with Latch, which must handle
# FDs larger than select's limit during many-host runs. We want this
# because poll() requires no setup and teardown: just a single system call,
# which is important because Latch.get() creates a Poller on each
# invocation. In a microbenchmark, poll() vs. epoll_ctl() is 30% faster in
# this scenario. If select() must return in future, it is important
# Latch.poller_class is set from parent.py to point to the industrial
# strength poller for the OS, otherwise Latch will fail randomly.
#: Increments on every poll(). Used to version _rfds and _wfds.
_generation = 1
def __init__(self):
self._rfds = {}
self._wfds = {}
def __repr__(self):
return '%s' % (type(self).__name__,)
def _update(self, fd):
"""
Required by PollPoller subclass.
"""
pass
@property
def readers(self):
"""
Return a list of `(fd, data)` tuples for every FD registered for
receive readiness.
"""
return list((fd, data) for fd, (data, gen) in self._rfds.items())
@property
def writers(self):
"""
Return a list of `(fd, data)` tuples for every FD registered for
transmit readiness.
"""
return list((fd, data) for fd, (data, gen) in self._wfds.items())
def close(self):
"""
Close any underlying OS resource used by the poller.
"""
pass
def start_receive(self, fd, data=None):
"""
Cause :meth:`poll` to yield `data` when `fd` is readable.
"""
self._rfds[fd] = (data or fd, self._generation)
self._update(fd)
def stop_receive(self, fd):
"""
Stop yielding readability events for `fd`.
Redundant calls to :meth:`stop_receive` are silently ignored, this may
change in future.
"""
self._rfds.pop(fd, None)
self._update(fd)
def start_transmit(self, fd, data=None):
"""
Cause :meth:`poll` to yield `data` when `fd` is writeable.
"""
self._wfds[fd] = (data or fd, self._generation)
self._update(fd)
def stop_transmit(self, fd):
"""
Stop yielding writeability events for `fd`.
Redundant calls to :meth:`stop_transmit` are silently ignored, this may
change in future.
"""
self._wfds.pop(fd, None)
self._update(fd)
def _poll(self, timeout):
(rfds, wfds, _), _ = io_op(select.select,
self._rfds,
self._wfds,
(), timeout
)
for fd in rfds:
_vv and IOLOG.debug('%r: POLLIN for %r', self, fd)
data, gen = self._rfds.get(fd, (None, None))
if gen and gen < self._generation:
yield data
for fd in wfds:
_vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
data, gen = self._wfds.get(fd, (None, None))
if gen and gen < self._generation:
yield data
def poll(self, timeout=None):
"""
Block the calling thread until one or more FDs are ready for IO.
:param float timeout:
If not :data:`None`, seconds to wait without an event before
returning an empty iterable.
:returns:
Iterable of `data` elements associated with ready FDs.
"""
_vv and IOLOG.debug('%r.poll(%r)', self, timeout)
self._generation += 1
return self._poll(timeout)
class Latch(object):
"""
A latch is a :class:`Queue.Queue`-like object that supports mutation and
waiting from multiple threads, however unlike :class:`Queue.Queue`,
waiting threads always remain interruptible, so CTRL+C always succeeds, and
waits where a timeout is set experience no wake up latency. These
properties are not possible in combination using the built-in threading
primitives available in Python 2.x.
Latches implement queues using the UNIX self-pipe trick, and a per-thread
:func:`socket.socketpair` that is lazily created the first time any
latch attempts to sleep on a thread, and dynamically associated with the
waiting Latch only for duration of the wait.
See :ref:`waking-sleeping-threads` for further discussion.
"""
#: The :class:`Poller` implementation to use for waiting. Since the poller
#: will be very short-lived, we prefer :class:`mitogen.parent.PollPoller`
#: if it is available, or :class:`mitogen.core.Poller` otherwise, since
#: these implementations require no system calls to create, configure or
#: destroy.
poller_class = Poller
#: If not :data:`None`, a function invoked as `notify(latch)` after a
#: successful call to :meth:`put`. The function is invoked on the
#: :meth:`put` caller's thread, which may be the :class:`Broker` thread,
#: therefore it must not block. Used by :class:`mitogen.select.Select` to
#: efficiently implement waiting on multiple event sources.
notify = None
# The _cls_ prefixes here are to make it crystal clear in the code which
# state mutation isn't covered by :attr:`_lock`.
#: List of reusable :func:`socket.socketpair` tuples. The list is mutated
#: from multiple threads, the only safe operations are `append()` and
#: `pop()`.
_cls_idle_socketpairs = []
#: List of every socket object that must be closed by :meth:`_on_fork`.
#: Inherited descriptors cannot be reused, as the duplicated handles
#: reference the same underlying kernel object in use by the parent.
_cls_all_sockets = []
def __init__(self):
self.closed = False
self._lock = threading.Lock()
#: List of unconsumed enqueued items.
self._queue = []
#: List of `(wsock, cookie)` awaiting an element, where `wsock` is the
#: socketpair's write side, and `cookie` is the string to write.
self._sleeping = []
#: Number of elements of :attr:`_sleeping` that have already been
#: woken, and have a corresponding element index from :attr:`_queue`
#: assigned to them.
self._waking = 0
@classmethod
def _on_fork(cls):
"""
Clean up any files belonging to the parent process after a fork.
"""
cls._cls_idle_socketpairs = []
while cls._cls_all_sockets:
cls._cls_all_sockets.pop().close()
def close(self):
"""
Mark the latch as closed, and cause every sleeping thread to be woken,
with :class:`mitogen.core.LatchError` raised in each thread.
"""
self._lock.acquire()
try:
self.closed = True
while self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._wake(wsock, cookie)
self._waking += 1
finally:
self._lock.release()
def size(self):
"""
Return the number of items currently buffered.
As with :class:`Queue.Queue`, `0` may be returned even though a
subsequent call to :meth:`get` will succeed, since a message may be
posted at any moment between :meth:`size` and :meth:`get`.
As with :class:`Queue.Queue`, `>0` may be returned even though a
subsequent call to :meth:`get` will block, since another waiting thread
may be woken at any moment between :meth:`size` and :meth:`get`.
:raises LatchError:
The latch has already been marked closed.
"""
self._lock.acquire()
try:
if self.closed:
raise LatchError()
return len(self._queue)
finally:
self._lock.release()
def empty(self):
"""
Return `size() == 0`.
.. deprecated:: 0.2.8
Use :meth:`size` instead.
:raises LatchError:
The latch has already been marked closed.
"""
return self.size() == 0
def _get_socketpair(self):
"""
Return an unused socketpair, creating one if none exist.
"""
try:
return self._cls_idle_socketpairs.pop() # pop() must be atomic
except IndexError:
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
set_cloexec(rsock.fileno())
set_cloexec(wsock.fileno())
self._cls_all_sockets.extend((rsock, wsock))
return rsock, wsock
COOKIE_MAGIC, = struct.unpack('L', b('LTCH') * (struct.calcsize('L')//4))
COOKIE_FMT = '>Qqqq' # #545: id() and get_ident() may exceed long on armhfp.
COOKIE_SIZE = struct.calcsize(COOKIE_FMT)
def _make_cookie(self):
"""
Return a string encoding the ID of the process, instance and thread.
This disambiguates legitimate wake-ups, accidental writes to the FD,
and buggy internal FD sharing.
"""
return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
os.getpid(), id(self), thread.get_ident())
def get(self, timeout=None, block=True):
"""
Return the next enqueued object, or sleep waiting for one.
:param float timeout:
If not :data:`None`, specifies a timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
The de-queued object.
"""
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)',
self, timeout, block)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
i = len(self._sleeping)
if len(self._queue) > i:
_vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
return self._queue.pop(i)
if not block:
raise TimeoutError()
rsock, wsock = self._get_socketpair()
cookie = self._make_cookie()
self._sleeping.append((wsock, cookie))
finally:
self._lock.release()
poller = self.poller_class()
poller.start_receive(rsock.fileno())
try:
return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
finally:
poller.close()
def _get_sleep(self, poller, timeout, block, rsock, wsock, cookie):
"""
When a result is not immediately available, sleep waiting for
:meth:`put` to write a byte to our socket pair.
"""
_vv and IOLOG.debug(
'%r._get_sleep(timeout=%r, block=%r, fd=%d/%d)',
self, timeout, block, rsock.fileno(), wsock.fileno()
)
e = None
try:
list(poller.poll(timeout))
except Exception:
e = sys.exc_info()[1]
self._lock.acquire()
try:
i = self._sleeping.index((wsock, cookie))
del self._sleeping[i]
try:
got_cookie = rsock.recv(self.COOKIE_SIZE)
except socket.error:
e2 = sys.exc_info()[1]
if e2.args[0] == errno.EAGAIN:
e = TimeoutError()
else:
e = e2
self._cls_idle_socketpairs.append((rsock, wsock))
if e:
raise e
assert cookie == got_cookie, (
"Cookie incorrect; got %r, expected %r"
% (binascii.hexlify(got_cookie),
binascii.hexlify(cookie))
)
assert i < self._waking, (
"Cookie correct, but no queue element assigned."
)
self._waking -= 1
if self.closed:
raise LatchError()
_vv and IOLOG.debug('%r.get() wake -> %r', self, self._queue[i])
return self._queue.pop(i)
finally:
self._lock.release()
def put(self, obj=None):
"""
Enqueue an object, waking the first thread waiting for a result, if one
exists.
:param obj:
Object to enqueue. Defaults to :data:`None` as a convenience when
using :class:`Latch` only for synchronization.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the object is no longer valid.
"""
_vv and IOLOG.debug('%r.put(%r)', self, obj)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
self._queue.append(obj)
wsock = None
if self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._waking += 1
_vv and IOLOG.debug('%r.put() -> waking wfd=%r',
self, wsock.fileno())
elif self.notify:
self.notify(self)
finally:
self._lock.release()
if wsock:
self._wake(wsock, cookie)
def _wake(self, wsock, cookie):
written, disconnected = io_op(os.write, wsock.fileno(), cookie)
assert written == len(cookie) and not disconnected
def __repr__(self):
return 'Latch(%#x, size=%d, t=%r)' % (
id(self),
len(self._queue),
threading__thread_name(threading__current_thread()),
)
class Waker(Protocol):
"""
:class:`Protocol` implementing the `UNIX self-pipe trick`_. Used to wake
:class:`Broker` when another thread needs to modify its state, by enqueing
a function call to run on the :class:`Broker` thread.
.. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html
"""
read_size = 1
broker_ident = None
@classmethod
def build_stream(cls, broker):
stream = super(Waker, cls).build_stream(broker)
stream.accept(*pipe())
return stream
def __init__(self, broker):
self._broker = broker
self._deferred = collections.deque()
def __repr__(self):
return 'Waker(fd=%r/%r)' % (
self.stream.receive_side and self.stream.receive_side.fd,
self.stream.transmit_side and self.stream.transmit_side.fd,
)
@property
def keep_alive(self):
"""
Prevent immediate Broker shutdown while deferred functions remain.
"""
return len(self._deferred)
def on_receive(self, broker, buf):
"""
Drain the pipe and fire callbacks. Since :attr:`_deferred` is
synchronized, :meth:`defer` and :meth:`on_receive` can conspire to
ensure only one byte needs to be pending regardless of queue length.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
while True:
try:
func, args, kwargs = self._deferred.popleft()
except IndexError:
return
try:
func(*args, **kwargs)
except Exception:
LOG.exception('defer() crashed: %r(*%r, **%r)',
func, args, kwargs)
broker.shutdown()
def _wake(self):
"""
Wake the multiplexer by writing a byte. If Broker is midway through
teardown, the FD may already be closed, so ignore EBADF.
"""
try:
self.stream.transmit_side.write(b(' '))
except OSError:
e = sys.exc_info()[1]
if e.args[0] not in (errno.EBADF, errno.EWOULDBLOCK):
raise
broker_shutdown_msg = (
"An attempt was made to enqueue a message with a Broker that has "
"already exitted. It is likely your program called Broker.shutdown() "
"too early."
)
def defer(self, func, *args, **kwargs):
"""
Arrange for `func()` to execute on the broker thread. This function
returns immediately without waiting the result of `func()`. Use
:meth:`defer_sync` to block until a result is available.
:raises mitogen.core.Error:
:meth:`defer` was called after :class:`Broker` has begun shutdown.
"""
if thread.get_ident() == self.broker_ident:
_vv and IOLOG.debug('%r.defer() [immediate]', self)
return func(*args, **kwargs)
if self._broker._exitted:
raise Error(self.broker_shutdown_msg)
_vv and IOLOG.debug('%r.defer() [fd=%r]', self,
self.stream.transmit_side.fd)
self._deferred.append((func, args, kwargs))
self._wake()
class IoLoggerProtocol(DelimitedProtocol):
"""
Attached to one end of a socket pair whose other end overwrites one of the
standard ``stdout`` or ``stderr`` file descriptors in a child context.
Received data is split up into lines, decoded as UTF-8 and logged to the
:mod:`logging` package as either the ``stdout`` or ``stderr`` logger.
Logging in child contexts is in turn forwarded to the master process using
:class:`LogHandler`.
"""
@classmethod
def build_stream(cls, name, dest_fd):
"""
Even though the file descriptor `dest_fd` will hold the opposite end of
the socket open, we must keep a separate dup() of it (i.e. wsock) in
case some code decides to overwrite `dest_fd` later, which would
prevent :meth:`on_shutdown` from calling :meth:`shutdown()
<socket.socket.shutdown>` on it.
"""
rsock, wsock = socket.socketpair()
os.dup2(wsock.fileno(), dest_fd)
stream = super(IoLoggerProtocol, cls).build_stream(name)
stream.name = name
stream.accept(rsock, wsock)
return stream
def __init__(self, name):
self._log = logging.getLogger(name)
# #453: prevent accidental log initialization in a child creating a
# feedback loop.
self._log.propagate = False
self._log.handlers = logging.getLogger().handlers[:]
def on_shutdown(self, broker):
"""
Shut down the write end of the socket, preventing any further writes to
it by this process, or subprocess that inherited it. This allows any
remaining kernel-buffered data to be drained during graceful shutdown
without the buffer continuously refilling due to some out of control
child process.
"""
_v and LOG.debug('%r: shutting down', self)
if not IS_WSL:
# #333: WSL generates invalid readiness indication on shutdown().
# This modifies the *kernel object* inherited by children, causing
# EPIPE on subsequent writes to any dupped FD in any process. The
# read side can then drain completely of prior buffered data.
self.stream.transmit_side.fp.shutdown(socket.SHUT_WR)
self.stream.transmit_side.close()
def on_line_received(self, line):
"""
Decode the received line as UTF-8 and pass it to the logging framework.
"""
self._log.info('%s', line.decode('utf-8','replace'))
class Router(object):
"""
Route messages between contexts, and invoke local handlers for messages
addressed to this context. :meth:`Router.route() <route>` straddles the
:class:`Broker` thread and user threads, it is safe to call anywhere.
**Note:** This is the somewhat limited core version of the Router class
used by child contexts. The master subclass is documented below this one.
"""
#: The :class:`mitogen.core.Context` subclass to use when constructing new
#: :class:`Context` objects in :meth:`myself` and :meth:`context_by_id`.
#: Permits :class:`Router` subclasses to extend the :class:`Context`
#: interface, as done in :class:`mitogen.parent.Router`.
context_class = Context
max_message_size = 128 * 1048576
#: When :data:`True`, permit children to only communicate with the current
#: context or a parent of the current context. Routing between siblings or
#: children of parents is prohibited, ensuring no communication is possible
#: between intentionally partitioned networks, such as when a program
#: simultaneously manipulates hosts spread across a corporate and a
#: production network, or production networks that are otherwise
#: air-gapped.
#:
#: Sending a prohibited message causes an error to be logged and a dead
#: message to be sent in reply to the errant message, if that message has
#: ``reply_to`` set.
#:
#: The value of :data:`unidirectional` becomes the default for the
#: :meth:`local() <mitogen.master.Router.local>` `unidirectional`
#: parameter.
unidirectional = False
duplicate_handle_msg = 'cannot register a handle that already exists'
refused_msg ='refused by policy'
invalid_handle_msg = 'invalid handle'
too_large_msg ='message too large (max %d bytes)'
respondent_disconnect_msg = 'the respondent Context has disconnected'
broker_exit_msg = 'Broker has exitted'
no_route_msg = 'no route to %r, my ID is %r'
unidirectional_msg = (
'routing mode prevents forward of message from context %d to '
'context %d via context %d'
)
def __init__(self, broker):
self.broker = broker
listen(broker, 'exit', self._on_broker_exit)
self._setup_logging()
self._write_lock = threading.Lock()
#: context ID -> Stream; must hold _write_lock to edit or iterate
self._stream_by_id = {}
#: List of contexts to notify of shutdown; must hold _write_lock
self._context_by_id = {}
self._last_handle = itertools.count(1000)
#: handle -> (persistent?, func(msg))
self._handle_map = {}
#: Context -> set { handle,.. }
self._handles_by_respondent = {}
self.add_handler(self._on_del_route, DEL_ROUTE)
def __repr__(self):
return 'Router(%r)' % (self.broker,)
def _setup_logging(self):
"""
This is done in the :class:`Router` constructor for historical reasons.
It must be called before ExternalContext logs its first messages, but
after logging has been setup. It must also be called when any router is
constructed for a consumer app.
"""
# Here seems as good a place as any.
global _v, _vv
_v = logging.getLogger().level <= logging.DEBUG
_vv = IOLOG.level <= logging.DEBUG
def _on_del_route(self, msg):
"""
Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the
corresponding :attr:`_context_by_id` member. This is replaced by
:class:`mitogen.parent.RouteMonitor` in an upgraded context.
"""
if msg.is_dead:
return
target_id_s, _, name = bytes_partition(msg.data, b(':'))
target_id = int(target_id_s, 10)
LOG.error('%r: deleting route to %s (%d)',
self, to_text(name), target_id)
context = self._context_by_id.get(target_id)
if context:
fire(context, 'disconnect')
else:
LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg)
def _on_stream_disconnect(self, stream):
notify = []
self._write_lock.acquire()
try:
for context in list(self._context_by_id.values()):
stream_ = self._stream_by_id.get(context.context_id)
if stream_ is stream:
del self._stream_by_id[context.context_id]
notify.append(context)
finally:
self._write_lock.release()
# Happens outside lock as e.g. RouteMonitor wants the same lock.
for context in notify:
context.on_disconnect()
def _on_broker_exit(self):
"""
Called prior to broker exit, informs callbacks registered with
:meth:`add_handler` the connection is dead.
"""
_v and LOG.debug('%r: broker has exitted', self)
while self._handle_map:
_, (_, func, _, _) = self._handle_map.popitem()
func(Message.dead(self.broker_exit_msg))
def myself(self):
"""
Return a :class:`Context` referring to the current process. Since
:class:`Context` is serializable, this is convenient to use in remote
function call parameter lists.
"""
return self.context_class(
router=self,
context_id=mitogen.context_id,
name='self',
)
def context_by_id(self, context_id, via_id=None, create=True, name=None):
"""
Return or construct a :class:`Context` given its ID. An internal
mapping of ID to the canonical :class:`Context` representing that ID,
so that :ref:`signals` can be raised.
This may be called from any thread, lookup and construction are atomic.
:param int context_id:
The context ID to look up.
:param int via_id:
If the :class:`Context` does not already exist, set its
:attr:`Context.via` to the :class:`Context` matching this ID.
:param bool create:
If the :class:`Context` does not already exist, create it.
:param str name:
If the :class:`Context` does not already exist, set its name.
:returns:
:class:`Context`, or return :data:`None` if `create` is
:data:`False` and no :class:`Context` previously existed.
"""
context = self._context_by_id.get(context_id)
if context:
return context
if create and via_id is not None:
via = self.context_by_id(via_id)
else:
via = None
self._write_lock.acquire()
try:
context = self._context_by_id.get(context_id)
if create and not context:
context = self.context_class(self, context_id, name=name)
context.via = via
self._context_by_id[context_id] = context
finally:
self._write_lock.release()
return context
def register(self, context, stream):
"""
Register a newly constructed context and its associated stream, and add
the stream's receive side to the I/O multiplexer. This method remains
public while the design has not yet settled.
"""
_v and LOG.debug('%s: registering %r to stream %r',
self, context, stream)
self._write_lock.acquire()
try:
self._stream_by_id[context.context_id] = stream
self._context_by_id[context.context_id] = context
finally:
self._write_lock.release()
self.broker.start_receive(stream)
listen(stream, 'disconnect', lambda: self._on_stream_disconnect(stream))
def stream_by_id(self, dst_id):
"""
Return the :class:`Stream` that should be used to communicate with
`dst_id`. If a specific route for `dst_id` is not known, a reference to
the parent context's stream is returned. If the parent is disconnected,
or when running in the master context, return :data:`None` instead.
This can be used from any thread, but its output is only meaningful
from the context of the :class:`Broker` thread, as disconnection or
replacement could happen in parallel on the broker thread at any
moment.
"""
return (
self._stream_by_id.get(dst_id) or
self._stream_by_id.get(mitogen.parent_id)
)
def del_handler(self, handle):
"""
Remove the handle registered for `handle`
:raises KeyError:
The handle wasn't registered.
"""
_, _, _, respondent = self._handle_map.pop(handle)
if respondent:
self._handles_by_respondent[respondent].discard(handle)
def add_handler(self, fn, handle=None, persist=True,
policy=None, respondent=None,
overwrite=False):
"""
Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
`handle` from this context. Unregister after one invocation if
`persist` is :data:`False`. If `handle` is :data:`None`, a new handle
is allocated and returned.
:param int handle:
If not :data:`None`, an explicit handle to register, usually one of
the ``mitogen.core.*`` constants. If unspecified, a new unused
handle will be allocated.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param mitogen.core.Context respondent:
Context that messages to this handle are expected to be sent from.
If specified, arranges for a dead message to be delivered to `fn`
when disconnection of the context is detected.
In future `respondent` will likely also be used to prevent other
contexts from sending messages to the handle.
:param function policy:
Function invoked as `policy(msg, stream)` where `msg` is a
:class:`mitogen.core.Message` about to be delivered, and `stream`
is the :class:`mitogen.core.Stream` on which it was received. The
function must return :data:`True`, otherwise an error is logged and
delivery is refused.
Two built-in policy functions exist:
* :func:`has_parent_authority`: requires the message arrived from a
parent context, or a context acting with a parent context's
authority (``auth_id``).
* :func:`mitogen.parent.is_immediate_child`: requires the
message arrived from an immediately connected child, for use in
messaging patterns where either something becomes buggy or
insecure by permitting indirect upstream communication.
In case of refusal, and the message's ``reply_to`` field is
nonzero, a :class:`mitogen.core.CallError` is delivered to the
sender indicating refusal occurred.
:param bool overwrite:
If :data:`True`, allow existing handles to be silently overwritten.
:return:
`handle`, or if `handle` was :data:`None`, the newly allocated
handle.
:raises Error:
Attempt to register a handle that was already registered.
"""
handle = handle or next(self._last_handle)
_vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
if handle in self._handle_map and not overwrite:
raise Error(self.duplicate_handle_msg)
self._handle_map[handle] = persist, fn, policy, respondent
if respondent:
if respondent not in self._handles_by_respondent:
self._handles_by_respondent[respondent] = set()
listen(respondent, 'disconnect',
lambda: self._on_respondent_disconnect(respondent))
self._handles_by_respondent[respondent].add(handle)
return handle
def _on_respondent_disconnect(self, context):
for handle in self._handles_by_respondent.pop(context, ()):
_, fn, _, _ = self._handle_map[handle]
fn(Message.dead(self.respondent_disconnect_msg))
del self._handle_map[handle]
def _maybe_send_dead(self, unreachable, msg, reason, *args):
"""
Send a dead message to either the original sender or the intended
recipient of `msg`, if the original sender was expecting a reply
(because its `reply_to` was set), otherwise assume the message is a
reply of some sort, and send the dead message to the original
destination.
:param bool unreachable:
If :data:`True`, the recipient is known to be dead or routing
failed due to a security precaution, so don't attempt to fallback
to sending the dead message to the recipient if the original sender
did not include a reply address.
:param mitogen.core.Message msg:
Message that triggered the dead message.
:param str reason:
Human-readable error reason.
:param tuple args:
Elements to interpolate with `reason`.
"""
if args:
reason %= args
LOG.debug('%r: %r is dead: %r', self, msg, reason)
if msg.reply_to and not msg.is_dead:
msg.reply(Message.dead(reason=reason), router=self)
elif not unreachable:
self._async_route(
Message.dead(
dst_id=msg.dst_id,
handle=msg.handle,
reason=reason,
)
)
def _invoke(self, msg, stream):
# IOLOG.debug('%r._invoke(%r)', self, msg)
try:
persist, fn, policy, respondent = self._handle_map[msg.handle]
except KeyError:
self._maybe_send_dead(True, msg, reason=self.invalid_handle_msg)
return
if respondent and not (msg.is_dead or
msg.src_id == respondent.context_id):
self._maybe_send_dead(True, msg,'reply from unexpected context')
return
if policy and not policy(msg, stream):
self._maybe_send_dead(True, msg, self.refused_msg)
return
if not persist:
self.del_handler(msg.handle)
try:
fn(msg)
except Exception:
LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn)
def _async_route(self, msg, in_stream=None):
"""
Arrange for `msg` to be forwarded towards its destination. If its
destination is the local context, then arrange for it to be dispatched
using the local handlers.
This is a lower overhead version of :meth:`route` that may only be
called from the :class:`Broker` thread.
:param Stream in_stream:
If not :data:`None`, the stream the message arrived on. Used for
performing source route verification, to ensure sensitive messages
such as ``CALL_FUNCTION`` arrive only from trusted contexts.
"""
_vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
if len(msg.data) > self.max_message_size:
self._maybe_send_dead(False, msg, self.too_large_msg % (
self.max_message_size,
))
return
parent_stream = self._stream_by_id.get(mitogen.parent_id)
src_stream = self._stream_by_id.get(msg.src_id, parent_stream)
# When the ingress stream is known, verify the message was received on
# the same as the stream we would expect to receive messages from the
# src_id and auth_id. This is like Reverse Path Filtering in IP, and
# ensures messages from a privileged context cannot be spoofed by a
# child.
if in_stream:
auth_stream = self._stream_by_id.get(msg.auth_id, parent_stream)
if in_stream!= auth_stream:
LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
self, msg.auth_id, in_stream, auth_stream, msg)
return
if msg.src_id!= msg.auth_id and in_stream!= src_stream:
LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
self, msg.src_id, in_stream, src_stream, msg)
return
# If the stream's MitogenProtocol has auth_id set, copy it to the
# message. This allows subtrees to become privileged by stamping a
# parent's context ID. It is used by mitogen.unix to mark client
# streams (like Ansible WorkerProcess) as having the same rights as
# the parent.
if in_stream.protocol.auth_id is not None:
msg.auth_id = in_stream.protocol.auth_id
if in_stream.protocol.on_message is not None:
in_stream.protocol.on_message(in_stream, msg)
# Record the IDs the source ever communicated with.
in_stream.protocol.egress_ids.add(msg.dst_id)
if msg.dst_id == mitogen.context_id:
return self._invoke(msg, in_stream)
out_stream = self._stream_by_id.get(msg.dst_id)
if (not out_stream) and (parent_stream!= src_stream or not in_stream):
# No downstream route exists. The message could be from a child or
# ourselves for a parent, in which case we must forward it
# upstream, or it could be from a parent for a dead child, in which
# case its src_id/auth_id would fail verification if returned to
# the parent, so in that case reply with a dead message instead.
out_stream = parent_stream
if out_stream is None:
self._maybe_send_dead(True, msg, self.no_route_msg,
msg.dst_id, mitogen.context_id)
return
if in_stream and self.unidirectional and not \
(in_stream.protocol.is_privileged or
out_stream.protocol.is_privileged):
self._maybe_send_dead(True, msg, self.unidirectional_msg,
in_stream.protocol.remote_id,
out_stream.protocol.remote_id,
mitogen.context_id)
return
out_stream.protocol._send(msg)
def route(self, msg):
"""
Arrange for the :class:`Message` `msg` to be delivered to its
destination using any relevant downstream context, or if none is found,
by forwarding the message upstream towards the master context. If `msg`
is destined for the local context, it is dispatched using the handles
registered with :meth:`add_handler`.
This may be called from any thread.
"""
self.broker.defer(self._async_route, msg)
class NullTimerList(object):
def get_timeout(self):
return None
class Broker(object):
"""
Responsible for handling I/O multiplexing in a private thread.
**Note:** This somewhat limited core version is used by children. The
master subclass is documented below.
"""
poller_class = Poller
_waker = None
_thread = None
# :func:`mitogen.parent._upgrade_broker` replaces this with
# :class:`mitogen.parent.TimerList` during upgrade.
timers = NullTimerList()
#: Seconds grace to allow :class:`streams <Stream>` to shutdown gracefully
#: before force-disconnecting them during :meth:`shutdown`.
shutdown_timeout = 3.0
def __init__(self, poller_class=None, activate_compat=True):
self._alive = True
self._exitted = False
self._waker = Waker.build_stream(self)
#: Arrange for `func(\*args, \**kwargs)` to be executed on the broker
#: thread, or immediately if the current thread is the broker thread.
#: Safe to call from any thread.
self.defer = self._waker.protocol.defer
self.poller = self.poller_class()
self.poller.start_receive(
self._waker.receive_side.fd,
(self._waker.receive_side, self._waker.on_receive)
)
self._thread = threading.Thread(
target=self._broker_main,
name='mitogen.broker'
)
self._thread.start()
if activate_compat:
self._py24_25_compat()
def _py24_25_compat(self):
"""
Python 2.4/2.5 have grave difficulties with threads/fork. We
mandatorily quiesce all running threads during fork using a
monkey-patch there.
"""
if sys.version_info < (2, 6):
# import_module() is used to avoid dep scanner.
os_fork = import_module('mitogen.os_fork')
os_fork._notice_broker_or_pool(self)
def start_receive(self, stream):
"""
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
ready for reading. Safe to call from any thread. When the associated
file descriptor becomes ready for reading,
:meth:`BasicStream.on_receive` will be called.
"""
_vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
side = stream.receive_side
assert side and not side.closed
self.defer(self.poller.start_receive,
side.fd, (side, stream.on_receive))
def stop_receive(self, stream):
"""
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as not
ready for reading. Safe to call from any thread.
"""
_vv and IOLOG.debug('%r.stop_receive(%r)', self, stream)
self.defer(self.poller.stop_receive, stream.receive_side.fd)
def _start_transmit(self, stream):
"""
Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as
ready for writing. Must only be called from the Broker thread. When the
associated file descriptor becomes ready for writing,
:meth:`BasicStream.on_transmit` will be called.
"""
_vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
side = stream.transmit_side
assert side and not side.closed
self.poller.start_transmit(side.fd, (side, stream.on_transmit))
def _stop_transmit(self, stream):
"""
Mark the :attr:`transmit_side <Stream.receive_side>` on `stream` as not
ready for writing.
"""
_vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream)
self.poller.stop_transmit(stream.transmit_side.fd)
def keep_alive(self):
"""
Return :data:`True` if any reader's :attr:`Side.keep_alive` attribute
is :data:`True`, or any :class:`Context` is still registered that is
not the master. Used to delay shutdown while some important work is in
progress (e.g. log draining).
"""
it = (side.keep_alive for (_, (side, _)) in self.poller.readers)
return sum(it, 0) > 0 or self.timers.get_timeout() is not None
def defer_sync(self, func):
"""
Arrange for `func()` to execute on :class:`Broker` thread, blocking the
current thread until a result or exception is available.
:returns:
Return value of `func()`.
"""
latch = Latch()
def wrapper():
try:
latch.put(func())
except Exception:
latch.put(sys.exc_info()[1])
self.defer(wrapper)
res = latch.get()
if isinstance(res, Exception):
raise res
return res
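# Example: a sketch of running a function on the Broker thread and waiting
# for its result; `stream` is illustrative (see ExternalContext.detach()
# below for a real use of this pattern):
#
#     pending = broker.defer_sync(stream.protocol.pending_bytes)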
def _call(self, stream, func):
"""
Call `func(self)`, catching any exception that might occur, logging it,
and force-disconnecting the related `stream`.
"""
try:
func(self)
except Exception:
LOG.exception('%r crashed', stream)
stream.on_disconnect(self)
def _loop_once(self, timeout=None):
"""
Execute a single :class:`Poller` wait, dispatching any IO events that
caused the wait to complete.
:param float timeout:
If not :data:`None`, maximum time in seconds to wait for events.
"""
_vv and IOLOG.debug('%r._loop_once(%r, %r)',
self, timeout, self.poller)
timer_to = self.timers.get_timeout()
if timeout is None:
timeout = timer_to
elif timer_to is not None and timer_to < timeout:
timeout = timer_to
#IOLOG.debug('readers =\n%s', pformat(self.poller.readers))
#IOLOG.debug('writers =\n%s', pformat(self.poller.writers))
for side, func in self.poller.poll(timeout):
self._call(side.stream, func)
if timer_to is not None:
self.timers.expire()
def _broker_exit(self):
"""
Forcefully call :meth:`Stream.on_disconnect` on any streams that failed
to shut down gracefully, then discard the :class:`Poller`.
"""
for _, (side, _) in self.poller.readers + self.poller.writers:
LOG.debug('%r: force disconnecting %r', self, side)
side.stream.on_disconnect(self)
self.poller.close()
def _broker_shutdown(self):
"""
Invoke :meth:`Stream.on_shutdown` for every active stream, then allow
up to :attr:`shutdown_timeout` seconds for the streams to unregister
themselves, logging an error if any did not unregister during the grace
period.
"""
for _, (side, _) in self.poller.readers + self.poller.writers:
self._call(side.stream, side.stream.on_shutdown)
deadline = now() + self.shutdown_timeout
while self.keep_alive() and now() < deadline:
self._loop_once(max(0, deadline - now()))
if self.keep_alive():
LOG.error('%r: pending work still existed %d seconds after '
'shutdown began. This may be due to a timer that is yet '
'to expire, or a child connection that did not fully '
'shut down.', self, self.shutdown_timeout)
def _do_broker_main(self):
"""
Broker thread main function. Dispatches IO events until
:meth:`shutdown` is called.
"""
# For Python 2.4, no way to retrieve ident except on thread.
self._waker.protocol.broker_ident = thread.get_ident()
try:
while self._alive:
self._loop_once()
fire(self, 'before_shutdown')
fire(self,'shutdown')
self._broker_shutdown()
except Exception:
e = sys.exc_info()[1]
LOG.exception('broker crashed')
syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,))
syslog.closelog() # prevent test 'fd leak'.
self._alive = False # Ensure _alive is consistent on crash.
self._exitted = True
self._broker_exit()
def _broker_main(self):
try:
_profile_hook('mitogen.broker', self._do_broker_main)
finally:
# 'finally' to ensure _on_broker_exit() can always SIGTERM.
fire(self, 'exit')
def shutdown(self):
"""
Request broker gracefully disconnect streams and stop. Safe to call
from any thread.
"""
_v and LOG.debug('%r: shutting down', self)
def _shutdown():
self._alive = False
if self._alive and not self._exitted:
self.defer(_shutdown)
def join(self):
"""
Wait for the broker to stop, expected to be called after
:meth:`shutdown`.
"""
self._thread.join()
def __repr__(self):
return 'Broker(%04x)' % (id(self) & 0xffff,)
class Dispatcher(object):
"""
Implementation of the :data:`CALL_FUNCTION` handle for a child context.
Listens on the child's main thread for messages sent by
:class:`mitogen.parent.CallChain` and dispatches the function calls they
describe.
If a :class:`mitogen.parent.CallChain` sending a message is in pipelined
mode, any exception that occurs is recorded, and causes all subsequent
calls with the same `chain_id` to fail with the same exception.
"""
_service_recv = None
def __repr__(self):
return 'Dispatcher'
def __init__(self, econtext):
self.econtext = econtext
#: Chain ID -> CallError if prior call failed.
self._error_by_chain_id = {}
self.recv = Receiver(
router=econtext.router,
handle=CALL_FUNCTION,
policy=has_parent_authority,
)
#: The :data:`CALL_SERVICE` :class:`Receiver` that will eventually be
#: reused by :class:`mitogen.service.Pool`, should it ever be loaded.
#: This is necessary for race-free reception of all service requests
#: delivered regardless of whether the stub or real service pool are
#: loaded. See #547 for related sorrows.
Dispatcher._service_recv = Receiver(
router=econtext.router,
handle=CALL_SERVICE,
policy=has_parent_authority,
)
self._service_recv.notify = self._on_call_service
listen(econtext.broker,'shutdown', self._on_broker_shutdown)
def _on_broker_shutdown(self):
if self._service_recv.notify == self._on_call_service:
self._service_recv.notify = None
self.recv.close()
@classmethod
@takes_econtext
def forget_chain(cls, chain_id, econtext):
econtext.dispatcher._error_by_chain_id.pop(chain_id, None)
def _parse_request(self, msg):
data = msg.unpickle(throw=False)
_v and LOG.debug('%r: dispatching %r', self, data)
chain_id, modname, klass, func, args, kwargs = data
obj = import_module(modname)
if klass:
obj = getattr(obj, klass)
fn = getattr(obj, func)
if getattr(fn,'mitogen_takes_econtext', None):
kwargs.setdefault('econtext', self.econtext)
if getattr(fn,'mitogen_takes_router', None):
kwargs.setdefault('router', self.econtext.router)
return chain_id, fn, args, kwargs
def _dispatch_one(self, msg):
try:
chain_id, fn, args, kwargs = self._parse_request(msg)
except Exception:
return None, CallError(sys.exc_info()[1])
if chain_id in self._error_by_chain_id:
return chain_id, self._error_by_chain_id[chain_id]
try:
return chain_id, fn(*args, **kwargs)
except Exception:
e = CallError(sys.exc_info()[1])
if chain_id is not None:
self._error_by_chain_id[chain_id] = e
return chain_id, e
def _on_call_service(self, recv):
"""
Notifier for the :data:`CALL_SERVICE` receiver. This is called on the
:class:`Broker` thread for any service messages arriving at this
context, for as long as no real service pool implementation is loaded.
In order to safely bootstrap the service pool implementation a sentinel
message is enqueued on the :data:`CALL_FUNCTION` receiver in order to
wake the main thread, where the importer can run without any
possibility of suffering deadlock due to concurrent uses of the
importer.
Should the main thread be blocked indefinitely, preventing the import
from ever running, if it is blocked waiting on a service call, then it
means :mod:`mitogen.service` has already been imported and
:func:`mitogen.service.get_or_create_pool` has already run, meaning the
service pool is already active and the duplicate initialization was not
needed anyway.
#547: This trickery is needed to avoid the alternate option of spinning
a temporary thread to import the service pool, which could deadlock if
a custom import hook executing on the main thread (under the importer
lock) would block waiting for some data that was in turn received by a
service. Main thread import lock can't be released until service is
running, service cannot satisfy request until import lock is released.
"""
self.recv._on_receive(Message(handle=STUB_CALL_SERVICE))
def _init_service_pool(self):
import mitogen.service
mitogen.service.get_or_create_pool(router=self.econtext.router)
def _dispatch_calls(self):
for msg in self.recv:
if msg.handle == STUB_CALL_SERVICE:
if msg.src_id == mitogen.context_id:
self._init_service_pool()
continue
chain_id, ret = self._dispatch_one(msg)
_v and LOG.debug('%r: %r -> %r', self, msg, ret)
if msg.reply_to:
msg.reply(ret)
elif isinstance(ret, CallError) and chain_id is None:
LOG.error('No-reply function call failed: %s', ret)
def run(self):
if self.econtext.config.get('on_start'):
self.econtext.config['on_start'](self.econtext)
_profile_hook('mitogen.child_main', self._dispatch_calls)
class ExternalContext(object):
"""
External context implementation.
This class contains the main program implementation for new children. It is
responsible for setting up everything about the process environment, import
hooks, standard IO redirection, logging, configuring a :class:`Router` and
:class:`Broker`, and finally arranging for :class:`Dispatcher` to take over
the main thread after initialization is complete.
.. attribute:: broker
The :class:`mitogen.core.Broker` instance.
.. attribute:: context
The :class:`mitogen.core.Context` instance.
.. attribute:: channel
The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION`
requests are received.
.. attribute:: importer
The :class:`mitogen.core.Importer` instance.
.. attribute:: stdout_log
The :class:`IoLogger` connected to :data:`sys.stdout`.
.. attribute:: stderr_log
The :class:`IoLogger` connected to :data:`sys.stderr`.
"""
detached = False
def __init__(self, config):
self.config = config
def _on_broker_exit(self):
if not self.config['profiling']:
os.kill(os.getpid(), signal.SIGTERM)
def _on_shutdown_msg(self, msg):
if not msg.is_dead:
_v and LOG.debug('shutdown request from context %d', msg.src_id)
self.broker.shutdown()
def _on_parent_disconnect(self):
if self.detached:
mitogen.parent_ids = []
mitogen.parent_id = None
LOG.info('Detachment complete')
else:
_v and LOG.debug('parent stream is gone, dying.')
self.broker.shutdown()
def detach(self):
self.detached = True
stream = self.router.stream_by_id(mitogen.parent_id)
if stream: # not double-detach()'d
os.setsid()
self.parent.send_await(Message(handle=DETACHING))
LOG.info('Detaching from %r; parent is %s', stream, self.parent)
for x in range(20):
pending = self.broker.defer_sync(stream.protocol.pending_bytes)
if not pending:
break
time.sleep(0.05)
if pending:
LOG.error('Stream had %d bytes after 2000ms', pending)
self.broker.defer(stream.on_disconnect, self.broker)
def _setup_master(self):
Router.max_message_size = self.config['max_message_size']
if self.config['profiling']:
enable_profiling()
self.broker = Broker(activate_compat=False)
self.router = Router(self.broker)
self.router.debug = self.config.get('debug', False)
self.router.unidirectional = self.config['unidirectional']
self.router.add_handler(
fn=self._on_shutdown_msg,
handle=SHUTDOWN,
policy=has_parent_authority,
)
self.master = Context(self.router, 0,'master')
parent_id = self.config['parent_ids'][0]
if parent_id == 0:
self.parent = self.master
else:
self.parent = Context(self.router, parent_id, 'parent')
in_fd = self.config.get('in_fd', 100)
in_fp = os.fdopen(os.dup(in_fd), 'rb', 0)
os.close(in_fd)
out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0)
self.stream = MitogenProtocol.build_stream(
self.router,
parent_id,
local_id=self.config['context_id'],
parent_ids=self.config['parent_ids']
)
self.stream.accept(in_fp, out_fp)
self.stream.name = 'parent'
self.stream.receive_side.keep_alive = False
listen(self.stream, 'disconnect', self._on_parent_disconnect)
listen(self.broker, 'exit', self._on_broker_exit)
def _reap_first_stage(self):
try:
os.wait() # Reap first stage.
except OSError:
pass # No first stage exists (e.g. fakessh)
def _setup_logging(self):
self.log_handler = LogHandler(self.master)
root = logging.getLogger()
root.setLevel(self.config['log_level'])
root.handlers = [self.log_handler]
if self.config['debug']:
enable_debug_logging()
def _setup_importer(self):
importer = self.config.get('importer')
if importer:
importer._install_handler(self.router)
importer._context = self.parent
else:
core_src_fd = self.config.get('core_src_fd', 101)
if core_src_fd:
fp = os.fdopen(core_src_fd, 'rb', 0)
try:
core_src = fp.read()
# Strip "ExternalContext.main()" call from last line.
core_src = b('\n').join(core_src.splitlines()[:-1])
finally:
fp.close()
else:
core_src = None
importer = Importer(
self.router,
self.parent,
core_src,
self.config.get('whitelist', ()),
self.config.get('blacklist', ()),
)
self.importer = importer
self.router.importer = importer
sys.meta_path.insert(0, self.importer)
def _setup_package(self):
global mitogen
mitogen = imp.new_module('mitogen')
mitogen.__package__ ='mitogen'
mitogen.__path__ = []
mitogen.__loader__ = self.importer
mitogen.main = lambda *args, **kwargs: (lambda func: None)
mitogen.core = sys.modules['__main__']
mitogen.core.__file__ = 'x/mitogen/core.py' # For inspect.getsource()
mitogen.core.__loader__ = self.importer
sys.modules['mitogen'] = mitogen
sys.modules['mitogen.core'] = mitogen.core
del sys.modules['__main__']
def _setup_globals(self):
mitogen.is_master = False
mitogen.__version__ = self.config['version']
mitogen.context_id = self.config['context_id']
mitogen.parent_ids = self.config['parent_ids'][:]
mitogen.parent_id = mitogen.parent_ids[0]
def _nullify_stdio(self):
"""
Open /dev/null to replace stdio temporarily. In case of odd startup,
assume we may be allocated a standard handle.
"""
for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)):
fd = os.open('/dev/null', mode)
if fd!= stdfd:
os.dup2(fd, stdfd)
os.close(fd)
def _preserve_tty_fp(self):
"""
#481: when stderr is a TTY due to being started via tty_create_child()
or hybrid_tty_create_child(), and some privilege escalation tool like
prehistoric versions of sudo exec this process over the top of itself,
there is nothing left to keep the slave PTY open after we replace our
stdio. Therefore if stderr is a TTY, keep around a permanent dup() to
avoid receiving SIGHUP.
"""
try:
if os.isatty(2):
self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0)
set_cloexec(self.reserve_tty_fp.fileno())
except OSError:
pass
def _setup_stdio(self):
self._preserve_tty_fp()
# When sys.stdout was opened by the runtime, overwriting it will not
# close FD 1. However when forking from a child that previously used
# fdopen(), overwriting it /will/ close FD 1. So we must swallow the
# close before IoLogger overwrites FD 1, otherwise its new FD 1 will be
# clobbered. Additionally, stdout must be replaced with /dev/null prior
# to stdout.close(), since if block buffering was active in the parent,
# any pre-fork buffered data will be flushed on close(), corrupting the
# connection to the parent.
self._nullify_stdio()
sys.stdout.close()
self._nullify_stdio()
self.loggers = []
for name, fd in (('stdout', 1), ('stderr', 2)):
log = IoLoggerProtocol.build_stream(name, fd)
self.broker.start_receive(log)
self.loggers.append(log)
# Reopen with line buffering.
sys.stdout = os.fdopen(1, 'w', 1)
def main(self):
self._setup_master()
try:
try:
self._setup_logging()
self._setup_importer()
self._reap_first_stage()
if self.config.get('setup_package', True):
self._setup_package()
self._setup_globals()
if self.config.get('setup_stdio', True):
self._setup_stdio()
self.dispatcher = Dispatcher(self)
self.router.register(self.parent, self.stream)
self.router._setup_logging()
_v and LOG.debug('Python version is %s', sys.version)
_v and LOG.debug('Parent is context %r (%s); my ID is %r',
self.parent.context_id, self.parent.name,
mitogen.context_id)
_v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r',
os.getpid(), os.getppid(), os.geteuid(),
os.getuid(), os.getegid(), os.getgid(),
socket.gethostname())
sys.executable = os.environ.pop('ARGV0', sys.executable)
_v and LOG.debug('Recovered sys.executable: %r', sys.executable)
if self.config.get('send_ec2', True):
self.stream.transmit_side.write(b('MITO002\n'))
self.broker._py24_25_compat()
self.log_handler.uncork()
self.dispatcher.run()
_v and LOG.debug('ExternalContext.main() normal exit')
except KeyboardInterrupt:
LOG.debug('KeyboardInterrupt received, exiting gracefully.')
except BaseException:
LOG.exception('ExternalContext.main() crashed')
raise
finally:
self.broker.shutdown() |
|
ethereum__web3.py | contracts.rst | Module doc | Generate documentation for this code | MIT License | ethereum__web3.py/docs/contracts.rst | [
"ethereum__web3.py/web3/contract.py"
] | Contracts
Contract Factories
The Contract class is not intended to be used or instantiated
directly. Instead you should use the web3.eth.contract(...) method to
generate the contract factory classes for your contracts.
Contract Factories provide an interface for deploying and interacting
with Ethereum smart contracts.
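For example, assuming the contract's ABI and bytecode are available from the
solc compiler output as contract_abi and contract_code (hypothetical names),
a factory could be created along these lines; the keyword names follow the
factory properties documented below (abi, code, code_runtime, source), so
treat this as a sketch rather than a definitive call signature:
>>> MyContract = web3.eth.contract(
...     abi=contract_abi,
...     code=contract_code,
... )
>>> # Bind the factory to an already deployed instance by its address.
>>> my_contract = MyContract(address='0xdc3a9db694bcdd55ebae4a89b22ac6d12b3f0c24')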
Properties
Each Contract Factory exposes the following properties.
The hexadecimal encoded 20-byte address of the contract. May be None
if not provided during factory creation.
The contract ABI array.
The contract bytecode string. May be None if not provided during
factory creation.
The runtime part of the contract bytecode string. May be None if not
provided during factory creation.
The contract source code string. May be None if not
provided during factory creation.
Methods
Each Contract Factory exposes the following methods.
Construct and send a transaction to deploy the contract.
If provided, transaction should be a dictionary conforming to the
web3.eth.sendTransaction(transaction) method. This value may not
contain the keys data or to.
If the contract takes constructor arguments they should be provided as
a list via the args parameter.
If a gas value is not provided, the gas for the deployment transaction
will be estimated using the web3.eth.estimateGas() method.
Returns the transaction hash for the deploy transaction.
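As a minimal sketch, assuming a token_contract factory created with both abi
and code, a constructor that takes a name and a decimal count (both
illustrative), and an unlocked coinbase account:
>>> txn_hash = token_contract.deploy(
...     transaction={'from': web3.eth.coinbase},
...     args=('My Token', 18),
... )
>>> # txn_hash identifies the deployment transaction; the contract address
>>> # becomes available once that transaction has been mined.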
Execute the specified function by sending a new public transaction.
This is executed in two steps.
The first portion of this function call transact(transaction) takes a
single parameter which should be a python dictionary conforming to the
same format as the web3.eth.sendTransaction(transaction) method. This
dictionary may not contain the keys data or to.
The second portion of the function call myMethod(*args, **kwargs)
selects the appropriate contract function based on the name and
provided argument. Arguments can be provided as positional arguments,
keyword arguments, or a mix of the two.
Returns the transaction hash.
>>> token_contract.transact().transfer(web3.eth.accounts[1], 12345)
"0x4e3a3754410177e6937ef1f84bba68ea139e8d1a2258c5f85db9f1cd715a1bdd"
Call a contract function, executing the transaction locally using the
eth_call API. This will not create a new public transaction.
This method behaves the same as the Contract.transact
method, with transaction details being passed into the first portion
of the function call, and function arguments being passed into the
second portion.
Returns the return value of the executed function.
>>> my_contract.call().multiply7(3)
21
>>> token_contract.call({'from': web3.eth.coinbase}).myBalance()
12345 # the token balance for `web3.eth.coinbase`
>>> token_contract.call({'from': web3.eth.accounts[1]}).myBalance()
54321 # the token balance for the account `web3.eth.accounts[1]`
Estimate the gas cost of a contract function call. The transaction is
executed locally using the eth_estimateGas API and will not create a new
public transaction.
This method behaves the same as the Contract.transact
method, with transaction details being passed into the first portion
of the function call, and function arguments being passed into the
second portion.
Returns the amount of gas consumed which can be used as a gas estimate
for executing this transaction publicly.
>>> my_contract.estimateGas().multiply7(3)
42650
Events
Creates a new web3.utils.filters.LogFilter instance.
The event_name parameter should be the name of the contract event you
want to filter on.
If provided, filter_params should be a dictionary specifying
additional filters for log entries. The following keys are supported.
- filters: dictionary - (optional) Dictionary keys should be
argument names for the Event arguments. Dictionary values should
be the value you want to filter on, or a list of values to be
filtered on. Lists of values will match log entries whose argument
matches any value in the list.
- fromBlock: integer/tag - (optional, default: "latest") Integer
  block number, or one of the tags "latest" (the last mined block),
  "pending" (transactions not yet mined) or "earliest".
- toBlock: integer/tag - (optional, default: "latest") Integer block
  number, or one of the tags "latest" (the last mined block),
  "pending" (transactions not yet mined) or "earliest".
- address: string or list of strings, each 20 Bytes - (optional)
Contract address or a list of addresses from which logs should
originate.
- topics: list of 32 byte strings or null - (optional) Array of
topics that should be used for filtering. Topics are
order-dependent. This parameter can also be a list of topic lists
in which case filtering will match any of the provided topic
arrays.
The event topic for the event specified by event_name will be added to
the filter_params['topics'] list.
If the Contract.address attribute for this contract is non-null, the
contract address will be added to the filter_params.
If provided, the *callbacks parameter should be callables which accept
a single Event Log object. When callbacks are provided, the filter
will be started. Otherwise the filter will be returned without
starting it.
The Event Log Object is a python dictionary with the following keys:
- args: Dictionary - The arguments coming from the event.
- event: String - The event name.
- logIndex: Number - integer of the log index position in the block.
- transactionIndex: Number - integer of the transaction's index
  position that the log was created from.
- transactionHash: String, 32 Bytes - hash of the transaction this
  log was created from.
- address: String, 20 Bytes - address from which this log
originated.
- blockHash: String, 32 Bytes - hash of the block where this log was
  in. null when it is pending.
- blockNumber: Number - the block number where this log was in. null
  when it is pending.
>>> transfer_filter = my_token_contract.on('Transfer', {'filters': {'_from': '0xdc3a9db694bcdd55ebae4a89b22ac6d12b3f0c24'}})
>>> transfer_filter.get()
[...] # array of Event Log Objects that match the filter.
>>> transfer_filter.watch(my_callback)
# now `my_callback` will be called each time a new matching event log
# is encountered.
Creates a new web3.utils.filters.PastLogFilter instance which will
match historical event logs.
All parameters behave the same as for the Contract.on method.
>>> transfer_filter = my_token_contract.pastEvents('Transfer', {'filters': {'_from': '0xdc3a9db694bcdd55ebae4a89b22ac6d12b3f0c24'}})
>>> transfer_filter.get()
[...] # array of Event Log Objects that match the filter for all historical events.
| """Interaction with smart contracts over Web3 connector.
"""
import functools
from eth_abi import (
encode_abi,
decode_abi,
)
from eth_abi.exceptions import (
EncodingError,
DecodingError,
)
from web3.exceptions import (
BadFunctionCallOutput,
)
from web3.utils.encoding import (
encode_hex,
)
from web3.utils.exception import (
raise_from,
)
from web3.utils.formatting import (
add_0x_prefix,
remove_0x_prefix,
)
from web3.utils.string import (
force_bytes,
coerce_return_to_text,
force_obj_to_bytes,
)
from web3.utils.functional import (
compose,
)
from web3.utils.abi import (
filter_by_type,
filter_by_name,
filter_by_argument_count,
filter_by_argument_name,
filter_by_encodability,
get_abi_input_types,
get_abi_output_types,
get_constructor_abi,
function_abi_to_4byte_selector,
merge_args_and_kwargs,
normalize_return_type,
check_if_arguments_can_be_encoded,
)
from web3.utils.decorators import (
combomethod,
)
from web3.utils.events import (
get_event_data,
)
from web3.utils.filters import (
construct_event_filter_params,
PastLogFilter,
)
class Contract(object):
"""Base class for Contract proxy classes.
First you need to create your Contract classes using
:func:`construct_contract_factory` that takes compiled Solidity contract
ABI definitions as input. The created class object will be a subclass of
this base class.
After you have your Contract proxy class created you can interact with
smart contracts
* Create a Contract proxy object for an existing deployed smart contract by
its address using :meth:`__init__`
* Deploy a new smart contract using :py:meth:`Contract.deploy`
"""
# set during class construction
web3 = None
# class properties (overridable at instance level)
_abi = None
_code = None
_code_runtime = None
_source = None
# instance level properties
address = None
def __init__(self,
abi=None,
address=None,
code=None,
code_runtime=None,
source=None):
"""Create a new smart contract proxy object.
:param address: Contract address as 0x hex string
:param abi: Override class level definition
:param code: Override class level definition
:param code_runtime: Override class level definition
:param source: Override class level definition
"""
if self.web3 is None:
raise AttributeError(
'The `Contract` class has not been initialized. Please use the '
'`web3.contract` interface to create your contract class.'
)
if abi is not None:
self._abi = abi
if code is not None:
self._code = code
if code_runtime is not None:
self._code_runtime = code_runtime
if source is not None:
self._source = source
self.address = address
@property
def abi(self):
if self._abi is not None:
return self._abi
# TODO: abi can be derived from the contract source.
raise AttributeError("No contract abi was specified for thes contract")
@property
def code(self):
if self._code is not None:
return self._code
# TODO: code can be derived from the contract source.
raise AttributeError("No contract code was specified for thes contract")
@property
def code_runtime(self):
if self._code_runtime is not None:
return self._code_runtime
# TODO: runtime can be derived from the contract source.
raise AttributeError(
"No contract code_runtime was specified for thes contract"
)
@property
def source(self):
if self._source is not None:
return self._source
raise AttributeError("No contract source was specified for thes contract")
@classmethod
def deploy(cls, transaction=None, args=None, kwargs=None):
"""
Deploys the contract on a blockchain.
Example:
.. code-block:: python
>>> MyContract.deploy(
transaction={
'from': web3.eth.accounts[1],
'value': 12345,
},
args=('DGD', 18),
)
'0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060'
:param transaction: Transaction parameters for the deployment
transaction as a dict
:param args: The contract constructor arguments as positional arguments
:param kwargs: The contract constructor arguments as keyword arguments
        :return: hexadecimal transaction hash of the deployment
transaction
"""
if transaction is None:
deploy_transaction = {}
else:
deploy_transaction = dict(**transaction)
if not cls.code:
raise ValueError(
"Cannot deploy a contract that does not have 'code' associated "
"with it"
)
if 'data' in deploy_transaction:
raise ValueError(
"Cannot specify `data` for contract deployment"
)
if 'to' in deploy_transaction:
raise ValueError(
"Cannot specify `to` for contract deployment"
)
deploy_transaction['data'] = cls._encode_constructor_data(args, kwargs)
# TODO: handle asynchronous contract creation
txn_hash = cls.web3.eth.sendTransaction(deploy_transaction)
return txn_hash
#
# Public API
#
@classmethod
@coerce_return_to_text
def encodeABI(cls, fn_name, args=None, kwargs=None, data=None):
"""
encodes the arguments using the Ethereum ABI for the contract function
        that matches the given name and arguments.
:param data: defaults to function selector
"""
fn_abi, fn_selector, fn_arguments = cls._get_function_info(
fn_name, args, kwargs,
)
if data is None:
data = fn_selector
return cls._encode_abi(fn_abi, fn_arguments, data)
@combomethod
def on(self, event_name, filter_params=None, *callbacks):
"""
register a callback to be triggered on the appropriate events.
"""
if filter_params is None:
filter_params = {}
argument_filters = filter_params.pop('filter', {})
argument_filter_names = list(argument_filters.keys())
event_abi = self._find_matching_event_abi(
event_name,
argument_filter_names,
)
data_filter_set, event_filter_params = construct_event_filter_params(
event_abi,
contract_address=self.address,
argument_filters=argument_filters,
**filter_params
)
log_data_extract_fn = functools.partial(get_event_data, event_abi)
log_filter = self.web3.eth.filter(event_filter_params)
log_filter.set_data_filters(data_filter_set)
log_filter.log_entry_formatter = log_data_extract_fn
log_filter.filter_params = event_filter_params
if callbacks:
log_filter.watch(*callbacks)
return log_filter
@combomethod
def pastEvents(self, event_name, filter_params=None, *callbacks):
"""
register a callback to be triggered on all past events.
"""
if filter_params is None:
filter_params = {}
event_filter_params = {}
event_filter_params.update(filter_params)
event_filter_params.setdefault('fromBlock', 'earliest')
event_filter_params.setdefault('toBlock', self.web3.eth.blockNumber)
log_filter = self.on(
event_name,
filter_params=event_filter_params,
)
past_log_filter = PastLogFilter(
web3=log_filter.web3,
filter_id=log_filter.filter_id,
log_entry_formatter=log_filter.log_entry_formatter,
data_filter_set=log_filter.data_filter_set,
)
past_log_filter.filter_params = log_filter.filter_params
if callbacks:
past_log_filter.watch(*callbacks)
return past_log_filter
@combomethod
def estimateGas(self, transaction=None):
"""
Estimate the gas for a call
"""
if transaction is None:
estimate_transaction = {}
else:
estimate_transaction = dict(**transaction)
if 'data' in estimate_transaction:
raise ValueError("Cannot set data in call transaction")
if 'to' in estimate_transaction:
raise ValueError("Cannot set to in call transaction")
if self.address:
estimate_transaction.setdefault('to', self.address)
estimate_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in estimate_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.estimateGas` from a contract factory "
"you must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
contract = self
class Caller(object):
def __getattr__(self, function_name):
callable_fn = functools.partial(
estimate_gas_for_function,
contract,
function_name,
estimate_transaction,
)
return callable_fn
return Caller()
@combomethod
def call(self, transaction=None):
"""
Execute a contract function call using the `eth_call` interface.
This method prepares a ``Caller`` object that exposes the contract
        functions and public variables as callable Python functions.
Reading a public ``owner`` address variable example:
.. code-block:: python
ContractFactory = construct_contract_factory(
web3=web3,
abi=wallet_contract_definition["abi"]
)
# Not a real contract address
contract = contract_class("0x2f70d3d26829e412a602e83fe8eebf80255aeea5")
# Read "owner" public variable
addr = contract.call().owner()
:param transaction: Dictionary of transaction info for web3 interface
:return: ``Caller`` object that has contract public functions
and variables exposed as Python methods
"""
if transaction is None:
call_transaction = {}
else:
call_transaction = dict(**transaction)
if 'data' in call_transaction:
raise ValueError("Cannot set data in call transaction")
if self.address:
call_transaction.setdefault('to', self.address)
call_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in call_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.call` from a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
contract = self
class Caller(object):
def __getattr__(self, function_name):
callable_fn = functools.partial(
call_contract_function,
contract,
function_name,
call_transaction,
)
return callable_fn
return Caller()
@combomethod
def transact(self, transaction=None):
"""
Execute a contract function call using the `eth_sendTransaction`
interface.
You should specify the account that pays the gas for this transaction
in `transaction`. If no account is specified the coinbase account of
web3 interface is used.
Example:
.. code-block:: python
# Assume we have a Wallet contract with the following methods.
# * Wallet.deposit() # deposits to `msg.sender`
# * Wallet.deposit(address to) # deposits to the account indicated
# by the `to` parameter.
# * Wallet.withdraw(address amount)
>>> wallet = Wallet(address='0xdc3a9db694bcdd55ebae4a89b22ac6d12b3f0c24')
# Deposit to the `web3.eth.coinbase` account.
>>> wallet.transact({'value': 12345}).deposit()
'0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060'
# Deposit to some other account using funds from `web3.eth.coinbase`.
>>> wallet.transact({'value': 54321}).deposit(web3.eth.accounts[1])
'0xe122ba26d25a93911e241232d3ba7c76f5a6bfe9f8038b66b198977115fb1ddf'
# Withdraw 12345 wei.
>>> wallet.transact().withdraw(12345)
The new public transaction will be created. Transaction receipt will
be available once the transaction has been mined.
:param transaction: Dictionary of transaction info for web3 interface.
Variables include ``from``, ``gas``, ``value``, ``gasPrice``.
:return: ``Transactor`` object that has contract
public functions exposed as Python methods.
Calling these methods will execute a transaction against the contract.
"""
if transaction is None:
transact_transaction = {}
else:
transact_transaction = dict(**transaction)
if 'data' in transact_transaction:
raise ValueError("Cannot set data in call transaction")
if self.address is not None:
transact_transaction.setdefault('to', self.address)
transact_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in transact_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.transact` from a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
contract = self
class Transactor(object):
def __getattr__(self, function_name):
callable_fn = functools.partial(
transact_with_contract_function,
contract,
function_name,
transact_transaction,
)
return callable_fn
return Transactor()
#
# Private Helpers
#
@classmethod
def _find_matching_fn_abi(cls, fn_name=None, args=None, kwargs=None):
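        # Narrow the ABI down to a single function entry by name, argument
        # count and encodability of the provided arguments; raise ValueError
        # if zero or more than one candidate remains.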
filters = []
if fn_name:
filters.append(functools.partial(filter_by_name, fn_name))
if args is not None or kwargs is not None:
if args is None:
args = tuple()
if kwargs is None:
kwargs = {}
num_arguments = len(args) + len(kwargs)
filters.extend([
functools.partial(filter_by_argument_count, num_arguments),
functools.partial(filter_by_encodability, args, kwargs),
])
function_candidates = filter_by_type('function', cls.abi)
for filter_fn in filters:
function_candidates = filter_fn(function_candidates)
if len(function_candidates) == 1:
return function_candidates[0]
elif not function_candidates:
break
if not function_candidates:
raise ValueError("No matching functions found")
else:
raise ValueError("Multiple functions found")
@classmethod
def _find_matching_event_abi(cls, event_name=None, argument_names=None):
filters = [
functools.partial(filter_by_type, 'event'),
]
if event_name is not None:
filters.append(functools.partial(filter_by_name, event_name))
if argument_names is not None:
filters.append(
functools.partial(filter_by_argument_name, argument_names)
)
filter_fn = compose(*filters)
event_abi_candidates = filter_fn(cls.abi)
if len(event_abi_candidates) == 1:
return event_abi_candidates[0]
elif not event_abi_candidates:
raise ValueError("No matching functions found")
else:
raise ValueError("Multiple functions found")
@classmethod
def _get_function_info(cls, fn_name, args=None, kwargs=None):
if args is None:
args = tuple()
if kwargs is None:
kwargs = {}
fn_abi = cls._find_matching_fn_abi(fn_name, args, kwargs)
fn_selector = function_abi_to_4byte_selector(fn_abi)
fn_arguments = merge_args_and_kwargs(fn_abi, args, kwargs)
return fn_abi, fn_selector, fn_arguments
@combomethod
def _prepare_transaction(cls,
fn_name,
fn_args=None,
fn_kwargs=None,
transaction=None):
"""
Returns a dictionary of the transaction that could be used to call this
"""
if transaction is None:
prepared_transaction = {}
else:
prepared_transaction = dict(**transaction)
if 'data' in prepared_transaction:
raise ValueError("Transaction parameter may not contain a 'data' key")
if cls.address:
prepared_transaction.setdefault('to', cls.address)
prepared_transaction['data'] = cls._encode_transaction_data(
fn_name,
fn_args,
fn_kwargs,
)
return prepared_transaction
@classmethod
def _encode_abi(cls, abi, arguments, data=None):
argument_types = get_abi_input_types(abi)
if not check_if_arguments_can_be_encoded(abi, arguments, {}):
raise TypeError(
"One or more arguments could not be encoded to the necessary "
"ABI type. Expected types are: {0}".format(
', '.join(argument_types),
)
)
try:
encoded_arguments = encode_abi(
argument_types,
force_obj_to_bytes(arguments),
)
except EncodingError as e:
raise TypeError(
"One or more arguments could not be encoded to the necessary "
"ABI type: {0}".format(str(e))
)
if data:
return add_0x_prefix(
force_bytes(remove_0x_prefix(data)) +
force_bytes(remove_0x_prefix(encode_hex(encoded_arguments)))
)
else:
return encode_hex(encoded_arguments)
@classmethod
@coerce_return_to_text
def _encode_transaction_data(cls, fn_name, args=None, kwargs=None):
fn_abi, fn_selector, fn_arguments = cls._get_function_info(
fn_name, args, kwargs,
)
return add_0x_prefix(cls._encode_abi(fn_abi, fn_arguments, fn_selector))
@classmethod
@coerce_return_to_text
def _encode_constructor_data(cls, args=None, kwargs=None):
constructor_abi = get_constructor_abi(cls.abi)
if constructor_abi:
if args is None:
args = tuple()
if kwargs is None:
kwargs = {}
arguments = merge_args_and_kwargs(constructor_abi, args, kwargs)
deploy_data = add_0x_prefix(
cls._encode_abi(constructor_abi, arguments, data=cls.code)
)
else:
deploy_data = add_0x_prefix(cls.code)
return deploy_data
@coerce_return_to_text
def call_contract_function(contract,
function_name,
transaction,
*args,
**kwargs):
"""
Helper function for interacting with a contract function using the
`eth_call` API.
"""
call_transaction = contract._prepare_transaction(
fn_name=function_name,
fn_args=args,
fn_kwargs=kwargs,
transaction=transaction,
)
return_data = contract.web3.eth.call(call_transaction)
function_abi = contract._find_matching_fn_abi(function_name, args, kwargs)
output_types = get_abi_output_types(function_abi)
try:
output_data = decode_abi(output_types, return_data)
except DecodingError as e:
# Provide a more helpful error message than the one provided by
# eth-abi-utils
msg = (
"Could not decode contract function call {} return data {} for "
"output_types {}".format(
function_name,
return_data,
output_types
)
)
raise_from(BadFunctionCallOutput(msg), e)
normalized_data = [
normalize_return_type(data_type, data_value)
for data_type, data_value
in zip(output_types, output_data)
]
if len(normalized_data) == 1:
return normalized_data[0]
else:
return normalized_data
def transact_with_contract_function(contract=None,
function_name=None,
transaction=None,
*args,
**kwargs):
"""
Helper function for interacting with a contract function by sending a
transaction.
"""
transact_transaction = contract._prepare_transaction(
fn_name=function_name,
fn_args=args,
fn_kwargs=kwargs,
transaction=transaction,
)
txn_hash = contract.web3.eth.sendTransaction(transact_transaction)
return txn_hash
def estimate_gas_for_function(contract=None,
function_name=None,
transaction=None,
*args,
**kwargs):
"""Estimates gas cost a function call would take.
Don't call this directly, instead use :meth:`Contract.estimateGas`
on your contract instance.
"""
estimate_transaction = contract._prepare_transaction(
fn_name=function_name,
fn_args=args,
fn_kwargs=kwargs,
transaction=transaction,
)
gas_estimate = contract.web3.eth.estimateGas(estimate_transaction)
return gas_estimate
def construct_contract_factory(web3,
abi,
code=None,
code_runtime=None,
source=None,
contract_name='Contract',
base_contract_factory_class=Contract):
"""Creates a new Contract class.
    The Contract class is a Python proxy class used to interact with smart contracts.
``abi`` and other contract definition fields are coming from
``solc`` compiler or ``build/contracts.json`` in the
case of Populus framework.
    After the contract has been instantiated you can interact with it
using :meth:`transact_with_contract_function` and
:meth:`call_contract_function`.
Example:
.. code-block:: python
# Assume we have a Token contract
token_contract_data = {
'abi': [...],
'code': '0x...',
'code_runtime': '0x...',
'source': 'contract Token {.....}',
}
# contract_factory is a python class that can be used to interact with
# or deploy the "Token" contract.
token_contract_factory = construct_contract_factory(
web3=web3,
abi=token_contract_data["abi"],
code=token_contract_data["code"],
code_runtime=token_contract_data["code_runtime"],
source=token_contract_data["source"],
)
# Create Contract instance to interact with a deployed smart contract.
token_contract = token_contract_factory(
address=address,
abi=token_contract_data["abi"],
code=token_contract_data["code"],
code_runtime=token_contract_data["code_runtime"],
source=token_contract_data["source"])
:param web3: Web3 connection
:param abi: As given by solc compiler
:param code: As given by solc compiler
:param code_runtime: As given by solc compiler
:param source: As given by solc compiler
:return: Contract class (not instance)
"""
_dict = {
'web3': web3,
'abi': abi,
'code': code,
'code_runtime': code_runtime,
'source': source,
}
return type(contract_name, (base_contract_factory_class,), _dict) |
|
ethereum__web3.py | filters.rst | Module doc | Generate documentation for this code | MIT License | ethereum__web3.py/docs/filters.rst | [
"ethereum__web3.py/web3/utils/filters.py"
] | Filtering
The web3.eth.filter method can be used to set up filters for:
- Pending Transactions
- New Blocks
- Event Logs
Filter API
The Filter class is a subclass of the gevent.Greenlet class. It
exposes these additional properties and methods.
The filter_id for this filter as returned by the eth_newFilter RPC
method when this filter was created.
A list of callbacks that this filter will call with new entries.
Boolean as to whether this filter is currently polling.
Boolean as to whether this filter has been stopped. Will be set to
None if the filter has not yet been started.
Hook for subclasses to modify the format of the log entries this
filter returns, or passes to its callback functions.
By default this returns the entry parameter unmodified.
Hook for subclasses to add additional programmatic filtering. The
default implementation always returns True.
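As an illustrative sketch (the subclass and address below are made up, not
part of the library), both hooks can be overridden to reshape entries and to
drop those that are not of interest; the LogFilter class overrides
is_valid_entry in a similar way:
>>> class AddressOnlyFilter(Filter):
...     # Only pass on entries originating from a single contract address.
...     watched_address = '0xdc3a9db694bcdd55ebae4a89b22ac6d12b3f0c24'
...     def is_valid_entry(self, entry):
...         return entry.get('address') == self.watched_address
...     def format_entry(self, entry):
...         # Hand callbacks just the log data rather than the whole entry.
...         return entry['data']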
Registers the provided callbacks to be called with each new entry this
filter encounters and starts the filter polling for changes.
Can only be called once on each filter. Cannot be called on a filter
that has already been started.
Stops the filter from polling and uninstalls the filter. Blocks until
all events that are currently being processed have been processed.
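A usage sketch, where my_filter stands in for any filter object returned by
web3.eth.filter and my_callback is a placeholder callable:
>>> def my_callback(entry):
...     sys.stdout.write("New entry: {0}".format(entry))
...
>>> my_filter.watch(my_callback)   # starts polling in a background greenlet
>>> my_filter.stop_watching()      # uninstalls the filter and waits for it to finish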
Block and Transaction Filters
You can set up a filter for new blocks using web3.eth.filter('latest'),
which will return a new BlockFilter object.
>>> def new_block_callback(block_hash):
... sys.stdout.write("New Block: {0}".format(block_hash))
...
>>> new_block_filter = web3.eth.filter('latest')
>>> new_block_filter.watch(new_block_callback)
# each time the client receives a new block the `new_block_callback`
# function will be called with the block hash.
You can set up a filter for pending transactions using web3.eth.filter('pending'),
which will return a new TransactionFilter object.
>>> def new_transaction_callback(transaction_hash):
... sys.stdout.write("New Block: {0}".format(transaction_hash))
...
>>> new_transaction_filter = web3.eth.filter('pending')
>>> new_transaction_filter.watch(new_transaction_callback)
# each time the client receives an unmined transaction the
# `new_transaction_callback` function will be called with the transaction
# hash.
Event Log Filters
The LogFilter class is used for all filters pertaining to
event logs. It exposes the following additional methods.
Synchronously retrieve the event logs for this filter.
If only_changes is True then logs will be retrieved using the
web3.eth.getFilterChanges which returns only new entries since the
last poll.
If only_changes is False then the logs will be retrieved using the
web3.eth.getFilterLogs which returns all logs that match the given
filter.
This method will raise a ValueError if called on a filter that is
currently polling.
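For example, reusing the my_token_contract factory from the contract
documentation with a filter that has not been started with watch (a sketch):
>>> transfer_filter = my_token_contract.on('Transfer')
>>> transfer_filter.get(only_changes=False)
[...] # every log matching the filter, via web3.eth.getFilterLogs
>>> transfer_filter.get()
[...] # only entries recorded since the last poll, via web3.eth.getFilterChanges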
The LogFilter class is returned from the web3.contract.Contract.on
method and will be configured to extract the event data from the
event logs.
The PastLogFilter class is a subclass of LogFilter that
is configured specially to return historical event logs. It conforms to
the same API as the LogFilter class.
Shh Filter
The ShhFilter class is used for filtering Shh messages. You
can set up a callback function for Whisper messages matching the
subscribed topics using web3.shh.filter(filter_params), which will
return a ShhFilter object.
>>> def filter_callback(new_message):
...     sys.stdout.write("New Shh Message: {0}".format(new_message))
...
>>> shh_filter = web3.shh.filter({"topics": [web3.fromAscii("topic_to_subscribe")]})
>>> shh_filter.watch(filter_callback)
# each time the client receives a Shh message matching the subscribed topics,
# `filter_callback` is called
| import re
import random
import gevent
from .types import (
is_string,
is_array,
)
from .events import (
construct_event_topic_set,
construct_event_data_set,
)
def construct_event_filter_params(event_abi,
contract_address=None,
argument_filters=None,
topics=None,
fromBlock=None,
toBlock=None,
address=None):
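    # Builds the (data_filter_set, filter_params) pair for this event: topics
    # are derived from the event ABI plus any argument filters, an explicit
    # `address` is merged with the contract address, and fromBlock / toBlock
    # are passed through only when provided.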
filter_params = {}
if topics is None:
topic_set = construct_event_topic_set(event_abi, argument_filters)
else:
topic_set = [topics] + construct_event_topic_set(event_abi, argument_filters)
if len(topic_set) == 1 and is_array(topic_set[0]):
filter_params['topics'] = topic_set[0]
else:
filter_params['topics'] = topic_set
if address and contract_address:
if is_array(address):
filter_params['address'] = address + [contract_address]
elif is_string(address):
filter_params['address'] = [address, contract_address]
else:
raise ValueError(
"Unsupported type for `address` parameter: {0}".format(type(address))
)
elif address:
filter_params['address'] = address
elif contract_address:
filter_params['address'] = contract_address
if fromBlock is not None:
filter_params['fromBlock'] = fromBlock
if toBlock is not None:
filter_params['toBlock'] = toBlock
data_filters_set = construct_event_data_set(event_abi, argument_filters)
return data_filters_set, filter_params
class Filter(gevent.Greenlet):
callbacks = None
running = None
stopped = False
poll_interval = None
def __init__(self, web3, filter_id):
self.web3 = web3
self.filter_id = filter_id
self.callbacks = []
gevent.Greenlet.__init__(self)
def __str__(self):
return "Filter for {0}".format(self.filter_id)
def _run(self):
if self.stopped:
raise ValueError("Cannot restart a Filter")
self.running = True
while self.running:
changes = self.web3.eth.getFilterChanges(self.filter_id)
if changes:
for entry in changes:
for callback_fn in self.callbacks:
if self.is_valid_entry(entry):
callback_fn(self.format_entry(entry))
if self.poll_interval is None:
gevent.sleep(random.random())
else:
gevent.sleep(self.poll_interval)
def format_entry(self, entry):
"""
Hook for subclasses to change the format of the value that is passed
into the callback functions.
"""
return entry
def is_valid_entry(self, entry):
"""
Hook for subclasses to implement additional filtering layers.
"""
return True
def watch(self, *callbacks):
if self.stopped:
raise ValueError("Cannot watch on a filter that has been stopped")
self.callbacks.extend(callbacks)
if not self.running:
self.start()
gevent.sleep(0)
def stop_watching(self, timeout=0):
self.running = False
self.stopped = True
self.web3.eth.uninstallFilter(self.filter_id)
self.join(timeout)
stopWatching = stop_watching
class BlockFilter(Filter):
pass
class TransactionFilter(Filter):
pass
ZERO_32BYTES = '[a-f0-9]{64}'
def construct_data_filter_regex(data_filter_set):
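    # Build a regex matching a log's hex `data` field: each filter value must
    # appear in its 32-byte slot, with None acting as a wildcard for that slot.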
return re.compile((
'^' +
'|'.join((
'0x' + ''.join(
(ZERO_32BYTES if v is None else v[2:] for v in data_filter)
)
for data_filter in data_filter_set
)) +
'$'
))
class LogFilter(Filter):
data_filter_set = None
data_filter_set_regex = None
log_entry_formatter = None
def __init__(self, *args, **kwargs):
self.log_entry_formatter = kwargs.pop(
'log_entry_formatter',
self.log_entry_formatter,
)
if 'data_filter_set' in kwargs:
self.set_data_filters(kwargs.pop('data_filter_set'))
super(LogFilter, self).__init__(*args, **kwargs)
def get(self, only_changes=True):
if self.running:
raise ValueError(
"Cannot call `get` on a filter object which is actively watching"
)
if only_changes:
log_entries = self.web3.eth.getFilterChanges(self.filter_id)
else:
log_entries = self.web3.eth.getFilterLogs(self.filter_id)
if log_entries is None:
log_entries = []
formatted_log_entries = [
self.format_entry(log_entry) for log_entry in log_entries
]
return formatted_log_entries
def format_entry(self, entry):
if self.log_entry_formatter:
return self.log_entry_formatter(entry)
return entry
def set_data_filters(self, data_filter_set):
self.data_filter_set = data_filter_set
if any(data_filter_set):
self.data_filter_set_regex = construct_data_filter_regex(
data_filter_set,
)
def is_valid_entry(self, entry):
if not self.data_filter_set_regex:
return True
return bool(self.data_filter_set_regex.match(entry['data']))
class PastLogFilter(LogFilter):
def _run(self):
if self.stopped:
raise ValueError("Cannot restart a Filter")
self.running = True
previous_logs = self.web3.eth.getFilterLogs(self.filter_id)
if previous_logs:
for entry in previous_logs:
for callback_fn in self.callbacks:
if self.is_valid_entry(entry):
callback_fn(self.format_entry(entry))
self.running = False
class ShhFilter(Filter):
def _run(self):
if self.stopped:
raise ValueError("Cannot restart a filter")
self.running = True
while self.running:
changes = self.web3.shh.getFilterChanges(self.filter_id)
if changes:
for entry in changes:
for callback_fn in self.callbacks:
if self.is_valid_entry(entry):
callback_fn(self.format_entry(entry))
if self.poll_interval is None:
gevent.sleep(random.random())
else:
gevent.sleep(self.poll_interval)
def stop_watching(self, timeout=0):
self.running = False
self.stopped = True
self.web3.shh.uninstallFilter(self.filter_id)
self.join(timeout)
stopWatching = stop_watching |