repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url |
---|---|---|---|---|---|---|---|
gmr/queries | queries/session.py | Session._connect | def _connect(self):
"""Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating a new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError
"""
# Attempt to get a cached connection from the connection pool
try:
connection = self._pool_manager.get(self.pid, self)
LOGGER.debug("Re-using connection for %s", self.pid)
except pool.NoIdleConnectionsError:
if self._pool_manager.is_full(self.pid):
raise
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
LOGGER.debug("Creating a new connection for %s", self.pid)
connection = self._psycopg2_connect(kwargs)
self._pool_manager.add(self.pid, connection)
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2ct connects and leaves the connection in
# a weird state: consts.STATUS_DATESTYLE, returning from
# Connection._setup without setting the state as const.STATUS_OK
if utils.PYPY:
connection.reset()
# Register the custom data types
self._register_unicode(connection)
self._register_uuid(connection)
return connection | python | def _connect(self):
"""Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating a new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError
"""
# Attempt to get a cached connection from the connection pool
try:
connection = self._pool_manager.get(self.pid, self)
LOGGER.debug("Re-using connection for %s", self.pid)
except pool.NoIdleConnectionsError:
if self._pool_manager.is_full(self.pid):
raise
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
LOGGER.debug("Creating a new connection for %s", self.pid)
connection = self._psycopg2_connect(kwargs)
self._pool_manager.add(self.pid, connection)
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2ct connects and leaves the connection in
# a weird state: consts.STATUS_DATESTYLE, returning from
# Connection._setup without setting the state as const.STATUS_OK
if utils.PYPY:
connection.reset()
# Register the custom data types
self._register_unicode(connection)
self._register_uuid(connection)
return connection | Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating a new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L273-L307 |
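A minimal usage sketch of the public API that exercises `Session._connect` above, assuming the `queries` package is installed and a local PostgreSQL server accepts the default `postgres` credentials:

```python
import queries

# The first statement executed on the session triggers _connect(), which
# either reuses a pooled connection keyed by the URI or opens and pools a
# new one.
with queries.Session('postgresql://postgres@localhost:5432/postgres') as session:
    for row in session.query('SELECT 1 AS value'):
        print(row['value'])  # rows are returned as dictionaries
```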
gmr/queries | queries/session.py | Session._get_cursor | def _get_cursor(self, connection, name=None):
"""Return a cursor for the given cursor_factory. Specify a name to
use server-side cursors.
:param connection: The connection to create a cursor on
:type connection: psycopg2.extensions.connection
:param str name: A cursor name for a server side cursor
:rtype: psycopg2.extensions.cursor
"""
cursor = connection.cursor(name=name,
cursor_factory=self._cursor_factory)
if name is not None:
cursor.scrollable = True
cursor.withhold = True
return cursor | python | def _get_cursor(self, connection, name=None):
"""Return a cursor for the given cursor_factory. Specify a name to
use server-side cursors.
:param connection: The connection to create a cursor on
:type connection: psycopg2.extensions.connection
:param str name: A cursor name for a server side cursor
:rtype: psycopg2.extensions.cursor
"""
cursor = connection.cursor(name=name,
cursor_factory=self._cursor_factory)
if name is not None:
cursor.scrollable = True
cursor.withhold = True
return cursor | Return a cursor for the given cursor_factory. Specify a name to
use server-side cursors.
:param connection: The connection to create a cursor on
:type connection: psycopg2.extensions.connection
:param str name: A cursor name for a server side cursor
:rtype: psycopg2.extensions.cursor | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L309-L324 |
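Passing a name makes psycopg2 create a server-side (named) cursor, which keeps the result set on the server and streams rows on demand. A hedged standalone sketch of that pattern (the DSN and query are placeholders, not part of the library):

```python
import psycopg2
import psycopg2.extras

conn = psycopg2.connect('postgresql://postgres@localhost/postgres')  # placeholder DSN
cursor = conn.cursor(name='large_scan',
                     cursor_factory=psycopg2.extras.RealDictCursor)
cursor.itersize = 1000          # rows fetched per round trip to the server
cursor.execute('SELECT generate_series(1, 100000) AS n')
for row in cursor:              # iterates in itersize-sized batches
    pass
conn.close()
```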
gmr/queries | queries/session.py | Session._incr_exceptions | def _incr_exceptions(self):
"""Increment the number of exceptions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).exceptions += 1 | python | def _incr_exceptions(self):
"""Increment the number of exceptions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).exceptions += 1 | Increment the number of exceptions for the current connection. | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L326-L328 |
gmr/queries | queries/session.py | Session._incr_executions | def _incr_executions(self):
"""Increment the number of executions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).executions += 1 | python | def _incr_executions(self):
"""Increment the number of executions for the current connection."""
self._pool_manager.get_connection(self.pid, self._conn).executions += 1 | Increment the number of executions for the current connection. | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L330-L332 |
gmr/queries | queries/session.py | Session._register_unicode | def _register_unicode(connection):
"""Register the cursor to be able to receive Unicode string.
:type connection: psycopg2.extensions.connection
:param connection: Where to register things
"""
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE,
connection)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY,
connection) | python | def _register_unicode(connection):
"""Register the cursor to be able to receive Unicode string.
:type connection: psycopg2.extensions.connection
:param connection: Where to register things
"""
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE,
connection)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY,
connection) | Register the connection to be able to receive Unicode strings.
:type connection: psycopg2.extensions.connection
:param connection: Where to register things | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L345-L355 |
gmr/queries | queries/session.py | Session._status | def _status(self):
"""Return the current connection status as an integer value.
The status should match one of the following constants:
- queries.Session.INTRANS: Connection established, in transaction
- queries.Session.PREPARED: Prepared for second phase of transaction
- queries.Session.READY: Connected, no active transaction
:rtype: int
"""
if self._conn.status == psycopg2.extensions.STATUS_BEGIN:
return self.READY
return self._conn.status | python | def _status(self):
"""Return the current connection status as an integer value.
The status should match one of the following constants:
- queries.Session.INTRANS: Connection established, in transaction
- queries.Session.PREPARED: Prepared for second phase of transaction
- queries.Session.READY: Connected, no active transaction
:rtype: int
"""
if self._conn.status == psycopg2.extensions.STATUS_BEGIN:
return self.READY
return self._conn.status | Return the current connection status as an integer value.
The status should match one of the following constants:
- queries.Session.INTRANS: Connection established, in transaction
- queries.Session.PREPARED: Prepared for second phase of transaction
- queries.Session.READY: Connected, no active transaction
:rtype: int | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L368-L382 |
gmr/queries | queries/results.py | Results.as_dict | def as_dict(self):
"""Return a single row result as a dictionary. If the results contain
multiple rows, a :py:class:`ValueError` will be raised.
:return: dict
:raises: ValueError
"""
if not self.cursor.rowcount:
return {}
self._rewind()
if self.cursor.rowcount == 1:
return dict(self.cursor.fetchone())
else:
raise ValueError('More than one row') | python | def as_dict(self):
"""Return a single row result as a dictionary. If the results contain
multiple rows, a :py:class:`ValueError` will be raised.
:return: dict
:raises: ValueError
"""
if not self.cursor.rowcount:
return {}
self._rewind()
if self.cursor.rowcount == 1:
return dict(self.cursor.fetchone())
else:
raise ValueError('More than one row') | Return a single row result as a dictionary. If the results contain
multiple rows, a :py:class:`ValueError` will be raised.
:return: dict
:raises: ValueError | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/results.py#L66-L81 |
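A usage sketch for the single-row convenience method (the connection details are assumptions):

```python
import queries

session = queries.Session('postgresql://postgres@localhost/postgres')
result = session.query('SELECT 42 AS answer')
print(result.as_dict())   # {'answer': 42}
# For result sets with more than one row, as_dict() raises ValueError;
# iterate the Results object or call items() instead.
```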
gmr/queries | queries/results.py | Results.items | def items(self):
"""Return all of the rows that are in the result set.
:rtype: list
"""
if not self.cursor.rowcount:
return []
self.cursor.scroll(0, 'absolute')
return self.cursor.fetchall() | python | def items(self):
"""Return all of the rows that are in the result set.
:rtype: list
"""
if not self.cursor.rowcount:
return []
self.cursor.scroll(0, 'absolute')
return self.cursor.fetchall() | Return all of the rows that are in the result set.
:rtype: list | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/results.py#L98-L108 |
gmr/queries | queries/utils.py | get_current_user | def get_current_user():
"""Return the current username for the logged in user
:rtype: str
"""
if pwd is None:
return getpass.getuser()
else:
try:
return pwd.getpwuid(os.getuid())[0]
except KeyError as error:
LOGGER.error('Could not get logged-in user: %s', error) | python | def get_current_user():
"""Return the current username for the logged in user
:rtype: str
"""
if pwd is None:
return getpass.getuser()
else:
try:
return pwd.getpwuid(os.getuid())[0]
except KeyError as error:
LOGGER.error('Could not get logged-in user: %s', error) | Return the current username for the logged-in user
:rtype: str | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L57-L69 |
gmr/queries | queries/utils.py | uri | def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
password=None):
"""Return a PostgreSQL connection URI for the specified values.
:param str host: Host to connect to
:param int port: Port to connect on
:param str dbname: The database name
:param str user: User to connect as
:param str password: The password to use, None for no password
:return str: The PostgreSQL connection URI
"""
if port:
host = '%s:%s' % (host, port)
if password:
return 'postgresql://%s:%s@%s/%s' % (user, password, host, dbname)
return 'postgresql://%s@%s/%s' % (user, host, dbname) | python | def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
password=None):
"""Return a PostgreSQL connection URI for the specified values.
:param str host: Host to connect to
:param int port: Port to connect on
:param str dbname: The database name
:param str user: User to connect as
:param str password: The password to use, None for no password
:return str: The PostgreSQL connection URI
"""
if port:
host = '%s:%s' % (host, port)
if password:
return 'postgresql://%s:%s@%s/%s' % (user, password, host, dbname)
return 'postgresql://%s@%s/%s' % (user, host, dbname) | Return a PostgreSQL connection URI for the specified values.
:param str host: Host to connect to
:param int port: Port to connect on
:param str dbname: The database name
:param str user: User to connect as
:param str password: The password to use, None for no password
:return str: The PostgreSQL connection URI | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L82-L98 |
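Example output of the URI builder (the host, database, and credentials are illustrative):

```python
from queries import uri

print(uri())
# postgresql://postgres@localhost:5432/postgres
print(uri('db.example.com', 5433, 'app', 'web', 's3cret'))
# postgresql://web:s3cret@db.example.com:5433/app
```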
gmr/queries | queries/utils.py | uri_to_kwargs | def uri_to_kwargs(uri):
"""Return a URI as kwargs for connecting to PostgreSQL with psycopg2,
applying default values for non-specified areas of the URI.
:param str uri: The connection URI
:rtype: dict
"""
parsed = urlparse(uri)
default_user = get_current_user()
password = unquote(parsed.password) if parsed.password else None
kwargs = {'host': parsed.hostname,
'port': parsed.port,
'dbname': parsed.path[1:] or default_user,
'user': parsed.username or default_user,
'password': password}
values = parse_qs(parsed.query)
if 'host' in values:
kwargs['host'] = values['host'][0]
for k in [k for k in values if k in KEYWORDS]:
kwargs[k] = values[k][0] if len(values[k]) == 1 else values[k]
try:
if kwargs[k].isdigit():
kwargs[k] = int(kwargs[k])
except AttributeError:
pass
return kwargs | python | def uri_to_kwargs(uri):
"""Return a URI as kwargs for connecting to PostgreSQL with psycopg2,
applying default values for non-specified areas of the URI.
:param str uri: The connection URI
:rtype: dict
"""
parsed = urlparse(uri)
default_user = get_current_user()
password = unquote(parsed.password) if parsed.password else None
kwargs = {'host': parsed.hostname,
'port': parsed.port,
'dbname': parsed.path[1:] or default_user,
'user': parsed.username or default_user,
'password': password}
values = parse_qs(parsed.query)
if 'host' in values:
kwargs['host'] = values['host'][0]
for k in [k for k in values if k in KEYWORDS]:
kwargs[k] = values[k][0] if len(values[k]) == 1 else values[k]
try:
if kwargs[k].isdigit():
kwargs[k] = int(kwargs[k])
except AttributeError:
pass
return kwargs | Return a URI as kwargs for connecting to PostgreSQL with psycopg2,
applying default values for non-specified areas of the URI.
:param str uri: The connection URI
:rtype: dict | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L101-L127 |
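A sketch of the kwargs produced for a typical URI; the example assumes `connect_timeout` is among the recognized `KEYWORDS`, and unspecified parts fall back to the logged-in user:

```python
from queries.utils import uri_to_kwargs

kwargs = uri_to_kwargs(
    'postgresql://web:s3cret@db.example.com:5433/app?connect_timeout=10')
# Roughly:
# {'host': 'db.example.com', 'port': 5433, 'dbname': 'app',
#  'user': 'web', 'password': 's3cret', 'connect_timeout': 10}
```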
gmr/queries | queries/utils.py | urlparse | def urlparse(url):
"""Parse the URL in a Python2/3 independent fashion.
:param str url: The URL to parse
:rtype: Parsed
"""
value = 'http%s' % url[5:] if url[:5] == 'postgresql' else url
parsed = _urlparse.urlparse(value)
path, query = parsed.path, parsed.query
hostname = parsed.hostname if parsed.hostname else ''
return PARSED(parsed.scheme.replace('http', 'postgresql'),
parsed.netloc,
path,
parsed.params,
query,
parsed.fragment,
parsed.username,
parsed.password,
hostname.replace('%2f', '/'),
parsed.port) | python | def urlparse(url):
"""Parse the URL in a Python2/3 independent fashion.
:param str url: The URL to parse
:rtype: Parsed
"""
value = 'http%s' % url[5:] if url[:5] == 'postgresql' else url
parsed = _urlparse.urlparse(value)
path, query = parsed.path, parsed.query
hostname = parsed.hostname if parsed.hostname else ''
return PARSED(parsed.scheme.replace('http', 'postgresql'),
parsed.netloc,
path,
parsed.params,
query,
parsed.fragment,
parsed.username,
parsed.password,
hostname.replace('%2f', '/'),
parsed.port) | Parse the URL in a Python2/3 independent fashion.
:param str url: The URL to parse
:rtype: Parsed | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/utils.py#L130-L150 |
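The temporary scheme swap exists because some `urlparse` implementations only split the netloc into hostname, port, and credentials for schemes they recognize; an illustration of the round trip (the values are made up):

```python
from queries.utils import urlparse

parsed = urlparse('postgresql://web@db.example.com:5433/app')
print(parsed.scheme, parsed.hostname, parsed.port, parsed.path)
# postgresql db.example.com 5433 /app
```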
gmr/queries | queries/tornado_session.py | Results.free | def free(self):
"""Release the results and connection lock from the TornadoSession
object. This **must** be called after you finish processing the results
from :py:meth:`TornadoSession.query <queries.TornadoSession.query>` or
:py:meth:`TornadoSession.callproc <queries.TornadoSession.callproc>`
or the connection will not be able to be reused by other asynchronous
requests.
"""
self._freed = True
self._cleanup(self.cursor, self._fd) | python | def free(self):
"""Release the results and connection lock from the TornadoSession
object. This **must** be called after you finish processing the results
from :py:meth:`TornadoSession.query <queries.TornadoSession.query>` or
:py:meth:`TornadoSession.callproc <queries.TornadoSession.callproc>`
or the connection will not be able to be reused by other asynchronous
requests.
"""
self._freed = True
self._cleanup(self.cursor, self._fd) | Release the results and connection lock from the TornadoSession
object. This **must** be called after you finish processing the results
from :py:meth:`TornadoSession.query <queries.TornadoSession.query>` or
:py:meth:`TornadoSession.callproc <queries.TornadoSession.callproc>`
or the connection will not be able to be reused by other asynchronous
requests. | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L103-L113 |
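A hedged usage sketch showing why `free()` matters: until it is called, the pooled connection stays locked to this result and cannot serve other coroutines (the DSN and query are placeholders):

```python
from tornado import gen, ioloop
import queries

session = queries.TornadoSession('postgresql://postgres@localhost/postgres')

@gen.coroutine
def lookup():
    results = yield session.query('SELECT 1 AS value')
    try:
        for row in results:
            print(row['value'])
    finally:
        results.free()  # release the connection lock back to the pool

ioloop.IOLoop.current().run_sync(lookup)
```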
gmr/queries | queries/tornado_session.py | TornadoSession._ensure_pool_exists | def _ensure_pool_exists(self):
"""Create the pool in the pool manager if it does not exist."""
if self.pid not in self._pool_manager:
self._pool_manager.create(self.pid, self._pool_idle_ttl,
self._pool_max_size, self._ioloop.time) | python | def _ensure_pool_exists(self):
"""Create the pool in the pool manager if it does not exist."""
if self.pid not in self._pool_manager:
self._pool_manager.create(self.pid, self._pool_idle_ttl,
self._pool_max_size, self._ioloop.time) | Create the pool in the pool manager if it does not exist. | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L165-L169 |
gmr/queries | queries/tornado_session.py | TornadoSession._connect | def _connect(self):
"""Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating a new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError
"""
future = concurrent.Future()
# Attempt to get a cached connection from the connection pool
try:
connection = self._pool_manager.get(self.pid, self)
self._connections[connection.fileno()] = connection
future.set_result(connection)
# Add the connection to the IOLoop
self._ioloop.add_handler(connection.fileno(),
self._on_io_events,
ioloop.IOLoop.WRITE)
except pool.NoIdleConnectionsError:
self._create_connection(future)
return future | python | def _connect(self):
"""Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating a new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError
"""
future = concurrent.Future()
# Attempt to get a cached connection from the connection pool
try:
connection = self._pool_manager.get(self.pid, self)
self._connections[connection.fileno()] = connection
future.set_result(connection)
# Add the connection to the IOLoop
self._ioloop.add_handler(connection.fileno(),
self._on_io_events,
ioloop.IOLoop.WRITE)
except pool.NoIdleConnectionsError:
self._create_connection(future)
return future | Connect to PostgreSQL, either by reusing a connection from the pool
if possible, or by creating a new connection.
:rtype: psycopg2.extensions.connection
:raises: pool.NoIdleConnectionsError | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L244-L267 |
gmr/queries | queries/tornado_session.py | TornadoSession._create_connection | def _create_connection(self, future):
"""Create a new PostgreSQL connection
:param tornado.concurrent.Future future: future for new conn result
"""
LOGGER.debug('Creating a new connection for %s', self.pid)
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
try:
connection = self._psycopg2_connect(kwargs)
except (psycopg2.Error, OSError, socket.error) as error:
future.set_exception(error)
return
# Add the connection for use in _poll_connection
fd = connection.fileno()
self._connections[fd] = connection
def on_connected(cf):
"""Invoked by the IOLoop when the future is complete for the
connection
:param Future cf: The future for the initial connection
"""
if cf.exception():
self._cleanup_fd(fd, True)
future.set_exception(cf.exception())
else:
try:
# Add the connection to the pool
LOGGER.debug('Connection established for %s', self.pid)
self._pool_manager.add(self.pid, connection)
except (ValueError, pool.PoolException) as err:
LOGGER.exception('Failed to add %r to the pool', self.pid)
self._cleanup_fd(fd)
future.set_exception(err)
return
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2cffi connects and leaves the
# connection in a weird state: consts.STATUS_DATESTYLE,
# returning from Connection._setup without setting the state
# as const.STATUS_OK
if utils.PYPY:
connection.status = extensions.STATUS_READY
# Register the custom data types
self._register_unicode(connection)
self._register_uuid(connection)
# Set the future result
future.set_result(connection)
# Add a future that fires once connected
self._futures[fd] = concurrent.Future()
self._ioloop.add_future(self._futures[fd], on_connected)
# Add the connection to the IOLoop
self._ioloop.add_handler(connection.fileno(),
self._on_io_events,
ioloop.IOLoop.WRITE) | python | def _create_connection(self, future):
"""Create a new PostgreSQL connection
:param tornado.concurrent.Future future: future for new conn result
"""
LOGGER.debug('Creating a new connection for %s', self.pid)
# Create a new PostgreSQL connection
kwargs = utils.uri_to_kwargs(self._uri)
try:
connection = self._psycopg2_connect(kwargs)
except (psycopg2.Error, OSError, socket.error) as error:
future.set_exception(error)
return
# Add the connection for use in _poll_connection
fd = connection.fileno()
self._connections[fd] = connection
def on_connected(cf):
"""Invoked by the IOLoop when the future is complete for the
connection
:param Future cf: The future for the initial connection
"""
if cf.exception():
self._cleanup_fd(fd, True)
future.set_exception(cf.exception())
else:
try:
# Add the connection to the pool
LOGGER.debug('Connection established for %s', self.pid)
self._pool_manager.add(self.pid, connection)
except (ValueError, pool.PoolException) as err:
LOGGER.exception('Failed to add %r to the pool', self.pid)
self._cleanup_fd(fd)
future.set_exception(err)
return
self._pool_manager.lock(self.pid, connection, self)
# Added in because psycopg2cffi connects and leaves the
# connection in a weird state: consts.STATUS_DATESTYLE,
# returning from Connection._setup without setting the state
# as const.STATUS_OK
if utils.PYPY:
connection.status = extensions.STATUS_READY
# Register the custom data types
self._register_unicode(connection)
self._register_uuid(connection)
# Set the future result
future.set_result(connection)
# Add a future that fires once connected
self._futures[fd] = concurrent.Future()
self._ioloop.add_future(self._futures[fd], on_connected)
# Add the connection to the IOLoop
self._ioloop.add_handler(connection.fileno(),
self._on_io_events,
ioloop.IOLoop.WRITE) | Create a new PostgreSQL connection
:param tornado.concurrent.Future future: future for new conn result | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L269-L336 |
gmr/queries | queries/tornado_session.py | TornadoSession._execute | def _execute(self, method, query, parameters=None):
"""Issue a query asynchronously on the server, mogrifying the
parameters against the sql statement and yielding the results
as a :py:class:`Results <queries.tornado_session.Results>` object.
This function reduces duplicate code for callproc and query by getting
the class attribute for the method passed in as the function to call.
:param str method: The method attribute to use
:param str query: The SQL statement or Stored Procedure name
:param list|dict parameters: A dictionary of query parameters
:rtype: Results
:raises: queries.DataError
:raises: queries.DatabaseError
:raises: queries.IntegrityError
:raises: queries.InternalError
:raises: queries.InterfaceError
:raises: queries.NotSupportedError
:raises: queries.OperationalError
:raises: queries.ProgrammingError
"""
future = concurrent.Future()
def on_connected(cf):
"""Invoked by the future returned by self._connect"""
if cf.exception():
future.set_exception(cf.exception())
return
# Get the psycopg2 connection object and cursor
conn = cf.result()
cursor = self._get_cursor(conn)
def completed(qf):
"""Invoked by the IOLoop when the future has completed"""
if qf.exception():
self._incr_exceptions(conn)
err = qf.exception()
LOGGER.debug('Cleaning cursor due to exception: %r', err)
self._exec_cleanup(cursor, conn.fileno())
future.set_exception(err)
else:
self._incr_executions(conn)
value = Results(cursor, self._exec_cleanup, conn.fileno())
future.set_result(value)
# Setup a callback to wait on the query result
self._futures[conn.fileno()] = concurrent.Future()
# Add the future to the IOLoop
self._ioloop.add_future(self._futures[conn.fileno()],
completed)
# Get the cursor, execute the query
func = getattr(cursor, method)
try:
func(query, parameters)
except Exception as error:
future.set_exception(error)
# Ensure the pool exists for the connection
self._ensure_pool_exists()
# Grab a connection to PostgreSQL
self._ioloop.add_future(self._connect(), on_connected)
# Return the future for the query result
return future | python | def _execute(self, method, query, parameters=None):
"""Issue a query asynchronously on the server, mogrifying the
parameters against the sql statement and yielding the results
as a :py:class:`Results <queries.tornado_session.Results>` object.
This function reduces duplicate code for callproc and query by getting
the class attribute for the method passed in as the function to call.
:param str method: The method attribute to use
:param str query: The SQL statement or Stored Procedure name
:param list|dict parameters: A dictionary of query parameters
:rtype: Results
:raises: queries.DataError
:raises: queries.DatabaseError
:raises: queries.IntegrityError
:raises: queries.InternalError
:raises: queries.InterfaceError
:raises: queries.NotSupportedError
:raises: queries.OperationalError
:raises: queries.ProgrammingError
"""
future = concurrent.Future()
def on_connected(cf):
"""Invoked by the future returned by self._connect"""
if cf.exception():
future.set_exception(cf.exception())
return
# Get the psycopg2 connection object and cursor
conn = cf.result()
cursor = self._get_cursor(conn)
def completed(qf):
"""Invoked by the IOLoop when the future has completed"""
if qf.exception():
self._incr_exceptions(conn)
err = qf.exception()
LOGGER.debug('Cleaning cursor due to exception: %r', err)
self._exec_cleanup(cursor, conn.fileno())
future.set_exception(err)
else:
self._incr_executions(conn)
value = Results(cursor, self._exec_cleanup, conn.fileno())
future.set_result(value)
# Setup a callback to wait on the query result
self._futures[conn.fileno()] = concurrent.Future()
# Add the future to the IOLoop
self._ioloop.add_future(self._futures[conn.fileno()],
completed)
# Get the cursor, execute the query
func = getattr(cursor, method)
try:
func(query, parameters)
except Exception as error:
future.set_exception(error)
# Ensure the pool exists for the connection
self._ensure_pool_exists()
# Grab a connection to PostgreSQL
self._ioloop.add_future(self._connect(), on_connected)
# Return the future for the query result
return future | Issue a query asynchronously on the server, mogrifying the
parameters against the sql statement and yielding the results
as a :py:class:`Results <queries.tornado_session.Results>` object.
This function reduces duplicate code for callproc and query by getting
the class attribute for the method passed in as the function to call.
:param str method: The method attribute to use
:param str query: The SQL statement or Stored Procedure name
:param list|dict parameters: A dictionary of query parameters
:rtype: Results
:raises: queries.DataError
:raises: queries.DatabaseError
:raises: queries.IntegrityError
:raises: queries.InternalError
:raises: queries.InterfaceError
:raises: queries.NotSupportedError
:raises: queries.OperationalError
:raises: queries.ProgrammingError | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L338-L406 |
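Both public entry points funnel into `_execute()` with a different cursor method name. A sketch of how they are typically awaited from a coroutine, assuming `session` is an already constructed `TornadoSession` and the statements are illustrative:

```python
from tornado import gen

@gen.coroutine
def run_examples(session):
    rows = yield session.query('SELECT now() AS ts')   # uses cursor.execute
    print(rows.items())
    rows.free()
    ver = yield session.callproc('version')            # uses cursor.callproc
    print(ver.items())
    ver.free()
```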
gmr/queries | queries/tornado_session.py | TornadoSession._exec_cleanup | def _exec_cleanup(self, cursor, fd):
"""Close the cursor, remove any references to the fd in internal state
and remove the fd from the ioloop.
:param psycopg2.extensions.cursor cursor: The cursor to close
:param int fd: The connection file descriptor
"""
LOGGER.debug('Closing cursor and cleaning %s', fd)
try:
cursor.close()
except (psycopg2.Error, psycopg2.Warning) as error:
LOGGER.debug('Error closing the cursor: %s', error)
self._cleanup_fd(fd)
# If the cleanup callback exists, remove it
if self._cleanup_callback:
self._ioloop.remove_timeout(self._cleanup_callback)
# Create a new cleanup callback to clean the pool of idle connections
self._cleanup_callback = self._ioloop.add_timeout(
self._ioloop.time() + self._pool_idle_ttl + 1,
self._pool_manager.clean, self.pid) | python | def _exec_cleanup(self, cursor, fd):
"""Close the cursor, remove any references to the fd in internal state
and remove the fd from the ioloop.
:param psycopg2.extensions.cursor cursor: The cursor to close
:param int fd: The connection file descriptor
"""
LOGGER.debug('Closing cursor and cleaning %s', fd)
try:
cursor.close()
except (psycopg2.Error, psycopg2.Warning) as error:
LOGGER.debug('Error closing the cursor: %s', error)
self._cleanup_fd(fd)
# If the cleanup callback exists, remove it
if self._cleanup_callback:
self._ioloop.remove_timeout(self._cleanup_callback)
# Create a new cleanup callback to clean the pool of idle connections
self._cleanup_callback = self._ioloop.add_timeout(
self._ioloop.time() + self._pool_idle_ttl + 1,
self._pool_manager.clean, self.pid) | Close the cursor, remove any references to the fd in internal state
and remove the fd from the ioloop.
:param psycopg2.extensions.cursor cursor: The cursor to close
:param int fd: The connection file descriptor | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L408-L431 |
gmr/queries | queries/tornado_session.py | TornadoSession._cleanup_fd | def _cleanup_fd(self, fd, close=False):
"""Ensure the socket socket is removed from the IOLoop, the
connection stack, and futures stack.
:param int fd: The fd # to cleanup
"""
self._ioloop.remove_handler(fd)
if fd in self._connections:
try:
self._pool_manager.free(self.pid, self._connections[fd])
except pool.ConnectionNotFoundError:
pass
if close:
self._connections[fd].close()
del self._connections[fd]
if fd in self._futures:
del self._futures[fd] | python | def _cleanup_fd(self, fd, close=False):
"""Ensure the socket socket is removed from the IOLoop, the
connection stack, and futures stack.
:param int fd: The fd # to cleanup
"""
self._ioloop.remove_handler(fd)
if fd in self._connections:
try:
self._pool_manager.free(self.pid, self._connections[fd])
except pool.ConnectionNotFoundError:
pass
if close:
self._connections[fd].close()
del self._connections[fd]
if fd in self._futures:
del self._futures[fd] | Ensure the socket is removed from the IOLoop, the
connection stack, and futures stack.
:param int fd: The fd # to cleanup | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L433-L450 |
gmr/queries | queries/tornado_session.py | TornadoSession._incr_exceptions | def _incr_exceptions(self, conn):
"""Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
"""
self._pool_manager.get_connection(self.pid, conn).exceptions += 1 | python | def _incr_exceptions(self, conn):
"""Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
"""
self._pool_manager.get_connection(self.pid, conn).exceptions += 1 | Increment the number of exceptions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L452-L458 |
gmr/queries | queries/tornado_session.py | TornadoSession._incr_executions | def _incr_executions(self, conn):
"""Increment the number of executions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
"""
self._pool_manager.get_connection(self.pid, conn).executions += 1 | python | def _incr_executions(self, conn):
"""Increment the number of executions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection
"""
self._pool_manager.get_connection(self.pid, conn).executions += 1 | Increment the number of executions for the current connection.
:param psycopg2.extensions.connection conn: the psycopg2 connection | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L460-L466 |
gmr/queries | queries/tornado_session.py | TornadoSession._on_io_events | def _on_io_events(self, fd=None, _events=None):
"""Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised
"""
if fd not in self._connections:
LOGGER.warning('Received IO event for non-existing connection')
return
self._poll_connection(fd) | python | def _on_io_events(self, fd=None, _events=None):
"""Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised
"""
if fd not in self._connections:
LOGGER.warning('Received IO event for non-existing connection')
return
self._poll_connection(fd) | Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L468-L478 |
gmr/queries | queries/tornado_session.py | TornadoSession._poll_connection | def _poll_connection(self, fd):
"""Check with psycopg2 to see what action to take. If the state is
POLL_OK, we should have a pending callback for that fd.
:param int fd: The socket fd for the postgresql connection
"""
try:
state = self._connections[fd].poll()
except (OSError, socket.error) as error:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.OperationalError('Connection error (%s)' % error)
)
except (psycopg2.Error, psycopg2.Warning) as error:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(error)
else:
if state == extensions.POLL_OK:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_result(True)
elif state == extensions.POLL_WRITE:
self._ioloop.update_handler(fd, ioloop.IOLoop.WRITE)
elif state == extensions.POLL_READ:
self._ioloop.update_handler(fd, ioloop.IOLoop.READ)
elif state == extensions.POLL_ERROR:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.Error('Poll Error')) | python | def _poll_connection(self, fd):
"""Check with psycopg2 to see what action to take. If the state is
POLL_OK, we should have a pending callback for that fd.
:param int fd: The socket fd for the postgresql connection
"""
try:
state = self._connections[fd].poll()
except (OSError, socket.error) as error:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.OperationalError('Connection error (%s)' % error)
)
except (psycopg2.Error, psycopg2.Warning) as error:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(error)
else:
if state == extensions.POLL_OK:
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_result(True)
elif state == extensions.POLL_WRITE:
self._ioloop.update_handler(fd, ioloop.IOLoop.WRITE)
elif state == extensions.POLL_READ:
self._ioloop.update_handler(fd, ioloop.IOLoop.READ)
elif state == extensions.POLL_ERROR:
self._ioloop.remove_handler(fd)
if fd in self._futures and not self._futures[fd].done():
self._futures[fd].set_exception(
psycopg2.Error('Poll Error')) | Check with psycopg2 to see what action to take. If the state is
POLL_OK, we should have a pending callback for that fd.
:param int fd: The socket fd for the postgresql connection | https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/tornado_session.py#L480-L510 |
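For reference, the same psycopg2 polling contract reduced to a blocking `select()` loop, adapted from psycopg2's asynchronous-support documentation; `_poll_connection()` drives the identical state machine from the Tornado IOLoop instead of blocking:

```python
import select

import psycopg2
from psycopg2 import extensions

def wait_until_ready(connection):
    """Block until an asynchronous psycopg2 connection reports POLL_OK."""
    while True:
        state = connection.poll()
        if state == extensions.POLL_OK:
            return
        elif state == extensions.POLL_WRITE:
            select.select([], [connection.fileno()], [])
        elif state == extensions.POLL_READ:
            select.select([connection.fileno()], [], [])
        else:
            raise psycopg2.OperationalError('poll() returned %s' % state)
```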
jquast/wcwidth | setup.py | main | def main():
"""Setup.py entry point."""
import codecs
setuptools.setup(
name='wcwidth',
version='0.1.7',
description=("Measures number of Terminal column cells "
"of wide-character codes"),
long_description=codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read(),
author='Jeff Quast',
author_email='[email protected]',
license='MIT',
packages=['wcwidth', 'wcwidth.tests'],
url='https://github.com/jquast/wcwidth',
include_package_data=True,
test_suite='wcwidth.tests',
zip_safe=True,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Localization',
'Topic :: Software Development :: Internationalization',
'Topic :: Terminals'
],
keywords=['terminal', 'emulator', 'wcwidth', 'wcswidth', 'cjk',
'combining', 'xterm', 'console', ],
cmdclass={'update': SetupUpdate},
) | python | def main():
"""Setup.py entry point."""
import codecs
setuptools.setup(
name='wcwidth',
version='0.1.7',
description=("Measures number of Terminal column cells "
"of wide-character codes"),
long_description=codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read(),
author='Jeff Quast',
author_email='[email protected]',
license='MIT',
packages=['wcwidth', 'wcwidth.tests'],
url='https://github.com/jquast/wcwidth',
include_package_data=True,
test_suite='wcwidth.tests',
zip_safe=True,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Localization',
'Topic :: Software Development :: Internationalization',
'Topic :: Terminals'
],
keywords=['terminal', 'emulator', 'wcwidth', 'wcswidth', 'cjk',
'combining', 'xterm', 'console', ],
cmdclass={'update': SetupUpdate},
) | Setup.py entry point. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L271-L307 |
jquast/wcwidth | setup.py | SetupUpdate._do_readme_update | def _do_readme_update(self):
"""Patch README.rst to reflect the data files used in release."""
import codecs
import glob
# read in,
data_in = codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read()
# search for beginning and end positions,
pos_begin = data_in.find(self.README_PATCH_FROM)
assert pos_begin != -1, (pos_begin, self.README_PATCH_FROM)
pos_begin += len(self.README_PATCH_FROM)
pos_end = data_in.find(self.README_PATCH_TO)
assert pos_end != -1, (pos_end, self.README_PATCH_TO)
glob_pattern = os.path.join(HERE, 'data', '*.txt')
file_descriptions = [
self._describe_file_header(fpath)
for fpath in glob.glob(glob_pattern)]
# patch,
data_out = (
data_in[:pos_begin] +
'\n\n' +
'\n'.join(file_descriptions) +
'\n\n' +
data_in[pos_end:]
)
# write.
print("patching {} ..".format(self.README_RST))
codecs.open(
self.README_RST, 'w', 'utf8').write(data_out) | python | def _do_readme_update(self):
"""Patch README.rst to reflect the data files used in release."""
import codecs
import glob
# read in,
data_in = codecs.open(
os.path.join(HERE, 'README.rst'), 'r', 'utf8').read()
# search for beginning and end positions,
pos_begin = data_in.find(self.README_PATCH_FROM)
assert pos_begin != -1, (pos_begin, self.README_PATCH_FROM)
pos_begin += len(self.README_PATCH_FROM)
pos_end = data_in.find(self.README_PATCH_TO)
assert pos_end != -1, (pos_end, self.README_PATCH_TO)
glob_pattern = os.path.join(HERE, 'data', '*.txt')
file_descriptions = [
self._describe_file_header(fpath)
for fpath in glob.glob(glob_pattern)]
# patch,
data_out = (
data_in[:pos_begin] +
'\n\n' +
'\n'.join(file_descriptions) +
'\n\n' +
data_in[pos_end:]
)
# write.
print("patching {} ..".format(self.README_RST))
codecs.open(
self.README_RST, 'w', 'utf8').write(data_out) | Patch README.rst to reflect the data files used in release. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L78-L112 |
jquast/wcwidth | setup.py | SetupUpdate._do_east_asian | def _do_east_asian(self):
"""Fetch and update east-asian tables."""
self._do_retrieve(self.EAW_URL, self.EAW_IN)
(version, date, values) = self._parse_east_asian(
fname=self.EAW_IN,
properties=(u'W', u'F',)
)
table = self._make_table(values)
self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table) | python | def _do_east_asian(self):
"""Fetch and update east-asian tables."""
self._do_retrieve(self.EAW_URL, self.EAW_IN)
(version, date, values) = self._parse_east_asian(
fname=self.EAW_IN,
properties=(u'W', u'F',)
)
table = self._make_table(values)
self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table) | Fetch and update east-asian tables. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L114-L122 |
jquast/wcwidth | setup.py | SetupUpdate._do_zero_width | def _do_zero_width(self):
"""Fetch and update zero width tables."""
self._do_retrieve(self.UCD_URL, self.UCD_IN)
(version, date, values) = self._parse_category(
fname=self.UCD_IN,
categories=('Me', 'Mn',)
)
table = self._make_table(values)
self._do_write(self.ZERO_OUT, 'ZERO_WIDTH', version, date, table) | python | def _do_zero_width(self):
"""Fetch and update zero width tables."""
self._do_retrieve(self.UCD_URL, self.UCD_IN)
(version, date, values) = self._parse_category(
fname=self.UCD_IN,
categories=('Me', 'Mn',)
)
table = self._make_table(values)
self._do_write(self.ZERO_OUT, 'ZERO_WIDTH', version, date, table) | Fetch and update zero width tables. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L124-L132 |
jquast/wcwidth | setup.py | SetupUpdate._make_table | def _make_table(values):
"""Return a tuple of lookup tables for given values."""
import collections
table = collections.deque()
start, end = values[0], values[0]
for num, value in enumerate(values):
if num == 0:
table.append((value, value,))
continue
start, end = table.pop()
if end == value - 1:
table.append((start, value,))
else:
table.append((start, end,))
table.append((value, value,))
return tuple(table) | python | def _make_table(values):
"""Return a tuple of lookup tables for given values."""
import collections
table = collections.deque()
start, end = values[0], values[0]
for num, value in enumerate(values):
if num == 0:
table.append((value, value,))
continue
start, end = table.pop()
if end == value - 1:
table.append((start, value,))
else:
table.append((start, end,))
table.append((value, value,))
return tuple(table) | Return a tuple of lookup tables for given values. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L135-L150 |
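A standalone restatement of the same range-collapsing idea (not the project's code, just an equivalent sketch): consecutive code points are merged into `(start, end)` pairs, and isolated values become single-element ranges.

```python
def make_ranges(values):
    """Collapse a sorted list of code points into (start, end) pairs."""
    table = []
    for value in values:
        if table and table[-1][1] == value - 1:
            table[-1] = (table[-1][0], value)   # extend the current run
        else:
            table.append((value, value))        # start a new run
    return tuple(table)

print(make_ranges([0x300, 0x301, 0x302, 0x310, 0x311, 0x315]))
# ((768, 770), (784, 785), (789, 789))
```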
jquast/wcwidth | setup.py | SetupUpdate._do_retrieve | def _do_retrieve(url, fname):
"""Retrieve given url to target filepath fname."""
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
print("{}/ created.".format(folder))
if not os.path.exists(fname):
with open(fname, 'wb') as fout:
print("retrieving {}.".format(url))
resp = urlopen(url)
fout.write(resp.read())
print("{} saved.".format(fname))
else:
print("re-using artifact {}".format(fname))
return fname | python | def _do_retrieve(url, fname):
"""Retrieve given url to target filepath fname."""
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
print("{}/ created.".format(folder))
if not os.path.exists(fname):
with open(fname, 'wb') as fout:
print("retrieving {}.".format(url))
resp = urlopen(url)
fout.write(resp.read())
print("{} saved.".format(fname))
else:
print("re-using artifact {}".format(fname))
return fname | Retrieve given url to target filepath fname. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L153-L167 |
jquast/wcwidth | setup.py | SetupUpdate._parse_east_asian | def _parse_east_asian(fname, properties=(u'W', u'F',)):
"""Parse unicode east-asian width tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
if any(details.startswith(property)
for property in properties):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | python | def _parse_east_asian(fname, properties=(u'W', u'F',)):
"""Parse unicode east-asian width tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
if any(details.startswith(property)
for property in properties):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | Parse unicode east-asian width tables. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L180-L201 |
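The parser above expects `EastAsianWidth.txt` data lines of roughly the shape shown below: a code point or `..` range, a semicolon, then the width property letter, with comment and blank lines skipped (the sample lines are illustrative, not taken from a specific Unicode release):

```python
# Hypothetical sample lines in the EastAsianWidth.txt format consumed above.
samples = [
    u'1100..115F;W   # Lo    [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER',
    u'3000;F         # Zs         IDEOGRAPHIC SPACE',
]
for line in samples:
    addrs, details = line.split(';', 1)
    print(addrs, details[:1])   # -> '1100..115F W' then '3000 F'
```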
jquast/wcwidth | setup.py | SetupUpdate._parse_category | def _parse_category(fname, categories):
"""Parse unicode category tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
addrs, details = addrs.rstrip(), details.lstrip()
if any(details.startswith('{} #'.format(value))
for value in categories):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | python | def _parse_category(fname, categories):
"""Parse unicode category tables."""
version, date, values = None, None, []
print("parsing {} ..".format(fname))
for line in open(fname, 'rb'):
uline = line.decode('utf-8')
if version is None:
version = uline.split(None, 1)[1].rstrip()
continue
elif date is None:
date = uline.split(':', 1)[1].rstrip()
continue
if uline.startswith('#') or not uline.lstrip():
continue
addrs, details = uline.split(';', 1)
addrs, details = addrs.rstrip(), details.lstrip()
if any(details.startswith('{} #'.format(value))
for value in categories):
start, stop = addrs, addrs
if '..' in addrs:
start, stop = addrs.split('..')
values.extend(range(int(start, 16), int(stop, 16) + 1))
return version, date, sorted(values) | Parse unicode category tables. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L204-L226 |
jquast/wcwidth | setup.py | SetupUpdate._do_write | def _do_write(fname, variable, version, date, table):
"""Write combining tables to filesystem as python code."""
# pylint: disable=R0914
# Too many local variables (19/15) (col 4)
print("writing {} ..".format(fname))
import unicodedata
import datetime
import string
utc_now = datetime.datetime.utcnow()
indent = 4
with open(fname, 'w') as fout:
fout.write(
'"""{variable_proper} table. Created by setup.py."""\n'
"# Generated: {iso_utc}\n"
"# Source: {version}\n"
"# Date: {date}\n"
"{variable} = (".format(iso_utc=utc_now.isoformat(),
version=version,
date=date,
variable=variable,
variable_proper=variable.title()))
for start, end in table:
ucs_start, ucs_end = unichr(start), unichr(end)
hex_start, hex_end = ('0x{0:04x}'.format(start),
'0x{0:04x}'.format(end))
try:
name_start = string.capwords(unicodedata.name(ucs_start))
except ValueError:
name_start = u''
try:
name_end = string.capwords(unicodedata.name(ucs_end))
except ValueError:
name_end = u''
fout.write('\n' + (' ' * indent))
fout.write('({0}, {1},),'.format(hex_start, hex_end))
fout.write(' # {0:24s}..{1}'.format(
name_start[:24].rstrip() or '(nil)',
name_end[:24].rstrip()))
fout.write('\n)\n')
print("complete.") | python | def _do_write(fname, variable, version, date, table):
"""Write combining tables to filesystem as python code."""
# pylint: disable=R0914
# Too many local variables (19/15) (col 4)
print("writing {} ..".format(fname))
import unicodedata
import datetime
import string
utc_now = datetime.datetime.utcnow()
indent = 4
with open(fname, 'w') as fout:
fout.write(
'"""{variable_proper} table. Created by setup.py."""\n'
"# Generated: {iso_utc}\n"
"# Source: {version}\n"
"# Date: {date}\n"
"{variable} = (".format(iso_utc=utc_now.isoformat(),
version=version,
date=date,
variable=variable,
variable_proper=variable.title()))
for start, end in table:
ucs_start, ucs_end = unichr(start), unichr(end)
hex_start, hex_end = ('0x{0:04x}'.format(start),
'0x{0:04x}'.format(end))
try:
name_start = string.capwords(unicodedata.name(ucs_start))
except ValueError:
name_start = u''
try:
name_end = string.capwords(unicodedata.name(ucs_end))
except ValueError:
name_end = u''
fout.write('\n' + (' ' * indent))
fout.write('({0}, {1},),'.format(hex_start, hex_end))
fout.write(' # {0:24s}..{1}'.format(
name_start[:24].rstrip() or '(nil)',
name_end[:24].rstrip()))
fout.write('\n)\n')
print("complete.") | Write combining tables to filesystem as python code. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L229-L268 |
jquast/wcwidth | bin/wcwidth-libc-comparator.py | report_ucs_msg | def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
"""
Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode
"""
ucp = (ucs.encode('unicode_escape')[2:]
.decode('ascii')
.upper()
.lstrip('0'))
url = "http://codepoints.net/U+{}".format(ucp)
name = unicodedata.name(ucs)
return (u"libc,ours={},{} [--o{}o--] name={} val={} {}"
" ".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)) | python | def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
"""
Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode
"""
ucp = (ucs.encode('unicode_escape')[2:]
.decode('ascii')
.upper()
.lstrip('0'))
url = "http://codepoints.net/U+{}".format(ucp)
name = unicodedata.name(ucs)
return (u"libc,ours={},{} [--o{}o--] name={} val={} {}"
" ".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)) | Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-libc-comparator.py#L44-L63 |
jquast/wcwidth | bin/wcwidth-libc-comparator.py | main | def main(using_locale=('en_US', 'UTF-8',)):
"""
Program entry point.
Load the entire Unicode table into memory, excluding those that:
- are not named (func unicodedata.name returns empty string),
- are combining characters.
Using ``locale``, for each unicode character string compare libc's
wcwidth with local wcwidth.wcwidth() function; when they differ,
report a detailed AssertionError to stdout.
"""
all_ucs = (ucs for ucs in
[unichr(val) for val in range(sys.maxunicode)]
if is_named(ucs) and isnt_combining(ucs))
libc_name = ctypes.util.find_library('c')
if not libc_name:
raise ImportError("Can't find C library.")
libc = ctypes.cdll.LoadLibrary(libc_name)
libc.wcwidth.argtypes = [ctypes.c_wchar, ]
libc.wcwidth.restype = ctypes.c_int
assert getattr(libc, 'wcwidth', None) is not None
assert getattr(libc, 'wcswidth', None) is not None
locale.setlocale(locale.LC_ALL, using_locale)
for ucs in all_ucs:
try:
_is_equal_wcwidth(libc, ucs)
except AssertionError as err:
print(err) | python | def main(using_locale=('en_US', 'UTF-8',)):
"""
Program entry point.
Load the entire Unicode table into memory, excluding those that:
- are not named (func unicodedata.name returns empty string),
- are combining characters.
Using ``locale``, for each unicode character string compare libc's
wcwidth with local wcwidth.wcwidth() function; when they differ,
report a detailed AssertionError to stdout.
"""
all_ucs = (ucs for ucs in
[unichr(val) for val in range(sys.maxunicode)]
if is_named(ucs) and isnt_combining(ucs))
libc_name = ctypes.util.find_library('c')
if not libc_name:
raise ImportError("Can't find C library.")
libc = ctypes.cdll.LoadLibrary(libc_name)
libc.wcwidth.argtypes = [ctypes.c_wchar, ]
libc.wcwidth.restype = ctypes.c_int
assert getattr(libc, 'wcwidth', None) is not None
assert getattr(libc, 'wcswidth', None) is not None
locale.setlocale(locale.LC_ALL, using_locale)
for ucs in all_ucs:
try:
_is_equal_wcwidth(libc, ucs)
except AssertionError as err:
print(err) | Program entry point.
Load the entire Unicode table into memory, excluding those that:
- are not named (func unicodedata.name returns empty string),
- are combining characters.
Using ``locale``, for each unicode character string compare libc's
wcwidth with local wcwidth.wcwidth() function; when they differ,
report a detailed AssertionError to stdout. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-libc-comparator.py#L89-L123 |
jquast/wcwidth | bin/wcwidth-browser.py | validate_args | def validate_args(opts):
"""Validate and return options provided by docopt parsing."""
if opts['--wide'] is None:
opts['--wide'] = 2
else:
assert opts['--wide'] in ("1", "2"), opts['--wide']
if opts['--alignment'] is None:
opts['--alignment'] = 'left'
else:
assert opts['--alignment'] in ('left', 'right'), opts['--alignment']
opts['--wide'] = int(opts['--wide'])
opts['character_factory'] = WcWideCharacterGenerator
if opts['--combining']:
opts['character_factory'] = WcCombinedCharacterGenerator
return opts | python | def validate_args(opts):
"""Validate and return options provided by docopt parsing."""
if opts['--wide'] is None:
opts['--wide'] = 2
else:
assert opts['--wide'] in ("1", "2"), opts['--wide']
if opts['--alignment'] is None:
opts['--alignment'] = 'left'
else:
assert opts['--alignment'] in ('left', 'right'), opts['--alignment']
opts['--wide'] = int(opts['--wide'])
opts['character_factory'] = WcWideCharacterGenerator
if opts['--combining']:
opts['character_factory'] = WcCombinedCharacterGenerator
return opts | Validate and return options provided by docopt parsing. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L661-L675 |
jquast/wcwidth | bin/wcwidth-browser.py | main | def main(opts):
"""Program entry point."""
term = Terminal()
style = Style()
# if the terminal supports colors, use a Style instance with some
# standout colors (magenta, cyan).
if term.number_of_colors:
style = Style(attr_major=term.magenta,
attr_minor=term.bright_cyan,
alignment=opts['--alignment'])
style.name_len = term.width - 15
screen = Screen(term, style, wide=opts['--wide'])
pager = Pager(term, screen, opts['character_factory'])
with term.location(), term.cbreak(), \
term.fullscreen(), term.hidden_cursor():
pager.run(writer=echo, reader=term.inkey)
return 0 | python | def main(opts):
"""Program entry point."""
term = Terminal()
style = Style()
# if the terminal supports colors, use a Style instance with some
# standout colors (magenta, cyan).
if term.number_of_colors:
style = Style(attr_major=term.magenta,
attr_minor=term.bright_cyan,
alignment=opts['--alignment'])
style.name_len = term.width - 15
screen = Screen(term, style, wide=opts['--wide'])
pager = Pager(term, screen, opts['character_factory'])
with term.location(), term.cbreak(), \
term.fullscreen(), term.hidden_cursor():
pager.run(writer=echo, reader=term.inkey)
return 0 | Program entry point. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L678-L697 |
jquast/wcwidth | bin/wcwidth-browser.py | Screen.hint_width | def hint_width(self):
"""Width of a column segment."""
return sum((len(self.style.delimiter),
self.wide,
len(self.style.delimiter),
len(u' '),
UCS_PRINTLEN + 2,
len(u' '),
self.style.name_len,)) | python | def hint_width(self):
"""Width of a column segment."""
return sum((len(self.style.delimiter),
self.wide,
len(self.style.delimiter),
len(u' '),
UCS_PRINTLEN + 2,
len(u' '),
self.style.name_len,)) | Width of a column segment. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L220-L228 |
jquast/wcwidth | bin/wcwidth-browser.py | Screen.head_item | def head_item(self):
"""Text of a single column heading."""
delimiter = self.style.attr_minor(self.style.delimiter)
hint = self.style.header_hint * self.wide
heading = (u'{delimiter}{hint}{delimiter}'
.format(delimiter=delimiter, hint=hint))
alignment = lambda *args: (
self.term.rjust(*args) if self.style.alignment == 'right' else
self.term.ljust(*args))
txt = alignment(heading, self.hint_width, self.style.header_fill)
return self.style.attr_major(txt) | python | def head_item(self):
"""Text of a single column heading."""
delimiter = self.style.attr_minor(self.style.delimiter)
hint = self.style.header_hint * self.wide
heading = (u'{delimiter}{hint}{delimiter}'
.format(delimiter=delimiter, hint=hint))
alignment = lambda *args: (
self.term.rjust(*args) if self.style.alignment == 'right' else
self.term.ljust(*args))
txt = alignment(heading, self.hint_width, self.style.header_fill)
return self.style.attr_major(txt) | Text of a single column heading. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L231-L241 |
jquast/wcwidth | bin/wcwidth-browser.py | Screen.msg_intro | def msg_intro(self):
"""Introductory message disabled above heading."""
delim = self.style.attr_minor(self.style.delimiter)
txt = self.intro_msg_fmt.format(delim=delim).rstrip()
return self.term.center(txt) | python | def msg_intro(self):
"""Introductory message disabled above heading."""
delim = self.style.attr_minor(self.style.delimiter)
txt = self.intro_msg_fmt.format(delim=delim).rstrip()
return self.term.center(txt) | Introductory message displayed above heading. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L244-L248
jquast/wcwidth | bin/wcwidth-browser.py | Screen.num_columns | def num_columns(self):
"""Number of columns displayed."""
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | python | def num_columns(self):
"""Number of columns displayed."""
if self.term.is_a_tty:
return self.term.width // self.hint_width
return 1 | Number of columns displayed. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L256-L260 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.on_resize | def on_resize(self, *args):
"""Signal handler callback for SIGWINCH."""
# pylint: disable=W0613
# Unused argument 'args'
self.screen.style.name_len = min(self.screen.style.name_len,
self.term.width - 15)
assert self.term.width >= self.screen.hint_width, (
'Screen too small {}, must be at least {}'.format(
self.term.width, self.screen.hint_width))
self._set_lastpage()
self.dirty = self.STATE_REFRESH | python | def on_resize(self, *args):
"""Signal handler callback for SIGWINCH."""
# pylint: disable=W0613
# Unused argument 'args'
self.screen.style.name_len = min(self.screen.style.name_len,
self.term.width - 15)
assert self.term.width >= self.screen.hint_width, (
'Screen too small {}, must be at least {}'.format(
self.term.width, self.screen.hint_width))
self._set_lastpage()
self.dirty = self.STATE_REFRESH | Signal handler callback for SIGWINCH. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L305-L315 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager._set_lastpage | def _set_lastpage(self):
"""Calculate value of class attribute ``last_page``."""
self.last_page = (len(self._page_data) - 1) // self.screen.page_size | python | def _set_lastpage(self):
"""Calculate value of class attribute ``last_page``."""
self.last_page = (len(self._page_data) - 1) // self.screen.page_size | Calculate value of class attribute ``last_page``. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L317-L319 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.display_initialize | def display_initialize(self):
"""Display 'please wait' message, and narrow build warning."""
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
echo('\n\n')
echo(self.term.blink_red(self.term.center(
'narrow Python build: upperbound value is {n}.'
.format(n=LIMIT_UCS)).rstrip()))
echo('\n\n')
flushout() | python | def display_initialize(self):
"""Display 'please wait' message, and narrow build warning."""
echo(self.term.home + self.term.clear)
echo(self.term.move_y(self.term.height // 2))
echo(self.term.center('Initializing page data ...').rstrip())
flushout()
if LIMIT_UCS == 0x10000:
echo('\n\n')
echo(self.term.blink_red(self.term.center(
'narrow Python build: upperbound value is {n}.'
.format(n=LIMIT_UCS)).rstrip()))
echo('\n\n')
flushout() | Display 'please wait' message, and narrow build warning. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L321-L334 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.initialize_page_data | def initialize_page_data(self):
"""Initialize the page data for the given screen."""
if self.term.is_a_tty:
self.display_initialize()
self.character_generator = self.character_factory(self.screen.wide)
page_data = list()
while True:
try:
page_data.append(next(self.character_generator))
except StopIteration:
break
if LIMIT_UCS == 0x10000:
echo(self.term.center('press any key.').rstrip())
flushout()
self.term.inkey(timeout=None)
return page_data | python | def initialize_page_data(self):
"""Initialize the page data for the given screen."""
if self.term.is_a_tty:
self.display_initialize()
self.character_generator = self.character_factory(self.screen.wide)
page_data = list()
while True:
try:
page_data.append(next(self.character_generator))
except StopIteration:
break
if LIMIT_UCS == 0x10000:
echo(self.term.center('press any key.').rstrip())
flushout()
self.term.inkey(timeout=None)
return page_data | Initialize the page data for the given screen. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L336-L351 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.page_data | def page_data(self, idx, offset):
"""
Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)]
"""
size = self.screen.page_size
while offset < 0 and idx:
offset += size
idx -= 1
offset = max(0, offset)
while offset >= size:
offset -= size
idx += 1
if idx == self.last_page:
offset = 0
idx = min(max(0, idx), self.last_page)
start = (idx * self.screen.page_size) + offset
end = start + self.screen.page_size
return (idx, offset), self._page_data[start:end] | python | def page_data(self, idx, offset):
"""
Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)]
"""
size = self.screen.page_size
while offset < 0 and idx:
offset += size
idx -= 1
offset = max(0, offset)
while offset >= size:
offset -= size
idx += 1
if idx == self.last_page:
offset = 0
idx = min(max(0, idx), self.last_page)
start = (idx * self.screen.page_size) + offset
end = start + self.screen.page_size
return (idx, offset), self._page_data[start:end] | Return character data for page of given index and offset.
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: list of tuples in form of ``(ucs, name)``
:rtype: list[(unicode, unicode)] | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L353-L381 |
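A standalone sketch of the index/offset clamping arithmetic used in Pager.page_data above, with hypothetical values size=10 and last_page=4 (neither value comes from the source):

    def normalize(idx, offset, size=10, last_page=4):
        # mirrors the wrap/clamp logic of Pager.page_data
        while offset < 0 and idx:
            offset += size
            idx -= 1
        offset = max(0, offset)
        while offset >= size:
            offset -= size
            idx += 1
            if idx == last_page:
                offset = 0
        idx = min(max(0, idx), last_page)
        return idx, offset

    print(normalize(2, -3))   # borrowing from the previous page -> (1, 7)
    print(normalize(0, 25))   # scrolling 25 cells forward       -> (2, 5)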
jquast/wcwidth | bin/wcwidth-browser.py | Pager._run_notty | def _run_notty(self, writer):
"""Pager run method for terminals that are not a tty."""
page_idx = page_offset = 0
while True:
npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
if npage_idx == self.last_page:
# page displayed was last page, quit.
break
page_idx = npage_idx
self.dirty = self.STATE_DIRTY
return | python | def _run_notty(self, writer):
"""Pager run method for terminals that are not a tty."""
page_idx = page_offset = 0
while True:
npage_idx, _ = self.draw(writer, page_idx + 1, page_offset)
if npage_idx == self.last_page:
# page displayed was last page, quit.
break
page_idx = npage_idx
self.dirty = self.STATE_DIRTY
return | Pager run method for terminals that are not a tty. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L383-L393 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager._run_tty | def _run_tty(self, writer, reader):
"""Pager run method for terminals that are a tty."""
# allow window-change signal to reflow screen
signal.signal(signal.SIGWINCH, self.on_resize)
page_idx = page_offset = 0
while True:
if self.dirty:
page_idx, page_offset = self.draw(writer,
page_idx,
page_offset)
self.dirty = self.STATE_CLEAN
inp = reader(timeout=0.25)
if inp is not None:
nxt, noff = self.process_keystroke(inp,
page_idx,
page_offset)
if not self.dirty:
self.dirty = nxt != page_idx or noff != page_offset
page_idx, page_offset = nxt, noff
if page_idx == -1:
return | python | def _run_tty(self, writer, reader):
"""Pager run method for terminals that are a tty."""
# allow window-change signal to reflow screen
signal.signal(signal.SIGWINCH, self.on_resize)
page_idx = page_offset = 0
while True:
if self.dirty:
page_idx, page_offset = self.draw(writer,
page_idx,
page_offset)
self.dirty = self.STATE_CLEAN
inp = reader(timeout=0.25)
if inp is not None:
nxt, noff = self.process_keystroke(inp,
page_idx,
page_offset)
if not self.dirty:
self.dirty = nxt != page_idx or noff != page_offset
page_idx, page_offset = nxt, noff
if page_idx == -1:
return | Pager run method for terminals that are a tty. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L395-L416 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.run | def run(self, writer, reader):
"""
Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable
"""
self._page_data = self.initialize_page_data()
self._set_lastpage()
if not self.term.is_a_tty:
self._run_notty(writer)
else:
self._run_tty(writer, reader) | python | def run(self, writer, reader):
"""
Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable
"""
self._page_data = self.initialize_page_data()
self._set_lastpage()
if not self.term.is_a_tty:
self._run_notty(writer)
else:
self._run_tty(writer, reader) | Pager entry point.
In interactive mode (terminal is a tty), run until
``process_keystroke()`` detects quit keystroke ('q'). In
non-interactive mode, exit after displaying all unicode points.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param reader: callable reads keystrokes from input stream, sending
instance of blessed.keyboard.Keystroke.
:type reader: callable | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L418-L437 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.process_keystroke | def process_keystroke(self, inp, idx, offset):
"""
Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
if inp.lower() in (u'q', u'Q'):
# exit
return (-1, -1)
self._process_keystroke_commands(inp)
idx, offset = self._process_keystroke_movement(inp, idx, offset)
return idx, offset | python | def process_keystroke(self, inp, idx, offset):
"""
Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
if inp.lower() in (u'q', u'Q'):
# exit
return (-1, -1)
self._process_keystroke_commands(inp)
idx, offset = self._process_keystroke_movement(inp, idx, offset)
return idx, offset | Process keystroke ``inp``, adjusting screen parameters.
:param inp: return value of Terminal.inkey().
:type inp: blessed.keyboard.Keystroke
:param idx: page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int) | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L439-L457 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager._process_keystroke_commands | def _process_keystroke_commands(self, inp):
"""Process keystrokes that issue commands (side effects)."""
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None)
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
self.on_resize(None, None) | python | def _process_keystroke_commands(self, inp):
"""Process keystrokes that issue commands (side effects)."""
if inp in (u'1', u'2'):
# chose 1 or 2-character wide
if int(inp) != self.screen.wide:
self.screen.wide = int(inp)
self.on_resize(None, None)
elif inp in (u'_', u'-'):
# adjust name length -2
nlen = max(1, self.screen.style.name_len - 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp in (u'+', u'='):
# adjust name length +2
nlen = min(self.term.width - 8, self.screen.style.name_len + 2)
if nlen != self.screen.style.name_len:
self.screen.style.name_len = nlen
self.on_resize(None, None)
elif inp == u'2' and self.screen.wide != 2:
# change 2 or 1-cell wide view
self.screen.wide = 2
self.on_resize(None, None) | Process keystrokes that issue commands (side effects). | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L459-L481 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager._process_keystroke_movement | def _process_keystroke_movement(self, inp, idx, offset):
"""Process keystrokes that adjust index and offset."""
term = self.term
if inp in (u'y', u'k') or inp.code in (term.KEY_UP,):
# scroll backward 1 line
idx, offset = (idx, offset - self.screen.num_columns)
elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,
term.KEY_DOWN,):
# scroll forward 1 line
idx, offset = (idx, offset + self.screen.num_columns)
elif inp in (u'f', u' ') or inp.code in (term.KEY_PGDOWN,):
# scroll forward 1 page
idx, offset = (idx + 1, offset)
elif inp == u'b' or inp.code in (term.KEY_PGUP,):
# scroll backward 1 page
idx, offset = (max(0, idx - 1), offset)
elif inp.code in (term.KEY_SDOWN,):
# scroll forward 10 pages
idx, offset = (max(0, idx + 10), offset)
elif inp.code in (term.KEY_SUP,):
# scroll backward 10 pages
idx, offset = (max(0, idx - 10), offset)
elif inp.code == term.KEY_HOME:
# top
idx, offset = (0, 0)
elif inp.code == term.KEY_END:
# bottom
idx, offset = (self.last_page, 0)
return idx, offset | python | def _process_keystroke_movement(self, inp, idx, offset):
"""Process keystrokes that adjust index and offset."""
term = self.term
if inp in (u'y', u'k') or inp.code in (term.KEY_UP,):
# scroll backward 1 line
idx, offset = (idx, offset - self.screen.num_columns)
elif inp in (u'e', u'j') or inp.code in (term.KEY_ENTER,
term.KEY_DOWN,):
# scroll forward 1 line
idx, offset = (idx, offset + self.screen.num_columns)
elif inp in (u'f', u' ') or inp.code in (term.KEY_PGDOWN,):
# scroll forward 1 page
idx, offset = (idx + 1, offset)
elif inp == u'b' or inp.code in (term.KEY_PGUP,):
# scroll backward 1 page
idx, offset = (max(0, idx - 1), offset)
elif inp.code in (term.KEY_SDOWN,):
# scroll forward 10 pages
idx, offset = (max(0, idx + 10), offset)
elif inp.code in (term.KEY_SUP,):
# scroll backward 10 pages
idx, offset = (max(0, idx - 10), offset)
elif inp.code == term.KEY_HOME:
# top
idx, offset = (0, 0)
elif inp.code == term.KEY_END:
# bottom
idx, offset = (self.last_page, 0)
return idx, offset | Process keystrokes that adjust index and offset. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L483-L511 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.draw | def draw(self, writer, idx, offset):
"""
Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
# as our screen can be resized while we're mid-calculation,
# our self.dirty flag can become re-toggled; because we are
# not re-flowing our pagination, we must begin over again.
while self.dirty:
self.draw_heading(writer)
self.dirty = self.STATE_CLEAN
(idx, offset), data = self.page_data(idx, offset)
for txt in self.page_view(data):
writer(txt)
self.draw_status(writer, idx)
flushout()
return idx, offset | python | def draw(self, writer, idx, offset):
"""
Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int)
"""
# as our screen can be resized while we're mid-calculation,
# our self.dirty flag can become re-toggled; because we are
# not re-flowing our pagination, we must begin over again.
while self.dirty:
self.draw_heading(writer)
self.dirty = self.STATE_CLEAN
(idx, offset), data = self.page_data(idx, offset)
for txt in self.page_view(data):
writer(txt)
self.draw_status(writer, idx)
flushout()
return idx, offset | Draw the current page view to ``writer``.
:param writer: callable writes to output stream, receiving unicode.
:type writer: callable
:param idx: current page index.
:type idx: int
:param offset: scrolling region offset of current page.
:type offset: int
:returns: tuple of next (idx, offset).
:rtype: (int, int) | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L513-L537 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.draw_heading | def draw_heading(self, writer):
"""
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
"""
if self.dirty == self.STATE_REFRESH:
writer(u''.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, '\n',
self.screen.header, '\n',)))
return True | python | def draw_heading(self, writer):
"""
Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``.
"""
if self.dirty == self.STATE_REFRESH:
writer(u''.join(
(self.term.home, self.term.clear,
self.screen.msg_intro, '\n',
self.screen.header, '\n',)))
return True | Conditionally redraw screen when ``dirty`` attribute is valued REFRESH.
When Pager attribute ``dirty`` is ``STATE_REFRESH``, cursor is moved
to (0,0), screen is cleared, and heading is displayed.
:param writer: callable writes to output stream, receiving unicode.
:returns: True if class attribute ``dirty`` is ``STATE_REFRESH``. | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L539-L554 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.draw_status | def draw_status(self, writer, idx):
"""
Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int
"""
if self.term.is_a_tty:
writer(self.term.hide_cursor())
style = self.screen.style
writer(self.term.move(self.term.height - 1))
if idx == self.last_page:
last_end = u'(END)'
else:
last_end = u'/{0}'.format(self.last_page)
txt = (u'Page {idx}{last_end} - '
u'{q} to quit, [keys: {keyset}]'
.format(idx=style.attr_minor(u'{0}'.format(idx)),
last_end=style.attr_major(last_end),
keyset=style.attr_major('kjfb12-='),
q=style.attr_minor(u'q')))
writer(self.term.center(txt).rstrip()) | python | def draw_status(self, writer, idx):
"""
Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int
"""
if self.term.is_a_tty:
writer(self.term.hide_cursor())
style = self.screen.style
writer(self.term.move(self.term.height - 1))
if idx == self.last_page:
last_end = u'(END)'
else:
last_end = u'/{0}'.format(self.last_page)
txt = (u'Page {idx}{last_end} - '
u'{q} to quit, [keys: {keyset}]'
.format(idx=style.attr_minor(u'{0}'.format(idx)),
last_end=style.attr_major(last_end),
keyset=style.attr_major('kjfb12-='),
q=style.attr_minor(u'q')))
writer(self.term.center(txt).rstrip()) | Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L556-L578 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.page_view | def page_view(self, data):
"""
Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator
"""
if self.term.is_a_tty:
yield self.term.move(self.screen.row_begins, 0)
# sequence clears to end-of-line
clear_eol = self.term.clear_eol
# sequence clears to end-of-screen
clear_eos = self.term.clear_eos
# track our current column and row, where column is
# the whole segment of unicode value text, and draw
# only self.screen.num_columns before end-of-line.
#
# use clear_eol at end of each row to erase over any
# "ghosted" text, and clear_eos at end of screen to
# clear the same, especially for the final page which
# is often short.
col = 0
for ucs, name in data:
val = self.text_entry(ucs, name)
col += 1
if col == self.screen.num_columns:
col = 0
if self.term.is_a_tty:
val = u''.join((val, clear_eol, u'\n'))
else:
val = u''.join((val.rstrip(), u'\n'))
yield val
if self.term.is_a_tty:
yield u''.join((clear_eol, u'\n', clear_eos)) | python | def page_view(self, data):
"""
Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator
"""
if self.term.is_a_tty:
yield self.term.move(self.screen.row_begins, 0)
# sequence clears to end-of-line
clear_eol = self.term.clear_eol
# sequence clears to end-of-screen
clear_eos = self.term.clear_eos
# track our current column and row, where column is
# the whole segment of unicode value text, and draw
# only self.screen.num_columns before end-of-line.
#
# use clear_eol at end of each row to erase over any
# "ghosted" text, and clear_eos at end of screen to
# clear the same, especially for the final page which
# is often short.
col = 0
for ucs, name in data:
val = self.text_entry(ucs, name)
col += 1
if col == self.screen.num_columns:
col = 0
if self.term.is_a_tty:
val = u''.join((val, clear_eol, u'\n'))
else:
val = u''.join((val.rstrip(), u'\n'))
yield val
if self.term.is_a_tty:
yield u''.join((clear_eol, u'\n', clear_eos)) | Generator yields text to be displayed for the current unicode pageview.
:param data: The current page's data as tuple of ``(ucs, name)``.
:rtype: generator | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L580-L615 |
jquast/wcwidth | bin/wcwidth-browser.py | Pager.text_entry | def text_entry(self, ucs, name):
"""
Display a single column segment row describing ``(ucs, name)``.
:param ucs: target unicode point character string.
:param name: name of unicode point.
:rtype: unicode
"""
style = self.screen.style
if len(name) > style.name_len:
idx = max(0, style.name_len - len(style.continuation))
name = u''.join((name[:idx], style.continuation if idx else u''))
if style.alignment == 'right':
fmt = u' '.join(('0x{val:0>{ucs_printlen}x}',
'{name:<{name_len}s}',
'{delimiter}{ucs}{delimiter}'
))
else:
fmt = u' '.join(('{delimiter}{ucs}{delimiter}',
'0x{val:0>{ucs_printlen}x}',
'{name:<{name_len}s}'))
delimiter = style.attr_minor(style.delimiter)
if len(ucs) != 1:
# determine display of combining characters
val = ord(ucs[1])
# a combining character displayed of any fg color
# will reset the foreground character of the cell
# combined with (iTerm2, OSX).
disp_ucs = style.attr_major(ucs[0:2])
if len(ucs) > 2:
disp_ucs += ucs[2]
else:
# non-combining
val = ord(ucs)
disp_ucs = style.attr_major(ucs)
return fmt.format(name_len=style.name_len,
ucs_printlen=UCS_PRINTLEN,
delimiter=delimiter,
name=name,
ucs=disp_ucs,
val=val) | python | def text_entry(self, ucs, name):
"""
Display a single column segment row describing ``(ucs, name)``.
:param ucs: target unicode point character string.
:param name: name of unicode point.
:rtype: unicode
"""
style = self.screen.style
if len(name) > style.name_len:
idx = max(0, style.name_len - len(style.continuation))
name = u''.join((name[:idx], style.continuation if idx else u''))
if style.alignment == 'right':
fmt = u' '.join(('0x{val:0>{ucs_printlen}x}',
'{name:<{name_len}s}',
'{delimiter}{ucs}{delimiter}'
))
else:
fmt = u' '.join(('{delimiter}{ucs}{delimiter}',
'0x{val:0>{ucs_printlen}x}',
'{name:<{name_len}s}'))
delimiter = style.attr_minor(style.delimiter)
if len(ucs) != 1:
# determine display of combining characters
val = ord(ucs[1])
# a combining character displayed of any fg color
# will reset the foreground character of the cell
# combined with (iTerm2, OSX).
disp_ucs = style.attr_major(ucs[0:2])
if len(ucs) > 2:
disp_ucs += ucs[2]
else:
# non-combining
val = ord(ucs)
disp_ucs = style.attr_major(ucs)
return fmt.format(name_len=style.name_len,
ucs_printlen=UCS_PRINTLEN,
delimiter=delimiter,
name=name,
ucs=disp_ucs,
val=val) | Display a single column segment row describing ``(ucs, name)``.
:param ucs: target unicode point character string.
:param name: name of unicode point.
:rtype: unicode | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-browser.py#L617-L658 |
matthew-brett/delocate | delocate/tools.py | back_tick | def back_tick(cmd, ret_err=False, as_str=True, raise_err=None):
""" Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
`cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where
``stdout`` is the stripped stdout, and ``stderr`` is the stripped
stderr.
Raises
------
Raises RuntimeError if command returns non-zero exit code and `raise_err`
is True
"""
if raise_err is None:
raise_err = False if ret_err else True
cmd_is_seq = isinstance(cmd, (list, tuple))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq)
out, err = proc.communicate()
retcode = proc.returncode
cmd_str = ' '.join(cmd) if cmd_is_seq else cmd
if retcode is None:
proc.terminate()
raise RuntimeError(cmd_str + ' process did not terminate')
if raise_err and retcode != 0:
raise RuntimeError('{0} returned code {1} with error {2}'.format(
cmd_str, retcode, err.decode('latin-1')))
out = out.strip()
if as_str:
out = out.decode('latin-1')
if not ret_err:
return out
err = err.strip()
if as_str:
err = err.decode('latin-1')
return out, err | python | def back_tick(cmd, ret_err=False, as_str=True, raise_err=None):
""" Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
`cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where
``stdout`` is the stripped stdout, and ``stderr`` is the stripped
stderr.
Raises
------
Raises RuntimeError if command returns non-zero exit code and `raise_err`
is True
"""
if raise_err is None:
raise_err = False if ret_err else True
cmd_is_seq = isinstance(cmd, (list, tuple))
proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=not cmd_is_seq)
out, err = proc.communicate()
retcode = proc.returncode
cmd_str = ' '.join(cmd) if cmd_is_seq else cmd
if retcode is None:
proc.terminate()
raise RuntimeError(cmd_str + ' process did not terminate')
if raise_err and retcode != 0:
raise RuntimeError('{0} returned code {1} with error {2}'.format(
cmd_str, retcode, err.decode('latin-1')))
out = out.strip()
if as_str:
out = out.decode('latin-1')
if not ret_err:
return out
err = err.strip()
if as_str:
err = err.decode('latin-1')
return out, err | Run command `cmd`, return stdout, or stdout, stderr if `ret_err`
Roughly equivalent to ``check_output`` in Python 2.7
Parameters
----------
cmd : sequence
command to execute
ret_err : bool, optional
If True, return stderr in addition to stdout. If False, just return
stdout
as_str : bool, optional
Whether to decode outputs to unicode string on exit.
raise_err : None or bool, optional
If True, raise RuntimeError for non-zero return code. If None, set to
True when `ret_err` is False, False if `ret_err` is True
Returns
-------
out : str or tuple
If `ret_err` is False, return stripped string containing stdout from
`cmd`. If `ret_err` is True, return tuple of (stdout, stderr) where
``stdout`` is the stripped stdout, and ``stderr`` is the stripped
stderr.
Raises
------
Raises RuntimeError if command returns non-zero exit code and `raise_err`
is True | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L17-L69 |
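A minimal usage sketch for back_tick, assuming delocate is importable and a POSIX echo is on the PATH:

    from delocate.tools import back_tick

    out = back_tick(['echo', 'hello'])                # -> 'hello' (stripped, decoded)
    out, err = back_tick('echo hello', ret_err=True)  # a string command runs via the shell
    assert out == 'hello' and err == ''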
matthew-brett/delocate | delocate/tools.py | unique_by_index | def unique_by_index(sequence):
""" unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs in `sequence`
"""
uniques = []
for element in sequence:
if element not in uniques:
uniques.append(element)
return uniques | python | def unique_by_index(sequence):
""" unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs in `sequence`
"""
uniques = []
for element in sequence:
if element not in uniques:
uniques.append(element)
return uniques | unique elements in `sequence` in the order in which they occur
Parameters
----------
sequence : iterable
Returns
-------
uniques : list
unique elements of sequence, ordered by the order in which the element
occurs in `sequence` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L72-L89 |
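A short illustration of unique_by_index preserving first-occurrence order:

    from delocate.tools import unique_by_index

    assert unique_by_index([3, 1, 3, 2, 1]) == [3, 1, 2]
    assert unique_by_index('abracadabra') == ['a', 'b', 'r', 'c', 'd']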
matthew-brett/delocate | delocate/tools.py | ensure_permissions | def ensure_permissions(mode_flags=stat.S_IWUSR):
"""decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification.
"""
def decorator(f):
def modify(filename, *args, **kwargs):
m = chmod_perms(filename) if exists(filename) else mode_flags
if not m & mode_flags:
os.chmod(filename, m | mode_flags)
try:
return f(filename, *args, **kwargs)
finally:
# restore original permissions
if not m & mode_flags:
os.chmod(filename, m)
return modify
return decorator | python | def ensure_permissions(mode_flags=stat.S_IWUSR):
"""decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification.
"""
def decorator(f):
def modify(filename, *args, **kwargs):
m = chmod_perms(filename) if exists(filename) else mode_flags
if not m & mode_flags:
os.chmod(filename, m | mode_flags)
try:
return f(filename, *args, **kwargs)
finally:
# restore original permissions
if not m & mode_flags:
os.chmod(filename, m)
return modify
return decorator | decorator to ensure a filename has given permissions.
If changed, original permissions are restored after the decorated
modification. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L97-L117 |
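A sketch of the decorator in use; append_line is a hypothetical helper, not part of delocate:

    import stat
    from delocate.tools import ensure_permissions

    @ensure_permissions(stat.S_IWUSR)      # grant user-write for the duration of the call
    def append_line(filename, text):
        # hypothetical helper: original permissions are restored afterwards if changed
        with open(filename, 'a') as fobj:
            fobj.write(text + '\n')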
matthew-brett/delocate | delocate/tools.py | get_install_names | def get_install_names(filename):
""" Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename`
"""
lines = _cmd_out_err(['otool', '-L', filename])
if not _line0_says_object(lines[0], filename):
return ()
names = tuple(parse_install_name(line)[0] for line in lines[1:])
install_id = get_install_id(filename)
if not install_id is None:
assert names[0] == install_id
return names[1:]
return names | python | def get_install_names(filename):
""" Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename`
"""
lines = _cmd_out_err(['otool', '-L', filename])
if not _line0_says_object(lines[0], filename):
return ()
names = tuple(parse_install_name(line)[0] for line in lines[1:])
install_id = get_install_id(filename)
if not install_id is None:
assert names[0] == install_id
return names[1:]
return names | Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L197-L222 |
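A usage sketch, assuming macOS with otool on the PATH; the library path is hypothetical:

    from delocate.tools import get_install_names

    for name in get_install_names('build/lib/example.dylib'):   # hypothetical path
        print(name)   # e.g. '/usr/lib/libSystem.B.dylib' or '@rpath/libdep.dylib'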
matthew-brett/delocate | delocate/tools.py | get_install_id | def get_install_id(filename):
""" Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id
"""
lines = _cmd_out_err(['otool', '-D', filename])
if not _line0_says_object(lines[0], filename):
return None
if len(lines) == 1:
return None
if len(lines) != 2:
raise InstallNameError('Unexpected otool output ' + '\n'.join(lines))
return lines[1].strip() | python | def get_install_id(filename):
""" Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id
"""
lines = _cmd_out_err(['otool', '-D', filename])
if not _line0_says_object(lines[0], filename):
return None
if len(lines) == 1:
return None
if len(lines) != 2:
raise InstallNameError('Unexpected otool output ' + '\n'.join(lines))
return lines[1].strip() | Return install id from library named in `filename`
Returns None if no install id, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_id : str
install id of library `filename`, or None if no install id | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L225-L247 |
matthew-brett/delocate | delocate/tools.py | set_install_name | def set_install_name(filename, oldname, newname):
""" Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname`
"""
names = get_install_names(filename)
if oldname not in names:
raise InstallNameError('{0} not in install names for {1}'.format(
oldname, filename))
back_tick(['install_name_tool', '-change', oldname, newname, filename]) | python | def set_install_name(filename, oldname, newname):
""" Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname`
"""
names = get_install_names(filename)
if oldname not in names:
raise InstallNameError('{0} not in install names for {1}'.format(
oldname, filename))
back_tick(['install_name_tool', '-change', oldname, newname, filename]) | Set install name `oldname` to `newname` in library filename
Parameters
----------
filename : str
filename of library
oldname : str
current install name in library
newname : str
replacement name for `oldname` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L251-L267 |
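A sketch of rewriting one dependency with set_install_name, assuming macOS with install_name_tool; all three paths are hypothetical:

    from delocate.tools import set_install_name

    # point the library at a copy shipped next to it rather than an absolute path
    set_install_name('build/lib/example.dylib',        # library to edit
                     '/usr/local/lib/libdep.1.dylib',  # an existing install name in it
                     '@loader_path/libdep.1.dylib')    # replacement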
matthew-brett/delocate | delocate/tools.py | set_install_id | def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | python | def set_install_id(filename, install_id):
""" Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has no install id
"""
if get_install_id(filename) is None:
raise InstallNameError('{0} has no install id'.format(filename))
back_tick(['install_name_tool', '-id', install_id, filename]) | Set install id for library named in `filename`
Parameters
----------
filename : str
filename of library
install_id : str
install id for library `filename`
Raises
------
RuntimeError if `filename` has not install id | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L271-L287 |
matthew-brett/delocate | delocate/tools.py | get_rpaths | def get_rpaths(filename):
""" Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename`
"""
try:
lines = _cmd_out_err(['otool', '-l', filename])
except RuntimeError:
return ()
if not _line0_says_object(lines[0], filename):
return ()
lines = [line.strip() for line in lines]
paths = []
line_no = 1
while line_no < len(lines):
line = lines[line_no]
line_no += 1
if line != 'cmd LC_RPATH':
continue
cmdsize, path = lines[line_no:line_no+2]
assert cmdsize.startswith('cmdsize ')
paths.append(RPATH_RE.match(path).groups()[0])
line_no += 2
return tuple(paths) | python | def get_rpaths(filename):
""" Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename`
"""
try:
lines = _cmd_out_err(['otool', '-l', filename])
except RuntimeError:
return ()
if not _line0_says_object(lines[0], filename):
return ()
lines = [line.strip() for line in lines]
paths = []
line_no = 1
while line_no < len(lines):
line = lines[line_no]
line_no += 1
if line != 'cmd LC_RPATH':
continue
cmdsize, path = lines[line_no:line_no+2]
assert cmdsize.startswith('cmdsize ')
paths.append(RPATH_RE.match(path).groups()[0])
line_no += 2
return tuple(paths) | Return a tuple of rpaths from the library `filename`
If `filename` is not a library then the returned tuple will be empty.
Parameters
----------
filename : str
filename of library
Returns
-------
rpath : tuple
rpath paths in `filename` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L292-L325 |
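A usage sketch, again assuming macOS and a hypothetical library path:

    from delocate.tools import get_rpaths

    rpaths = get_rpaths('build/lib/example.dylib')   # hypothetical path
    print(rpaths)   # e.g. ('@loader_path/../lib',), or () for a non-library file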
matthew-brett/delocate | delocate/tools.py | dir2zip | def dir2zip(in_dir, zip_fname):
""" Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write
"""
z = zipfile.ZipFile(zip_fname, 'w',
compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(in_dir):
for file in files:
in_fname = pjoin(root, file)
in_stat = os.stat(in_fname)
# Preserve file permissions, but allow copy
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
if os.path.sep == '\\':
# Make the path unix friendly on windows.
# PyPI won't accept wheels with windows path separators
info.filename = relpath(in_fname, in_dir).replace('\\', '/')
# Set time from modification time
info.date_time = time.localtime(in_stat.st_mtime)
# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482
# Also set regular file permissions
perms = stat.S_IMODE(in_stat.st_mode) | stat.S_IFREG
info.external_attr = perms << 16
with open_readable(in_fname, 'rb') as fobj:
contents = fobj.read()
z.writestr(info, contents, zipfile.ZIP_DEFLATED)
z.close() | python | def dir2zip(in_dir, zip_fname):
""" Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write
"""
z = zipfile.ZipFile(zip_fname, 'w',
compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(in_dir):
for file in files:
in_fname = pjoin(root, file)
in_stat = os.stat(in_fname)
# Preserve file permissions, but allow copy
info = zipfile.ZipInfo(in_fname)
info.filename = relpath(in_fname, in_dir)
if os.path.sep == '\\':
# Make the path unix friendly on windows.
# PyPI won't accept wheels with windows path separators
info.filename = relpath(in_fname, in_dir).replace('\\', '/')
# Set time from modification time
info.date_time = time.localtime(in_stat.st_mtime)
# See https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/48435482#48435482
# Also set regular file permissions
perms = stat.S_IMODE(in_stat.st_mode) | stat.S_IFREG
info.external_attr = perms << 16
with open_readable(in_fname, 'rb') as fobj:
contents = fobj.read()
z.writestr(info, contents, zipfile.ZIP_DEFLATED)
z.close() | Make a zip file `zip_fname` with contents of directory `in_dir`
The recorded filenames are relative to `in_dir`, so doing a standard zip
unpack of the resulting `zip_fname` in an empty directory will result in
the original directory contents.
Parameters
----------
in_dir : str
Directory path containing files to go in the zip archive
zip_fname : str
Filename of zip archive to write | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L357-L393 |
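A self-contained sketch that builds a one-file tree and archives it with dir2zip; the file names are illustrative only:

    import os, tempfile, zipfile
    from delocate.tools import dir2zip

    tree = tempfile.mkdtemp()
    with open(os.path.join(tree, 'hello.txt'), 'w') as fobj:
        fobj.write('hi\n')
    dir2zip(tree, 'hello.zip')
    # recorded names are relative to `tree`, so the archive lists just 'hello.txt'
    with zipfile.ZipFile('hello.zip') as zf:
        print(zf.namelist())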
matthew-brett/delocate | delocate/tools.py | find_package_dirs | def find_package_dirs(root_path):
""" Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path`
"""
package_sdirs = set()
for entry in os.listdir(root_path):
fname = entry if root_path == '.' else pjoin(root_path, entry)
if isdir(fname) and exists(pjoin(fname, '__init__.py')):
package_sdirs.add(fname)
return package_sdirs | python | def find_package_dirs(root_path):
""" Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path`
"""
package_sdirs = set()
for entry in os.listdir(root_path):
fname = entry if root_path == '.' else pjoin(root_path, entry)
if isdir(fname) and exists(pjoin(fname, '__init__.py')):
package_sdirs.add(fname)
return package_sdirs | Find python package directories in directory `root_path`
Parameters
----------
root_path : str
Directory to search for package subdirectories
Returns
-------
package_sdirs : set
Set of strings where each is a subdirectory of `root_path`, containing
an ``__init__.py`` file. Paths prefixed by `root_path` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L396-L415 |
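A small sketch; the directory layout described in the comment is hypothetical:

    from delocate.tools import find_package_dirs

    # given build/lib/pkg/__init__.py and build/lib/scripts/ (no __init__.py),
    # only the real package directory is reported
    print(find_package_dirs('build/lib'))   # -> {'build/lib/pkg'}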
matthew-brett/delocate | delocate/tools.py | cmp_contents | def cmp_contents(filename1, filename2):
""" Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as binary contents of
`filename2`, False otherwise.
"""
with open_readable(filename1, 'rb') as fobj:
contents1 = fobj.read()
with open_readable(filename2, 'rb') as fobj:
contents2 = fobj.read()
return contents1 == contents2 | python | def cmp_contents(filename1, filename2):
""" Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as binary contents of
`filename2`, False otherwise.
"""
with open_readable(filename1, 'rb') as fobj:
contents1 = fobj.read()
with open_readable(filename2, 'rb') as fobj:
contents2 = fobj.read()
return contents1 == contents2 | Returns True if contents of the files are the same
Parameters
----------
filename1 : str
filename of first file to compare
filename2 : str
filename of second file to compare
Returns
-------
tf : bool
True if binary contents of `filename1` is same as binary contents of
`filename2`, False otherwise. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L418-L438 |
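A quick illustration using two throwaway files:

    import os, tempfile
    from delocate.tools import cmp_contents

    d = tempfile.mkdtemp()
    a, b = os.path.join(d, 'a.bin'), os.path.join(d, 'b.bin')
    for path in (a, b):
        with open(path, 'wb') as fobj:
            fobj.write(b'\x00\x01')
    assert cmp_contents(a, b)          # identical bytes
    with open(b, 'ab') as fobj:
        fobj.write(b'\x02')
    assert not cmp_contents(a, b)      # contents now differ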
matthew-brett/delocate | delocate/tools.py | get_archs | def get_archs(libname):
""" Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
"""
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
else:
assert len(lines) == 1
line = lines[0]
for reggie in (
'Non-fat file: {0} is architecture: (.*)'.format(libname),
'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' '))
raise ValueError("Unexpected output: '{0}' for {1}".format(
stdout, libname)) | python | def get_archs(libname):
""" Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64'
"""
if not exists(libname):
raise RuntimeError(libname + " is not a file")
try:
stdout = back_tick(['lipo', '-info', libname])
except RuntimeError:
return frozenset()
lines = [line.strip() for line in stdout.split('\n') if line.strip()]
# For some reason, output from lipo -info on .a file generates this line
if lines[0] == "input file {0} is not a fat file".format(libname):
line = lines[1]
else:
assert len(lines) == 1
line = lines[0]
for reggie in (
'Non-fat file: {0} is architecture: (.*)'.format(libname),
'Architectures in the fat file: {0} are: (.*)'.format(libname)):
reggie = re.compile(reggie)
match = reggie.match(line)
if not match is None:
return frozenset(match.groups()[0].split(' '))
raise ValueError("Unexpected output: '{0}' for {1}".format(
stdout, libname)) | Return architecture types from library `libname`
Parameters
----------
libname : str
filename of binary for which to return arch codes
Returns
-------
arch_names : frozenset
Empty (frozen)set if no arch codes. If not empty, contains one or more
of 'ppc', 'ppc64', 'i386', 'x86_64' | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L441-L476 |
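A usage sketch, assuming macOS with lipo available; the path and the reported architectures are only examples:

    from delocate.tools import get_archs

    archs = get_archs('build/lib/example.dylib')   # hypothetical Mach-O file
    print(archs)   # e.g. frozenset({'x86_64'}) or frozenset({'i386', 'x86_64'})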
matthew-brett/delocate | delocate/tools.py | validate_signature | def validate_signature(filename):
""" Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
"""
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') | python | def validate_signature(filename):
""" Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
"""
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') | Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L513-L534 |
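A sketch of the typical call site, assuming macOS with codesign; the path is hypothetical:

    from delocate.tools import validate_signature

    # after rewriting install names the old signature may no longer verify;
    # an invalid signature is replaced with an ad-hoc one, otherwise this is a no-op
    validate_signature('build/lib/example.dylib')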
matthew-brett/delocate | versioneer.py | os_path_relpath | def os_path_relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list) | python | def os_path_relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list) | Return a relative version of a path | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/versioneer.py#L596-L611 |
matthew-brett/delocate | delocate/fuse.py | fuse_trees | def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')):
""" Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts`) - if present, check if there is a file with matching relative
path in `to_tree`, if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
----------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries
"""
for from_dirpath, dirnames, filenames in os.walk(from_tree):
to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree))
# Copy any missing directories in to_path
for dirname in tuple(dirnames):
to_path = pjoin(to_dirpath, dirname)
if not exists(to_path):
from_path = pjoin(from_dirpath, dirname)
shutil.copytree(from_path, to_path)
# If copying, don't further analyze this directory
dirnames.remove(dirname)
for fname in filenames:
root, ext = splitext(fname)
from_path = pjoin(from_dirpath, fname)
to_path = pjoin(to_dirpath, fname)
if not exists(to_path):
_copyfile(from_path, to_path)
elif cmp_contents(from_path, to_path):
pass
elif ext in lib_exts:
# existing lib that needs fuse
lipo_fuse(from_path, to_path, to_path)
else:
# existing not-lib file not identical to source
_copyfile(from_path, to_path) | python | def fuse_trees(to_tree, from_tree, lib_exts=('.so', '.dylib', '.a')):
""" Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts`) - if present, check if there is a file with matching relative
path in `to_tree`; if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
----------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries
"""
for from_dirpath, dirnames, filenames in os.walk(from_tree):
to_dirpath = pjoin(to_tree, relpath(from_dirpath, from_tree))
# Copy any missing directories in to_path
for dirname in tuple(dirnames):
to_path = pjoin(to_dirpath, dirname)
if not exists(to_path):
from_path = pjoin(from_dirpath, dirname)
shutil.copytree(from_path, to_path)
# If copying, don't further analyze this directory
dirnames.remove(dirname)
for fname in filenames:
root, ext = splitext(fname)
from_path = pjoin(from_dirpath, fname)
to_path = pjoin(to_dirpath, fname)
if not exists(to_path):
_copyfile(from_path, to_path)
elif cmp_contents(from_path, to_path):
pass
elif ext in lib_exts:
# existing lib that needs fuse
lipo_fuse(from_path, to_path, to_path)
else:
# existing not-lib file not identical to source
_copyfile(from_path, to_path) | Fuse path `from_tree` into path `to_tree`
For each file in `from_tree` - check for library file extension (in
`lib_exts`) - if present, check if there is a file with matching relative
path in `to_tree`; if so, use :func:`delocate.tools.lipo_fuse` to fuse the
two libraries together and write into `to_tree`. If any of these
conditions are not met, just copy the file from `from_tree` to `to_tree`.
Parameters
----------
to_tree : str
path of tree to fuse into (update into)
from_tree : str
path of tree to fuse from (update from)
lib_exts : sequence, optional
filename extensions for libraries | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/fuse.py#L36-L77 |
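A hedged usage sketch: fuse two hypothetical unpacked wheel trees built for different single architectures, writing the fat libraries back into the first tree:
from delocate.fuse import fuse_trees
# Hypothetical directories holding the unpacked contents of two thin wheels
fuse_trees('unpacked_wheel_x86_64', 'unpacked_wheel_i386')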
matthew-brett/delocate | delocate/fuse.py | fuse_wheels | def fuse_wheels(to_wheel, from_wheel, out_wheel):
""" Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
----------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
"""
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) | python | def fuse_wheels(to_wheel, from_wheel, out_wheel):
""" Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
----------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel`
"""
to_wheel, from_wheel, out_wheel = [
abspath(w) for w in (to_wheel, from_wheel, out_wheel)]
with InTemporaryDirectory():
zip2dir(to_wheel, 'to_wheel')
zip2dir(from_wheel, 'from_wheel')
fuse_trees('to_wheel', 'from_wheel')
rewrite_record('to_wheel')
dir2zip('to_wheel', out_wheel) | Fuse `from_wheel` into `to_wheel`, write to `out_wheel`
Parameters
----------
to_wheel : str
filename of wheel to fuse into
from_wheel : str
filename of wheel to fuse from
out_wheel : str
filename of new wheel from fusion of `to_wheel` and `from_wheel` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/fuse.py#L80-L99 |
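A sketch of fusing two hypothetical thin wheels into a dual-architecture output wheel (filenames are made up for illustration):
from delocate.fuse import fuse_wheels
fuse_wheels('example-1.0-cp27-none-macosx_10_6_x86_64.whl',
            'example-1.0-cp27-none-macosx_10_6_i386.whl',
            'example-1.0-cp27-none-macosx_10_6_intel.whl')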
matthew-brett/delocate | delocate/delocating.py | delocate_tree_libs | def delocate_tree_libs(lib_dict, lib_path, root_path):
""" Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to ``lib_path``.
"""
copied_libs = {}
delocated_libs = set()
copied_basenames = set()
rp_root_path = realpath(root_path)
rp_lib_path = realpath(lib_path)
# Test for errors first to avoid getting half-way through changing the tree
for required, requirings in lib_dict.items():
if required.startswith('@'): # assume @rpath etc are correct
# But warn, because likely they are not
warnings.warn('Not processing required path {0} because it '
'begins with @'.format(required))
continue
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith('..'):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError('Already planning to copy library with '
'same basename as: ' + r_ed_base)
if not exists(required):
raise DelocationError('library "{0}" does not exist'.format(
required))
copied_libs[required] = requirings
copied_basenames.add(r_ed_base)
else: # Is local, plan to set relative loader_path
delocated_libs.add(required)
# Modify in place now that we've checked for errors
for required in copied_libs:
shutil.copy(required, lib_path)
# Set rpath and install names for this copied library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(rp_lib_path, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/{0}/{1}'.format(
req_rel, basename(required)))
for required in delocated_libs:
# Set relative path for local library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(required, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/' + req_rel)
return copied_libs | python | def delocate_tree_libs(lib_dict, lib_path, root_path):
""" Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to ``lib_path``.
"""
copied_libs = {}
delocated_libs = set()
copied_basenames = set()
rp_root_path = realpath(root_path)
rp_lib_path = realpath(lib_path)
# Test for errors first to avoid getting half-way through changing the tree
for required, requirings in lib_dict.items():
if required.startswith('@'): # assume @rpath etc are correct
# But warn, because likely they are not
warnings.warn('Not processing required path {0} because it '
'begins with @'.format(required))
continue
r_ed_base = basename(required)
if relpath(required, rp_root_path).startswith('..'):
# Not local, plan to copy
if r_ed_base in copied_basenames:
raise DelocationError('Already planning to copy library with '
'same basename as: ' + r_ed_base)
if not exists(required):
raise DelocationError('library "{0}" does not exist'.format(
required))
copied_libs[required] = requirings
copied_basenames.add(r_ed_base)
else: # Is local, plan to set relative loader_path
delocated_libs.add(required)
# Modify in place now that we've checked for errors
for required in copied_libs:
shutil.copy(required, lib_path)
# Set rpath and install names for this copied library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(rp_lib_path, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/{0}/{1}'.format(
req_rel, basename(required)))
for required in delocated_libs:
# Set relative path for local library
for requiring, orig_install_name in lib_dict[required].items():
req_rel = relpath(required, dirname(requiring))
set_install_name(requiring, orig_install_name,
'@loader_path/' + req_rel)
return copied_libs | Move needed libraries in `lib_dict` into `lib_path`
`lib_dict` has keys naming libraries required by the files in the
corresponding value. Call the keys, "required libs". Call the values
"requiring objects".
Copy all the required libs to `lib_path`. Fix up the rpaths and install
names in the requiring objects to point to these new copies.
Exception: required libs within the directory tree pointed to by
`root_path` stay where they are, but we modify requiring objects to use
relative paths to these libraries.
Parameters
----------
lib_dict : dict
Dictionary with (key, value) pairs of (``depended_lib_path``,
``dependings_dict``) (see :func:`libsana.tree_libs`)
lib_path : str
Path in which to store copies of libs referred to in keys of
`lib_dict`. Assumed to exist
root_path : str
Root directory of tree analyzed in `lib_dict`. Any required
library within the subtrees of `root_path` does not get copied, but
libraries linking to it have links adjusted to use relative path to
this library.
Returns
-------
copied_libs : dict
Filtered `lib_dict` dict containing only the (key, value) pairs from
`lib_dict` where the keys are the libraries copied to ``lib_path``. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L27-L101 |
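A minimal sketch chaining this with ``tree_libs``; the tree path is hypothetical, and the library directory is created first because ``delocate_tree_libs`` assumes it exists:
import os
from delocate.libsana import tree_libs
from delocate.delocating import delocate_tree_libs
tree = 'build/unpacked_package'              # hypothetical tree to analyze
lib_dir = os.path.join(tree, '.dylibs')
os.makedirs(lib_dir)                         # lib_path is assumed to exist
lib_dict = tree_libs(tree)
copied = delocate_tree_libs(lib_dict, lib_dir, tree)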
matthew-brett/delocate | delocate/delocating.py | copy_recurse | def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None):
""" Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added.
"""
if copied_libs is None:
copied_libs = {}
else:
copied_libs = dict(copied_libs)
done = False
while not done:
in_len = len(copied_libs)
_copy_required(lib_path, copy_filt_func, copied_libs)
done = len(copied_libs) == in_len
return copied_libs | python | def copy_recurse(lib_path, copy_filt_func = None, copied_libs = None):
""" Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added.
"""
if copied_libs is None:
copied_libs = {}
else:
copied_libs = dict(copied_libs)
done = False
while not done:
in_len = len(copied_libs)
_copy_required(lib_path, copy_filt_func, copied_libs)
done = len(copied_libs) == in_len
return copied_libs | Analyze `lib_path` for library dependencies and copy libraries
`lib_path` is a directory containing libraries. The libraries might
themselves have dependencies. This function analyzes the dependencies and
copies library dependencies that match the filter `copy_filt_func`. It also
adjusts the depending libraries to use the copy. It keeps iterating over
`lib_path` until all matching dependencies (of dependencies of dependencies
...) have been copied.
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each depended library name; copy where
``copy_filt_func(libname)`` is True, don't copy otherwise
copied_libs : dict
Dict with (key, value) pairs of (``copied_lib_path``,
``dependings_dict``) where ``copied_lib_path`` is the canonical path of
a library that has been copied to `lib_path`, and ``dependings_dict``
is a dictionary with (key, value) pairs of (``depending_lib_path``,
``install_name``). ``depending_lib_path`` is the canonical path of the
library depending on ``copied_lib_path``, ``install_name`` is the name
that ``depending_lib_path`` uses to refer to ``copied_lib_path`` (in
its install names).
Returns
-------
copied_libs : dict
Input `copied_libs` dict with any extra libraries and / or dependencies
added. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L104-L147 |
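A sketch of the follow-up pass over the same hypothetical library directory, copying only non-system dependencies via the module's ``filter_system_libs`` filter and extending the ``copied`` mapping from the previous sketch:
from delocate.delocating import copy_recurse, filter_system_libs
copied = copy_recurse(lib_dir, copy_filt_func=filter_system_libs,
                      copied_libs=copied)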
matthew-brett/delocate | delocate/delocating.py | _copy_required | def _copy_required(lib_path, copy_filt_func, copied_libs):
""" Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library, that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib`` and ``/sys/libB.dylib``,
telling us we previously copied those libraries from the ``/sys`` folder.
On a first pass, we discover that ``libA.dylib`` depends on
``/sys/libC.dylib``, so we copy that.
On a second pass, we discover now that ``libC.dylib`` also depends on
``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy of
``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']``
"""
# Paths will be prepended with `lib_path`
lib_dict = tree_libs(lib_path)
# Map library paths after copy ('copied') to path before copy ('orig')
rp_lp = realpath(lib_path)
copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied_libs)
for required, requirings in lib_dict.items():
if not copy_filt_func is None and not copy_filt_func(required):
continue
if required.startswith('@'):
# May have been processed by us, or have some rpath, loader_path of
# its own. Either way, leave alone
continue
# Requiring names may well be the copies in lib_path. Replace the copy
# names with the original names for entry into `copied_libs`
procd_requirings = {}
# Set requiring lib install names to point to local copy
for requiring, orig_install_name in requirings.items():
set_install_name(requiring,
orig_install_name,
'@loader_path/' + basename(required))
# Make processed version of ``dependings_dict``
mapped_requiring = copied2orig.get(requiring, requiring)
procd_requirings[mapped_requiring] = orig_install_name
if required in copied_libs:
# Have copied this already, add any new requirings
copied_libs[required].update(procd_requirings)
continue
# Haven't seen this one before, add entry to copied_libs
out_path = pjoin(lib_path, basename(required))
if exists(out_path):
raise DelocationError(out_path + ' already exists')
shutil.copy(required, lib_path)
copied2orig[out_path] = required
copied_libs[required] = procd_requirings | python | def _copy_required(lib_path, copy_filt_func, copied_libs):
""" Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library, that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib`` and ``/sys/libB.dylib``,
telling us we previously copied those libraries from the ``/sys`` folder.
On a first pass, we discover that ``libA.dylib`` depends on
``/sys/libC.dylib``, so we copy that.
On a second pass, we discover now that ``libC.dylib`` also depends on
``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy of
``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']``
"""
# Paths will be prepended with `lib_path`
lib_dict = tree_libs(lib_path)
# Map library paths after copy ('copied') to path before copy ('orig')
rp_lp = realpath(lib_path)
copied2orig = dict((pjoin(rp_lp, basename(c)), c) for c in copied_libs)
for required, requirings in lib_dict.items():
if not copy_filt_func is None and not copy_filt_func(required):
continue
if required.startswith('@'):
# May have been processed by us, or have some rpath, loader_path of
# its own. Either way, leave alone
continue
# Requiring names may well be the copies in lib_path. Replace the copy
# names with the original names for entry into `copied_libs`
procd_requirings = {}
# Set requiring lib install names to point to local copy
for requiring, orig_install_name in requirings.items():
set_install_name(requiring,
orig_install_name,
'@loader_path/' + basename(required))
# Make processed version of ``dependings_dict``
mapped_requiring = copied2orig.get(requiring, requiring)
procd_requirings[mapped_requiring] = orig_install_name
if required in copied_libs:
# Have copied this already, add any new requirings
copied_libs[required].update(procd_requirings)
continue
# Haven't seen this one before, add entry to copied_libs
out_path = pjoin(lib_path, basename(required))
if exists(out_path):
raise DelocationError(out_path + ' already exists')
shutil.copy(required, lib_path)
copied2orig[out_path] = required
copied_libs[required] = procd_requirings | Copy libraries required for files in `lib_path` to `lib_path`
Augment `copied_libs` dictionary with any newly copied libraries, modifying
`copied_libs` in-place - see Notes.
This is one pass of ``copy_recurse``
Parameters
----------
lib_path : str
Directory containing libraries
copy_filt_func : None or callable, optional
If None, copy any library that the found libraries depend on. If callable,
called on each library name; copy where ``copy_filt_func(libname)`` is
True, don't copy otherwise
copied_libs : dict
See :func:`copy_recurse` for definition.
Notes
-----
If we need to copy another library, add that (``depended_lib_path``,
``dependings_dict``) to `copied_libs`. ``dependings_dict`` has (key,
value) pairs of (``depending_lib_path``, ``install_name``).
``depending_lib_path`` will be the original (canonical) library name, not
the copy in ``lib_path``.
Sometimes we copy a library, that further depends on a library we have
already copied. In this case update ``copied_libs[depended_lib]`` with the
extra dependency (as well as fixing up the install names for the depending
library).
For example, imagine we start with a lib path like this::
my_lib_path/
libA.dylib
libB.dylib
Our input `copied_libs` has keys ``/sys/libA.dylib`` and ``/sys/libB.dylib``,
telling us we previously copied those libraries from the ``/sys`` folder.
On a first pass, we discover that ``libA.dylib`` depends on
``/sys/libC.dylib``, so we copy that.
On a second pass, we discover now that ``libC.dylib`` also depends on
``/sys/libB.dylib``. `copied_libs` tells us that we already have a copy of
``/sys/libB.dylib``, so we fix our copy of ``libC.dylib`` to point to
``my_lib_path/libB.dylib`` and add ``/sys/libC.dylib`` as a
``dependings_dict`` entry for ``copied_libs['/sys/libB.dylib']`` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L150-L233 |
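For illustration only, a single manual pass of this private helper over a hypothetical library directory; ``copied_libs`` is updated in place:
from delocate.delocating import _copy_required, filter_system_libs
copied_libs = {}
_copy_required('build/unpacked_package/.dylibs', filter_system_libs, copied_libs)
# copied_libs now maps each newly copied dependency to the libraries requiring it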
matthew-brett/delocate | delocate/delocating.py | delocate_path | def delocate_path(tree_path, lib_path,
lib_filt_func = None,
copy_filt_func = filter_system_libs):
""" Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_path`, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
if not exists(lib_path):
os.makedirs(lib_path)
lib_dict = tree_libs(tree_path, lib_filt_func)
if not copy_filt_func is None:
lib_dict = dict((key, value) for key, value in lib_dict.items()
if copy_filt_func(key))
copied = delocate_tree_libs(lib_dict, lib_path, tree_path)
return copy_recurse(lib_path, copy_filt_func, copied) | python | def delocate_path(tree_path, lib_path,
lib_filt_func = None,
copy_filt_func = filter_system_libs):
""" Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_path`, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
if not exists(lib_path):
os.makedirs(lib_path)
lib_dict = tree_libs(tree_path, lib_filt_func)
if not copy_filt_func is None:
lib_dict = dict((key, value) for key, value in lib_dict.items()
if copy_filt_func(key))
copied = delocate_tree_libs(lib_dict, lib_path, tree_path)
return copy_recurse(lib_path, copy_filt_func, copied) | Copy required libraries for files in `tree_path` into `lib_path`
Parameters
----------
tree_path : str
Root path of tree to search for required libraries
lib_path : str
Directory into which we copy required libraries
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_path`, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a file in the path depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L246-L289 |
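A hedged sketch delocating a hypothetical build tree, inspecting only files with known library extensions:
from delocate.delocating import delocate_path
copied = delocate_path('build/lib.macosx-10.9-x86_64-2.7',           # hypothetical tree
                       'build/lib.macosx-10.9-x86_64-2.7/.dylibs',
                       lib_filt_func='dylibs-only')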
matthew-brett/delocate | delocate/delocating.py | _merge_lib_dict | def _merge_lib_dict(d1, d2):
""" Merges lib_dict `d2` into lib_dict `d1`
"""
for required, requirings in d2.items():
if required in d1:
d1[required].update(requirings)
else:
d1[required] = requirings
return None | python | def _merge_lib_dict(d1, d2):
""" Merges lib_dict `d2` into lib_dict `d1`
"""
for required, requirings in d2.items():
if required in d1:
d1[required].update(requirings)
else:
d1[required] = requirings
return None | Merges lib_dict `d2` into lib_dict `d1` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L292-L300 |
matthew-brett/delocate | delocate/delocating.py | delocate_wheel | def delocate_wheel(in_wheel,
out_wheel = None,
lib_sdir = '.dylibs',
lib_filt_func = None,
copy_filt_func = filter_system_libs,
require_archs = None,
check_verbose = False,
):
""" Update wheel by copying required libraries to `lib_sdir` in wheel
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
to sequence ``['x86_64', 'i386']``) or name of required architecture
(e.g. "i386" or "x86_64").
check_verbose : bool, optional
If True, print warning messages about missing required architectures
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
in_wheel = abspath(in_wheel)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
in_place = in_wheel == out_wheel
with TemporaryDirectory() as tmpdir:
all_copied = {}
wheel_dir = realpath(pjoin(tmpdir, 'wheel'))
zip2dir(in_wheel, wheel_dir)
for package_path in find_package_dirs(wheel_dir):
lib_path = pjoin(package_path, lib_sdir)
lib_path_exists = exists(lib_path)
copied_libs = delocate_path(package_path, lib_path,
lib_filt_func, copy_filt_func)
if copied_libs and lib_path_exists:
raise DelocationError(
'{0} already exists in wheel but need to copy '
'{1}'.format(lib_path, '; '.join(copied_libs)))
if len(os.listdir(lib_path)) == 0:
shutil.rmtree(lib_path)
# Check architectures
if not require_archs is None:
stop_fast = not check_verbose
bads = check_archs(copied_libs, require_archs, stop_fast)
if len(bads) != 0:
if check_verbose:
print(bads_report(bads, pjoin(tmpdir, 'wheel')))
raise DelocationError(
"Some missing architectures in wheel")
# Change install ids to be unique within Python space
install_id_root = (DLC_PREFIX +
relpath(package_path, wheel_dir) +
'/')
for lib in copied_libs:
lib_base = basename(lib)
copied_path = pjoin(lib_path, lib_base)
set_install_id(copied_path, install_id_root + lib_base)
validate_signature(copied_path)
_merge_lib_dict(all_copied, copied_libs)
if len(all_copied):
rewrite_record(wheel_dir)
if len(all_copied) or not in_place:
dir2zip(wheel_dir, out_wheel)
return stripped_lib_dict(all_copied, wheel_dir + os.path.sep) | python | def delocate_wheel(in_wheel,
out_wheel = None,
lib_sdir = '.dylibs',
lib_filt_func = None,
copy_filt_func = filter_system_libs,
require_archs = None,
check_verbose = False,
):
""" Update wheel by copying required libraries to `lib_sdir` in wheel
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
to sequence ``['x86_64', 'i386']``) or name of required architecture
(e.g. "i386" or "x86_64").
check_verbose : bool, optional
If True, print warning messages about missing required architectures
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path.
"""
if lib_filt_func == "dylibs-only":
lib_filt_func = _dylibs_only
in_wheel = abspath(in_wheel)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
in_place = in_wheel == out_wheel
with TemporaryDirectory() as tmpdir:
all_copied = {}
wheel_dir = realpath(pjoin(tmpdir, 'wheel'))
zip2dir(in_wheel, wheel_dir)
for package_path in find_package_dirs(wheel_dir):
lib_path = pjoin(package_path, lib_sdir)
lib_path_exists = exists(lib_path)
copied_libs = delocate_path(package_path, lib_path,
lib_filt_func, copy_filt_func)
if copied_libs and lib_path_exists:
raise DelocationError(
'{0} already exists in wheel but need to copy '
'{1}'.format(lib_path, '; '.join(copied_libs)))
if len(os.listdir(lib_path)) == 0:
shutil.rmtree(lib_path)
# Check architectures
if not require_archs is None:
stop_fast = not check_verbose
bads = check_archs(copied_libs, require_archs, stop_fast)
if len(bads) != 0:
if check_verbose:
print(bads_report(bads, pjoin(tmpdir, 'wheel')))
raise DelocationError(
"Some missing architectures in wheel")
# Change install ids to be unique within Python space
install_id_root = (DLC_PREFIX +
relpath(package_path, wheel_dir) +
'/')
for lib in copied_libs:
lib_base = basename(lib)
copied_path = pjoin(lib_path, lib_base)
set_install_id(copied_path, install_id_root + lib_base)
validate_signature(copied_path)
_merge_lib_dict(all_copied, copied_libs)
if len(all_copied):
rewrite_record(wheel_dir)
if len(all_copied) or not in_place:
dir2zip(wheel_dir, out_wheel)
return stripped_lib_dict(all_copied, wheel_dir + os.path.sep) | Update wheel by copying required libraries to `lib_sdir` in wheel
Create `lib_sdir` in wheel tree only if we are copying one or more
libraries.
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
out_wheel : None or str
Filename of processed wheel to write. If None, overwrite `in_wheel`
lib_sdir : str, optional
Subdirectory name in wheel package directory (or directories) to store
needed libraries.
lib_filt_func : None or str or callable, optional
If None, inspect all files for dependencies on dynamic libraries. If
callable, accepts filename as argument, returns True if we should
inspect the file, False otherwise. If str == "dylibs-only" then inspect
only files with known dynamic library extensions (``.dylib``, ``.so``).
copy_filt_func : None or callable, optional
If callable, called on each library name detected as a dependency; copy
where ``copy_filt_func(libname)`` is True, don't copy otherwise.
Default is callable rejecting only libraries beginning with
``/usr/lib`` or ``/System``. None means copy all libraries. This will
usually end up copying large parts of the system run-time.
require_archs : None or str or sequence, optional
If None, do no checks of architectures in libraries. If sequence,
sequence of architectures (output from ``lipo -info``) that every
library in the wheels should have (e.g. ``['x86_64', 'i386']``). An
empty sequence results in checks that depended libraries have the same
archs as depending libraries. If string, either "intel" (corresponds
to sequence ``['x86_64', 'i386']``) or name of required architecture
(e.g. "i386" or "x86_64").
check_verbose : bool, optional
If True, print warning messages about missing required architectures
Returns
-------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that was copied into `lib_sdir` of the wheel packages, and
``dependings_dict`` is a dictionary with key, value pairs where the key
is a path in the wheel depending on ``copied_lib_path``, and the value
is the ``install_name`` of ``copied_lib_path`` in the depending
library. The filenames in the keys are relative to the wheel root path. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L303-L407 |
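A sketch of a typical call on a hypothetical wheel, requiring dual Intel architectures and printing a report if any are missing:
from delocate.delocating import delocate_wheel
copied = delocate_wheel('dist/example-1.0-cp27-none-macosx_10_9_intel.whl',
                        out_wheel='fixed_wheels/example-1.0-cp27-none-macosx_10_9_intel.whl',
                        require_archs='intel',
                        check_verbose=True)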
matthew-brett/delocate | delocate/delocating.py | patch_wheel | def patch_wheel(in_wheel, patch_fname, out_wheel=None):
""" Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel`
"""
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not exist".format(patch_fname))
with InWheel(in_wheel, out_wheel):
with open(patch_fname, 'rb') as fobj:
patch_proc = Popen(['patch', '-p1'],
stdin = fobj,
stdout = PIPE,
stderr = PIPE)
stdout, stderr = patch_proc.communicate()
if patch_proc.returncode != 0:
raise RuntimeError("Patch failed with stdout:\n" +
stdout.decode('latin1')) | python | def patch_wheel(in_wheel, patch_fname, out_wheel=None):
""" Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel`
"""
in_wheel = abspath(in_wheel)
patch_fname = abspath(patch_fname)
if out_wheel is None:
out_wheel = in_wheel
else:
out_wheel = abspath(out_wheel)
if not exists(patch_fname):
raise ValueError("patch file {0} does not exist".format(patch_fname))
with InWheel(in_wheel, out_wheel):
with open(patch_fname, 'rb') as fobj:
patch_proc = Popen(['patch', '-p1'],
stdin = fobj,
stdout = PIPE,
stderr = PIPE)
stdout, stderr = patch_proc.communicate()
if patch_proc.returncode != 0:
raise RuntimeError("Patch failed with stdout:\n" +
stdout.decode('latin1')) | Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel`
If `out_wheel` is None (the default), overwrite the wheel `in_wheel`
in-place.
Parameters
----------
in_wheel : str
Filename of wheel to process
patch_fname : str
Filename of patch file. Will be applied with ``patch -p1 <
patch_fname``
out_wheel : None or str
Filename of patched wheel to write. If None, overwrite `in_wheel` | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L410-L443 |
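A sketch applying a hypothetical ``-p1`` patch to the wheel contents and writing the result to a new file:
from delocate.delocating import patch_wheel
patch_wheel('dist/example-1.0-cp27-none-macosx_10_9_intel.whl',
            'patches/fix-install-names.patch',
            out_wheel='patched/example-1.0-cp27-none-macosx_10_9_intel.whl')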
matthew-brett/delocate | delocate/delocating.py | check_archs | def check_archs(copied_libs, require_archs=(), stop_fast=False):
""" Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
"""
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | python | def check_archs(copied_libs, require_archs=(), stop_fast=False):
""" Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
"""
if isinstance(require_archs, string_types):
require_archs = (['i386', 'x86_64'] if require_archs == 'intel'
else [require_archs])
require_archs = frozenset(require_archs)
bads = []
for depended_lib, dep_dict in copied_libs.items():
depended_archs = get_archs(depended_lib)
for depending_lib, install_name in dep_dict.items():
depending_archs = get_archs(depending_lib)
all_required = depending_archs | require_archs
all_missing = all_required.difference(depended_archs)
if len(all_missing) == 0:
continue
required_missing = require_archs.difference(depended_archs)
if len(required_missing):
bads.append((depending_lib, required_missing))
else:
bads.append((depended_lib, depending_lib, all_missing))
if stop_fast:
return set(bads)
return set(bads) | Check compatibility of archs in `copied_libs` dict
Parameters
----------
copied_libs : dict
dict containing the (key, value) pairs of (``copied_lib_path``,
``dependings_dict``), where ``copied_lib_path`` is a library real path
that has been copied during delocation, and ``dependings_dict`` is a
dictionary with key, value pairs where the key is a path in the target
being delocated (a wheel or path) depending on ``copied_lib_path``, and
the value is the ``install_name`` of ``copied_lib_path`` in the
depending library.
require_archs : str or sequence, optional
Architectures we require to be present in all library files in wheel.
If an empty sequence, just check that depended libraries do have the
architectures of the depending libraries, with no constraints on what
these architectures are. If a sequence, then a set of required
architectures e.g. ``['i386', 'x86_64']`` to specify dual Intel
architectures. If a string, then a standard architecture name as
returned by ``lipo -info`` or the string "intel", corresponding to the
sequence ``['i386', 'x86_64']``
stop_fast : bool, optional
Whether to give up collecting errors after the first
Returns
-------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L446-L505 |
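A sketch checking a previously returned ``copied`` mapping (from the delocate_wheel sketch above) for dual Intel support:
from delocate.delocating import check_archs, bads_report
bads = check_archs(copied, require_archs='intel')
if bads:
    print(bads_report(bads))
    raise SystemExit('some architectures are missing from the wheel')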
matthew-brett/delocate | delocate/delocating.py | bads_report | def bads_report(bads, path_prefix=None):
""" Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing
"""
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | python | def bads_report(bads, path_prefix=None):
""" Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depended_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing
"""
path_processor = ((lambda x : x) if path_prefix is None
else get_rp_stripper(path_prefix))
reports = []
for result in bads:
if len(result) == 3:
depended_lib, depending_lib, missing_archs = result
reports.append("{0} needs {1} {2} missing from {3}".format(
path_processor(depending_lib),
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depended_lib)))
elif len(result) == 2:
depending_lib, missing_archs = result
reports.append("Required {0} {1} missing from {2}".format(
'archs' if len(missing_archs) > 1 else 'arch',
', '.join(sorted(missing_archs)),
path_processor(depending_lib)))
else:
raise ValueError('Report tuple should be length 2 or 3')
return '\n'.join(sorted(reports)) | Return a nice report of bad architectures in `bads`
Parameters
----------
bads : set
set of length 2 or 3 tuples. A length 2 tuple is of form
``(depending_lib, missing_archs)`` meaning that an arch in
`require_archs` was missing from ``depending_lib``. A length 3 tuple
is of form ``(depended_lib, depending_lib, missing_archs)`` where
``depended_lib`` is the filename of the library depended on,
``depending_lib`` is the library depending on ``depending_lib`` and
``missing_archs`` is a set of missing architecture strings giving
architectures present in ``depending_lib`` and missing in
``depended_lib``. An empty set means all architectures were present as
required.
path_prefix : None or str, optional
Path prefix to strip from ``depended_lib`` and ``depending_lib``. None
means do not strip anything.
Returns
-------
report : str
A nice report for printing | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L508-L552 |
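For illustration, the two tuple shapes accepted by ``bads_report``, with made-up paths; the prefix strips the leading wheel directory from the report:
from delocate.delocating import bads_report
bads = {
    ('/tmp/wheel/pkg/module.so', frozenset(['i386'])),                  # required arch missing
    ('/tmp/wheel/pkg/.dylibs/libz.dylib', '/tmp/wheel/pkg/module.so',
     frozenset(['ppc'])),                                               # depended lib lacks an arch
}
print(bads_report(bads, path_prefix='/tmp/wheel/'))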
matthew-brett/delocate | delocate/libsana.py | tree_libs | def tree_libs(start_path, filt_func=None):
""" Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``depending_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
"""
lib_dict = {}
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_libpath = realpath(pjoin(dirpath, base))
if filt_func is not None and not filt_func(depending_libpath):
continue
rpaths = get_rpaths(depending_libpath)
for install_name in get_install_names(depending_libpath):
lib_path = (install_name if install_name.startswith('@')
else realpath(install_name))
lib_path = resolve_rpath(lib_path, rpaths)
if lib_path in lib_dict:
lib_dict[lib_path][depending_libpath] = install_name
else:
lib_dict[lib_path] = {depending_libpath: install_name}
return lib_dict | python | def tree_libs(start_path, filt_func=None):
""" Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html
"""
lib_dict = {}
for dirpath, dirnames, basenames in os.walk(start_path):
for base in basenames:
depending_libpath = realpath(pjoin(dirpath, base))
if filt_func is not None and not filt_func(depending_libpath):
continue
rpaths = get_rpaths(depending_libpath)
for install_name in get_install_names(depending_libpath):
lib_path = (install_name if install_name.startswith('@')
else realpath(install_name))
lib_path = resolve_rpath(lib_path, rpaths)
if lib_path in lib_dict:
lib_dict[lib_path][depending_libpath] = install_name
else:
lib_dict[lib_path] = {depending_libpath: install_name}
return lib_dict | Return analysis of library dependencies within `start_path`
Parameters
----------
start_path : str
root path of tree to search for libraries depending on other libraries.
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``).
``libpath`` is canonical (``os.path.realpath``) filename of library, or
library name starting with {'@rpath', '@loader_path',
'@executable_path'}.
``dependings_dict`` is a dict with (key, value) pairs of
(``depending_libpath``, ``install_name``), where ``dependings_libpath``
is the canonical (``os.path.realpath``) filename of the library
depending on ``libpath``, and ``install_name`` is the "install_name" by
which ``depending_libpath`` refers to ``libpath``.
Notes
-----
See:
* https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/dyld.1.html
* http://matthew-brett.github.io/pydagogue/mac_runtime_link.html | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L14-L65 |
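A hedged sketch of calling ``tree_libs`` directly; the build directory and the suffix filter are placeholders for illustration:
    from delocate.libsana import tree_libs

    def inspect_binaries(path):
        # hypothetical filter: only inspect compiled extensions and dylibs
        return path.endswith(('.so', '.dylib'))

    lib_dict = tree_libs('build/lib', filt_func=inspect_binaries)
    for depended_lib, dependings in lib_dict.items():
        print(depended_lib, '<-', sorted(dependings))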
matthew-brett/delocate | delocate/libsana.py | resolve_rpath | def resolve_rpath(lib_path, rpaths):
""" Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved library's realpath.
"""
if not lib_path.startswith('@rpath/'):
return lib_path
lib_rpath = lib_path.split('/', 1)[1]
for rpath in rpaths:
rpath_lib = realpath(pjoin(rpath, lib_rpath))
if os.path.exists(rpath_lib):
return rpath_lib
warnings.warn(
"Couldn't find {0} on paths:\n\t{1}".format(
lib_path,
'\n\t'.join(realpath(path) for path in rpaths),
)
)
return lib_path | python | def resolve_rpath(lib_path, rpaths):
""" Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved library's realpath.
"""
if not lib_path.startswith('@rpath/'):
return lib_path
lib_rpath = lib_path.split('/', 1)[1]
for rpath in rpaths:
rpath_lib = realpath(pjoin(rpath, lib_rpath))
if os.path.exists(rpath_lib):
return rpath_lib
warnings.warn(
"Couldn't find {0} on paths:\n\t{1}".format(
lib_path,
'\n\t'.join(realpath(path) for path in rpaths),
)
)
return lib_path | Return `lib_path` with its `@rpath` resolved
If the `lib_path` doesn't have `@rpath` then it's returned as is.
If `lib_path` has `@rpath` then returns the first `rpaths`/`lib_path`
combination found. If the library can't be found in `rpaths` then a
detailed warning is printed and `lib_path` is returned as is.
Parameters
----------
lib_path : str
The path to a library file, which may or may not start with `@rpath`.
rpaths : sequence of str
A sequence of search paths, usually gotten from a call to `get_rpaths`.
Returns
-------
lib_path : str
A str with the resolved library's realpath. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L68-L104 |
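A brief sketch of ``resolve_rpath``; the rpath list would normally come from ``get_rpaths`` on the depending library, and the paths here are made up:
    from delocate.libsana import resolve_rpath

    rpaths = ['/opt/local/lib', '/tmp/unpacked_wheel/mypkg/.dylibs']   # placeholder rpaths
    resolve_rpath('/usr/lib/libSystem.B.dylib', rpaths)   # no '@rpath/' prefix, returned unchanged
    resolve_rpath('@rpath/libfoo.dylib', rpaths)          # first existing <rpath>/libfoo.dylib, else a warning and the input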
matthew-brett/delocate | delocate/libsana.py | get_prefix_stripper | def get_prefix_stripper(strip_prefix):
""" Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
strip_prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `strip_prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified
"""
n = len(strip_prefix)
def stripper(path):
return path if not path.startswith(strip_prefix) else path[n:]
return stripper | python | def get_prefix_stripper(strip_prefix):
""" Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
strip_prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `strip_prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified
"""
n = len(strip_prefix)
def stripper(path):
return path if not path.startswith(strip_prefix) else path[n:]
return stripper | Return function to strip `strip_prefix` prefix from string if present
Parameters
----------
strip_prefix : str
Prefix to strip from the beginning of string if present
Returns
-------
stripper : func
function such that ``stripper(a_string)`` will strip `strip_prefix` from
``a_string`` if present, otherwise pass ``a_string`` unmodified | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L107-L124 |
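Because the returned stripper is a plain string function, it can be exercised without any binaries; the paths below are only examples:
    from delocate.libsana import get_prefix_stripper

    strip = get_prefix_stripper('/private/tmp/unpack/')
    strip('/private/tmp/unpack/mypkg/_ext.so')   # -> 'mypkg/_ext.so'
    strip('/usr/lib/libSystem.B.dylib')          # prefix absent, returned unmodified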
matthew-brett/delocate | delocate/libsana.py | stripped_lib_dict | def stripped_lib_dict(lib_dict, strip_prefix):
""" Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
"""
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict | python | def stripped_lib_dict(lib_dict, strip_prefix):
""" Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths.
"""
relative_dict = {}
stripper = get_prefix_stripper(strip_prefix)
for lib_path, dependings_dict in lib_dict.items():
ding_dict = {}
for depending_libpath, install_name in dependings_dict.items():
ding_dict[stripper(depending_libpath)] = install_name
relative_dict[stripper(lib_path)] = ding_dict
return relative_dict | Return `lib_dict` with `strip_prefix` removed from start of paths
Use to give form of `lib_dict` that appears relative to some base path
given by `strip_prefix`. Particularly useful for analyzing wheels where we
unpack to a temporary path before analyzing.
Parameters
----------
lib_dict : dict
See :func:`tree_libs` for definition. All depending and depended paths
are canonical (therefore absolute)
strip_prefix : str
Prefix to remove (if present) from all depended and depending library
paths in `lib_dict`
Returns
-------
relative_dict : dict
`lib_dict` with `strip_prefix` removed from beginning of all depended
and depending library paths. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L145-L175 |
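A toy, self-contained sketch of ``stripped_lib_dict`` using an invented ``lib_dict`` in the same shape as ``tree_libs`` output:
    from delocate.libsana import stripped_lib_dict

    lib_dict = {
        '/tmp/unpack/mypkg/.dylibs/libz.1.dylib': {
            '/tmp/unpack/mypkg/_ext.so': '@loader_path/.dylibs/libz.1.dylib'},
    }
    stripped_lib_dict(lib_dict, '/tmp/unpack/')
    # {'mypkg/.dylibs/libz.1.dylib': {'mypkg/_ext.so': '@loader_path/.dylibs/libz.1.dylib'}}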
matthew-brett/delocate | delocate/libsana.py | wheel_libs | def wheel_libs(wheel_fname, filt_func = None):
""" Return analysis of library dependencies with a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree.
"""
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs(tmpdir, filt_func)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep) | python | def wheel_libs(wheel_fname, filt_func = None):
""" Return analysis of library dependencies with a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree.
"""
with TemporaryDirectory() as tmpdir:
zip2dir(wheel_fname, tmpdir)
lib_dict = tree_libs(tmpdir, filt_func)
return stripped_lib_dict(lib_dict, realpath(tmpdir) + os.path.sep) | Return analysis of library dependencies within a Python wheel
Use this routine for a dump of the dependency tree.
Parameters
----------
wheel_fname : str
Filename of wheel
filt_func : None or callable, optional
If None, inspect all files for library dependencies. If callable,
accepts filename as argument, returns True if we should inspect the
file, False otherwise.
Returns
-------
lib_dict : dict
dictionary with (key, value) pairs of (``libpath``,
``dependings_dict``). ``libpath`` is library being depended on,
relative to wheel root path if within wheel tree. ``dependings_dict``
is (key, value) of (``depending_lib_path``, ``install_name``). Again,
``depending_lib_path`` is library relative to wheel root path, if
within wheel tree. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L178-L205 |
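A sketch of dumping a wheel's dependency tree; the wheel filename is a placeholder and the call assumes a macOS binary wheel is present:
    from delocate.libsana import wheel_libs

    for lib, dependings in wheel_libs('mypkg-1.0-cp39-cp39-macosx_10_9_x86_64.whl').items():
        print(lib)
        for depending_lib, install_name in dependings.items():
            print('    ', depending_lib, 'via', install_name)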
matthew-brett/delocate | delocate/wheeltools.py | _open_for_csv | def _open_for_csv(name, mode):
""" Deal with Python 2/3 open API differences """
if sys.version_info[0] < 3:
return open_rw(name, mode + 'b')
return open_rw(name, mode, newline='', encoding='utf-8') | python | def _open_for_csv(name, mode):
""" Deal with Python 2/3 open API differences """
if sys.version_info[0] < 3:
return open_rw(name, mode + 'b')
return open_rw(name, mode, newline='', encoding='utf-8') | Deal with Python 2/3 open API differences | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/wheeltools.py#L28-L32 |
matthew-brett/delocate | delocate/wheeltools.py | rewrite_record | def rewrite_record(bdist_dir):
""" Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file
"""
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist-info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | python | def rewrite_record(bdist_dir):
""" Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file
"""
info_dirs = glob.glob(pjoin(bdist_dir, '*.dist-info'))
if len(info_dirs) != 1:
raise WheelToolsError("Should be exactly one `*.dist-info` directory")
record_path = pjoin(info_dirs[0], 'RECORD')
record_relpath = relpath(record_path, bdist_dir)
# Unsign wheel - because we're invalidating the record hash
sig_path = pjoin(info_dirs[0], 'RECORD.jws')
if exists(sig_path):
os.unlink(sig_path)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
for f in files:
yield pjoin(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with _open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relative_path = relpath(path, bdist_dir)
if skip(relative_path):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
path_for_record = relpath(
path, bdist_dir).replace(psep, '/')
writer.writerow((path_for_record, hash, size)) | Rewrite RECORD file with hashes for all files in `wheel_sdir`
Copied from :method:`wheel.bdist_wheel.bdist_wheel.write_record`
Will also unsign wheel
Parameters
----------
bdist_dir : str
Path of unpacked wheel file | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/wheeltools.py#L35-L81 |
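A hedged sketch of the intended workflow around ``rewrite_record``: unpack a wheel, patch it, refresh RECORD, repack. ``zip2dir``/``dir2zip`` are assumed to live in ``delocate.tools`` (``zip2dir`` is also used by ``wheel_libs`` above), and the wheel name is a placeholder:
    from delocate.tools import zip2dir, dir2zip   # assumed helper locations
    from delocate.wheeltools import rewrite_record

    zip2dir('mypkg-1.0-cp39-cp39-macosx_10_9_x86_64.whl', 'unpacked')
    # ... copy or modify files under 'unpacked' here ...
    rewrite_record('unpacked')   # refresh RECORD hashes/sizes, drop any RECORD.jws signature
    dir2zip('unpacked', 'mypkg-1.0-cp39-cp39-macosx_10_9_x86_64.whl')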
matthew-brett/delocate | delocate/wheeltools.py | add_platforms | def add_platforms(in_wheel, platforms, out_path=None, clobber=False):
""" Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
``('macosx_10_9_intel', 'macosx_10_9_x86_64')``
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written.
"""
in_wheel = abspath(in_wheel)
out_path = dirname(in_wheel) if out_path is None else abspath(out_path)
wf = WheelFile(in_wheel)
info_fname = _get_wheelinfo_name(wf)
# Check what tags we have
in_fname_tags = wf.parsed_filename.groupdict()['plat'].split('.')
extra_fname_tags = [tag for tag in platforms if tag not in in_fname_tags]
in_wheel_base, ext = splitext(basename(in_wheel))
out_wheel_base = '.'.join([in_wheel_base] + list(extra_fname_tags))
out_wheel = pjoin(out_path, out_wheel_base + ext)
if exists(out_wheel) and not clobber:
raise WheelToolsError('Not overwriting {0}; set clobber=True '
'to overwrite'.format(out_wheel))
with InWheelCtx(in_wheel) as ctx:
info = read_pkg_info(info_fname)
if info['Root-Is-Purelib'] == 'true':
raise WheelToolsError('Cannot add platforms to pure wheel')
in_info_tags = [tag for name, tag in info.items() if name == 'Tag']
# Python version, C-API version combinations
pyc_apis = ['-'.join(tag.split('-')[:2]) for tag in in_info_tags]
# unique Python version, C-API version combinations
pyc_apis = unique_by_index(pyc_apis)
# Add new platform tags for each Python version, C-API combination
required_tags = ['-'.join(tup) for tup in product(pyc_apis, platforms)]
needs_write = False
for req_tag in required_tags:
if req_tag in in_info_tags: continue
needs_write = True
info.add_header('Tag', req_tag)
if needs_write:
write_pkg_info(info_fname, info)
# Tell context manager to write wheel on exit by setting filename
ctx.out_wheel = out_wheel
return ctx.out_wheel | python | def add_platforms(in_wheel, platforms, out_path=None, clobber=False):
""" Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
``('macosx_10_9_intel', 'macosx_10_9_x86_64')``
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written.
"""
in_wheel = abspath(in_wheel)
out_path = dirname(in_wheel) if out_path is None else abspath(out_path)
wf = WheelFile(in_wheel)
info_fname = _get_wheelinfo_name(wf)
# Check what tags we have
in_fname_tags = wf.parsed_filename.groupdict()['plat'].split('.')
extra_fname_tags = [tag for tag in platforms if tag not in in_fname_tags]
in_wheel_base, ext = splitext(basename(in_wheel))
out_wheel_base = '.'.join([in_wheel_base] + list(extra_fname_tags))
out_wheel = pjoin(out_path, out_wheel_base + ext)
if exists(out_wheel) and not clobber:
raise WheelToolsError('Not overwriting {0}; set clobber=True '
'to overwrite'.format(out_wheel))
with InWheelCtx(in_wheel) as ctx:
info = read_pkg_info(info_fname)
if info['Root-Is-Purelib'] == 'true':
raise WheelToolsError('Cannot add platforms to pure wheel')
in_info_tags = [tag for name, tag in info.items() if name == 'Tag']
# Python version, C-API version combinations
pyc_apis = ['-'.join(tag.split('-')[:2]) for tag in in_info_tags]
# unique Python version, C-API version combinations
pyc_apis = unique_by_index(pyc_apis)
# Add new platform tags for each Python version, C-API combination
required_tags = ['-'.join(tup) for tup in product(pyc_apis, platforms)]
needs_write = False
for req_tag in required_tags:
if req_tag in in_info_tags: continue
needs_write = True
info.add_header('Tag', req_tag)
if needs_write:
write_pkg_info(info_fname, info)
# Tell context manager to write wheel on exit by setting filename
ctx.out_wheel = out_wheel
return ctx.out_wheel | Add platform tags `platforms` to `in_wheel` filename and WHEEL tags
Add any platform tags in `platforms` that are missing from `in_wheel`
filename.
Add any platform tags in `platforms` that are missing from `in_wheel`
``WHEEL`` file.
Parameters
----------
in_wheel : str
Filename of wheel to which to add platform tags
platforms : iterable
platform tags to add to wheel filename and WHEEL tags - e.g.
``('macosx_10_9_intel', 'macosx_10_9_x86_64')``
out_path : None or str, optional
Directory to which to write new wheel. Default is directory containing
`in_wheel`
clobber : bool, optional
If True, overwrite existing output filename, otherwise raise error
Returns
-------
out_wheel : None or str
Absolute path of wheel file written, or None if no wheel file written. | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/wheeltools.py#L162-L222 |
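A short sketch of ``add_platforms``; filenames and tags are illustrative, and ``clobber=True`` allows overwriting an existing output wheel:
    from delocate.wheeltools import add_platforms

    out_wheel = add_platforms(
        'mypkg-1.0-cp39-cp39-macosx_10_9_intel.whl',
        ['macosx_10_9_intel', 'macosx_10_9_x86_64'],
        out_path='dist',
        clobber=True)
    # out_wheel is None if the wheel already carried all the requested tags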
wiheto/teneto | teneto/networkmeasures/temporal_betweenness_centrality.py | temporal_betweenness_centrality | def temporal_betweenness_centrality(tnet=None, paths=None, calc='time'):
'''
Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
bet = np.zeros([paths[['from', 'to']].max().max() +
1, paths['t_start'].max()+1])
for row in paths.iterrows():
if (np.isnan(row[1]['path includes'])).all():
pass
else:
nodes_in_path = np.unique(np.concatenate(
row[1]['path includes'])).astype(int).tolist()
nodes_in_path.remove(row[1]['from'])
nodes_in_path.remove(row[1]['to'])
if len(nodes_in_path) > 0:
bet[nodes_in_path, row[1]['t_start']] += 1
# Normalise bet
bet = (1/((bet.shape[0]-1)*(bet.shape[0]-2))) * bet
if calc == 'global':
bet = np.mean(bet, axis=1)
return bet | python | def temporal_betweenness_centrality(tnet=None, paths=None, calc='time'):
'''
Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
bet = np.zeros([paths[['from', 'to']].max().max() +
1, paths['t_start'].max()+1])
for row in paths.iterrows():
if (np.isnan(row[1]['path includes'])).all():
pass
else:
nodes_in_path = np.unique(np.concatenate(
row[1]['path includes'])).astype(int).tolist()
nodes_in_path.remove(row[1]['from'])
nodes_in_path.remove(row[1]['to'])
if len(nodes_in_path) > 0:
bet[nodes_in_path, row[1]['t_start']] += 1
# Normalise bet
bet = (1/((bet.shape[0]-1)*(bet.shape[0]-2))) * bet
if calc == 'global':
bet = np.mean(bet, axis=1)
return bet | Returns temporal betweenness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
calc : str
either 'global' or 'time'
paths : pandas dataframe
Output of teneto.networkmeasures.shortest_temporal_path
Returns
--------
bet : array
normalized temporal betweenness centrality.
If calc = 'time', returns (node,time)
If calc = 'global', returns (node) | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_betweenness_centrality.py#L9-L72 |
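A minimal sketch with a toy graphlet array, assuming the function is re-exported from ``teneto.networkmeasures`` and that a (node,node,time) array is accepted as ``tnet`` (as the docstring suggests):
    import numpy as np
    from teneto.networkmeasures import temporal_betweenness_centrality

    G = np.zeros((3, 3, 4))          # 3 nodes, 4 time points, binary undirected
    G[0, 1, [0, 1, 2]] = 1
    G[1, 2, [1, 2, 3]] = 1
    G += G.transpose(1, 0, 2)        # symmetrise
    bet_time = temporal_betweenness_centrality(tnet=G, calc='time')      # (node, time) array
    bet_global = temporal_betweenness_centrality(tnet=G, calc='global')  # averaged over time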
wiheto/teneto | teneto/networkmeasures/volatility.py | volatility | def volatility(tnet, distance_func_name='default', calc='global', communities=None, event_displacement=None):
r"""
Volatility of temporal networks.
Volatility is the average distance between consecutive time points of graphlets (difference is calculated either globally or per edge).
Parameters
----------
tnet : array or dict
temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'
D : str
Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).
calc : str
Version of volatility to calculate. Possibilities include:
'global' - (default): the average distance of all nodes for each consecutive time point).
'edge' - average distance between consecutive time points for each edge). Takes considerably longer
'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
'time' - returns volatility per time point
'communities' - returns volatility within and between communities (see communities). It is also returned per time-point and this may be changed in the future (with additional options)
'event_displacement' - calculates the volatility from a specified point. Returns time-series.
communities : array
Array of community indices (either (node) or (node,time) dimensions).
event_displacement : int
if calc = 'event_displacement', specify the temporal index that all other time-points are calculated in relation to.
Notes
-----
Volatility calculates the difference between network snapshots.
.. math:: V_t = D(G_t,G_{t+1})
Where D is some distance function (e.g. Hamming distance for binary matrices).
V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.
Community indexes are returned "as is" with a shape of [max(communities)+1,max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returned V[1,2] will correspond to indexes 1 and 2, and missing indexes (here 0 and 4) will be NaNs in their rows and columns. If this behaviour is unwanted, call clean_community_indexes first. This will probably change.
Examples
--------
Import everything needed.
>>> import teneto
>>> import numpy
>>> np.random.seed(1)
>>> tnet = teneto.TemporalNetwork(nettype='bu')
Here we generate a binary network where edges have a 0.5 chance of going "on" and, once on, a 0.2 chance of going "off"
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))
Calculate the volatility
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.5555555555555556
If we change the probabilities so that edges are certain to disappear the time-point after they appeared:
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))
This will make a more volatile network
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.1111111111111111
We can calculate the volatility per time instead
>>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
>>> len(vol_time)
9
>>> vol_time[0]
0.3333333333333333
Or per node:
>>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
>>> vol_node
array([0.07407407, 0.07407407, 0.07407407])
Here we see the volatility for each node was the same.
It is also possible to pass a community vector and the function will return volatility both within and between each community.
So the following has two communities:
>>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
>>> vol_com.shape
(2, 2, 9)
>>> vol_com[:,:,0]
array([[nan, 0.5],
[0.5, 0. ]])
And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.
Output
------
vol : array
"""
# Get input (C or G)
tnet, netinfo = process_input(tnet, ['C', 'G', 'TN'])
distance_func_name = check_distance_funciton_input(
distance_func_name, netinfo)
if not isinstance(distance_func_name, str):
raise ValueError('Distance metric must be a string')
# If not directional, only calc on the uppertriangle
if netinfo['nettype'][1] == 'd':
ind = np.triu_indices(tnet.shape[0], k=-tnet.shape[0])
elif netinfo['nettype'][1] == 'u':
ind = np.triu_indices(tnet.shape[0], k=1)
if calc == 'communities':
# Make sure communities is np array for indexing later on.
communities = np.array(communities)
if len(communities) != netinfo['netshape'][0]:
raise ValueError(
'When processing per network, communities vector must equal the number of nodes')
if communities.min() < 0:
raise ValueError(
'Communitiy assignments must be positive integers')
# Get chosen distance metric function
distance_func = getDistanceFunction(distance_func_name)
if calc == 'global':
vol = np.mean([distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
for t in range(0, tnet.shape[-1] - 1)])
elif calc == 'time':
vol = [distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
for t in range(0, tnet.shape[-1] - 1)]
elif calc == 'event_displacement':
vol = [distance_func(tnet[ind[0], ind[1], event_displacement],
tnet[ind[0], ind[1], t]) for t in range(0, tnet.shape[-1])]
# This takes quite a bit of time to loop through. When calculating per edge/node.
elif calc == 'edge' or calc == 'node':
vol = np.zeros([tnet.shape[0], tnet.shape[1]])
for i in ind[0]:
for j in ind[1]:
vol[i, j] = np.mean([distance_func(
tnet[i, j, t], tnet[i, j, t + 1]) for t in range(0, tnet.shape[-1] - 1)])
if netinfo['nettype'][1] == 'u':
vol = vol + np.transpose(vol)
if calc == 'node':
vol = np.mean(vol, axis=1)
elif calc == 'communities':
net_id = set(communities)
vol = np.zeros([max(net_id) + 1, max(net_id) +
1, netinfo['netshape'][-1] - 1])
for net1 in net_id:
for net2 in net_id:
if net1 != net2:
vol[net1, net2, :] = [distance_func(tnet[communities == net1][:, communities == net2, t].flatten(),
tnet[communities == net1][:, communities == net2, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
else:
nettmp = tnet[communities ==
net1][:, communities == net2, :]
triu = np.triu_indices(nettmp.shape[0], k=1)
nettmp = nettmp[triu[0], triu[1], :]
vol[net1, net2, :] = [distance_func(nettmp[:, t].flatten(
), nettmp[:, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
elif calc == 'withincommunities':
withi = np.array([[ind[0][n], ind[1][n]] for n in range(
0, len(ind[0])) if communities[ind[0][n]] == communities[ind[1][n]]])
vol = [distance_func(tnet[withi[:, 0], withi[:, 1], t], tnet[withi[:, 0],
withi[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
elif calc == 'betweencommunities':
beti = np.array([[ind[0][n], ind[1][n]] for n in range(
0, len(ind[0])) if communities[ind[0][n]] != communities[ind[1][n]]])
vol = [distance_func(tnet[beti[:, 0], beti[:, 1], t], tnet[beti[:, 0],
beti[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
return vol | python | def volatility(tnet, distance_func_name='default', calc='global', communities=None, event_displacement=None):
r"""
Volatility of temporal networks.
Volatility is the average distance between consecutive time points of graphlets (difference is calculated either globally or per edge).
Parameters
----------
tnet : array or dict
temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'
D : str
Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).
calc : str
Version of volatility to calculate. Possibilities include:
'global' - (default): the average distance of all nodes for each consecutive time point).
'edge' - average distance between consecutive time points for each edge). Takes considerably longer
'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
'time' - returns volatility per time point
'communities' - returns volatility within and between communities (see communities). It is also returned per time-point and this may be changed in the future (with additional options)
'event_displacement' - calculates the volatility from a specified point. Returns time-series.
communities : array
Array of community indices (either (node) or (node,time) dimensions).
event_displacement : int
if calc = 'event_displacement', specify the temporal index that all other time-points are calculated in relation to.
Notes
-----
Volatility calculates the difference between network snapshots.
.. math:: V_t = D(G_t,G_{t+1})
Where D is some distance function (e.g. Hamming distance for binary matrices).
V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.
Community indexes are returned "as is" with a shape of [max(communities)+1,max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returned V[1,2] will correspond to indexes 1 and 2, and missing indexes (here 0 and 4) will be NaNs in their rows and columns. If this behaviour is unwanted, call clean_community_indexes first. This will probably change.
Examples
--------
Import everything needed.
>>> import teneto
>>> import numpy
>>> np.random.seed(1)
>>> tnet = teneto.TemporalNetwork(nettype='bu')
Here we generate a binary network where edges have a 0.5 chance of going "on" and, once on, a 0.2 chance of going "off"
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))
Calculate the volatility
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.5555555555555556
If we change the probabilities so that edges are certain to disappear the time-point after they appeared:
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))
This will make a more volatile network
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.1111111111111111
We can calculate the volatility per time instead
>>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
>>> len(vol_time)
9
>>> vol_time[0]
0.3333333333333333
Or per node:
>>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
>>> vol_node
array([0.07407407, 0.07407407, 0.07407407])
Here we see the volatility for each node was the same.
It is also possible to pass a community vector and the function will return volatility both within and between each community.
So the following has two communities:
>>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
>>> vol_com.shape
(2, 2, 9)
>>> vol_com[:,:,0]
array([[nan, 0.5],
[0.5, 0. ]])
And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.
Output
------
vol : array
"""
# Get input (C or G)
tnet, netinfo = process_input(tnet, ['C', 'G', 'TN'])
distance_func_name = check_distance_funciton_input(
distance_func_name, netinfo)
if not isinstance(distance_func_name, str):
raise ValueError('Distance metric must be a string')
# If not directional, only calc on the uppertriangle
if netinfo['nettype'][1] == 'd':
ind = np.triu_indices(tnet.shape[0], k=-tnet.shape[0])
elif netinfo['nettype'][1] == 'u':
ind = np.triu_indices(tnet.shape[0], k=1)
if calc == 'communities':
# Make sure communities is np array for indexing later on.
communities = np.array(communities)
if len(communities) != netinfo['netshape'][0]:
raise ValueError(
'When processing per network, communities vector must equal the number of nodes')
if communities.min() < 0:
raise ValueError(
'Communitiy assignments must be positive integers')
# Get chosen distance metric function
distance_func = getDistanceFunction(distance_func_name)
if calc == 'global':
vol = np.mean([distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
for t in range(0, tnet.shape[-1] - 1)])
elif calc == 'time':
vol = [distance_func(tnet[ind[0], ind[1], t], tnet[ind[0], ind[1], t + 1])
for t in range(0, tnet.shape[-1] - 1)]
elif calc == 'event_displacement':
vol = [distance_func(tnet[ind[0], ind[1], event_displacement],
tnet[ind[0], ind[1], t]) for t in range(0, tnet.shape[-1])]
# This takes quite a bit of time to loop through. When calculating per edge/node.
elif calc == 'edge' or calc == 'node':
vol = np.zeros([tnet.shape[0], tnet.shape[1]])
for i in ind[0]:
for j in ind[1]:
vol[i, j] = np.mean([distance_func(
tnet[i, j, t], tnet[i, j, t + 1]) for t in range(0, tnet.shape[-1] - 1)])
if netinfo['nettype'][1] == 'u':
vol = vol + np.transpose(vol)
if calc == 'node':
vol = np.mean(vol, axis=1)
elif calc == 'communities':
net_id = set(communities)
vol = np.zeros([max(net_id) + 1, max(net_id) +
1, netinfo['netshape'][-1] - 1])
for net1 in net_id:
for net2 in net_id:
if net1 != net2:
vol[net1, net2, :] = [distance_func(tnet[communities == net1][:, communities == net2, t].flatten(),
tnet[communities == net1][:, communities == net2, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
else:
nettmp = tnet[communities ==
net1][:, communities == net2, :]
triu = np.triu_indices(nettmp.shape[0], k=1)
nettmp = nettmp[triu[0], triu[1], :]
vol[net1, net2, :] = [distance_func(nettmp[:, t].flatten(
), nettmp[:, t + 1].flatten()) for t in range(0, tnet.shape[-1] - 1)]
elif calc == 'withincommunities':
withi = np.array([[ind[0][n], ind[1][n]] for n in range(
0, len(ind[0])) if communities[ind[0][n]] == communities[ind[1][n]]])
vol = [distance_func(tnet[withi[:, 0], withi[:, 1], t], tnet[withi[:, 0],
withi[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
elif calc == 'betweencommunities':
beti = np.array([[ind[0][n], ind[1][n]] for n in range(
0, len(ind[0])) if communities[ind[0][n]] != communities[ind[1][n]]])
vol = [distance_func(tnet[beti[:, 0], beti[:, 1], t], tnet[beti[:, 0],
beti[:, 1], t + 1]) for t in range(0, tnet.shape[-1] - 1)]
return vol | r"""
Volatility of temporal networks.
Volatility is the average distance between consecutive time points of graphlets (difference is calculated either globally or per edge).
Parameters
----------
tnet : array or dict
temporal network input (graphlet or contact). Nettype: 'bu','bd','wu','wd'
D : str
Distance function. Following options available: 'default', 'hamming', 'euclidean'. (Default implies hamming for binary networks, euclidean for weighted).
calc : str
Version of volatility to calculate. Possibilities include:
'global' - (default): the average distance of all nodes for each consecutive time point).
'edge' - average distance between consecutive time points for each edge). Takes considerably longer
'node' - (i.e. returns the average per node output when calculating volatility per 'edge').
'time' - returns volatility per time point
'communities' - returns volatility within and between communities (see communities). It is also returned per time-point and this may be changed in the future (with additional options)
'event_displacement' - calculates the volatility from a specified point. Returns time-series.
communities : array
Array of community indices (either (node) or (node,time) dimensions).
event_displacement : int
if calc = 'event_displacement', specify the temporal index that all other time-points are calculated in relation to.
Notes
-----
Volatility calculates the difference between network snapshots.
.. math:: V_t = D(G_t,G_{t+1})
Where D is some distance function (e.g. Hamming distance for binary matrices).
V can be calculated for the entire network (global), but can also be calculated for individual edges, nodes or given a community vector.
Community indexes are returned "as is" with a shape of [max(communities)+1,max(communities)+1]. So if the indexes used are [1,2,3,5], V.shape==(6,6). The returned V[1,2] will correspond to indexes 1 and 2, and missing indexes (here 0 and 4) will be NaNs in their rows and columns. If this behaviour is unwanted, call clean_community_indexes first. This will probably change.
Examples
--------
Import everything needed.
>>> import teneto
>>> import numpy
>>> np.random.seed(1)
>>> tnet = teneto.TemporalNetwork(nettype='bu')
Here we generate a binary network where edges have a 0.5 chance of going "on" and, once on, a 0.2 chance of going "off"
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,0.2))
Calculate the volatility
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.5555555555555556
If we change the probabilities so that edges are certain to disappear the time-point after they appeared:
>>> tnet.generatenetwork('rand_binomial', size=(3,10), prob=(0.5,1))
This will make a more volatile network
>>> tnet.calc_networkmeasure('volatility', distance_func_name='hamming')
0.1111111111111111
We can calculate the volatility per time instead
>>> vol_time = tnet.calc_networkmeasure('volatility', calc='time', distance_func_name='hamming')
>>> len(vol_time)
9
>>> vol_time[0]
0.3333333333333333
Or per node:
>>> vol_node = tnet.calc_networkmeasure('volatility', calc='node', distance_func_name='hamming')
>>> vol_node
array([0.07407407, 0.07407407, 0.07407407])
Here we see the volatility for each node was the same.
It is also possible to pass a community vector and the function will return volatility both within and between each community.
So the following has two communities:
>>> vol_com = tnet.calc_networkmeasure('volatility', calc='communities', communities=[0,1,1], distance_func_name='hamming')
>>> vol_com.shape
(2, 2, 9)
>>> vol_com[:,:,0]
array([[nan, 0.5],
[0.5, 0. ]])
And we see that, at time-point 0, there is some volatility between community 0 and 1 but no volatility within community 1. The reason for nan appearing is due to there only being 1 node in community 0.
Output
------
vol : array | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/volatility.py#L5-L188 |
wiheto/teneto | teneto/temporalcommunity/allegiance.py | allegiance | def allegiance(community):
"""
Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1
"""
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | python | def allegiance(community):
"""
Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1
"""
N = community.shape[0]
C = community.shape[1]
T = P = np.zeros([N, N])
for t in range(len(community[0, :])):
for i in range(len(community[:, 0])):
for j in range(len(community[:, 0])):
if i == j:
continue
# T_ij indicates the number of times that i and j are assigned to the same community across time
if community[i][t] == community[j][t]:
T[i, j] += 1
# module allegiance matrix, probability that ij were assigned to the same community
P = (1/C)*T
return P | Computes the allegiance matrix with values representing the probability that
nodes i and j were assigned to the same community by time-varying clustering methods.
parameters
----------
community : array
array of community assignment of size node,time
returns
-------
P : array
module allegiance matrix, with P_ij probability that area i and j are in the same community
Reference:
----------
Bassett, et al. (2013) “Robust detection of dynamic community structure in networks”, Chaos, 23, 1 | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/temporalcommunity/allegiance.py#L3-L39 |
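A small worked example, assuming ``allegiance`` is importable from ``teneto.temporalcommunity``; the community labels are invented:
    import numpy as np
    from teneto.temporalcommunity import allegiance

    # community assignments for 4 nodes over 3 time points
    comm = np.array([[0, 0, 1],
                     [0, 0, 1],
                     [1, 1, 1],
                     [1, 0, 0]])
    P = allegiance(comm)
    P[0, 1]   # 1.0 : nodes 0 and 1 share a community at every time point
    P[0, 2]   # approx. 0.33 : they coincide only at the final time point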
wiheto/teneto | teneto/generatenetwork/rand_poisson.py | rand_poisson | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'):
"""
Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed.
"""
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | python | def rand_poisson(nnodes, ncontacts, lam=1, nettype='bu', netinfo=None, netrep='graphlet'):
"""
Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed.
"""
if isinstance(ncontacts, list):
if len(ncontacts) != nnodes:
raise ValueError(
'Number of contacts, if a list, should be one per node')
if isinstance(lam, list):
if len(lam) != nnodes:
raise ValueError(
'Lambda value of Poisson distribution, if a list, should be one per node')
if isinstance(lam, list) and not isinstance(ncontacts, list) or not isinstance(lam, list) and isinstance(ncontacts, list):
raise ValueError(
'When one of lambda or ncontacts is given as a list, the other argument must also be a list.')
if nettype == 'bu':
edgen = int((nnodes*(nnodes-1))/2)
elif nettype == 'bd':
edgen = int(nnodes*nnodes)
if not isinstance(lam, list) and not isinstance(ncontacts, list):
icts = np.random.poisson(lam, size=(edgen, ncontacts))
net = np.zeros([edgen, icts.sum(axis=1).max()+1])
for n in range(edgen):
net[n, np.unique(np.cumsum(icts[n]))] = 1
else:
icts = []
ict_max = 0
for n in range(edgen):
icts.append(np.random.poisson(lam[n], size=ncontacts[n]))
if sum(icts[-1]) > ict_max:
ict_max = sum(icts[-1])
net = np.zeros([nnodes, ict_max+1])
for n in range(nnodes):
net[n, np.unique(np.cumsum(icts[n]))] = 1
if nettype == 'bu':
nettmp = np.zeros([nnodes, nnodes, net.shape[-1]])
ind = np.triu_indices(nnodes, k=1)
nettmp[ind[0], ind[1], :] = net
net = nettmp + nettmp.transpose([1, 0, 2])
elif nettype == 'bd':
net = net.reshape([nnodes, nnodes, net.shape[-1]], order='F')
net = set_diagonal(net, 0)
if netrep == 'contact':
if not netinfo:
netinfo = {}
netinfo['nettype'] = 'b' + nettype[-1]
net = graphlet2contact(net, netinfo)
return net | Generate a random network where intervals between contacts are distributed by a poisson distribution
Parameters
----------
nnodes : int
Number of nodes in networks
ncontacts : int or list
Number of expected contacts (i.e. edges). If list, number of contacts for each node.
Any zeros drawn are ignored so returned degree of network can be smaller than ncontacts.
lam : int or list
Expectation of interval.
nettype : str
'bu' or 'bd'
netinfo : dict
Dictionary of additional information
netrep : str
How the output should be.
If ncontacts is a list, so should lam.
Returns
-------
net : array or dict
Random network with intervals between active edges being Poisson distributed. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/generatenetwork/rand_poisson.py#L9-L92 |
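A brief sketch, assuming ``rand_poisson`` is re-exported from ``teneto.generatenetwork``:
    from teneto.generatenetwork import rand_poisson

    # 5-node binary undirected network, up to 4 contacts per edge (zero draws are ignored),
    # with a mean inter-contact interval of 2 time steps
    G = rand_poisson(5, ncontacts=4, lam=2, nettype='bu')
    G.shape   # (5, 5, T), where T depends on the intervals drawn
    C = rand_poisson(5, ncontacts=4, lam=2, nettype='bu', netrep='contact')   # contact-dict representation instead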
wiheto/teneto | teneto/networkmeasures/temporal_participation_coeff.py | temporal_participation_coeff | def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False):
r'''
Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes.
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Only positive matrices considered.
communities : array
community vector. Either 1D (node) community index or 2D (node,time).
removeneg : bool (default false)
If true, all values < 0 are made to be 0.
Returns
-------
P : array
participation coefficient
Notes
-----
Static participation coefficient is:
.. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2
Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_
This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t.
If directed, function sums axis=1, so tnet may need to be transposed beforehand depending on what type of directed part_coef you are interested in.
References
----------
.. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_]
'''
if communities is None:
if isinstance(tnet, dict):
if 'communities' in tnet.keys():
communities = tnet['communities']
else:
raise ValueError('Community index not found')
else:
raise ValueError('Community must be provided for graphlet input')
# Get input in right format
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
if tnet.nettype[0] == 'w':
# TODO add contingency when hdf5 data has negative edges
if tnet.hdf5 == False:
if sum(tnet.network['weight'] < 0) > 0 and not removeneg:
print(
'TENETO WARNING: negative edges exist when calculating participation coefficient.')
else:
tnet.network['weight'][tnet.network['weight'] < 0] = 0
part = np.zeros([tnet.netshape[0], tnet.netshape[1]])
if len(communities.shape) == 1:
for t in np.arange(0, tnet.netshape[1]):
C = communities
snapshot = tnet.get_network_when(t=t)
if tnet.nettype[1] == 'd':
i_at_t = snapshot['i'].values
else:
i_at_t = np.concatenate(
[snapshot['i'].values, snapshot['j'].values])
i_at_t = np.unique(i_at_t).tolist()
i_at_t = list(map(int, i_at_t))
for i in i_at_t:
# Calculate degree of node
if tnet.nettype[1] == 'd':
df = tnet.get_network_when(i=i, t=t)
j_at_t = df['j'].values
if tnet.nettype == 'wd':
k_i = df['weight'].sum()
elif tnet.nettype == 'bd':
k_i = len(df)
elif tnet.nettype[1] == 'u':
df = tnet.get_network_when(ij=i, t=t)
j_at_t = np.concatenate([df['i'].values, df['j'].values])
if tnet.nettype == 'wu':
k_i = df['weight'].sum()
elif tnet.nettype == 'bu':
k_i = len(df)
j_at_t = list(map(int, j_at_t))
for c in np.unique(C[j_at_t]):
ci = np.where(C == c)[0].tolist()
k_is = tnet.get_network_when(i=i, j=ci, t=t)
if tnet.nettype[1] == 'u':
k_is2 = tnet.get_network_when(j=i, i=ci, t=t)
k_is = pd.concat([k_is, k_is2])
if len(k_is) > 0:
if tnet.nettype[0] == 'b':
k_is = len(k_is)
else:
k_is = k_is['weight'].sum()
part[i, t] += np.square(k_is/k_i)
part[i_at_t, t] = 1 - part[i_at_t, t]
if decay is not None and t > 0:
part[i_at_t, t] += decay*part[i_at_t, t-1]
else:
for t in np.arange(0, tnet.netshape[1]):
snapshot = tnet.get_network_when(t=t)
if tnet.nettype[1] == 'd':
i_at_t = snapshot['i'].values
else:
i_at_t = np.concatenate(
[snapshot['i'].values, snapshot['j'].values])
i_at_t = np.unique(i_at_t).tolist()
i_at_t = list(map(int, i_at_t))
for i in i_at_t:
for tc in np.arange(0, tnet.netshape[1]):
C = communities[:, tc]
# Calculate degree of node
if tnet.nettype[1] == 'd':
df = tnet.get_network_when(i=i, t=t)
j_at_t = df['j'].values
if tnet.nettype == 'wd':
k_i = df['weight'].sum()
elif tnet.nettype == 'bd':
k_i = len(df)
elif tnet.nettype[1] == 'u':
df = tnet.get_network_when(ij=i, t=t)
j_at_t = np.concatenate(
[df['i'].values, df['j'].values])
if tnet.nettype == 'wu':
k_i = df['weight'].sum()
elif tnet.nettype == 'bu':
k_i = len(df)
j_at_t = list(map(int, j_at_t))
for c in np.unique(C[j_at_t]):
ci = np.where(C == c)[0].tolist()
k_is = tnet.get_network_when(i=i, j=ci, t=t)
if tnet.nettype[1] == 'u':
k_is2 = tnet.get_network_when(j=i, i=ci, t=t)
k_is = pd.concat([k_is, k_is2])
if tnet.nettype[0] == 'b':
k_is = len(k_is)
else:
k_is = k_is['weight'].sum()
part[i, t] += np.square(k_is/k_i)
part[i, t] = part[i, t] / tnet.netshape[1]
part[i_at_t, t] = 1 - part[i_at_t, t]
if decay is not None and t > 0:
part[i_at_t, t] += decay*part[i_at_t, t-1]
# Set any division by 0 to 0
part[np.isnan(part) == 1] = 0
return part | python | def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False):
r'''
Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes.
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Only positive matrices considered.
communities : array
community vector. Either 1D (node) community index or 2D (node,time).
removeneg : bool (default false)
If true, all values < 0 are made to be 0.
Returns
-------
P : array
participation coefficient
Notes
-----
Static participation coefficient is:
.. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2
Where s is the index of each community (:math:`N_M`), :math:`k_i` is the total degree of the node, and :math:`k_{is}` is the degree of connections within the community. [part-1]_
This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t.
If directed, the function sums over axis=1, so tnet may need to be transposed beforehand depending on which type of directed participation coefficient you are interested in.
References
----------
.. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_]
'''
if communities is None:
if isinstance(tnet, dict):
if 'communities' in tnet.keys():
communities = tnet['communities']
else:
raise ValueError('Community index not found')
else:
raise ValueError('Community must be provided for graphlet input')
# Get input in right format
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
if tnet.nettype[0] == 'w':
# TODO add contingency when hdf5 data has negative edges
if tnet.hdf5 == False:
if sum(tnet.network['weight'] < 0) > 0 and not removeneg:
print(
'TENETO WARNING: negative edges exist when calculating participation coefficient.')
else:
tnet.network['weight'][tnet.network['weight'] < 0] = 0
part = np.zeros([tnet.netshape[0], tnet.netshape[1]])
if len(communities.shape) == 1:
for t in np.arange(0, tnet.netshape[1]):
C = communities
snapshot = tnet.get_network_when(t=t)
if tnet.nettype[1] == 'd':
i_at_t = snapshot['i'].values
else:
i_at_t = np.concatenate(
[snapshot['i'].values, snapshot['j'].values])
i_at_t = np.unique(i_at_t).tolist()
i_at_t = list(map(int, i_at_t))
for i in i_at_t:
# Calculate degree of node
if tnet.nettype[1] == 'd':
df = tnet.get_network_when(i=i, t=t)
j_at_t = df['j'].values
if tnet.nettype == 'wd':
k_i = df['weight'].sum()
elif tnet.nettype == 'bd':
k_i = len(df)
elif tnet.nettype[1] == 'u':
df = tnet.get_network_when(ij=i, t=t)
j_at_t = np.concatenate([df['i'].values, df['j'].values])
if tnet.nettype == 'wu':
k_i = df['weight'].sum()
elif tnet.nettype == 'bu':
k_i = len(df)
j_at_t = list(map(int, j_at_t))
for c in np.unique(C[j_at_t]):
ci = np.where(C == c)[0].tolist()
k_is = tnet.get_network_when(i=i, j=ci, t=t)
if tnet.nettype[1] == 'u':
k_is2 = tnet.get_network_when(j=i, i=ci, t=t)
k_is = pd.concat([k_is, k_is2])
if len(k_is) > 0:
if tnet.nettype[0] == 'b':
k_is = len(k_is)
else:
k_is = k_is['weight'].sum()
part[i, t] += np.square(k_is/k_i)
part[i_at_t, t] = 1 - part[i_at_t, t]
if decay is not None and t > 0:
part[i_at_t, t] += decay*part[i_at_t, t-1]
else:
for t in np.arange(0, tnet.netshape[1]):
snapshot = tnet.get_network_when(t=t)
if tnet.nettype[1] == 'd':
i_at_t = snapshot['i'].values
else:
i_at_t = np.concatenate(
[snapshot['i'].values, snapshot['j'].values])
i_at_t = np.unique(i_at_t).tolist()
i_at_t = list(map(int, i_at_t))
for i in i_at_t:
for tc in np.arange(0, tnet.netshape[1]):
C = communities[:, tc]
# Calculate degree of node
if tnet.nettype[1] == 'd':
df = tnet.get_network_when(i=i, t=t)
j_at_t = df['j'].values
if tnet.nettype == 'wd':
k_i = df['weight'].sum()
elif tnet.nettype == 'bd':
k_i = len(df)
elif tnet.nettype[1] == 'u':
df = tnet.get_network_when(ij=i, t=t)
j_at_t = np.concatenate(
[df['i'].values, df['j'].values])
if tnet.nettype == 'wu':
k_i = df['weight'].sum()
elif tnet.nettype == 'bu':
k_i = len(df)
j_at_t = list(map(int, j_at_t))
for c in np.unique(C[j_at_t]):
ci = np.where(C == c)[0].tolist()
k_is = tnet.get_network_when(i=i, j=ci, t=t)
if tnet.nettype[1] == 'u':
k_is2 = tnet.get_network_when(j=i, i=ci, t=t)
k_is = pd.concat([k_is, k_is2])
if tnet.nettype[0] == 'b':
k_is = len(k_is)
else:
k_is = k_is['weight'].sum()
part[i, t] += np.square(k_is/k_i)
part[i, t] = part[i, t] / tnet.netshape[1]
part[i_at_t, t] = 1 - part[i_at_t, t]
if decay is not None and t > 0:
part[i_at_t, t] += decay*part[i_at_t, t-1]
# Set any division by 0 to 0
part[np.isnan(part) == 1] = 0
return part | r'''
Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes.
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Only positive matrices considered.
communities : array
community vector. Either 1D (node) community index or 2D (node,time).
removeneg : bool (default false)
If true, all values < 0 are made to be 0.
Returns
-------
P : array
participation coefficient
Notes
-----
Static participation coefficient is:
.. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2
Where s is the index of each community (:math:`N_M`), :math:`k_i` is the total degree of the node, and :math:`k_{is}` is the degree of connections within the community. [part-1]_
This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t.
If directed, the function sums over axis=1, so tnet may need to be transposed beforehand depending on which type of directed participation coefficient you are interested in.
References
----------
.. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_] | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_participation_coeff.py#L7-L162 |
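A minimal usage sketch for the function above (not from the repository): it assumes teneto exposes the function as teneto.networkmeasures.temporal_participation_coeff, and uses a made-up binary undirected graphlet array with a hypothetical 1D community vector.
# Illustrative sketch only -- the array, seed and community labels are made up.
import numpy as np
import teneto

np.random.seed(42)
G = (np.random.rand(6, 6, 10) > 0.7).astype(int)   # node x node x time graphlet array
G = G * np.transpose(G, [1, 0, 2])                  # symmetrise so the network is undirected
communities = np.array([0, 0, 0, 1, 1, 1])          # one community label per node (assumed)

# Expected to return a (node x time) array of participation coefficients
part = teneto.networkmeasures.temporal_participation_coeff(G, communities=communities)
print(part.shape)                                   # should be (6, 10)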
wiheto/teneto | teneto/plot/graphlet_stack_plot.py | graphlet_stack_plot | def graphlet_stack_plot(netin, ax, q=10, cmap='Reds', gridcolor='k', borderwidth=2, bordercolor=None, Fs=1, timeunit='', t0=1, sharpen='yes', vminmax='minmax'):
r'''
Returns matplotlib axis handle for graphlet_stack_plot. This is a row of transformed connectivity matrices to look like a 3D stack.
Parameters
----------
netin : array, dict
network input (graphlet or contact)
ax : matplotlib ax handles.
q : int
Quality. Increasing this will lead to smoother axes but take up more memory.
cmap : str
Colormap (matplotlib) of graphlets
Fs : int
Sampling rate. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
timeunit : str
Unit of time for xlabel. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
t0 : int
What should the first time point be called. Should be integer. Default 1.
gridcolor : str
The color of the grid section of the graphlets. Set to 'none' if not wanted.
borderwidth : int
Scales the size of border. (at the moment it cannot be set to 0.)
bordercolor :
color of the border (at the moment it must be in RGB values between 0 and 1 -> this will be changed sometime in the future). Default: black.
vminmax : str
'maxabs', 'minmax' (default), or list/array with length of 2. Specifies the min and max colormap value of graphlets. Maxabs entails [-max(abs(G)),max(abs(G))], minmax entails [min(G), max(G)].
Returns
--------
ax : matplotlib ax handle
Note
------
This function can require a lot of RAM with larger networks.
Note
------
At the moment bordercolor cannot be set to zero. To remove the border, set borderwidth=1 and bordercolor=[1,1,1] as a temporary workaround.
Examples
-------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap='Greys')
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Greys'
ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap=cmap)
fig.show()
'''
# Get input type (C, G, TO)
inputType = checkInput(netin)
# Convert TO to C representation
if inputType == 'TO':
netin = netin.contact
inputType = 'C'
# Convert C representation to G
if inputType == 'C':
if timeunit == '':
timeunit = netin['timeunit']
if t0 == 1:
t0 = netin['t0']
if Fs == 1:
Fs = netin['Fs']
netin = contact2graphlet(netin)
if timeunit != '':
timeunit = ' (' + timeunit + ')'
if bordercolor is None:
bordercolor = [0, 0, 0]
if not isinstance(borderwidth, int):
borderwidth = int(borderwidth)
print('Warning: borderwidth should be an integer. Converting to integer.')
# x and y ranges for each of the graphlet plots
v = np.arange(0, netin.shape[0] + 1)
vr = np.arange(netin.shape[0], -1, -1)
# Preallocate matrix
if vminmax == '' or vminmax == 'absmax' or vminmax == 'maxabs':
vminmax = [-np.nanmax(np.abs(netin)), np.nanmax(np.abs(netin))]
elif vminmax == 'minmax':
vminmax = [np.nanmin(netin), np.nanmax(netin)]
qb = q * borderwidth
figmat = np.zeros([80 * q + (qb * 2), int(((netin.shape[-1]) *
(80 * q) + (qb * 2)) - ((netin.shape[-1] - 1) * q * 80) / 2), 4])
for n in range(0, netin.shape[-1]):
# Create graphlet
figtmp, axtmp = plt.subplots(
1, facecolor='white', figsize=(q, q), dpi=80)
axtmp.pcolormesh(v, vr, netin[:, :, n], cmap=cmap, edgecolor=gridcolor,
linewidth=q * 2, vmin=vminmax[0], vmax=vminmax[1])
axtmp.set_xticklabels('')
axtmp.set_yticklabels('')
axtmp.set_xticks([])
axtmp.set_yticks([])
x0, x1 = axtmp.get_xlim()
y0, y1 = axtmp.get_ylim()
axtmp.set_aspect((x1 - x0) / (y1 - y0))
axtmp.spines['left'].set_visible(False)
axtmp.spines['right'].set_visible(False)
axtmp.spines['top'].set_visible(False)
axtmp.spines['bottom'].set_visible(False)
plt.subplots_adjust(left=0, bottom=0, right=1,
top=1, wspace=0, hspace=0)
# Convert graphlet to RGB values
figtmp.canvas.draw()
figmattmp = np.fromstring(
figtmp.canvas.tostring_rgb(), dtype=np.uint8, sep='')
figmattmp = figmattmp.reshape(
figtmp.canvas.get_width_height()[::-1] + (3,))
# Close figure for memory
plt.close(figtmp)
# Manually add a border
figmattmp_withborder = np.zeros(
[figmattmp.shape[0] + (qb * 2), figmattmp.shape[1] + (qb * 2), 3]) + (np.array(bordercolor) * 255)
figmattmp_withborder[qb:-qb, qb:-qb, :] = figmattmp
# Make corners rounded. First make a circle and then take the relevant quarter for each corner.
y, x = np.ogrid[-qb: qb + 1, -qb: qb + 1]
mask = x * x + y * y <= qb * qb
# A little clumsy. Should improve
Mq1 = np.vstack([[mask[:qb, :qb] == 0], [mask[:qb, :qb] == 0], [
mask[:qb, :qb] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[:qb, :qb, :][Mq1] = 255
Mq1 = np.vstack([[mask[:qb, -qb:] == 0], [mask[:qb, -qb:]
== 0], [mask[:qb, -qb:] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[:qb, -qb:, :][Mq1] = 255
Mq1 = np.vstack([[mask[-qb:, :qb] == 0], [mask[-qb:, :qb]
== 0], [mask[-qb:, :qb] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[-qb:, :qb, :][Mq1] = 255
Mq1 = np.vstack([[mask[-qb:, -qb:] == 0], [mask[-qb:, -qb:]
== 0], [mask[-qb:, -qb:] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[-qb:, -qb:, :][Mq1] = 255
# Scale and shear
scale = np.matrix([[1.5, 0, 0], [0, 3, 0], [0, 0, 1]])
sheer = np.matrix([[1, np.tan(np.pi / 12), 0], [0, 1, 0], [0, 0, 1]])
# apply affine transformation
figmattmp = ndimage.affine_transform(
figmattmp_withborder, sheer * (scale), offset=[-35 * q, 0, 0], cval=255)
# At the moment the alpha part does not work if the background colour is anything but white.
# Also used for detecting where the graphlets are in the image.
trans = np.where(np.sum(figmattmp, axis=2) == 255 * 3)
alphamat = np.ones([figmattmp.shape[0], figmattmp.shape[0]])
alphamat[trans[0], trans[1]] = 0
figmattmp = np.dstack([figmattmp, alphamat])
# Add graphlet to matrix
if n == 0:
figmat[:, n * (80 * q):((n + 1) * (80 * q) + (qb * 2))] = figmattmp
else:
figmat[:, n * (80 * q) - int((n * q * 80) / 2):int(((n + 1)
* (80 * q) + (qb * 2)) - (n * q * 80) / 2)] = figmattmp
# Fix colours - due to imshow's weirdness when taking nxnx3
figmat[:, :, 0:3] = figmat[:, :, 0:3] / 255
# Cut off the end of the matrix that isn't needed
figmat = figmat[:, :-int((q / 2) * 80), :]
fid = np.where(figmat[:, :, -1] > 0)
fargmin = np.argmin(fid[0])
ymax = np.max(fid[0])
yright = np.max(np.where(figmat[:, fid[1][fargmin], -1] > 0))
xtickloc = np.where(figmat[ymax, :, -1] > 0)[0]
# In case there are multiple cases of xtickloc in same graphlet (i.e. they all have the same lowest value)
xtickloc = np.delete(xtickloc, np.where(np.diff(xtickloc) == 1)[0] + 1)
fid = np.where(figmat[:, :, -1] > 0)
ymin = np.min(fid[0])
topfig = np.where(figmat[ymin, :, -1] > 0)[0]
topfig = topfig[0:len(topfig):int(len(topfig) / netin.shape[-1])]
# Make squares of non transparency around each figure (this fixes transparency issues when white is in the colormap)
# for n in range(0,len(topfig)):
# fid=np.where(figmat[ymin:ymax,xtickloc[n]:topfig[n],-1]==0)
# figmat[ymin:ymax,xtickloc[n]:topfig[n],:3][fid[0],fid[1]]=1
# figmat[ymin+q:ymax-q,xtickloc[n]+q:topfig[n]-q,-1]=1
# Create figure
# Sharpen edges of the figure with a median filter
if sharpen == 'yes':
figmat[:, :, :-1] = ndimage.median_filter(figmat[:, :, :-1], 3)
ax.imshow(figmat[:, :, :-1], zorder=1)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_xticks([])
ax.set_yticks([])
L = int((((netin.shape[-1] - 3) + 1) * (80 * q) +
(qb * 2)) - ((netin.shape[-1] - 3) * q * 80) / 2 - q)
_ = [ax.plot(range(topfig[i], xt), np.zeros(len(range(topfig[i], xt))) + yright,
color='k', linestyle=':', zorder=2) for i, xt in enumerate(xtickloc[1:])]
ax.plot(range(0, L), np.zeros(L) + ymax,
color='k', linestyle=':', zorder=2)
_ = [ax.plot(np.zeros(q * 10) + xt, np.arange(ymax, ymax + q * 10),
color='k', linestyle=':', zorder=2) for xt in xtickloc]
_ = [ax.text(xt, ymax + q * 20, str(round((i + t0) * Fs, 5)),
horizontalalignment='center',) for i, xt in enumerate(xtickloc)]
ylim = ax.axes.get_ylim()
xlim = ax.axes.get_xlim()
ax.set_ylim(ylim[0] + q * 15, 0)
ax.set_xlim(xlim[0] - q * 20, xlim[1])
ax.set_xlabel('Time' + timeunit)
return ax | python | def graphlet_stack_plot(netin, ax, q=10, cmap='Reds', gridcolor='k', borderwidth=2, bordercolor=None, Fs=1, timeunit='', t0=1, sharpen='yes', vminmax='minmax'):
r'''
Returns matplotlib axis handle for graphlet_stack_plot. This is a row of transformed connectivity matrices to look like a 3D stack.
Parameters
----------
netin : array, dict
network input (graphlet or contact)
ax : matplotlib ax handles.
q : int
Quality. Increasing this will lead to smoother axes but take up more memory.
cmap : str
Colormap (matplotlib) of graphlets
Fs : int
Sampling rate. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
timeunit : str
Unit of time for xlabel. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
t0 : int
What should the first time point be called. Should be integer. Default 1.
gridcolor : str
The color of the grid section of the graphlets. Set to 'none' if not wanted.
borderwidth : int
Scales the size of border. (at the moment it cannot be set to 0.)
bordercolor :
color of the border (at the moment it must be in RGB values between 0 and 1 -> this will be changed sometime in the future). Default: black.
vminmax : str
'maxabs', 'minmax' (default), or list/array with length of 2. Specifies the min and max colormap value of graphlets. Maxabs entails [-max(abs(G)),max(abs(G))], minmax entails [min(G), max(G)].
Returns
--------
ax : matplotlib ax handle
Note
------
This function can require a lot of RAM with larger networks.
Note
------
At the moment bordercolor cannot be set to zero. To remove the border, set borderwidth=1 and bordercolor=[1,1,1] as a temporary workaround.
Examples
-------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap='Greys')
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Greys'
ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap=cmap)
fig.show()
'''
# Get input type (C, G, TO)
inputType = checkInput(netin)
# Convert TO to C representation
if inputType == 'TO':
netin = netin.contact
inputType = 'C'
# Convert C representation to G
if inputType == 'C':
if timeunit == '':
timeunit = netin['timeunit']
if t0 == 1:
t0 = netin['t0']
if Fs == 1:
Fs = netin['Fs']
netin = contact2graphlet(netin)
if timeunit != '':
timeunit = ' (' + timeunit + ')'
if bordercolor is None:
bordercolor = [0, 0, 0]
if not isinstance(borderwidth, int):
borderwidth = int(borderwidth)
print('Warning: borderwidth should be an integer. Converting to integer.')
# x and y ranges for each of the graphlet plots
v = np.arange(0, netin.shape[0] + 1)
vr = np.arange(netin.shape[0], -1, -1)
# Preallocate matrix
if vminmax == '' or vminmax == 'absmax' or vminmax == 'maxabs':
vminmax = [-np.nanmax(np.abs(netin)), np.nanmax(np.abs(netin))]
elif vminmax == 'minmax':
vminmax = [np.nanmin(netin), np.nanmax(netin)]
qb = q * borderwidth
figmat = np.zeros([80 * q + (qb * 2), int(((netin.shape[-1]) *
(80 * q) + (qb * 2)) - ((netin.shape[-1] - 1) * q * 80) / 2), 4])
for n in range(0, netin.shape[-1]):
# Create graphlet
figtmp, axtmp = plt.subplots(
1, facecolor='white', figsize=(q, q), dpi=80)
axtmp.pcolormesh(v, vr, netin[:, :, n], cmap=cmap, edgecolor=gridcolor,
linewidth=q * 2, vmin=vminmax[0], vmax=vminmax[1])
axtmp.set_xticklabels('')
axtmp.set_yticklabels('')
axtmp.set_xticks([])
axtmp.set_yticks([])
x0, x1 = axtmp.get_xlim()
y0, y1 = axtmp.get_ylim()
axtmp.set_aspect((x1 - x0) / (y1 - y0))
axtmp.spines['left'].set_visible(False)
axtmp.spines['right'].set_visible(False)
axtmp.spines['top'].set_visible(False)
axtmp.spines['bottom'].set_visible(False)
plt.subplots_adjust(left=0, bottom=0, right=1,
top=1, wspace=0, hspace=0)
# Convert graphlet to RGB values
figtmp.canvas.draw()
figmattmp = np.fromstring(
figtmp.canvas.tostring_rgb(), dtype=np.uint8, sep='')
figmattmp = figmattmp.reshape(
figtmp.canvas.get_width_height()[::-1] + (3,))
# Close figure for memory
plt.close(figtmp)
# Manually add a border
figmattmp_withborder = np.zeros(
[figmattmp.shape[0] + (qb * 2), figmattmp.shape[1] + (qb * 2), 3]) + (np.array(bordercolor) * 255)
figmattmp_withborder[qb:-qb, qb:-qb, :] = figmattmp
# Make corners rounded. First make a circle and then take the relevant quarter for each corner.
y, x = np.ogrid[-qb: qb + 1, -qb: qb + 1]
mask = x * x + y * y <= qb * qb
# A little clumsy. Should improve
Mq1 = np.vstack([[mask[:qb, :qb] == 0], [mask[:qb, :qb] == 0], [
mask[:qb, :qb] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[:qb, :qb, :][Mq1] = 255
Mq1 = np.vstack([[mask[:qb, -qb:] == 0], [mask[:qb, -qb:]
== 0], [mask[:qb, -qb:] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[:qb, -qb:, :][Mq1] = 255
Mq1 = np.vstack([[mask[-qb:, :qb] == 0], [mask[-qb:, :qb]
== 0], [mask[-qb:, :qb] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[-qb:, :qb, :][Mq1] = 255
Mq1 = np.vstack([[mask[-qb:, -qb:] == 0], [mask[-qb:, -qb:]
== 0], [mask[-qb:, -qb:] == 0]]).transpose([1, 2, 0])
figmattmp_withborder[-qb:, -qb:, :][Mq1] = 255
# Scale and shear
scale = np.matrix([[1.5, 0, 0], [0, 3, 0], [0, 0, 1]])
sheer = np.matrix([[1, np.tan(np.pi / 12), 0], [0, 1, 0], [0, 0, 1]])
# apply affine transformation
figmattmp = ndimage.affine_transform(
figmattmp_withborder, sheer * (scale), offset=[-35 * q, 0, 0], cval=255)
# At the moment the alpha part does not work if the background colour is anything but white.
# Also used for detecting where the graphlets are in the image.
trans = np.where(np.sum(figmattmp, axis=2) == 255 * 3)
alphamat = np.ones([figmattmp.shape[0], figmattmp.shape[0]])
alphamat[trans[0], trans[1]] = 0
figmattmp = np.dstack([figmattmp, alphamat])
# Add graphlet to matrix
if n == 0:
figmat[:, n * (80 * q):((n + 1) * (80 * q) + (qb * 2))] = figmattmp
else:
figmat[:, n * (80 * q) - int((n * q * 80) / 2):int(((n + 1)
* (80 * q) + (qb * 2)) - (n * q * 80) / 2)] = figmattmp
# Fix colours - due to imshow's weirdness when taking nxnx3
figmat[:, :, 0:3] = figmat[:, :, 0:3] / 255
# Cut off the end of the matrix that isn't needed
figmat = figmat[:, :-int((q / 2) * 80), :]
fid = np.where(figmat[:, :, -1] > 0)
fargmin = np.argmin(fid[0])
ymax = np.max(fid[0])
yright = np.max(np.where(figmat[:, fid[1][fargmin], -1] > 0))
xtickloc = np.where(figmat[ymax, :, -1] > 0)[0]
# In case there are multiple cases of xtickloc in same graphlet (i.e. they all have the same lowest value)
xtickloc = np.delete(xtickloc, np.where(np.diff(xtickloc) == 1)[0] + 1)
fid = np.where(figmat[:, :, -1] > 0)
ymin = np.min(fid[0])
topfig = np.where(figmat[ymin, :, -1] > 0)[0]
topfig = topfig[0:len(topfig):int(len(topfig) / netin.shape[-1])]
# Make squares of non transparency around each figure (this fixes transparency issues when white is in the colormap)
# for n in range(0,len(topfig)):
# fid=np.where(figmat[ymin:ymax,xtickloc[n]:topfig[n],-1]==0)
# figmat[ymin:ymax,xtickloc[n]:topfig[n],:3][fid[0],fid[1]]=1
# figmat[ymin+q:ymax-q,xtickloc[n]+q:topfig[n]-q,-1]=1
# Create figure
# Sharpen edges of the figure with a median filter
if sharpen == 'yes':
figmat[:, :, :-1] = ndimage.median_filter(figmat[:, :, :-1], 3)
ax.imshow(figmat[:, :, :-1], zorder=1)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_xticks([])
ax.set_yticks([])
L = int((((netin.shape[-1] - 3) + 1) * (80 * q) +
(qb * 2)) - ((netin.shape[-1] - 3) * q * 80) / 2 - q)
_ = [ax.plot(range(topfig[i], xt), np.zeros(len(range(topfig[i], xt))) + yright,
color='k', linestyle=':', zorder=2) for i, xt in enumerate(xtickloc[1:])]
ax.plot(range(0, L), np.zeros(L) + ymax,
color='k', linestyle=':', zorder=2)
_ = [ax.plot(np.zeros(q * 10) + xt, np.arange(ymax, ymax + q * 10),
color='k', linestyle=':', zorder=2) for xt in xtickloc]
_ = [ax.text(xt, ymax + q * 20, str(round((i + t0) * Fs, 5)),
horizontalalignment='center',) for i, xt in enumerate(xtickloc)]
ylim = ax.axes.get_ylim()
xlim = ax.axes.get_xlim()
ax.set_ylim(ylim[0] + q * 15, 0)
ax.set_xlim(xlim[0] - q * 20, xlim[1])
ax.set_xlabel('Time' + timeunit)
return ax | r'''
Returns matplotlib axis handle for graphlet_stack_plot. This is a row of transformed connectivity matrices to look like a 3D stack.
Parameters
----------
netin : array, dict
network input (graphlet or contact)
ax : matplotlib ax handles.
q : int
Quality. Increasing this will lead to smoother axes but take up more memory.
cmap : str
Colormap (matplotlib) of graphlets
Fs : int
Sampling rate. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
timeunit : str
Unit of time for xlabel. Same as contact-representation (if netin is contact, and input is unset, contact dictionary is used)
t0 : int
What should the first time point be called. Should be integer. Default 1.
gridcolor : str
The color of the grid section of the graphlets. Set to 'none' if not wanted.
borderwidth : int
Scales the size of border. (at the moment it cannot be set to 0.)
bordercolor :
color of the border (at the moment it must be in RGB values between 0 and 1 -> this will be changed sometime in the future). Default: black.
vminmax : str
'maxabs', 'minmax' (default), or list/array with length of 2. Specifies the min and max colormap value of graphlets. Maxabs entails [-max(abs(G)),max(abs(G))], minmax entails [min(G), max(G)].
Returns
--------
ax : matplotlib ax handle
Note
------
This function can require a lot of RAM with larger networks.
Note
------
At the moment bordercolor cannot be set to zero. To remove the border, set borderwidth=1 and bordercolor=[1,1,1] as a temporary workaround.
Examples
-------
Create a network with some metadata
>>> import numpy as np
>>> import teneto
>>> import matplotlib.pyplot as plt
>>> np.random.seed(2017) # For reproducibility
>>> N = 5 # Number of nodes
>>> T = 10 # Number of timepoints
>>> # Probability of edge activation
>>> birth_rate = 0.2
>>> death_rate = .9
>>> # Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
>>> cfg={}
>>> cfg['Fs'] = 1
>>> cfg['timeunit'] = 'Years'
>>> cfg['t0'] = 2007 #First year in network
>>> #Generate network
>>> C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
Now this network can be plotted
>>> fig,ax = plt.subplots(figsize=(10,3))
>>> ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap='Greys')
>>> fig.show()
.. plot::
import numpy as np
import teneto
import matplotlib.pyplot as plt
np.random.seed(2017) # For reproducibility
N = 5 # Number of nodes
T = 10 # Number of timepoints
# Probability of edge activation
birth_rate = 0.2
death_rate = .9
# Add node names into the network and say time units are years, go 1 year per graphlet and startyear is 2007
cfg={}
cfg['Fs'] = 1
cfg['timeunit'] = 'Years'
cfg['t0'] = 2007 #First year in network
#Generate network
C = teneto.generatenetwork.rand_binomial([N,T],[birth_rate, death_rate],'contact','bu',netinfo=cfg)
fig,ax = plt.subplots(figsize=(10,3))
cmap = 'Greys'
ax = teneto.plot.graphlet_stack_plot(C,ax,q=10,cmap=cmap)
fig.show() | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/plot/graphlet_stack_plot.py#L9-L271 |
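The 3D-stack look in the function above comes from shearing and scaling each rendered graphlet image with scipy's affine_transform. A stripped-down sketch of just that step, using a made-up RGB image in place of a rendered graphlet (the offset and matrices here are illustrative only):
# Sketch of the shear-plus-scale step; the image and offset values are illustrative.
import numpy as np
from scipy import ndimage

img = np.random.rand(80, 80, 3) * 255                                 # stand-in for a rendered graphlet
scale = np.array([[1.5, 0, 0], [0, 3, 0], [0, 0, 1]])                 # stretch rows/columns
shear = np.array([[1, np.tan(np.pi / 12), 0], [0, 1, 0], [0, 0, 1]])  # tilt the image

# cval=255 fills the background exposed by the transform with white, as in the code above
tilted = ndimage.affine_transform(img, shear @ scale, offset=[-35, 0, 0], cval=255)
print(tilted.shape)                                                   # same shape, tilted content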
wiheto/teneto | teneto/communitydetection/tctc.py | partition_inference | def partition_inference(tctc_mat, comp, tau, sigma, kappa):
r"""
Takes a TCTC trajectory matrix and returns a dataframe where all multi-label communities are listed.
Can take a little bit of time with large datasets; optimization could remove some for loops.
"""
communityinfo = {}
communityinfo['community'] = []
communityinfo['start'] = np.empty(0)
communityinfo['end'] = np.empty(0)
communityinfo['size'] = np.empty(0)
for i, tcomp in enumerate(comp):
# This can go in parallel loop
if len(tcomp) > 0:
for traj in tcomp:
# Check it does not already exist.
ignore = 0
preexisting = 0
if i != 0:
cutoff = i-1-kappa
if cutoff < 0:
cutoff = 0
if np.any(np.sum(np.sum(tctc_mat[traj, :, cutoff:i][:, traj], axis=0), axis=0) == np.power(len(traj), 2)):
# Make sure that a small trajectory could exist
for checknode in np.where(communityinfo['end']>=cutoff)[0]:
if traj == communityinfo['community'][checknode]:
ignore = 1
if ignore == 0:
for checknode in np.where(communityinfo['end']>=cutoff)[0]:
if set(communityinfo['community'][checknode]).issuperset(traj):
preexisting = 1
if ignore == 0:
# Check how long it continues
# For efficiency, increase in blocks
approxmaxlength = tau*2
a = np.sum(
np.sum(tctc_mat[traj, :, i:i+approxmaxlength][:, traj], axis=0), axis=0)
if len(traj)*len(traj)*approxmaxlength == a.sum():
ok = 0
ii = 1
while ok == 0:
b = np.sum(np.sum(
tctc_mat[traj, :, i+(approxmaxlength*ii):i+(approxmaxlength*(ii+1))][:, traj], axis=0), axis=0)
a = np.append(a, b)
if len(traj)*len(traj)*approxmaxlength != b.sum():
ok = 1
if i+(approxmaxlength*(ii+1)) > tctc_mat.shape[-1]:
ok = 1
ii += 1
a = np.where(a == np.power(len(traj), 2))[0]
# Add an additional value that is false in case the run reaches the end of the time series
if len(a) == 1:
stopind = i + 1
else:
a = np.append(a, a.max()+kappa+2)
# Find the stop index (if stopind = 4 and start = 0, then tctc_mat[:,:,start:stopind]==1)
stopind = i + np.split(a, np.where(
np.diff(a) > kappa+1)[0]+1)[0][-1] + 1
# Add trajectory to dictionary
if ((stopind - i) >= tau or preexisting == 1) and len(traj) >= sigma:
communityinfo['community'].append(sorted(traj))
communityinfo['start'] = np.append(communityinfo['start'], int(i))
communityinfo['end'] = np.append(
communityinfo['end'], int(stopind))
communityinfo['size'] = np.append(communityinfo['size'], len(traj))
communityinfo = pd.DataFrame(communityinfo)
communityinfo['start'] = communityinfo['start'].astype(int)
communityinfo['end'] = communityinfo['end'].astype(int)
# First check that there is not already a trajectory that is ongoing
badrows = []
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] == v[1]['end'])
for u in communityinfo[skipselrule].iterrows():
a = 1
if u[1]['start'] > v[1]['start'] and sorted(u[1]['community']) == sorted(v[1]['community']):
badrows.append(u[0])
communityinfo = communityinfo.drop(badrows)
# Then see if any subset trajectory can be placed earlier in time.
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] <= v[1]['start']) & (
communityinfo['end']+kappa >= v[1]['start'])
for u in communityinfo[skipselrule].iterrows():
a = 1
if set(u[1]['community']).issuperset(v[1]['community']):
communityinfo.loc[v[0], 'start'] = u[1]['start']
# It is possible to make the condition below effective_length
communityinfo['length'] = np.array(communityinfo['end']) - np.array(communityinfo['start'])
communityinfo = communityinfo[communityinfo['length'] >= tau]
communityinfo = communityinfo[communityinfo['size'] >= sigma]
# Make sure that the trajectory is not completely engulfed by another
badrows = []
if kappa > 0:
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] == v[1]['end']) & (
communityinfo['start'] < v[1]['start'])
for u in communityinfo[skipselrule].iterrows():
if set(v[1]['community']).issubset(u[1]['community']):
badrows.append(v[0])
communityinfo = communityinfo.drop(badrows)
return communityinfo | python | def partition_inference(tctc_mat, comp, tau, sigma, kappa):
r"""
Takes a TCTC trajectory matrix and returns a dataframe where all multi-label communities are listed.
Can take a little bit of time with large datasets; optimization could remove some for loops.
"""
communityinfo = {}
communityinfo['community'] = []
communityinfo['start'] = np.empty(0)
communityinfo['end'] = np.empty(0)
communityinfo['size'] = np.empty(0)
for i, tcomp in enumerate(comp):
# This can go in parallel loop
if len(tcomp) > 0:
for traj in tcomp:
# Check it does not already exist.
ignore = 0
preexisting = 0
if i != 0:
cutoff = i-1-kappa
if cutoff < 0:
cutoff = 0
if np.any(np.sum(np.sum(tctc_mat[traj, :, cutoff:i][:, traj], axis=0), axis=0) == np.power(len(traj), 2)):
# Make sure that a small trajectory could exist
for checknode in np.where(communityinfo['end']>=cutoff)[0]:
if traj == communityinfo['community'][checknode]:
ignore = 1
if ignore == 0:
for checknode in np.where(communityinfo['end']>=cutoff)[0]:
if set(communityinfo['community'][checknode]).issuperset(traj):
preexisting = 1
if ignore == 0:
# Check how long it continues
# For efficiency, increase in blocks
approxmaxlength = tau*2
a = np.sum(
np.sum(tctc_mat[traj, :, i:i+approxmaxlength][:, traj], axis=0), axis=0)
if len(traj)*len(traj)*approxmaxlength == a.sum():
ok = 0
ii = 1
while ok == 0:
b = np.sum(np.sum(
tctc_mat[traj, :, i+(approxmaxlength*ii):i+(approxmaxlength*(ii+1))][:, traj], axis=0), axis=0)
a = np.append(a, b)
if len(traj)*len(traj)*approxmaxlength != b.sum():
ok = 1
if i+(approxmaxlength*(ii+1)) > tctc_mat.shape[-1]:
ok = 1
ii += 1
a = np.where(a == np.power(len(traj), 2))[0]
# Add an additional value that is false in case the run reaches the end of the time series
if len(a) == 1:
stopind = i + 1
else:
a = np.append(a, a.max()+kappa+2)
# Find the stop index (if stopind = 4 and start = 0, then tctc_mat[:,:,start:stopind]==1)
stopind = i + np.split(a, np.where(
np.diff(a) > kappa+1)[0]+1)[0][-1] + 1
# Add trajectory to dictionary
if ((stopind - i) >= tau or preexisting == 1) and len(traj) >= sigma:
communityinfo['community'].append(sorted(traj))
communityinfo['start'] = np.append(communityinfo['start'], int(i))
communityinfo['end'] = np.append(
communityinfo['end'], int(stopind))
communityinfo['size'] = np.append(communityinfo['size'], len(traj))
communityinfo = pd.DataFrame(communityinfo)
communityinfo['start'] = communityinfo['start'].astype(int)
communityinfo['end'] = communityinfo['end'].astype(int)
# First check that there is not already a trajectory that is ongoing
badrows = []
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] == v[1]['end'])
for u in communityinfo[skipselrule].iterrows():
a = 1
if u[1]['start'] > v[1]['start'] and sorted(u[1]['community']) == sorted(v[1]['community']):
badrows.append(u[0])
communityinfo = communityinfo.drop(badrows)
# Then see if any subset trajectory can be placed earlier in time.
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] <= v[1]['start']) & (
communityinfo['end']+kappa >= v[1]['start'])
for u in communityinfo[skipselrule].iterrows():
a = 1
if set(u[1]['community']).issuperset(v[1]['community']):
communityinfo.loc[v[0], 'start'] = u[1]['start']
# It is possible to make the condition below effective_length
communityinfo['length'] = np.array(communityinfo['end']) - np.array(communityinfo['start'])
communityinfo = communityinfo[communityinfo['length'] >= tau]
communityinfo = communityinfo[communityinfo['size'] >= sigma]
# Make sure that the trajectory is not completely engulfed by another
badrows = []
if kappa > 0:
for v in communityinfo.iterrows():
skipselrule = (communityinfo['end'] == v[1]['end']) & (
communityinfo['start'] < v[1]['start'])
for u in communityinfo[skipselrule].iterrows():
if set(v[1]['community']).issubset(u[1]['community']):
badrows.append(v[0])
communityinfo = communityinfo.drop(badrows)
return communityinfo | r"""
Takes a TCTC trajectory matrix and returns a dataframe where all multi-label communities are listed.
Can take a little bit of time with large datasets; optimization could remove some for loops. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/tctc.py#L8-L114
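partition_inference is normally reached indirectly through tctc(..., output='df') rather than called on its own. A hedged sketch of inspecting that dataframe, assuming teneto exposes tctc under teneto.communitydetection; the data, seed and parameters are made up:
# Illustrative only -- data and parameters are made up.
import numpy as np
import teneto

np.random.seed(0)
data = np.random.randn(100, 5)      # 100 time points, 5 nodes
df = teneto.communitydetection.tctc(data, tau=3, epsilon=0.5, sigma=2, output='df')
# tctc returns an empty list when no community meets the criteria
if len(df) > 0:
    # Columns filled in by partition_inference: community, start, end, size, length
    print(df[['community', 'start', 'end', 'size', 'length']].head())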
wiheto/teneto | teneto/communitydetection/tctc.py | tctc | def tctc(data, tau, epsilon, sigma, kappa=0, largedataset=False, rule='flock', noise=None, raw_signal='amplitude', output='array', tempdir=None, njobs=1, largestonly=False):
r"""
Runs TCTC community detection
Parameters
----------
data : array
Multivariate series with dimensions: "time, node" that belong to a network.
tau : int
tau specifies the minimum number of time-points that each temporal community must last.
epsilon : float
epsilon specifies the distance points in a community can be away from each other.
sigma : int
sigma specifies the minimum number of nodes that must be in a community.
kappa : int
kappa specifies the number of consecutive time-points that can break the distance or size rules.
largedataset : bool
If true, runs with HDF5 (beta)
rule : str
Can be 'convoy' or 'flock'.
- flock entails all nodes are at most epsilon apart in a community.
- convoy entails that each node is within epsilon of at least one other node in the community (i.e. connected components).
noise : array (default None)
Timeseries of dimensions "time, N" where N is the number of noise time series added. Any community that contains this time series is excluded.
raw_signal : str
Can be amplitude or phase
output : str
Can be array or df or None
tempdir : str
Specify where the temporary directory is if largedataset is True
njobs : int
number of jobs (not implemented yet)
largestonly : bool (default False)
If True only considers largest communities in rule application (should generally be false)
Returns
-----------
tctc : array, df
"""
# Get distance matrix
if largedataset:
raise NotImplementedError(
'HDF5 implementation for large datasets is not available yet')
else:
N_data = data.shape[1]
if noise is not None:
if len(noise.shape) == 1:
noise = np.array(noise, ndmin=2).transpose()
N_data = data.shape[1]
data = np.hstack([data, noise])
N = data.shape[1]
#T = data.shape[0]
if raw_signal == 'amplitude':
d = np.array([np.abs(data[:, n]-data[:, m])
for n in range(data.shape[-1]) for m in range(data.shape[-1])])
d = np.reshape(d, [data.shape[-1], data.shape[-1], data.shape[0]])
elif raw_signal == 'phase':
analytic_signal = hilbert(data.transpose())
instantaneous_phase = np.angle(analytic_signal)
d = np.zeros([data.shape[1], data.shape[1], data.shape[0]])
for n in range(data.shape[1]):
for m in range(data.shape[1]):
d[n, m, :] = np.remainder(
np.abs(instantaneous_phase[n, :] - instantaneous_phase[m, :]), np.pi)
# Shape of data (with any additional 0s or noise added to nodes)
dat_shape = [int(d.shape[-1]), int(d.shape[0])]
# Make trajectory matrix 1 where the distance criterion is met
tctc_mat = np.zeros([dat_shape[1], dat_shape[1], dat_shape[0]])
tctc_mat[:, :, :][d <= epsilon] = 1
t1 = 1
t2 = 2
# The next two rules have to be run iteratively until convergence, i.e. until applying the sigma and tau parameters prunes nothing more.
# There may be a case where running it in this order could produce some value that is unwanted due to the skipping mechanic.
# Doing it in the other order does create possible bad values.
while t1 != t2:
t1 = tctc_mat.sum()
cliques = []
if tctc_mat.sum() > 0:
# Run the trajectory clustering rule
if rule == 'flock':
cliques = [list(filter(lambda x: (len(x) >= sigma) and (len(set(x).intersection(np.arange(N_data, N+1))) == 0), nx.find_cliques(
nx.Graph(tctc_mat[:, :, t])))) for t in range(tctc_mat.shape[-1])]
#cliques = []
# with ProcessPoolExecutor(max_workers=njobs) as executor:
# job = {executor.submit(_cluster_flocks,tctc_mat[:,:,t],sigma) for t in range(tctc_mat.shape[-1])}
# for j in as_completed(job):
# cliques.append(j.result()[0])
elif rule == 'convoy':
cliques = [list(map(list, filter(lambda x: (len(x) >= sigma) and (len(set(x).intersection(np.arange(N_data, N+1))) == 0), nx.connected_components(
nx.Graph(tctc_mat[:, :, t]))))) for t in range(tctc_mat.shape[-1])]
# Reset the trajectory matrix (since info is now in "cliques").
# Add the information from each clique into tctc_mat (i.e. sigma is now implemented)
tctc_mat = np.zeros([dat_shape[1], dat_shape[1], dat_shape[0]])
# Due to advanced index copy, I've done this with too many for loops
for t in range(dat_shape[0]):
for c in cliques[t]:
# Make one of the index vectors a list.
cv = [[i] for i in c]
tctc_mat[cv, c, t] = 1
if tctc_mat.sum() > 0:
# Now impose the tau criteria. This is done by flattening the matrix (after tau padding has been added to the final dimension).
# Add some padding as this is going to be needed when flattening (i.e. different runs must have at least tau+kappa spacing between them)
tctc_mat = np.dstack([np.zeros([dat_shape[1], dat_shape[1], 1]), tctc_mat, np.zeros(
[dat_shape[1], dat_shape[1], tau+kappa])])
# Flatten to a single vector
tctc_mat_community = np.array(tctc_mat.flatten())
# Add an extra 0
tctc_mat_dif = np.append(tctc_mat_community, 0)
# Use diff. Where there is a 1, a trajectory starts; where there is a -1, a trajectory ends
tctc_mat_dif = np.diff(tctc_mat_dif)
start_ones = np.where(tctc_mat_dif == 1)[0]
end_ones = np.where(tctc_mat_dif == -1)[0]
skip_ind = np.where(start_ones[1:]-end_ones[:-1] <= kappa)[0]
start_ones = np.delete(start_ones, skip_ind+1)
end_ones = np.delete(end_ones, skip_ind)
traj_len = end_ones - start_ones
# wherever traj_len is not long enough, loop through ind+t and make these 0
ind = start_ones[traj_len >= tau] + 1
l2 = traj_len[traj_len >= tau]
# for t in range(tau-1): # this didn't work (but was quicker) because of tau bug
# tctc_mat[ind+t] = 0
# Looping over each valid trajectory instance is slower but the safest way to impose the tau restraint and reinsert it.
tctc_mat = np.zeros(tctc_mat_community.shape)
for i in range(len(ind)):
tctc_mat[ind[i]:ind[i]+l2[i]] = 1
tctc_mat = tctc_mat.reshape(
dat_shape[1], dat_shape[1], dat_shape[0]+kappa+tau+1)
# remove padding
tctc_mat = tctc_mat[:, :, 1:dat_shape[0]+1]
t2 = tctc_mat.sum()
# remove noise
tctc_mat = tctc_mat[:N_data, :N_data]
if output == 'array':
return tctc_mat
elif output == 'df':
if np.sum(tctc_mat) != 0:
df = partition_inference(
tctc_mat, cliques, tau, sigma, kappa)
return df
else:
return [] | python | def tctc(data, tau, epsilon, sigma, kappa=0, largedataset=False, rule='flock', noise=None, raw_signal='amplitude', output='array', tempdir=None, njobs=1, largestonly=False):
r"""
Runs TCTC community detection
Parameters
----------
data : array
Multivariate series with dimensions: "time, node" that belong to a network.
tau : int
tau specifies the minimum number of time-points that each temporal community must last.
epsilon : float
epsilon specifies the distance points in a community can be away from each other.
sigma : int
sigma specifies the minimum number of nodes that must be in a community.
kappa : int
kappa specifies the number of consecutive time-points that can break the distance or size rules.
largedataset : bool
If true, runs with HDF5 (beta)
rule : str
Can be 'convoy' or 'flock'.
- flock entails all nodes are at most epsilon apart in a community.
- convoy entails that each node is within epsilon of at least one other node in the community (i.e. connected components).
noise : array (default None)
Timeseries of dimensions "time, N" where N is the number of noise time series added. Any community that contains this time series is excluded.
raw_signal : str
Can be amplitude or phase
output : str
Can be array or df or None
tempdir : str
Specify where the temporary directory is if largedataset is True
njobs : int
number of jobs (not implemented yet)
largestonly : bool (default False)
If True only considers largest communities in rule application (should generally be false)
Returns
-----------
tctc : array, df
"""
# Get distance matrix
if largedataset:
raise NotImplementedError(
'HDF5 implementation for large datasets is not available yet')
else:
N_data = data.shape[1]
if noise is not None:
if len(noise.shape) == 1:
noise = np.array(noise, ndmin=2).transpose()
N_data = data.shape[1]
data = np.hstack([data, noise])
N = data.shape[1]
#T = data.shape[0]
if raw_signal == 'amplitude':
d = np.array([np.abs(data[:, n]-data[:, m])
for n in range(data.shape[-1]) for m in range(data.shape[-1])])
d = np.reshape(d, [data.shape[-1], data.shape[-1], data.shape[0]])
elif raw_signal == 'phase':
analytic_signal = hilbert(data.transpose())
instantaneous_phase = np.angle(analytic_signal)
d = np.zeros([data.shape[1], data.shape[1], data.shape[0]])
for n in range(data.shape[1]):
for m in range(data.shape[1]):
d[n, m, :] = np.remainder(
np.abs(instantaneous_phase[n, :] - instantaneous_phase[m, :]), np.pi)
# Shape of data (with any additional 0s or noise added to nodes)
dat_shape = [int(d.shape[-1]), int(d.shape[0])]
# Make trajectory matrix 1 where the distance criterion is met
tctc_mat = np.zeros([dat_shape[1], dat_shape[1], dat_shape[0]])
tctc_mat[:, :, :][d <= epsilon] = 1
t1 = 1
t2 = 2
# The next two rules have to be run iteratively until convergence, i.e. until applying the sigma and tau parameters prunes nothing more.
# There may be a case where running it in this order could produce some value that is unwanted due to the skipping mechanic.
# Doing it in the other order does create possible bad values.
while t1 != t2:
t1 = tctc_mat.sum()
cliques = []
if tctc_mat.sum() > 0:
# Run the trajectory clustering rule
if rule == 'flock':
cliques = [list(filter(lambda x: (len(x) >= sigma) and (len(set(x).intersection(np.arange(N_data, N+1))) == 0), nx.find_cliques(
nx.Graph(tctc_mat[:, :, t])))) for t in range(tctc_mat.shape[-1])]
#cliques = []
# with ProcessPoolExecutor(max_workers=njobs) as executor:
# job = {executor.submit(_cluster_flocks,tctc_mat[:,:,t],sigma) for t in range(tctc_mat.shape[-1])}
# for j in as_completed(job):
# cliques.append(j.result()[0])
elif rule == 'convoy':
cliques = [list(map(list, filter(lambda x: (len(x) >= sigma) and (len(set(x).intersection(np.arange(N_data, N+1))) == 0), nx.connected_components(
nx.Graph(tctc_mat[:, :, t]))))) for t in range(tctc_mat.shape[-1])]
# Reset the trajectory matrix (since info is now in "cliques").
# Add the information from each clique into tctc_mat (i.e. sigma is now implemented)
tctc_mat = np.zeros([dat_shape[1], dat_shape[1], dat_shape[0]])
# Due to advanced index copy, I've done this with too many for loops
for t in range(dat_shape[0]):
for c in cliques[t]:
# Make one of the index vectors a list.
cv = [[i] for i in c]
tctc_mat[cv, c, t] = 1
if tctc_mat.sum() > 0:
# Now impose the tau criteria. This is done by flattening the matrix (after tau padding has been added to the final dimension).
# Add some padding as this is going to be needed when flattening (i.e. different runs must have at least tau+kappa spacing between them)
tctc_mat = np.dstack([np.zeros([dat_shape[1], dat_shape[1], 1]), tctc_mat, np.zeros(
[dat_shape[1], dat_shape[1], tau+kappa])])
# Flatten to a single vector
tctc_mat_community = np.array(tctc_mat.flatten())
# Add an extra 0
tctc_mat_dif = np.append(tctc_mat_community, 0)
# Use diff. Where there is a 1, a trajectory starts; where there is a -1, a trajectory ends
tctc_mat_dif = np.diff(tctc_mat_dif)
start_ones = np.where(tctc_mat_dif == 1)[0]
end_ones = np.where(tctc_mat_dif == -1)[0]
skip_ind = np.where(start_ones[1:]-end_ones[:-1] <= kappa)[0]
start_ones = np.delete(start_ones, skip_ind+1)
end_ones = np.delete(end_ones, skip_ind)
traj_len = end_ones - start_ones
# wherever traj_len is not long enough, loop through ind+t and make these 0
ind = start_ones[traj_len >= tau] + 1
l2 = traj_len[traj_len >= tau]
# for t in range(tau-1): # this didn't work (but was quicker) because of tau bug
# tctc_mat[ind+t] = 0
# Looping over each valid trajectory instance is slower but the safest way to impose the tau restraint and reinsert it.
tctc_mat = np.zeros(tctc_mat_community.shape)
for i in range(len(ind)):
tctc_mat[ind[i]:ind[i]+l2[i]] = 1
tctc_mat = tctc_mat.reshape(
dat_shape[1], dat_shape[1], dat_shape[0]+kappa+tau+1)
# remove padding
tctc_mat = tctc_mat[:, :, 1:dat_shape[0]+1]
t2 = tctc_mat.sum()
# remove noise
tctc_mat = tctc_mat[:N_data, :N_data]
if output == 'array':
return tctc_mat
elif output == 'df':
if np.sum(tctc_mat) != 0:
df = partition_inference(
tctc_mat, cliques, tau, sigma, kappa)
return df
else:
return [] | r"""
Runs TCTC community detection
Parameters
----------
data : array
Multivariate series with dimensions: "time, node" that belong to a network.
tau : int
tau specifies the minimum number of time-points that each temporal community must last.
epsilon : float
epsilon specifies the distance points in a community can be away from each other.
sigma : int
sigma specifies the minimum number of nodes that must be in a community.
kappa : int
kappa specifies the number of consecutive time-points that can break the distance or size rules.
largedataset : bool
If true, runs with HDF5 (beta)
rule : str
Can be 'convoy' or 'flock'.
- flock entails all nodes are at most epsilon apart in a community.
- convoy entails that each node is within epsilon of at least one other node in the community (i.e. connected components).
noise : array (default None)
Timeseries of dimensions "time, N" where N is the number of noise time series added. Any community that contains this time series is excluded.
raw_signal : str
Can be amplitude or phase
output : str
Can be array or df or None
tempdir : str
Specify where the temporary directory is if largedataset is True
njobs : int
number of jobs (not implemented yet)
largestonly : bool (default False)
If True only considers largest communities in rule application (should generally be false)
Returns
-----------
tctc : array, df | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/communitydetection/tctc.py#L117-L271 |
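The tau/kappa pruning inside tctc reduces to run detection on a flattened binary sequence with np.diff. A toy illustration of that step on a made-up 1D sequence (the values, tau and kappa are illustrative only):
# Toy sketch of the diff-based run detection used to impose tau and kappa.
import numpy as np

seq = np.array([0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0])   # made-up binary trajectory
tau, kappa = 3, 1

d = np.diff(np.append(seq, 0))     # +1 where a run starts, -1 where it ends
starts = np.where(d == 1)[0]       # index just before each run's first 1
ends = np.where(d == -1)[0]        # index of each run's last 1
# Merge runs separated by a gap of at most kappa time points
skip = np.where(starts[1:] - ends[:-1] <= kappa)[0]
starts = np.delete(starts, skip + 1)
ends = np.delete(ends, skip)
lengths = ends - starts
print(lengths, lengths >= tau)     # keep only runs lasting at least tau -> [6 2] [ True False]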
wiheto/teneto | teneto/networkmeasures/temporal_efficiency.py | temporal_efficiency | def temporal_efficiency(tnet=None, paths=None, calc='global'):
r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' - measure averages over target nodes (j) and time, giving the average efficiency of paths from each node i;
'node_to' - measure averages over source nodes (i) and time, giving the average efficiency of paths to each node j.
Returns
-------
E : array
Global temporal efficiency
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
# Calculate efficiency which is 1 over the mean path.
if calc == 'global':
eff = 1 / np.nanmean(pathmat)
elif calc == 'node' or calc == 'node_from':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=1)
elif calc == 'node_to':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=0)
return eff | python | def temporal_efficiency(tnet=None, paths=None, calc='global'):
r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' - measure averages over target nodes (j) and time, giving the average efficiency of paths from each node i;
'node_to' - measure averages over source nodes (i) and time, giving the average efficiency of paths to each node j.
Returns
-------
E : array
Global temporal efficiency
"""
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
# Calculate efficiency which is 1 over the mean path.
if calc == 'global':
eff = 1 / np.nanmean(pathmat)
elif calc == 'node' or calc == 'node_from':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=1)
elif calc == 'node_to':
eff = 1 / np.nanmean(np.nanmean(pathmat, axis=2), axis=0)
return eff | r"""
Returns temporal efficiency estimate. BU networks only.
Parameters
----------
Input should be *either* tnet or paths.
tnet : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
calc : str
Options: 'global' (default) - measure averages over time and nodes;
'node' or 'node_from' - measure averages over target nodes (j) and time, giving the average efficiency of paths from each node i;
'node_to' - measure averages over source nodes (i) and time, giving the average efficiency of paths to each node j.
Returns
-------
E : array
Global temporal efficiency | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/networkmeasures/temporal_efficiency.py#L9-L60 |
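A minimal usage sketch for the efficiency measure above, again assuming the teneto.networkmeasures namespace and using a made-up binary undirected network:
# Illustrative sketch only -- the random network below is made up.
import numpy as np
import teneto

np.random.seed(1)
G = (np.random.rand(4, 4, 8) > 0.6).astype(int)
G = G * np.transpose(G, [1, 0, 2])            # make it undirected
G[np.arange(4), np.arange(4), :] = 0          # drop self-edges

E_global = teneto.networkmeasures.temporal_efficiency(G)                   # single scalar
E_from = teneto.networkmeasures.temporal_efficiency(G, calc='node_from')   # one value per source node
print(E_global, E_from)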
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.network_from_array | def network_from_array(self, array):
"""impo
Defines a network from an array.
Parameters
----------
array : array
3D numpy array.
"""
if len(array.shape) == 2:
array = np.array(array, ndmin=3).transpose([1, 2, 0])
teneto.utils.check_TemporalNetwork_input(array, 'array')
uvals = np.unique(array)
if len(uvals) == 2 and 1 in uvals and 0 in uvals:
i, j, t = np.where(array == 1)
self.network = pd.DataFrame(data={'i': i, 'j': j, 't': t})
else:
i, j, t = np.where(array != 0)
w = array[array != 0]
self.network = pd.DataFrame(
data={'i': i, 'j': j, 't': t, 'weight': w})
self.N = int(array.shape[0])
self.T = int(array.shape[-1])
self._update_network() | python | def network_from_array(self, array):
"""impo
Defines a network from an array.
Parameters
----------
array : array
2D or 3D numpy array. A 2D array is treated as a single time point.
"""
if len(array.shape) == 2:
array = np.array(array, ndmin=3).transpose([1, 2, 0])
teneto.utils.check_TemporalNetwork_input(array, 'array')
uvals = np.unique(array)
if len(uvals) == 2 and 1 in uvals and 0 in uvals:
i, j, t = np.where(array == 1)
self.network = pd.DataFrame(data={'i': i, 'j': j, 't': t})
else:
i, j, t = np.where(array != 0)
w = array[array != 0]
self.network = pd.DataFrame(
data={'i': i, 'j': j, 't': t, 'weight': w})
self.N = int(array.shape[0])
self.T = int(array.shape[-1])
self._update_network() | Defines a network from an array.
Parameters
----------
array : array
2D or 3D numpy array. A 2D array is treated as a single time point. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L179-L202
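A hedged sketch of calling the method above, assuming a TemporalNetwork instance can be created empty and that teneto exposes the class as teneto.TemporalNetwork; the array is made up:
# Illustrative only -- random binary array, shapes are made up.
import numpy as np
import teneto

np.random.seed(7)
arr = (np.random.rand(5, 5, 12) > 0.8).astype(int)   # node x node x time
arr = arr * np.transpose(arr, [1, 0, 2])             # symmetrise -> undirected

tnet = teneto.TemporalNetwork()                      # assumed to accept no arguments
tnet.network_from_array(arr)
print(tnet.network.head())                           # edge list with columns i, j, t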
wiheto/teneto | teneto/classes/network.py | TemporalNetwork.network_from_df | def network_from_df(self, df):
"""
Defines a network from a pandas DataFrame.
Parameters
----------
df : dataframe
Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indices and t is the temporal index.
If weighted, should also include \'weight\'. Each row is an edge.
"""
teneto.utils.check_TemporalNetwork_input(df, 'df')
self.network = df
self._update_network() | python | def network_from_df(self, df):
"""
Defines a network from a pandas DataFrame.
Parameters
----------
df : dataframe
Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indices and t is the temporal index.
If weighted, should also include \'weight\'. Each row is an edge.
"""
teneto.utils.check_TemporalNetwork_input(df, 'df')
self.network = df
self._update_network() | Defines a network from an array.
Parameters
----------
df : dataframe
Pandas dataframe. Should have columns: \'i\', \'j\', \'t\' where i and j are node indices and t is the temporal index.
If weighted, should also include \'weight\'. Each row is an edge. | https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/network.py#L213-L225 |
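A matching sketch for the DataFrame route described above, with a hand-written edge list; the column names follow the docstring, everything else is illustrative:
# Illustrative only -- a tiny hand-written edge list.
import pandas as pd
import teneto

edges = pd.DataFrame({'i': [0, 0, 1, 2],
                      'j': [1, 2, 2, 3],
                      't': [0, 0, 1, 2]})

tnet = teneto.TemporalNetwork()     # assumed to accept no arguments
tnet.network_from_df(edges)
print(tnet.network)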