# Package: sciencebeam-trainer-delft 0.0.31
# File: sciencebeam_trainer_delft/utils/cloud_support.py
import os
import logging
from contextlib import contextmanager
from tempfile import TemporaryDirectory, mkdtemp
from pathlib import Path
from six import string_types
from sciencebeam_trainer_delft.utils.io import copy_file, path_join
LOGGER = logging.getLogger(__name__)
def _is_cloud_location(filepath):
return isinstance(filepath, string_types) and filepath.startswith('gs://')
def _copy_file_to_cloud(source_filepath, target_filepath, overwrite=True):
copy_file(source_filepath, target_filepath, overwrite=overwrite)
def _copy_directory_to_cloud(source_filepath, target_filepath, overwrite=True):
for temp_file_path in Path(source_filepath).glob('**/*'):
if not temp_file_path.is_file():
continue
relative_filename = temp_file_path.relative_to(source_filepath)
cloud_path = path_join(target_filepath, relative_filename)
LOGGER.info('copying %s to %s', temp_file_path, cloud_path)
_copy_file_to_cloud(temp_file_path, cloud_path, overwrite=overwrite)
def _copy_to_cloud(source_filepath, target_filepath, overwrite=True):
if Path(source_filepath).is_file():
_copy_file_to_cloud(source_filepath, target_filepath, overwrite=overwrite)
return
if Path(source_filepath).is_dir():
_copy_directory_to_cloud(source_filepath, target_filepath, overwrite=overwrite)
return
def _get_temp_path(filepath):
return mkdtemp(suffix=os.path.basename(filepath))
@contextmanager
def _cloud_location_as_temp_context(filepath):
with TemporaryDirectory(suffix=os.path.basename(filepath)) as temp_dir:
temp_path = os.path.join(temp_dir, os.path.basename(filepath))
LOGGER.info('temp_path: %s', temp_path)
yield temp_path
_copy_to_cloud(temp_path, filepath)
@contextmanager
def auto_upload_from_local_path(filepath: str):
if not filepath or not _is_cloud_location(filepath):
os.makedirs(filepath, exist_ok=True)
yield filepath
else:
with _cloud_location_as_temp_context(filepath) as temp_path:
yield temp_path
@contextmanager
def auto_upload_from_local_file(filepath: str):
if not filepath or not _is_cloud_location(filepath):
yield filepath
else:
with _cloud_location_as_temp_context(filepath) as local_path:
yield local_path
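
# --- Usage sketch (not part of the original module) -------------------------
# A minimal illustration of the two context managers above, assuming a
# writable, hypothetical 'gs://example-bucket' location (the upload relies on
# sciencebeam_trainer_delft.utils.io.copy_file, i.e. TensorFlow file IO and
# cloud credentials).
def _usage_example_auto_upload():  # illustrative only
    with auto_upload_from_local_path('gs://example-bucket/model-output') as local_dir:
        Path(local_dir).mkdir(parents=True, exist_ok=True)
        (Path(local_dir) / 'model.json').write_text('{}')
    with auto_upload_from_local_file('gs://example-bucket/report.txt') as local_file:
        Path(local_file).write_text('done')
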
def patch_cloud_support():
# deprecated
pass


# File: sciencebeam_trainer_delft/utils/io.py
import logging
import os
import tarfile
import tempfile
import zipfile
from abc import ABC, abstractmethod
from shutil import copyfileobj
from contextlib import contextmanager
from gzip import GzipFile
from lzma import LZMAFile
from urllib.error import HTTPError
from urllib.request import urlretrieve
from typing import List, IO, Iterator
from six import string_types, text_type
try:
from tensorflow.python.lib.io import file_io as tf_file_io # type: ignore
from tensorflow.python.framework.errors_impl import ( # type: ignore
NotFoundError as tf_NotFoundError
)
except ImportError:
tf_file_io = None
tf_NotFoundError = None
LOGGER = logging.getLogger(__name__)
def is_external_location(filepath: str):
return isinstance(filepath, string_types) and '://' in filepath
def path_join(parent, child):
return os.path.join(str(parent), str(child))
def is_gzip_filename(filepath: str):
return filepath.endswith('.gz')
def is_xz_filename(filepath: str):
return filepath.endswith('.xz')
def strip_gzip_filename_ext(filepath: str):
if not is_gzip_filename(filepath):
raise ValueError('not a gzip filename: %s' % filepath)
return os.path.splitext(filepath)[0]
def strip_xz_filename_ext(filepath: str):
if not is_xz_filename(filepath):
raise ValueError('not an xz filename: %s' % filepath)
return os.path.splitext(filepath)[0]
class CompressionWrapper(ABC):
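    """Strategy interface for transparent (de)compression: strips the
    compression extension from a file name and wraps a raw file object in the
    matching (de)compressing file object."""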
@abstractmethod
def strip_compression_filename_ext(self, filepath: str):
pass
@abstractmethod
def wrap_fileobj(self, filename: str, fileobj: IO, mode: str = None):
pass
@contextmanager
def open(self, filename: str, mode: str) -> Iterator[IO]:
LOGGER.debug('opening file: %r, mode=%r', filename, mode)
with _open_raw(filename, mode=mode) as fp:
yield self.wrap_fileobj(
filename=filename,
fileobj=fp,
mode=mode
)
class ClosingGzipFile(GzipFile):
# GzipFile doesn't close the underlying fileobj; we close it here
def close(self):
fileobj = self.fileobj
LOGGER.debug('ClosingGzipFile.close, fileobj: %s', fileobj)
try:
super().close()
finally:
if fileobj is not None:
LOGGER.debug('closing: %s', fileobj)
fileobj.close()
class GzipCompressionWrapper(CompressionWrapper):
def strip_compression_filename_ext(self, filepath: str):
return strip_gzip_filename_ext(filepath)
def wrap_fileobj(self, filename: str, fileobj: IO, mode: str = None):
return ClosingGzipFile(filename=filename, fileobj=fileobj, mode=mode)
@contextmanager
def open(self, filename: str, mode: str):
if is_external_location(filename):
# there seems to be an issue with GzipFile and fileobj
with tempfile.TemporaryDirectory(suffix='-gzip') as gzip_dir:
local_gzip_file = os.path.join(gzip_dir, os.path.basename(filename))
with ClosingGzipFile(filename=local_gzip_file, mode=mode) as local_fp:
yield local_fp
tf_file_io.copy(local_gzip_file, filename, overwrite=True)
else:
with ClosingGzipFile(filename=filename, mode=mode) as local_fp:
yield local_fp
class XzCompressionWrapper(CompressionWrapper):
def strip_compression_filename_ext(self, filepath: str):
return strip_xz_filename_ext(filepath)
def wrap_fileobj(self, filename: str, fileobj: IO, mode: str = None):
return LZMAFile(filename=fileobj, mode=mode or 'r')
class DummyCompressionWrapper(CompressionWrapper):
def strip_compression_filename_ext(self, filepath: str):
return filepath
def wrap_fileobj(self, filename: str, fileobj: IO, mode: str = None):
return fileobj
GZIP_COMPRESSION_WRAPPER = GzipCompressionWrapper()
XZ_COMPRESSION_WRAPPER = XzCompressionWrapper()
DUMMY_COMPRESSION_WRAPPER = DummyCompressionWrapper()
def get_compression_wrapper(filepath: str):
if is_gzip_filename(filepath):
return GZIP_COMPRESSION_WRAPPER
if is_xz_filename(filepath):
return XZ_COMPRESSION_WRAPPER
return DUMMY_COMPRESSION_WRAPPER
def strip_compression_filename_ext(filepath: str) -> str:
return get_compression_wrapper(filepath).strip_compression_filename_ext(filepath)
@contextmanager
def _open_raw(filepath: str, mode: str) -> Iterator[IO]:
if filepath.startswith('https://'):
try:
with tempfile.TemporaryDirectory(suffix='download') as temp_dir:
temp_file = os.path.join(temp_dir, os.path.basename(filepath))
urlretrieve(filepath, temp_file)
with open(temp_file, mode=mode) as fp:
yield fp
except HTTPError as error:
if error.code == 404:
raise FileNotFoundError('file not found: %s' % filepath) from error
raise
else:
try:
with tf_file_io.FileIO(filepath, mode=mode) as fp:
yield fp
except tf_NotFoundError as e:
raise FileNotFoundError('file not found: %s' % filepath) from e
@contextmanager
def open_file(filepath: str, mode: str, compression_wrapper: CompressionWrapper = None):
if compression_wrapper is None:
compression_wrapper = get_compression_wrapper(filepath)
LOGGER.debug(
'open_file, filepath=%s, mode=%s, compression_wrapper=%s',
filepath, mode, compression_wrapper
)
if mode in {'rb', 'r'}:
with _open_raw(filepath, mode=mode) as source_fp:
yield compression_wrapper.wrap_fileobj(
filename=filepath,
fileobj=source_fp,
mode=mode
)
elif mode in {'wb', 'w'}:
tf_file_io.recursive_create_dir(os.path.dirname(filepath))
with compression_wrapper.open(filepath, mode=mode) as target_fp:
yield target_fp
else:
raise ValueError('unsupported mode: %s' % mode)
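
# --- Usage sketch (not part of the original module) -------------------------
# open_file() selects a compression wrapper from the file extension, so the
# same calls transparently handle plain, .gz and .xz files. The local path
# below is hypothetical; the raw file access goes through TensorFlow file_io.
def _usage_example_open_file():  # illustrative only
    with open_file('/tmp/example-output.json.gz', mode='wb') as fp:
        fp.write(b'{"status": "ok"}')
    with open_file('/tmp/example-output.json.gz', mode='rb') as fp:
        return fp.read()  # decompressed bytes
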
def _require_tf_file_io():
if tf_file_io is None:
raise ImportError('Cloud storage file transfer requires TensorFlow.')
def copy_file(source_filepath: str, target_filepath: str, overwrite: bool = True):
_require_tf_file_io()
if not overwrite and tf_file_io.file_exists(target_filepath):
LOGGER.info('skipping already existing file: %s', target_filepath)
return
with open_file(text_type(source_filepath), mode='rb') as source_fp:
with open_file(text_type(target_filepath), mode='wb') as target_fp:
copyfileobj(source_fp, target_fp)
def list_files(directory_path: str) -> List[str]:
_require_tf_file_io()
return tf_file_io.list_directory(directory_path)
@contextmanager
def auto_uploading_output_file(filepath: str, mode: str = 'w', **kwargs):
if not is_external_location(filepath):
file_dirname = os.path.dirname(filepath)
if file_dirname:
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, mode=mode, **kwargs) as fp:
yield fp
return
with tempfile.TemporaryDirectory(suffix='-output') as temp_dir:
temp_file = os.path.join(
temp_dir,
get_compression_wrapper(filepath).strip_compression_filename_ext(
os.path.basename(filepath)
)
)
try:
with open(temp_file, mode=mode, **kwargs) as fp:
yield fp
finally:
if os.path.exists(temp_file):
copy_file(temp_file, filepath)
@contextmanager
def auto_download_input_file(filepath: str, auto_decompress: bool = False) -> Iterator[str]:
if not is_external_location(filepath):
yield filepath
return
with tempfile.TemporaryDirectory(suffix='-input') as temp_dir:
file_basename = os.path.basename(filepath)
if auto_decompress:
file_basename = get_compression_wrapper(filepath).strip_compression_filename_ext(
file_basename
)
temp_file = os.path.join(temp_dir, file_basename)
copy_file(filepath, temp_file, overwrite=True)
yield temp_file
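
# --- Usage sketch (not part of the original module) -------------------------
# Round trip via a hypothetical remote location: outputs are written to a
# local temporary file and uploaded on exit; inputs are downloaded to a
# temporary file before being yielded.
def _usage_example_auto_transfer():  # illustrative only
    with auto_uploading_output_file('gs://example-bucket/results.csv.gz') as fp:
        fp.write('value\n1\n')
    with auto_download_input_file(
            'gs://example-bucket/results.csv.gz', auto_decompress=False) as local_path:
        LOGGER.debug('downloaded to: %s', local_path)
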
def write_text(filepath: str, text: str, **kwargs):
with auto_uploading_output_file(filepath, mode='w', **kwargs) as fp:
fp.write(text)
def read_text(filepath: str, **kwargs) -> str:
with open_file(filepath, mode='r', **kwargs) as fp:
return fp.read()
def read_binary(filepath: str, **kwargs) -> bytes:
with open_file(filepath, mode='rb', **kwargs) as fp:
return fp.read()
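
# --- Usage sketch (not part of the original module) -------------------------
# write_text/read_text combine the helpers above; the path below is
# hypothetical and could equally be a remote location.
def _usage_example_text_round_trip():  # illustrative only
    write_text('/tmp/example-note.txt', 'hello world')
    return read_text('/tmp/example-note.txt')
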
class FileRef(ABC):
def __init__(self, file_url: str):
self.file_url = file_url
@property
def basename(self):
return os.path.basename(self.file_url)
def __str__(self):
return self.file_url
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.file_url)
@abstractmethod
def copy_to(self, target_url: str):
pass
class FileContainer(ABC):
def __init__(self, directory_url: str):
self.directory_url = directory_url
@abstractmethod
def list_files(self) -> List[FileRef]:
pass
def __str__(self):
return self.directory_url
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.directory_url)
class FileUrlRef(FileRef):
def copy_to(self, target_url: str):
copy_file(self.file_url, target_url)
class DirectoryFileContainer(FileContainer):
def list_files(self) -> List[FileRef]:
return [
FileUrlRef(path_join(self.directory_url, file_url))
for file_url in list_files(self.directory_url)
]
class TarFileRef(FileRef):
def __init__(
self,
file_url: str,
tar_file: tarfile.TarFile,
tar_info: tarfile.TarInfo):
super().__init__(file_url)
self.tar_file = tar_file
self.tar_info = tar_info
def open_tar_file(self) -> IO:
fp = self.tar_file.extractfile(self.tar_info)
assert fp
return fp
def copy_to(self, target_url: str):
with self.open_tar_file() as source_fp:
with open_file(
target_url,
mode='wb',
compression_wrapper=DUMMY_COMPRESSION_WRAPPER) as target_fp:
copyfileobj(source_fp, target_fp)
class TarFileContainer(FileContainer):
def __init__(self, directory_url, tar_file: tarfile.TarFile):
super().__init__(directory_url)
self.tar_file = tar_file
def list_files(self) -> List[FileRef]:
return [
TarFileRef(
path_join(self.directory_url, tar_info.name),
tar_file=self.tar_file,
tar_info=tar_info
)
for tar_info in self.tar_file.getmembers()
]
class ZipFileRef(FileRef):
def __init__(
self,
file_url: str,
zip_file: zipfile.ZipFile,
zip_info: zipfile.ZipInfo):
super().__init__(file_url)
self.zip_file = zip_file
self.zip_info = zip_info
def copy_to(self, target_url: str):
with self.zip_file.open(self.zip_info.filename) as source_fp:
with open_file(
target_url,
mode='wb',
compression_wrapper=DUMMY_COMPRESSION_WRAPPER) as target_fp:
copyfileobj(source_fp, target_fp)
class ZipFileContainer(FileContainer):
def __init__(self, directory_url, zip_file: zipfile.ZipFile):
super().__init__(directory_url)
self.zip_file = zip_file
def list_files(self) -> List[FileRef]:
return [
ZipFileRef(
path_join(self.directory_url, zip_info.filename),
zip_file=self.zip_file,
zip_info=zip_info
)
for zip_info in self.zip_file.infolist()
]
@contextmanager
def open_file_container(directory_url: str) -> Iterator[FileContainer]:
if str(directory_url).endswith('.tar.gz'):
with auto_download_input_file(directory_url) as local_tar_file:
with tarfile.open(local_tar_file) as tar_file:
yield TarFileContainer(directory_url, tar_file=tar_file)
return
if str(directory_url).endswith('.zip'):
with auto_download_input_file(directory_url) as local_zip_file:
with zipfile.ZipFile(local_zip_file, mode='r') as zip_file:
yield ZipFileContainer(directory_url, zip_file=zip_file)
return
yield DirectoryFileContainer(directory_url)


# File: sciencebeam_trainer_delft/utils/cli.py
import argparse
import logging
import sys
from abc import abstractmethod, ABC
from typing import List, Callable, Optional, Sequence
LOGGER = logging.getLogger(__name__)
def add_debug_argument(parser: argparse.ArgumentParser):
parser.add_argument("--debug", action="store_true", help="Enable debug logging")
def add_default_arguments(parser: argparse.ArgumentParser):
add_debug_argument(parser)
def get_project_package():
return 'sciencebeam_trainer_delft'
def process_debug_argument(args: argparse.Namespace):
if args.debug:
logging.getLogger('__main__').setLevel('DEBUG')
logging.getLogger(get_project_package()).setLevel('DEBUG')
def process_default_args(args: argparse.Namespace):
process_debug_argument(args)
def default_main(
parse_args: Callable[[Optional[List[str]]], argparse.Namespace],
run: Callable[[argparse.Namespace], None],
argv: List[str] = None):
LOGGER.debug('argv: %s', argv)
args = parse_args(argv)
process_default_args(args)
run(args)
def configure_main_logging():
logging.root.handlers = []
logging.basicConfig(level='INFO')
def initialize_and_call_main(main: Callable[[], None]):
configure_main_logging()
main()
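
# --- Usage sketch (not part of the original module) -------------------------
# Minimal wiring of the helpers above for a hypothetical command line tool.
def _usage_example_cli(argv: List[str] = None):  # illustrative only
    def parse_args(argv: List[str] = None) -> argparse.Namespace:
        parser = argparse.ArgumentParser(description='example tool')
        parser.add_argument('--name', default='world')
        add_default_arguments(parser)
        return parser.parse_args(argv)

    def run(args: argparse.Namespace):
        LOGGER.info('hello %s', args.name)

    initialize_and_call_main(lambda: default_main(parse_args, run, argv=argv))
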
class SubCommand(ABC):
def __init__(self, name, description):
self.name = name
self.description = description
@abstractmethod
def add_arguments(self, parser: argparse.ArgumentParser):
pass
@abstractmethod
def run(self, args: argparse.Namespace):
pass
class SubCommandProcessor:
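    """Builds an argparse parser with one sub-parser per SubCommand and
    dispatches the parsed arguments to the matching SubCommand.run()."""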
def __init__(
self,
sub_commands: Sequence[SubCommand],
description: str = None,
command_dest: str = 'command'):
self.sub_commands = sub_commands
self.sub_command_by_name = {
sub_command.name: sub_command
for sub_command in sub_commands
}
self.description = description
self.command_dest = command_dest
def get_parser(self) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description=self.description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
self.add_sub_command_parsers(parser)
return parser
def parse_args(self, argv: List[str] = None) -> argparse.Namespace:
return self.get_parser().parse_args(argv)
def add_sub_command_parsers(
self,
parser: argparse.ArgumentParser):
kwargs = {}
if sys.version_info >= (3, 7):
kwargs['required'] = True
subparsers = parser.add_subparsers(
dest=self.command_dest,
**kwargs # type: ignore
)
subparsers.required = True
self.add_sub_command_parsers_to_subparsers(subparsers)
def add_sub_command_parsers_to_subparsers(
self,
subparsers: argparse._SubParsersAction # pylint: disable=protected-access
):
for sub_command in self.sub_commands:
sub_parser = subparsers.add_parser(
sub_command.name, help=sub_command.description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
sub_command.add_arguments(sub_parser)
add_default_arguments(sub_parser)
def run(self, args: argparse.Namespace):
sub_command = self.sub_command_by_name[getattr(args, self.command_dest)]
sub_command.run(args)
def main(self, argv: List[str] = None):
args = self.parse_args(argv)
process_default_args(args)
self.run(args)


# File: sciencebeam_trainer_delft/utils/models/Attention.py
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
# mostly copied from:
# https://github.com/kermitt2/delft/blob/v0.2.3/delft/utilities/Attention.py
# - updated to be compatible with newer Keras version
class Attention(Layer):
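    """Attention pooling over the step (time) axis: scores each step with a
    learned projection (plus optional bias), applies tanh, exponentiates and
    normalises the scores (respecting an optional mask), and returns the
    score-weighted sum of the inputs, reducing (batch, step_dim, features)
    to (batch, features)."""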
def __init__(self, step_dim,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.step_dim = step_dim
self.features_dim = 0
self.W = None
self.b = None
super().__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
self.features_dim = input_shape[-1]
if self.bias:
self.b = self.add_weight(shape=(input_shape[1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, inputs, mask=None):
return None
def call(self, inputs, mask=None, **kwargs): # pylint: disable=arguments-differ
x = inputs
features_dim = self.features_dim
step_dim = self.step_dim
eij = K.reshape(
K.dot(
K.reshape(x, (-1, features_dim)),
K.reshape(self.W, (features_dim, 1))
),
(-1, step_dim)
)
if self.bias:
eij += self.b
eij = K.tanh(eij)
a = K.exp(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return input_shape[0], self.features_dim


# File: sciencebeam_trainer_delft/utils/keras/callbacks.py
import logging
import warnings
from typing import Any, Dict, Optional
from typing_extensions import Protocol
import numpy as np
from keras.callbacks import Callback, EarlyStopping
LOGGER = logging.getLogger(__name__)
class ResumableEarlyStopping(EarlyStopping):
class MetaKeys:
EARLY_STOPPING = 'early_stopping'
WAIT = 'wait'
STOPPED_EPOCH = 'stopped_epoch'
BEST = 'best'
def __init__(
self,
initial_meta: Optional[dict] = None,
**kwargs
):
super().__init__(**kwargs)
self.best: Optional[float] = None
self.initial_wait = 0
self.initial_stopped_epoch = 0
self.initial_best: Optional[float] = None
self.restore_state(initial_meta)
def restore_state(self, initial_meta: Optional[dict]):
if not initial_meta:
return
early_stopping_meta = initial_meta.get(ResumableEarlyStopping.MetaKeys.EARLY_STOPPING)
if not early_stopping_meta:
return
self.initial_wait = early_stopping_meta.get(
ResumableEarlyStopping.MetaKeys.WAIT,
0
)
self.initial_stopped_epoch = early_stopping_meta.get(
ResumableEarlyStopping.MetaKeys.STOPPED_EPOCH,
0
)
self.initial_best = early_stopping_meta.get(
ResumableEarlyStopping.MetaKeys.BEST,
None
)
LOGGER.info(
(
'restored early stopping state: initial_wait=%s, initial_stopped_epoch=%s'
', initial_best=%s'
),
self.initial_wait, self.initial_stopped_epoch, self.initial_best
)
def on_train_begin(self, logs=None):
super().on_train_begin(logs=logs)
self.wait = self.initial_wait
self.stopped_epoch = self.initial_stopped_epoch
if self.initial_best is not None:
self.best = self.initial_best
def _get_early_stopping_meta(self):
return {
ResumableEarlyStopping.MetaKeys.WAIT: self.wait,
ResumableEarlyStopping.MetaKeys.STOPPED_EPOCH: self.stopped_epoch,
ResumableEarlyStopping.MetaKeys.BEST: self.best
}
def _add_early_stopping_meta_to_logs(self, logs: dict):
logs[ResumableEarlyStopping.MetaKeys.EARLY_STOPPING] = (
self._get_early_stopping_meta()
)
def on_epoch_end(self, epoch, logs=None):
super().on_epoch_end(epoch, logs=logs)
self._add_early_stopping_meta_to_logs(logs)
LOGGER.info('on_epoch_end: logs=%s', logs)
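
# --- Usage sketch (not part of the original module) -------------------------
# Resuming early stopping across training runs: the state the callback writes
# into the epoch logs can be persisted by the caller and passed back in as
# initial_meta when training is restarted. Model and data are hypothetical.
def _usage_example_resumable_early_stopping(model, x, y, previous_meta: dict = None):
    # illustrative only
    early_stopping = ResumableEarlyStopping(
        initial_meta=previous_meta,
        monitor='val_loss',
        patience=5
    )
    model.fit(x, y, validation_split=0.1, epochs=10, callbacks=[early_stopping])
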
class SaveFunctionProtocol(Protocol):
def __call__(self, epoch: int, logs: Dict[str, Any], **kwargs):
pass
class ModelSaverCallback(Callback):
"""Similar to ModelCheckpoint but leaves the actual saving to the save_fn.
"""
def __init__(
self,
save_fn: Optional[SaveFunctionProtocol],
monitor: str = 'val_loss',
mode: str = 'auto',
period: int = 1,
save_best_only: bool = False,
save_kwargs: dict = None):
super().__init__()
self.monitor = monitor
self.save_fn = save_fn
self.period = period
self.save_best_only = save_best_only
self.save_kwargs = save_kwargs or {}
self.epochs_since_last_save = 0
if mode not in ['auto', 'min', 'max']:
warnings.warn(
'ModelSaverCallback mode %s is unknown, '
'fallback to auto mode.' % (mode),
RuntimeWarning
)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def _save(self, epoch, logs=None):
self.save_fn(epoch=epoch, logs=logs, **self.save_kwargs)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn(
'Can save best model only with %s available, '
'skipping.' % (self.monitor), RuntimeWarning
)
else:
if self.monitor_op(current, self.best):
LOGGER.info(
'Epoch %05d: %s improved from %0.5f to %0.5f',
epoch + 1, self.monitor, self.best, current
)
self.best = current
self._save(epoch=epoch, logs=logs)
else:
LOGGER.info(
'Epoch %05d: %s did not improve from %0.5f',
epoch + 1, self.monitor, self.best
)
else:
self._save(epoch=epoch, logs=logs)


# File: sciencebeam_trainer_delft/embedding/manager.py
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional
from sciencebeam_trainer_delft.utils.download_manager import DownloadManager
from sciencebeam_trainer_delft.embedding.embedding import Embeddings
from sciencebeam_trainer_delft.utils.io import (
is_external_location,
strip_compression_filename_ext
)
LOGGER = logging.getLogger(__name__)
DEFAULT_EMBEDDING_REGISTRY = 'embedding-registry.json'
DEFAULT_DOWNLOAD_DIR = 'data/download'
DEFAULT_EMBEDDING_LMDB_PATH = 'data/db'
DEFAULT_MIN_LMDB_CACHE_SIZE = 1024 * 1024
def _find_embedding_index(embedding_list: List[dict], name: str) -> int:
matching_indices = [i for i, x in enumerate(embedding_list) if x['name'] == name]
if matching_indices:
return matching_indices[0]
return -1
def _get_embedding_name_for_filename(filename: str) -> str:
name = os.path.splitext(os.path.basename(filename))[0]
if name.endswith('.mdb'):
return os.path.splitext(name)[0]
return name
def _get_embedding_format_for_filename(filename: str) -> str:
if '.bin' in filename:
return 'bin'
return 'vec'
def _get_embedding_type_for_filename(filename: str) -> str:
if 'glove' in filename.lower():
return 'glove'
return filename
def _get_embedding_config_for_filename(filename: str) -> dict:
return {
'name': _get_embedding_name_for_filename(filename),
'format': _get_embedding_format_for_filename(filename),
'type': _get_embedding_type_for_filename(filename),
'lang': 'en'
}
class EmbeddingManager:
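    """Maintains the embedding registry (a JSON file): adding embedding
    configurations, resolving aliases, downloading embedding files or LMDB
    caches via the download manager, and checking what is already available
    locally."""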
def __init__(
self, path: str = DEFAULT_EMBEDDING_REGISTRY,
download_manager: DownloadManager = None,
download_dir: str = DEFAULT_DOWNLOAD_DIR,
default_embedding_lmdb_path: str = DEFAULT_EMBEDDING_LMDB_PATH,
min_lmdb_cache_size: int = DEFAULT_MIN_LMDB_CACHE_SIZE):
assert download_manager
self.path = path
self.download_manager = download_manager
self.download_dir = download_dir
self.default_embedding_lmdb_path = default_embedding_lmdb_path
self.min_lmdb_cache_size = min_lmdb_cache_size
def _load(self) -> dict:
return json.loads(Path(self.path).read_text())
def _save(self, registry_data: dict):
LOGGER.debug('saving registry data: %s', registry_data)
return Path(self.path).write_text(json.dumps(registry_data, indent=4))
def _get_registry_data(self) -> dict:
try:
return self._load()
except FileNotFoundError:
return {}
def get_embedding_lmdb_path(self):
registry_data = self._get_registry_data()
return registry_data.get('embedding-lmdb-path', self.default_embedding_lmdb_path)
def is_embedding_lmdb_cache_enabled(self):
return self.get_embedding_lmdb_path()
def set_embedding_lmdb_cache_path(self, embedding_lmdb_cache_path: str):
registry_data = self._get_registry_data()
registry_data['embedding-lmdb-path'] = embedding_lmdb_cache_path
self._save(registry_data)
def disable_embedding_lmdb_cache(self):
self.set_embedding_lmdb_cache_path(None)
def add_embedding_config(self, embedding_config: dict):
LOGGER.debug('adding config: %s', embedding_config)
embedding_name = embedding_config['name']
registry_data = self._get_registry_data()
if 'embeddings' not in registry_data:
registry_data['embeddings'] = []
embedding_list = registry_data['embeddings']
index = _find_embedding_index(embedding_list, embedding_name)
if index < 0:
embedding_list.append(embedding_config)
else:
embedding_list[index] = embedding_config
if 'embedding-lmdb-path' not in registry_data:
registry_data['embedding-lmdb-path'] = self.default_embedding_lmdb_path
self._save(registry_data)
def get_embedding_config(self, embedding_name: str) -> Optional[dict]:
embedding_list = self._get_registry_data().get('embeddings', [])
index = _find_embedding_index(embedding_list, embedding_name)
if index < 0:
LOGGER.info('embedding not found with name "%s" in %s', embedding_name, embedding_list)
return None
return embedding_list[index]
def set_embedding_aliases(self, embedding_aliases: Dict[str, str]):
registry_data = self._get_registry_data()
registry_data['embedding-aliases'] = embedding_aliases
self._save(registry_data)
def get_embedding_aliases(self) -> dict:
registry_data = self._get_registry_data()
return registry_data.get('embedding-aliases', {})
def resolve_alias(self, embedding_name: str):
return self.get_embedding_aliases().get(embedding_name, embedding_name)
def is_lmdb_cache_file(self, embedding_url: str) -> bool:
return strip_compression_filename_ext(
embedding_url
).endswith('.mdb')
def download_and_install_source_embedding(self, embedding_url: str) -> str:
download_file = self.download_manager.download_if_url(embedding_url)
filename = os.path.basename(strip_compression_filename_ext(embedding_url))
embedding_config = _get_embedding_config_for_filename(filename)
embedding_name = embedding_config['name']
self.add_embedding_config({
**embedding_config,
'path': str(download_file)
})
return embedding_name
def _download_lmdb_cache_embedding(self, embedding_name: str, embedding_url: str):
self.download_manager.download(
embedding_url,
local_file=str(self.get_embedding_lmdb_cache_data_path(embedding_name))
)
def download_and_install_lmdb_cache_embedding(self, embedding_url: str) -> str:
embedding_config = _get_embedding_config_for_filename(embedding_url)
embedding_name = embedding_config['name']
self._download_lmdb_cache_embedding(embedding_name, embedding_url)
self.add_embedding_config(embedding_config)
return embedding_name
def download_and_install_embedding(self, embedding_url: str) -> str:
if self.is_lmdb_cache_file(embedding_url):
return self.download_and_install_lmdb_cache_embedding(embedding_url)
return self.download_and_install_source_embedding(embedding_url)
def download_and_install_embedding_if_url(self, embedding_url_or_name: str):
if is_external_location(embedding_url_or_name):
return self.download_and_install_embedding(embedding_url_or_name)
return embedding_url_or_name
def get_embedding_lmdb_cache_data_path(self, embedding_name: str):
embedding_lmdb_path = self.get_embedding_lmdb_path()
if not embedding_lmdb_path:
return None
embedding_lmdb_dir = Path(embedding_lmdb_path).joinpath(embedding_name)
return embedding_lmdb_dir.joinpath('data.mdb')
def has_lmdb_cache(self, embedding_name: str):
embedding_lmdb_file = self.get_embedding_lmdb_cache_data_path(
embedding_name
)
if not embedding_lmdb_file:
return False
exists = embedding_lmdb_file.is_file()
size = exists and embedding_lmdb_file.stat().st_size
valid = exists and size >= self.min_lmdb_cache_size
LOGGER.debug(
'embedding_lmdb_file: %s (exists: %s, valid: %s, size: %s)',
embedding_lmdb_file, exists, valid, size
)
if valid:
LOGGER.info(
'has already lmdb cache: %s (%s bytes)',
embedding_lmdb_file, size
)
return valid
def is_downloaded(self, embedding_name: str):
embedding_config = self.get_embedding_config(embedding_name)
if not embedding_config:
return False
embedding_path = embedding_config.get('path')
if not embedding_path or not Path(embedding_path).exists():
return False
LOGGER.info('already downloaded: %s', embedding_name)
return True
def is_downloaded_or_has_lmdb_cache(self, embedding_name: str):
return self.is_downloaded(embedding_name) or self.has_lmdb_cache(embedding_name)
def _ensure_external_url_available(self, embedding_url: str):
embedding_name = _get_embedding_name_for_filename(embedding_url)
if not self.is_downloaded_or_has_lmdb_cache(embedding_name):
return self.download_and_install_embedding(embedding_url)
if not self.get_embedding_config(embedding_name):
self.add_embedding_config(
_get_embedding_config_for_filename(embedding_url)
)
return embedding_name
def _ensure_registered_embedding_available(self, embedding_name: str):
self.validate_embedding(embedding_name)
if self.is_downloaded_or_has_lmdb_cache(embedding_name):
return embedding_name
embedding_config = self.get_embedding_config(embedding_name)
assert embedding_config, "embedding_config required for %s" % embedding_name
embedding_url = embedding_config.get('url')
assert embedding_url, "embedding_url required for %s" % embedding_name
if self.is_lmdb_cache_file(embedding_url):
self._download_lmdb_cache_embedding(embedding_name, embedding_url)
return embedding_name
embedding_path = embedding_config.get('path')
assert embedding_path, "embedding_path required for %s" % embedding_name
self.download_manager.download(embedding_url, local_file=embedding_path)
return embedding_name
def ensure_lmdb_cache_if_enabled(self, embedding_name: str):
if not self.get_embedding_lmdb_path():
return
Embeddings(embedding_name, path=self.path)
assert self.has_lmdb_cache(embedding_name)
def ensure_available(self, embedding_url_or_name: str):
if is_external_location(embedding_url_or_name):
return self._ensure_external_url_available(embedding_url_or_name)
return self._ensure_registered_embedding_available(self.resolve_alias(
embedding_url_or_name
))
def validate_embedding(self, embedding_name):
if not self.get_embedding_config(embedding_name):
raise ValueError('invalid embedding name: %s' % embedding_name)
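
# --- Usage sketch (not part of the original module) -------------------------
# Registering and resolving an embedding given a download manager instance;
# the embedding URL below is hypothetical.
def _usage_example_embedding_manager(download_manager: DownloadManager) -> str:
    # illustrative only
    embedding_manager = EmbeddingManager(
        path=DEFAULT_EMBEDDING_REGISTRY,
        download_manager=download_manager
    )
    embedding_name = embedding_manager.ensure_available(
        'https://example.org/glove.6B.50d.txt.gz'
    )
    embedding_manager.ensure_lmdb_cache_if_enabled(embedding_name)
    return embedding_name
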
def _ensure_registered_embedding_available(self, embedding_name: str):
self.validate_embedding(embedding_name)
if self.is_downloaded_or_has_lmdb_cache(embedding_name):
return embedding_name
embedding_config = self.get_embedding_config(embedding_name)
assert embedding_config, "embedding_config required for %s" % embedding_name
embedding_url = embedding_config.get('url')
assert embedding_url, "embedding_url required for %s" % embedding_name
if self.is_lmdb_cache_file(embedding_url):
self._download_lmdb_cache_embedding(embedding_name, embedding_url)
return embedding_name
embedding_path = embedding_config.get('path')
assert embedding_path, "embedding_path required for %s" % embedding_name
self.download_manager.download(embedding_url, local_file=embedding_path)
return embedding_name
def ensure_lmdb_cache_if_enabled(self, embedding_name: str):
if not self.get_embedding_lmdb_path():
return
Embeddings(embedding_name, path=self.path)
assert self.has_lmdb_cache(embedding_name)
def ensure_available(self, embedding_url_or_name: str):
if is_external_location(embedding_url_or_name):
return self._ensure_external_url_available(embedding_url_or_name)
return self._ensure_registered_embedding_available(self.resolve_alias(
embedding_url_or_name
))
def validate_embedding(self, embedding_name):
if not self.get_embedding_config(embedding_name):
raise ValueError('invalid embedding name: %s' % embedding_name) | 0.750095 | 0.122104 |
import argparse
import logging
from typing import Dict, List
from sciencebeam_trainer_delft.utils.misc import parse_dict, merge_dicts
from sciencebeam_trainer_delft.utils.cli import (
initialize_and_call_main,
SubCommand,
SubCommandProcessor
)
from sciencebeam_trainer_delft.embedding.manager import (
EmbeddingManager,
DownloadManager,
DEFAULT_EMBEDDING_REGISTRY
)
LOGGER = logging.getLogger(__name__)
class Commands:
DISABLE_LMDB_CACHE = 'disable-lmdb-cache'
SET_LMDB_PATH = 'set-lmdb-path'
PRELOAD = 'preload'
OVERRIDE_EMBEDDING_URL = 'override-embedding-url'
def _add_registry_path_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--registry-path",
default=DEFAULT_EMBEDDING_REGISTRY,
help="Path to the embedding registry"
)
def _get_embedding_manager(args: argparse.Namespace) -> EmbeddingManager:
return EmbeddingManager(
path=args.registry_path,
download_manager=DownloadManager()
)
class DisableLmdbCacheSubCommand(SubCommand):
def __init__(self):
super().__init__(Commands.DISABLE_LMDB_CACHE, 'Disable LMDB cache')
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
def run(self, args: argparse.Namespace):
embedding_manager = _get_embedding_manager(args)
embedding_manager.disable_embedding_lmdb_cache()
class SetLmdbPathSubCommand(SubCommand):
def __init__(self):
super().__init__(Commands.SET_LMDB_PATH, 'Set LMDB cache path')
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
parser.add_argument(
"--lmdb-cache-path",
required=True,
help="Path to the LMDB cache"
)
def run(self, args: argparse.Namespace):
embedding_manager = _get_embedding_manager(args)
embedding_manager.set_embedding_lmdb_cache_path(
args.lmdb_cache_path
)
class PreloadSubCommand(SubCommand):
def __init__(self):
super().__init__(Commands.PRELOAD, 'Ensure embedding(s) are ready to use')
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
parser.add_argument(
"--embedding",
required=True,
help="Name of embedding(s) to preload"
)
def run(self, args: argparse.Namespace):
embedding_manager = _get_embedding_manager(args)
embedding_name = embedding_manager.ensure_available(args.embedding)
embedding_manager.ensure_lmdb_cache_if_enabled(embedding_name)
def parse_embedding_url_override_expr(embedding_url_override_expr: str) -> Dict[str, str]:
LOGGER.debug('embedding_url_override_expr: %s', embedding_url_override_expr)
return parse_dict(embedding_url_override_expr, delimiter='|')
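# Example (hypothetical value; assumes parse_dict splits entries on '|' and
# key/value pairs on '='):
#   parse_embedding_url_override_expr('glove.6B.50d=https://example.org/glove.6B.50d.txt.gz')
#   -> {'glove.6B.50d': 'https://example.org/glove.6B.50d.txt.gz'}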
class OverrideEmbeddingUrlSubCommand(SubCommand):
def __init__(self):
super().__init__(
Commands.OVERRIDE_EMBEDDING_URL,
'Override the URL of embeddings so that they can be loaded from another location'
)
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
parser.add_argument(
"--override-url",
nargs='+',
required=True,
type=parse_embedding_url_override_expr,
help=(
"The urls to override, in the form: <embedding name>=<url>"
"\n (multiple urls can be specified by using the pipe ('|') separator"
" or using the --override-url parameter multiple times"
)
)
def run(self, args: argparse.Namespace):
url_by_embedding_name = merge_dicts(args.override_url)
LOGGER.debug('url_by_embedding_name: %s', url_by_embedding_name)
embedding_manager = _get_embedding_manager(args)
for embedding_name, embedding_url in url_by_embedding_name.items():
LOGGER.info('setting url of embedding %s to %s', embedding_name, embedding_url)
embedding_config = embedding_manager.get_embedding_config(embedding_name)
assert embedding_config
embedding_manager.add_embedding_config({
**embedding_config,
'url': embedding_url
})
SUB_COMMANDS = [
DisableLmdbCacheSubCommand(),
SetLmdbPathSubCommand(),
PreloadSubCommand(),
OverrideEmbeddingUrlSubCommand()
]
def main(argv: List[str] = None):
processor = SubCommandProcessor(
SUB_COMMANDS,
description='Manage Embeddings'
)
processor.main(argv)
if __name__ == "__main__":
initialize_and_call_main(main) | sciencebeam-trainer-delft | /sciencebeam_trainer_delft-0.0.31.tar.gz/sciencebeam_trainer_delft-0.0.31/sciencebeam_trainer_delft/embedding/cli.py | cli.py | import argparse
import logging
from typing import Dict, List
from sciencebeam_trainer_delft.utils.misc import parse_dict, merge_dicts
from sciencebeam_trainer_delft.utils.cli import (
initialize_and_call_main,
SubCommand,
SubCommandProcessor
)
from sciencebeam_trainer_delft.embedding.manager import (
EmbeddingManager,
DownloadManager,
DEFAULT_EMBEDDING_REGISTRY
)
LOGGER = logging.getLogger(__name__)
class Commands:
DISABLE_LMDB_CACHE = 'disable-lmdb-cache'
SET_LMDB_PATH = 'set-lmdb-path'
PRELOAD = 'preload'
OVERRIDE_EMBEDDING_URL = 'override-embedding-url'
def _add_registry_path_argument(parser: argparse.ArgumentParser):
parser.add_argument(
"--registry-path",
default=DEFAULT_EMBEDDING_REGISTRY,
help="Path to the embedding registry"
)
def _get_embedding_manager(args: argparse.Namespace) -> EmbeddingManager:
return EmbeddingManager(
path=args.registry_path,
download_manager=DownloadManager()
)
class DisableLmdbCacheSubCommand(SubCommand):
def __init__(self):
super().__init__(Commands.DISABLE_LMDB_CACHE, 'Disable LMDB cache')
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
def run(self, args: argparse.Namespace):
embedding_manager = _get_embedding_manager(args)
embedding_manager.disable_embedding_lmdb_cache()
class SetLmdbPathSubCommand(SubCommand):
def __init__(self):
super().__init__(Commands.SET_LMDB_PATH, 'Set LMDB cache path')
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
parser.add_argument(
"--lmdb-cache-path",
required=True,
help="Path to the LMDB cache"
)
def run(self, args: argparse.Namespace):
embedding_manager = _get_embedding_manager(args)
embedding_manager.set_embedding_lmdb_cache_path(
args.lmdb_cache_path
)
class PreloadSubCommand(SubCommand):
def __init__(self):
super().__init__(Commands.PRELOAD, 'Ensure embedding(s) are ready to use')
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
parser.add_argument(
"--embedding",
required=True,
help="Name of embedding(s) to preload"
)
def run(self, args: argparse.Namespace):
embedding_manager = _get_embedding_manager(args)
embedding_name = embedding_manager.ensure_available(args.embedding)
embedding_manager.ensure_lmdb_cache_if_enabled(embedding_name)
def parse_embedding_url_override_expr(embedding_url_override_expr: str) -> Dict[str, str]:
LOGGER.debug('embedding_url_override_expr: %s', embedding_url_override_expr)
return parse_dict(embedding_url_override_expr, delimiter='|')
class OverrideEmbeddingUrlSubCommand(SubCommand):
def __init__(self):
super().__init__(
Commands.OVERRIDE_EMBEDDING_URL,
'Override the URL of embeddings so that they can be loaded from another location'
)
def add_arguments(self, parser: argparse.ArgumentParser):
_add_registry_path_argument(parser)
parser.add_argument(
"--override-url",
nargs='+',
required=True,
type=parse_embedding_url_override_expr,
help=(
"The urls to override, in the form: <embedding name>=<url>"
"\n (multiple urls can be specified by using the pipe ('|') separator"
" or using the --override-url parameter multiple times"
)
)
def run(self, args: argparse.Namespace):
url_by_embedding_name = merge_dicts(args.override_url)
LOGGER.debug('url_by_embedding_name: %s', url_by_embedding_name)
embedding_manager = _get_embedding_manager(args)
for embedding_name, embedding_url in url_by_embedding_name.items():
LOGGER.info('setting url of embedding %s to %s', embedding_name, embedding_url)
embedding_config = embedding_manager.get_embedding_config(embedding_name)
assert embedding_config
embedding_manager.add_embedding_config({
**embedding_config,
'url': embedding_url
})
SUB_COMMANDS = [
DisableLmdbCacheSubCommand(),
SetLmdbPathSubCommand(),
PreloadSubCommand(),
OverrideEmbeddingUrlSubCommand()
]
def main(argv: List[str] = None):
processor = SubCommandProcessor(
SUB_COMMANDS,
description='Manage Embeddings'
)
processor.main(argv)
if __name__ == "__main__":
initialize_and_call_main(main) | 0.832849 | 0.121321 |
# ScienceBeam Utils
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
Provides utility functions related to the [ScienceBeam](https://github.com/elifesciences/sciencebeam) project.
Please refer to the [development documentation](https://github.com/elifesciences/sciencebeam-utils/blob/develop/doc/development.md)
if you wish to contribute to the project.
Most tools are not yet documented. Please feel free to browse the code or tests, or raise an issue.
## Pre-requisites
- Python 3
- [Apache Beam](https://beam.apache.org/)
[Apache Beam](https://beam.apache.org/) may be used for preprocessing, but it also provides the transparent FileSystems API, which makes it easy to access files in the cloud.
## Install
```bash
pip install apache_beam[gcp]
```
```bash
pip install sciencebeam-utils
```
## CLI Tools
### Find File Pairs
The preferred input layout is a directory containing a gzipped pdf (`.pdf.gz`) and gzipped xml (`.nxml.gz`), e.g.:
- manuscript_1/
- manuscript_1.pdf.gz
- manuscript_1.nxml.gz
- manuscript_2/
- manuscript_2.pdf.gz
- manuscript_2.nxml.gz
Using compressed files is optional but recommended to reduce file storage cost.
The per-manuscript parent directory is optional. If it is omitted, the filenames before the extensions must be identical (which is recommended in general anyway).
Run:
```bash
python -m sciencebeam_utils.tools.find_file_pairs \
--data-path <source directory> \
--source-pattern *.pdf.gz --xml-pattern *.nxml.gz \
--out <output file list csv/tsv>
```
e.g.:
```bash
python -m sciencebeam_utils.tools.find_file_pairs \
--data-path gs://some-bucket/some-dataset \
--source-pattern *.pdf.gz --xml-pattern *.nxml.gz \
--out gs://some-bucket/some-dataset/file-list.tsv
```
That will create the TSV (tab separated) file `file-list.tsv` with the following columns:
- _source_url_
- _xml_url_
That file could also be generated using any other preferred method.
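The generated file list can also be read back programmatically, e.g. via the `load_file_list` utility (a minimal sketch; the bucket and file names below are placeholders):
```python
from sciencebeam_utils.utils.file_list import load_file_list

# placeholder path for your own generated file list
source_urls = load_file_list(
    'gs://some-bucket/some-dataset/file-list.tsv', column='source_url'
)
xml_urls = load_file_list(
    'gs://some-bucket/some-dataset/file-list.tsv', column='xml_url'
)
print(source_urls[:3])
print(xml_urls[:3])
```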
### Split File List
To separate the file list into a _training_, _validation_ and _test_ dataset, the following script can be used:
```bash
python -m sciencebeam_utils.tools.split_csv_dataset \
--input <csv/tsv file list> \
--train 0.5 --validation 0.2 --test 0.3 --random --fill
```
e.g.:
```bash
python -m sciencebeam_utils.tools.split_csv_dataset \
--input gs://some-bucket/some-dataset/file-list.tsv \
--train 0.5 --validation 0.2 --test 0.3 --random --fill
```
That will create three separate files in the same directory:
- `file-list-train.tsv`
- `file-list-validation.tsv`
- `file-list-test.tsv`
The file pairs will be randomly selected (_--random_) and one group will also include all remaining file pairs that would otherwise be dropped due to rounding (_--fill_).
As with the previous step, you may decide to use your own process instead.
Note: the generated file lists should not be changed once you have started using them.
### Get Output Files
Since ScienceBeam is intended to convert files, there will be output files. To make the output filenames explicit,
they are also kept in a file list. This tool generates that file list (the output files do not need to exist yet for this purpose).
e.g.
```bash
python -m sciencebeam_utils.tools.get_output_files \
--source-file-list path/to/source/file-list-train.tsv \
--source-file-column=source_url \
--output-file-suffix=.xml \
--output-file-list path/to/results/file-list.lst
```
By adding the `--check` argument, the tool will also check whether the output files exist (see below).
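The output filenames are derived from the source filenames relative to a base path, roughly as in the following sketch using the `get_output_file` helper (placeholder paths; the expected result is shown as a comment):
```python
from sciencebeam_utils.utils.file_path import get_output_file

# placeholder paths for illustration only
output_file = get_output_file(
    'gs://some-bucket/some-dataset/manuscript_1/manuscript_1.pdf.gz',
    source_base_path='gs://some-bucket/some-dataset',
    output_base_path='gs://some-bucket/results',
    output_file_suffix='.xml'
)
# expected to be something like:
#   'gs://some-bucket/results/manuscript_1/manuscript_1.xml'
print(output_file)
```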
### Check File List
After generating an output file list, this tool can be used to check whether the output files exist and are complete.
e.g.
```bash
python -m sciencebeam_utils.tools.check_file_list \
--file-list path/to/results/file-list.lst \
--file-column=source_url \
--limit=100
```
This will check the first 100 output files and report the result. The command will fail if none of the output files exist.
| sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/README.md | README.md | pip install apache_beam[gcp]
pip install sciencebeam-utils
python -m sciencebeam_utils.tools.find_file_pairs \
--data-path <source directory> \
--source-pattern *.pdf.gz --xml-pattern *.nxml.gz \
--out <output file list csv/tsv>
python -m sciencebeam_utils.tools.find_file_pairs \
--data-path gs://some-bucket/some-dataset \
--source-pattern *.pdf.gz --xml-pattern *.nxml.gz \
--out gs://some-bucket/some-dataset/file-list.tsv
python -m sciencebeam_utils.tools.split_csv_dataset \
--input <csv/tsv file list> \
--train 0.5 --validation 0.2 --test 0.3 --random --fill
python -m sciencebeam_utils.tools.split_csv_dataset \
--input gs://some-bucket/some-dataset/file-list.tsv \
--train 0.5 --validation 0.2 --test 0.3 --random --fill
python -m sciencebeam_utils.tools.get_output_files \
--source-file-list path/to/source/file-list-train.tsv \
--source-file-column=source_url \
--output-file-suffix=.xml \
--output-file-list path/to/results/file-list.lst
python -m sciencebeam_utils.tools.check_file_list \
--file-list path/to/results/file-list.lst \
--file-column=source_url \
--limit=100 | 0.321247 | 0.929376 |
from __future__ import absolute_import
import logging
from io import StringIO
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
import apache_beam as beam
from apache_beam.io.textio import WriteToText
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filebasedsource import FileBasedSource
from sciencebeam_utils.beam_utils.utils import (
TransformAndLog
)
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename
)
def get_logger():
return logging.getLogger(__name__)
def DictToList(fields):
def wrapper(x):
get_logger().debug('DictToList: %s -> %s', fields, x)
return [x.get(field) for field in fields]
return wrapper
def _to_text(value):
try:
return text_type(value, encoding='utf-8')
except TypeError:
return text_type(value)
def format_csv_rows(rows, delimiter=','):
get_logger().debug('format_csv_rows, rows: %s', rows)
out = StringIO()
writer = csv.writer(out, delimiter=text_type(delimiter))
writer.writerows([
[_to_text(x) for x in row]
for row in rows
])
result = out.getvalue().rstrip('\r\n')
get_logger().debug('format_csv_rows, result: %s', result)
return result
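# Example: format_csv_rows([['a', 'b'], [1, 2]]) returns 'a,b\r\n1,2'
# (the csv writer emits '\r\n' line terminators; the trailing newline is stripped).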
class WriteDictCsv(beam.PTransform):
def __init__(self, path, columns, file_name_suffix=None):
super(WriteDictCsv, self).__init__()
self.path = path
self.columns = columns
self.file_name_suffix = file_name_suffix
self.delimiter = csv_delimiter_by_filename(path + file_name_suffix)
def expand(self, input_or_inputs):
return (
input_or_inputs |
"ToList" >> beam.Map(DictToList(self.columns)) |
"Format" >> TransformAndLog(
beam.Map(lambda x: format_csv_rows([x], delimiter=self.delimiter)),
log_prefix='formatted csv: ',
log_level='debug'
) |
"Utf8Encode" >> beam.Map(lambda x: x.encode('utf-8')) |
"Write" >> WriteToText(
self.path,
file_name_suffix=self.file_name_suffix,
header=format_csv_rows([self.columns], delimiter=self.delimiter).encode('utf-8')
)
)
def _strip_quotes(s):
return s[1:-1] if len(s) >= 2 and s[0] == '"' and s[-1] == '"' else s
# copied and modified from https://github.com/pabloem/beam_utils
# (move back if still active)
class ReadLineIterator:
def __init__(self, obj):
self._obj = obj
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
line = self._obj.readline().decode('utf-8')
if line is None or line == '':
raise StopIteration
return line
class CsvFileSource(FileBasedSource):
""" A source for a GCS or local comma-separated-file
Parses a text file assuming newline-delimited lines,
and comma-delimited fields. Assumes UTF-8 encoding.
"""
def __init__( # pylint: disable=too-many-arguments
self, file_pattern,
compression_type=CompressionTypes.AUTO,
delimiter=',', header=True, dictionary_output=True,
validate=True, limit=None):
""" Initialize a CsvFileSource.
Args:
delimiter: The delimiter character in the CSV file.
header: Whether the input file has a header or not.
Default: True
dictionary_output: The kind of records that the CsvFileSource outputs.
If True, then it will output dict()'s, if False it will output list()'s.
Default: True
Raises:
ValueError: If the input arguments are not consistent.
"""
super(CsvFileSource, self).__init__(
file_pattern,
compression_type=compression_type,
validate=validate,
splittable=False # Can't just split anywhere
)
self.delimiter = delimiter
self.header = header
self.dictionary_output = dictionary_output
self.limit = limit
self._file = None
if not self.header and dictionary_output:
raise ValueError(
'header is required for the CSV reader to provide dictionary output'
)
def read_records(self, file_name, offset_range_tracker):
        # The source is not splittable, therefore read the whole file sequentially,
        # using the first row as the header row if configured.
headers = None
self._file = self.open_file(file_name)
reader = csv.reader(ReadLineIterator(self._file), delimiter=text_type(self.delimiter))
line_no = 0
for i, row in enumerate(reader):
if self.header and i == 0:
headers = row
continue
if self.limit and line_no >= self.limit:
break
line_no += 1
if self.dictionary_output:
yield dict(zip(headers, row))
else:
yield row
class ReadDictCsv(beam.PTransform):
"""
Simplified CSV parser, which does not support:
* multi-line values
* delimiter within value
"""
def __init__(self, filename, header=True, limit=None):
super(ReadDictCsv, self).__init__()
if not header:
raise RuntimeError('header required')
self.filename = filename
self.columns = None
self.delimiter = csv_delimiter_by_filename(filename)
self.limit = limit
self.row_num = 0
def expand(self, input_or_inputs):
return (
input_or_inputs |
beam.io.Read(CsvFileSource(
self.filename,
delimiter=self.delimiter,
limit=self.limit
))
) | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/beam_utils/csv.py | csv.py | from __future__ import absolute_import
import logging
from io import StringIO
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
import apache_beam as beam
from apache_beam.io.textio import WriteToText
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filebasedsource import FileBasedSource
from sciencebeam_utils.beam_utils.utils import (
TransformAndLog
)
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename
)
def get_logger():
return logging.getLogger(__name__)
def DictToList(fields):
def wrapper(x):
get_logger().debug('DictToList: %s -> %s', fields, x)
return [x.get(field) for field in fields]
return wrapper
def _to_text(value):
try:
return text_type(value, encoding='utf-8')
except TypeError:
return text_type(value)
def format_csv_rows(rows, delimiter=','):
get_logger().debug('format_csv_rows, rows: %s', rows)
out = StringIO()
writer = csv.writer(out, delimiter=text_type(delimiter))
writer.writerows([
[_to_text(x) for x in row]
for row in rows
])
result = out.getvalue().rstrip('\r\n')
get_logger().debug('format_csv_rows, result: %s', result)
return result
class WriteDictCsv(beam.PTransform):
def __init__(self, path, columns, file_name_suffix=None):
super(WriteDictCsv, self).__init__()
self.path = path
self.columns = columns
self.file_name_suffix = file_name_suffix
self.delimiter = csv_delimiter_by_filename(path + file_name_suffix)
def expand(self, input_or_inputs):
return (
input_or_inputs |
"ToList" >> beam.Map(DictToList(self.columns)) |
"Format" >> TransformAndLog(
beam.Map(lambda x: format_csv_rows([x], delimiter=self.delimiter)),
log_prefix='formatted csv: ',
log_level='debug'
) |
"Utf8Encode" >> beam.Map(lambda x: x.encode('utf-8')) |
"Write" >> WriteToText(
self.path,
file_name_suffix=self.file_name_suffix,
header=format_csv_rows([self.columns], delimiter=self.delimiter).encode('utf-8')
)
)
def _strip_quotes(s):
return s[1:-1] if len(s) >= 2 and s[0] == '"' and s[-1] == '"' else s
# copied and modified from https://github.com/pabloem/beam_utils
# (move back if still active)
class ReadLineIterator:
def __init__(self, obj):
self._obj = obj
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
line = self._obj.readline().decode('utf-8')
if line is None or line == '':
raise StopIteration
return line
class CsvFileSource(FileBasedSource):
""" A source for a GCS or local comma-separated-file
Parses a text file assuming newline-delimited lines,
and comma-delimited fields. Assumes UTF-8 encoding.
"""
def __init__( # pylint: disable=too-many-arguments
self, file_pattern,
compression_type=CompressionTypes.AUTO,
delimiter=',', header=True, dictionary_output=True,
validate=True, limit=None):
""" Initialize a CsvFileSource.
Args:
delimiter: The delimiter character in the CSV file.
header: Whether the input file has a header or not.
Default: True
dictionary_output: The kind of records that the CsvFileSource outputs.
If True, then it will output dict()'s, if False it will output list()'s.
Default: True
Raises:
ValueError: If the input arguments are not consistent.
"""
super(CsvFileSource, self).__init__(
file_pattern,
compression_type=compression_type,
validate=validate,
splittable=False # Can't just split anywhere
)
self.delimiter = delimiter
self.header = header
self.dictionary_output = dictionary_output
self.limit = limit
self._file = None
if not self.header and dictionary_output:
raise ValueError(
'header is required for the CSV reader to provide dictionary output'
)
def read_records(self, file_name, offset_range_tracker):
        # The source is not splittable, therefore read the whole file sequentially,
        # using the first row as the header row if configured.
headers = None
self._file = self.open_file(file_name)
reader = csv.reader(ReadLineIterator(self._file), delimiter=text_type(self.delimiter))
line_no = 0
for i, row in enumerate(reader):
if self.header and i == 0:
headers = row
continue
if self.limit and line_no >= self.limit:
break
line_no += 1
if self.dictionary_output:
yield dict(zip(headers, row))
else:
yield row
class ReadDictCsv(beam.PTransform):
"""
Simplified CSV parser, which does not support:
* multi-line values
* delimiter within value
"""
def __init__(self, filename, header=True, limit=None):
super(ReadDictCsv, self).__init__()
if not header:
raise RuntimeError('header required')
self.filename = filename
self.columns = None
self.delimiter = csv_delimiter_by_filename(filename)
self.limit = limit
self.row_num = 0
def expand(self, input_or_inputs):
return (
input_or_inputs |
beam.io.Read(CsvFileSource(
self.filename,
delimiter=self.delimiter,
limit=self.limit
))
) | 0.630799 | 0.184217 |
import errno
import logging
import os
import subprocess
from getpass import getuser
from time import gmtime, strftime
from six import text_type
def get_logger():
return logging.getLogger(__name__)
def create_fn_api_runner():
# pylint: disable=import-outside-toplevel
from apache_beam.runners.portability.fn_api_runner import FnApiRunner
return FnApiRunner()
def get_cloud_project():
cmd = [
'gcloud', '-q', 'config', 'list', 'project',
'--format=value(core.project)'
]
with open(os.devnull, 'w', encoding='utf-8') as dev_null:
try:
res = subprocess.check_output(cmd, stderr=dev_null).strip()
if not res:
raise Exception(
'--cloud specified but no Google Cloud Platform '
'project found.\n'
'Please specify your project name with the --project '
'flag or set a default project: '
'gcloud config set project YOUR_PROJECT_NAME'
)
if not isinstance(res, text_type):
res = res.decode('utf-8')
return res
except OSError as e:
if e.errno == errno.ENOENT:
raise Exception(
'gcloud is not installed. The Google Cloud SDK is '
'necessary to communicate with the Cloud ML service. '
'Please install and set up gcloud.'
) from e
raise
def get_default_job_name(name, suffix=''):
timestamp_str = strftime("%Y%m%d-%H%M%S", gmtime())
return '%s-%s%s-%s' % (name or 'beamapp', getuser(), suffix or '', timestamp_str)
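# Example (hypothetical values): get_default_job_name('convert', '-batch1')
# returns something like 'convert-alice-batch1-20200101-120000',
# where 'alice' is the current user and the trailing part is the current GMT timestamp.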
def add_cloud_args(parser):
parser.add_argument(
'--cloud',
default=False,
action='store_true'
)
parser.add_argument(
'--runner',
required=False,
default=None,
help='Runner.'
)
parser.add_argument(
'--project',
type=str,
help='The cloud project name to be used for running this pipeline'
)
parser.add_argument(
'--region',
default=None,
type=str,
help='The Google Compute Engine region for creating'
)
parser.add_argument(
'--num-workers', '--num_workers',
default=1,
type=int,
help='The number of workers.'
)
parser.add_argument(
'--max-workers', '--max_num_workers',
dest='max_num_workers',
type=int,
help='The number of maximum workers (with auto-scaling).'
)
parser.add_argument(
'--job-name', '--job_name',
type=str, required=False,
help='The name of the cloud job'
)
parser.add_argument(
'--job-name-suffix', type=str, required=False,
help='A suffix appended to the job name'
)
return parser
def process_cloud_args(parsed_args, output_path, name=None):
if parsed_args.num_workers:
parsed_args.autoscaling_algorithm = 'NONE'
parsed_args.max_num_workers = parsed_args.num_workers
parsed_args.setup_file = './setup.py'
if parsed_args.cloud:
# Flags which need to be set for cloud runs.
default_values = {
'temp_location': os.path.join(os.path.dirname(output_path), 'temp'),
'runner': 'DataflowRunner',
'save_main_session': True,
}
if not parsed_args.project:
parsed_args.project = get_cloud_project()
if not parsed_args.job_name:
parsed_args.job_name = get_default_job_name(name, parsed_args.job_name_suffix)
else:
# Flags which need to be set for local runs.
default_values = {
'runner': 'DirectRunner',
}
get_logger().info('default_values: %s', default_values)
for kk, vv in default_values.items():
if kk not in parsed_args or not vars(parsed_args)[kk]:
vars(parsed_args)[kk] = vv
if parsed_args.runner == 'FnApiRunner':
parsed_args.runner = create_fn_api_runner() | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/beam_utils/main.py | main.py | import errno
import logging
import os
import subprocess
from getpass import getuser
from time import gmtime, strftime
from six import text_type
def get_logger():
return logging.getLogger(__name__)
def create_fn_api_runner():
# pylint: disable=import-outside-toplevel
from apache_beam.runners.portability.fn_api_runner import FnApiRunner
return FnApiRunner()
def get_cloud_project():
cmd = [
'gcloud', '-q', 'config', 'list', 'project',
'--format=value(core.project)'
]
with open(os.devnull, 'w', encoding='utf-8') as dev_null:
try:
res = subprocess.check_output(cmd, stderr=dev_null).strip()
if not res:
raise Exception(
'--cloud specified but no Google Cloud Platform '
'project found.\n'
'Please specify your project name with the --project '
'flag or set a default project: '
'gcloud config set project YOUR_PROJECT_NAME'
)
if not isinstance(res, text_type):
res = res.decode('utf-8')
return res
except OSError as e:
if e.errno == errno.ENOENT:
raise Exception(
'gcloud is not installed. The Google Cloud SDK is '
'necessary to communicate with the Cloud ML service. '
'Please install and set up gcloud.'
) from e
raise
def get_default_job_name(name, suffix=''):
timestamp_str = strftime("%Y%m%d-%H%M%S", gmtime())
return '%s-%s%s-%s' % (name or 'beamapp', getuser(), suffix or '', timestamp_str)
def add_cloud_args(parser):
parser.add_argument(
'--cloud',
default=False,
action='store_true'
)
parser.add_argument(
'--runner',
required=False,
default=None,
help='Runner.'
)
parser.add_argument(
'--project',
type=str,
help='The cloud project name to be used for running this pipeline'
)
parser.add_argument(
'--region',
default=None,
type=str,
help='The Google Compute Engine region for creating'
)
parser.add_argument(
'--num-workers', '--num_workers',
default=1,
type=int,
help='The number of workers.'
)
parser.add_argument(
'--max-workers', '--max_num_workers',
dest='max_num_workers',
type=int,
help='The number of maximum workers (with auto-scaling).'
)
parser.add_argument(
'--job-name', '--job_name',
type=str, required=False,
help='The name of the cloud job'
)
parser.add_argument(
'--job-name-suffix', type=str, required=False,
help='A suffix appended to the job name'
)
return parser
def process_cloud_args(parsed_args, output_path, name=None):
if parsed_args.num_workers:
parsed_args.autoscaling_algorithm = 'NONE'
parsed_args.max_num_workers = parsed_args.num_workers
parsed_args.setup_file = './setup.py'
if parsed_args.cloud:
# Flags which need to be set for cloud runs.
default_values = {
'temp_location': os.path.join(os.path.dirname(output_path), 'temp'),
'runner': 'DataflowRunner',
'save_main_session': True,
}
if not parsed_args.project:
parsed_args.project = get_cloud_project()
if not parsed_args.job_name:
parsed_args.job_name = get_default_job_name(name, parsed_args.job_name_suffix)
else:
# Flags which need to be set for local runs.
default_values = {
'runner': 'DirectRunner',
}
get_logger().info('default_values: %s', default_values)
for kk, vv in default_values.items():
if kk not in parsed_args or not vars(parsed_args)[kk]:
vars(parsed_args)[kk] = vv
if parsed_args.runner == 'FnApiRunner':
parsed_args.runner = create_fn_api_runner() | 0.328745 | 0.063978 |
import logging
from random import getrandbits
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
def get_logger():
return logging.getLogger(__name__)
def Spy(f):
def spy_wrapper(x):
f(x)
return x
return spy_wrapper
def MapSpy(f):
return beam.Map(Spy(f))
def _default_exception_log_fn(exception, value):
get_logger().warning(
'caught exception (ignoring item): %s, input: %.100s...',
exception, value, exc_info=exception
)
def MapOrLog(fn, log_fn=None, error_count=None):
if log_fn is None:
log_fn = _default_exception_log_fn
error_counter = (
Metrics.counter('MapOrLog', error_count)
if error_count
else None
)
def wrapper(x):
try:
yield fn(x)
except Exception as e: # pylint: disable=broad-except
if error_counter:
error_counter.inc()
log_fn(e, x)
return beam.FlatMap(wrapper)
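# Example (hypothetical usage in a pipeline):
#   p_collection | MapOrLog(parse_item, error_count='parse-error-count')
# yields parse_item(x) for each element and skips (but logs and counts)
# any element for which parse_item raises an exception.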
LEVEL_MAP = {
'info': logging.INFO,
'debug': logging.DEBUG
}
def Count(name, counter_value_fn):
counter = Metrics.counter('Count', name)
def wrapper(x):
counter.inc(counter_value_fn(x) if counter_value_fn else 1)
return x
return name >> beam.Map(wrapper)
class GroupTransforms(beam.PTransform):
"""
    Convenience class that allows a PTransform, used to group other transforms,
    to be defined using a lambda function.
(Completely unrelated to GroupBy transforms)
"""
def __init__(self, expand_fn):
super(GroupTransforms, self).__init__()
self.expand_fn = expand_fn
def expand(self, input_or_inputs):
return self.expand_fn(input_or_inputs)
def TransformAndCount(transform, counter_name, counter_value_fn=None):
return GroupTransforms(lambda pcoll: (
pcoll |
transform |
"Count" >> Count(counter_name, counter_value_fn)
))
def _identity(x):
return x
def _get_default_output_log_fn(log_level, log_prefix, log_value_fn):
if log_value_fn is None:
log_value_fn = _identity
log_level = LEVEL_MAP.get(log_level, log_level)
def _log_fn(x):
get_logger().log(
log_level, '%s%.50s...', log_prefix, log_value_fn(x)
)
return _log_fn
def TransformAndLog(transform, log_fn=None, log_prefix='', log_value_fn=None, log_level='info'):
if log_fn is None:
log_fn = _get_default_output_log_fn(log_level, log_prefix, log_value_fn)
return GroupTransforms(lambda pcoll: (
pcoll |
transform |
"Log" >> MapSpy(log_fn)
))
def random_key():
return getrandbits(32)
def _default_random_key_fn(_):
return random_key()
def PreventFusion(key_fn=None, name="PreventFusion"):
"""
Prevents fusion to allow better distribution across workers.
See:
https://cloud.google.com/dataflow/service/dataflow-service-desc#preventing-fusion
TODO Replace by: https://github.com/apache/beam/pull/4040
"""
if key_fn is None:
key_fn = _default_random_key_fn
return name >> GroupTransforms(lambda pcoll: (
pcoll |
"AddKey" >> beam.Map(lambda x: (key_fn(x), x)) |
"GroupByKey" >> beam.GroupByKey() |
"Ungroup" >> beam.FlatMap(lambda element: element[1])
)) | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/beam_utils/utils.py | utils.py | import logging
from random import getrandbits
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
def get_logger():
return logging.getLogger(__name__)
def Spy(f):
def spy_wrapper(x):
f(x)
return x
return spy_wrapper
def MapSpy(f):
return beam.Map(Spy(f))
def _default_exception_log_fn(exception, value):
get_logger().warning(
'caught exception (ignoring item): %s, input: %.100s...',
exception, value, exc_info=exception
)
def MapOrLog(fn, log_fn=None, error_count=None):
if log_fn is None:
log_fn = _default_exception_log_fn
error_counter = (
Metrics.counter('MapOrLog', error_count)
if error_count
else None
)
def wrapper(x):
try:
yield fn(x)
except Exception as e: # pylint: disable=broad-except
if error_counter:
error_counter.inc()
log_fn(e, x)
return beam.FlatMap(wrapper)
LEVEL_MAP = {
'info': logging.INFO,
'debug': logging.DEBUG
}
def Count(name, counter_value_fn):
counter = Metrics.counter('Count', name)
def wrapper(x):
counter.inc(counter_value_fn(x) if counter_value_fn else 1)
return x
return name >> beam.Map(wrapper)
class GroupTransforms(beam.PTransform):
"""
    Convenience class that allows a PTransform, used to group other transforms,
    to be defined using a lambda function.
(Completely unrelated to GroupBy transforms)
"""
def __init__(self, expand_fn):
super(GroupTransforms, self).__init__()
self.expand_fn = expand_fn
def expand(self, input_or_inputs):
return self.expand_fn(input_or_inputs)
def TransformAndCount(transform, counter_name, counter_value_fn=None):
return GroupTransforms(lambda pcoll: (
pcoll |
transform |
"Count" >> Count(counter_name, counter_value_fn)
))
def _identity(x):
return x
def _get_default_output_log_fn(log_level, log_prefix, log_value_fn):
if log_value_fn is None:
log_value_fn = _identity
log_level = LEVEL_MAP.get(log_level, log_level)
def _log_fn(x):
get_logger().log(
log_level, '%s%.50s...', log_prefix, log_value_fn(x)
)
return _log_fn
def TransformAndLog(transform, log_fn=None, log_prefix='', log_value_fn=None, log_level='info'):
if log_fn is None:
log_fn = _get_default_output_log_fn(log_level, log_prefix, log_value_fn)
return GroupTransforms(lambda pcoll: (
pcoll |
transform |
"Log" >> MapSpy(log_fn)
))
def random_key():
return getrandbits(32)
def _default_random_key_fn(_):
return random_key()
def PreventFusion(key_fn=None, name="PreventFusion"):
"""
Prevents fusion to allow better distribution across workers.
See:
https://cloud.google.com/dataflow/service/dataflow-service-desc#preventing-fusion
TODO Replace by: https://github.com/apache/beam/pull/4040
"""
if key_fn is None:
key_fn = _default_random_key_fn
return name >> GroupTransforms(lambda pcoll: (
pcoll |
"AddKey" >> beam.Map(lambda x: (key_fn(x), x)) |
"GroupByKey" >> beam.GroupByKey() |
"Ungroup" >> beam.FlatMap(lambda element: element[1])
)) | 0.639398 | 0.198763 |
from __future__ import absolute_import
import os
import logging
from itertools import islice
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename
)
from sciencebeam_utils.beam_utils.io import open_file
from .file_path import (
relative_path,
join_if_relative_path
)
LOGGER = logging.getLogger(__name__)
def is_csv_or_tsv_file_list(file_list_path):
return '.csv' in file_list_path or '.tsv' in file_list_path
def load_plain_file_list(file_list_path, limit=None):
with open_file(file_list_path, 'r') as f:
lines = (x.rstrip() for x in f)
if limit:
lines = islice(lines, 0, limit)
return list(lines)
def load_csv_or_tsv_file_list(file_list_path, column, header=True, limit=None):
delimiter = csv_delimiter_by_filename(file_list_path)
with open_file(file_list_path, 'r') as f:
reader = csv.reader(f, delimiter=text_type(delimiter))
if not header:
assert isinstance(column, int)
column_index = column
else:
header_row = next(reader)
if isinstance(column, int):
column_index = column
else:
try:
column_index = header_row.index(column)
except ValueError as exc:
raise ValueError(
'column %s not found, available columns: %s' %
(column, header_row)
) from exc
lines = (x[column_index] for x in reader)
if limit:
lines = islice(lines, 0, limit)
return list(lines)
def to_absolute_file_list(base_path, file_list):
return [join_if_relative_path(base_path, s) for s in file_list]
def to_relative_file_list(base_path, file_list):
return [relative_path(base_path, s) for s in file_list]
def load_file_list(file_list_path, column, header=True, limit=None, to_absolute=True):
if is_csv_or_tsv_file_list(file_list_path):
file_list = load_csv_or_tsv_file_list(
file_list_path, column=column, header=header, limit=limit
)
else:
file_list = load_plain_file_list(file_list_path, limit=limit)
if to_absolute:
file_list = to_absolute_file_list(
os.path.dirname(file_list_path), file_list
)
return file_list
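# Example (hypothetical path):
#   load_file_list('gs://bucket/dataset/file-list.tsv', column='source_url', limit=10)
# returns up to 10 source URLs, made absolute against 'gs://bucket/dataset'.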
def save_plain_file_list(file_list_path, file_list):
with FileSystems.create(file_list_path) as f:
f.write('\n'.join(file_list).encode('utf-8'))
def save_csv_or_tsv_file_list(file_list_path, file_list, column, header=True):
if header:
file_list = [column] + file_list
save_plain_file_list(file_list_path, file_list)
def save_file_list(file_list_path, file_list, column, header=True):
if is_csv_or_tsv_file_list(file_list_path):
return save_csv_or_tsv_file_list(
file_list_path, file_list, column=column, header=header
)
return save_plain_file_list(file_list_path, file_list) | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/utils/file_list.py | file_list.py | from __future__ import absolute_import
import os
import logging
from itertools import islice
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename
)
from sciencebeam_utils.beam_utils.io import open_file
from .file_path import (
relative_path,
join_if_relative_path
)
LOGGER = logging.getLogger(__name__)
def is_csv_or_tsv_file_list(file_list_path):
return '.csv' in file_list_path or '.tsv' in file_list_path
def load_plain_file_list(file_list_path, limit=None):
with open_file(file_list_path, 'r') as f:
lines = (x.rstrip() for x in f)
if limit:
lines = islice(lines, 0, limit)
return list(lines)
def load_csv_or_tsv_file_list(file_list_path, column, header=True, limit=None):
delimiter = csv_delimiter_by_filename(file_list_path)
with open_file(file_list_path, 'r') as f:
reader = csv.reader(f, delimiter=text_type(delimiter))
if not header:
assert isinstance(column, int)
column_index = column
else:
header_row = next(reader)
if isinstance(column, int):
column_index = column
else:
try:
column_index = header_row.index(column)
except ValueError as exc:
raise ValueError(
'column %s not found, available columns: %s' %
(column, header_row)
) from exc
lines = (x[column_index] for x in reader)
if limit:
lines = islice(lines, 0, limit)
return list(lines)
def to_absolute_file_list(base_path, file_list):
return [join_if_relative_path(base_path, s) for s in file_list]
def to_relative_file_list(base_path, file_list):
return [relative_path(base_path, s) for s in file_list]
def load_file_list(file_list_path, column, header=True, limit=None, to_absolute=True):
if is_csv_or_tsv_file_list(file_list_path):
file_list = load_csv_or_tsv_file_list(
file_list_path, column=column, header=header, limit=limit
)
else:
file_list = load_plain_file_list(file_list_path, limit=limit)
if to_absolute:
file_list = to_absolute_file_list(
os.path.dirname(file_list_path), file_list
)
return file_list
def save_plain_file_list(file_list_path, file_list):
with FileSystems.create(file_list_path) as f:
f.write('\n'.join(file_list).encode('utf-8'))
def save_csv_or_tsv_file_list(file_list_path, file_list, column, header=True):
if header:
file_list = [column] + file_list
save_plain_file_list(file_list_path, file_list)
def save_file_list(file_list_path, file_list, column, header=True):
if is_csv_or_tsv_file_list(file_list_path):
return save_csv_or_tsv_file_list(
file_list_path, file_list, column=column, header=header
)
return save_plain_file_list(file_list_path, file_list) | 0.322099 | 0.11808 |
import logging
import os
from functools import reduce # pylint: disable=redefined-builtin
from typing import Iterable, List, Tuple
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.utils.collection import (
groupby_to_dict,
sort_and_groupby_to_dict
)
from .file_path import strip_ext
LOGGER = logging.getLogger(__name__)
def find_matching_filenames(pattern):
return (x.path for x in FileSystems.match([pattern])[0].metadata_list)
def group_files_by_parent_directory(filenames):
return groupby_to_dict(sorted(filenames), os.path.dirname)
def group_files_by_name_excl_ext(filenames):
return sort_and_groupby_to_dict(filenames, strip_ext)
def zip_by_keys(*dict_list):
keys = reduce(lambda agg, v: agg | set(v.keys()), dict_list, set())
return (
[d.get(k) for d in dict_list]
for k in sorted(keys)
)
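# Example: zip_by_keys({'a': 1}, {'a': 2, 'b': 3}) yields [1, 2] and [None, 3],
# i.e. the values of each dict grouped by key, in sorted key order,
# with None where a dict has no value for that key.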
def group_file_pairs_by_parent_directory_or_name(
files_by_type: List[List[str]]
) -> Iterable[Tuple[str, ...]]:
grouped_files_by_pattern = [
group_files_by_parent_directory(files) for files in files_by_type
]
for files_in_group_by_pattern in zip_by_keys(*grouped_files_by_pattern):
if all(len(files or []) == 1 for files in files_in_group_by_pattern):
yield tuple([ # pylint: disable=consider-using-generator
files[0] for files in files_in_group_by_pattern
])
else:
grouped_by_name = [
group_files_by_name_excl_ext(files or [])
for files in files_in_group_by_pattern
]
for files_by_name in zip_by_keys(*grouped_by_name):
if all(len(files or []) == 1 for files in files_by_name):
yield tuple([ # pylint: disable=consider-using-generator
files[0] for files in files_by_name
])
else:
LOGGER.info(
'no exclusively matching files found: %s',
list(files_by_name)
)
def find_file_pairs_grouped_by_parent_directory_or_name(patterns):
matching_files_by_pattern = [
list(find_matching_filenames(pattern)) for pattern in patterns
]
LOGGER.info(
'found number of files %s',
', '.join(
'%s: %d' % (pattern, len(files))
for pattern, files in zip(patterns, matching_files_by_pattern)
)
)
patterns_without_files = [
pattern
for pattern, files in zip(patterns, matching_files_by_pattern)
if len(files) == 0
]
if patterns_without_files:
raise RuntimeError('no files found for: %s' % patterns_without_files)
return group_file_pairs_by_parent_directory_or_name(
matching_files_by_pattern
) | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/utils/file_pairs.py | file_pairs.py | import logging
import os
from functools import reduce # pylint: disable=redefined-builtin
from typing import Iterable, List, Tuple
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.utils.collection import (
groupby_to_dict,
sort_and_groupby_to_dict
)
from .file_path import strip_ext
LOGGER = logging.getLogger(__name__)
def find_matching_filenames(pattern):
return (x.path for x in FileSystems.match([pattern])[0].metadata_list)
def group_files_by_parent_directory(filenames):
return groupby_to_dict(sorted(filenames), os.path.dirname)
def group_files_by_name_excl_ext(filenames):
return sort_and_groupby_to_dict(filenames, strip_ext)
def zip_by_keys(*dict_list):
keys = reduce(lambda agg, v: agg | set(v.keys()), dict_list, set())
return (
[d.get(k) for d in dict_list]
for k in sorted(keys)
)
def group_file_pairs_by_parent_directory_or_name(
files_by_type: List[List[str]]
) -> Iterable[Tuple[str, ...]]:
grouped_files_by_pattern = [
group_files_by_parent_directory(files) for files in files_by_type
]
for files_in_group_by_pattern in zip_by_keys(*grouped_files_by_pattern):
if all(len(files or []) == 1 for files in files_in_group_by_pattern):
yield tuple([ # pylint: disable=consider-using-generator
files[0] for files in files_in_group_by_pattern
])
else:
grouped_by_name = [
group_files_by_name_excl_ext(files or [])
for files in files_in_group_by_pattern
]
for files_by_name in zip_by_keys(*grouped_by_name):
if all(len(files or []) == 1 for files in files_by_name):
yield tuple([ # pylint: disable=consider-using-generator
files[0] for files in files_by_name
])
else:
LOGGER.info(
'no exclusively matching files found: %s',
list(files_by_name)
)
def find_file_pairs_grouped_by_parent_directory_or_name(patterns):
matching_files_by_pattern = [
list(find_matching_filenames(pattern)) for pattern in patterns
]
LOGGER.info(
'found number of files %s',
', '.join(
'%s: %d' % (pattern, len(files))
for pattern, files in zip(patterns, matching_files_by_pattern)
)
)
patterns_without_files = [
pattern
for pattern, files in zip(patterns, matching_files_by_pattern)
if len(files) == 0
]
if patterns_without_files:
raise RuntimeError('no files found for: %s' % patterns_without_files)
return group_file_pairs_by_parent_directory_or_name(
matching_files_by_pattern
) | 0.514888 | 0.170854 |
from __future__ import absolute_import
import os
from apache_beam.io.filesystems import FileSystems
def get_ext(filename):
name, ext = os.path.splitext(filename)
if ext == '.gz':
ext = get_ext(name) + ext
return ext
def strip_ext(filename):
    # strip off the .gz extension, assuming there will be another extension before it
if filename.endswith('.gz'):
filename = filename[:-3]
return os.path.splitext(filename)[0]
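# Examples:
#   get_ext('article.nxml.gz') -> '.nxml.gz'
#   strip_ext('article.nxml.gz') -> 'article'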
def relative_path(base_path, path):
if not base_path:
return path
if not base_path.endswith('/'):
base_path += '/'
return path[len(base_path):] if path.startswith(base_path) else path
def is_relative_path(path):
return not path.startswith('/') and '://' not in path
def join_if_relative_path(base_path, path):
return (
FileSystems.join(base_path, path)
if base_path and is_relative_path(path)
else path
)
def change_ext(path, old_ext, new_ext):
if old_ext is None:
old_ext = os.path.splitext(path)[1]
if old_ext == '.gz':
path = path[:-len(old_ext)]
old_ext = os.path.splitext(path)[1]
if old_ext and path.endswith(old_ext):
return path[:-len(old_ext)] + new_ext
return path + new_ext
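# Example: change_ext('manuscript_1/manuscript_1.pdf.gz', None, '.xml')
# -> 'manuscript_1/manuscript_1.xml'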
def get_output_file(filename, source_base_path, output_base_path, output_file_suffix):
return FileSystems.join(
output_base_path,
change_ext(
relative_path(source_base_path, filename),
None, output_file_suffix
)
)
def base_path_for_file_list(file_list):
common_prefix = os.path.commonprefix(file_list)
i = max(common_prefix.rfind('/'), common_prefix.rfind('\\'))
if i >= 0:
return common_prefix[:i]
return ''
def get_or_validate_base_path(file_list, base_path):
common_path = base_path_for_file_list(file_list)
if base_path:
if not common_path.startswith(base_path):
raise AssertionError(
"invalid base path '%s', common path is: '%s'" % (base_path, common_path)
)
return base_path
return common_path | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/utils/file_path.py | file_path.py | from __future__ import absolute_import
import os
from apache_beam.io.filesystems import FileSystems
def get_ext(filename):
name, ext = os.path.splitext(filename)
if ext == '.gz':
ext = get_ext(name) + ext
return ext
def strip_ext(filename):
    # strip off the .gz extension, assuming there will be another extension before it
if filename.endswith('.gz'):
filename = filename[:-3]
return os.path.splitext(filename)[0]
def relative_path(base_path, path):
if not base_path:
return path
if not base_path.endswith('/'):
base_path += '/'
return path[len(base_path):] if path.startswith(base_path) else path
def is_relative_path(path):
return not path.startswith('/') and '://' not in path
def join_if_relative_path(base_path, path):
return (
FileSystems.join(base_path, path)
if base_path and is_relative_path(path)
else path
)
def change_ext(path, old_ext, new_ext):
if old_ext is None:
old_ext = os.path.splitext(path)[1]
if old_ext == '.gz':
path = path[:-len(old_ext)]
old_ext = os.path.splitext(path)[1]
if old_ext and path.endswith(old_ext):
return path[:-len(old_ext)] + new_ext
return path + new_ext
def get_output_file(filename, source_base_path, output_base_path, output_file_suffix):
return FileSystems.join(
output_base_path,
change_ext(
relative_path(source_base_path, filename),
None, output_file_suffix
)
)
def base_path_for_file_list(file_list):
common_prefix = os.path.commonprefix(file_list)
i = max(common_prefix.rfind('/'), common_prefix.rfind('\\'))
if i >= 0:
return common_prefix[:i]
return ''
def get_or_validate_base_path(file_list, base_path):
common_path = base_path_for_file_list(file_list)
if base_path:
if not common_path.startswith(base_path):
raise AssertionError(
"invalid base path '%s', common path is: '%s'" % (base_path, common_path)
)
return base_path
return common_path | 0.387227 | 0.047847 |
import argparse
import logging
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename,
write_csv_rows
)
from sciencebeam_utils.beam_utils.io import (
open_file,
dirname,
mkdirs_if_not_exists
)
from sciencebeam_utils.utils.file_path import (
join_if_relative_path,
relative_path
)
from sciencebeam_utils.utils.file_pairs import (
find_file_pairs_grouped_by_parent_directory_or_name
)
from sciencebeam_utils.tools.tool_utils import (
setup_logging,
add_default_args,
process_default_args
)
LOGGER = logging.getLogger(__name__)
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-path', type=str, required=True,
help='base data path'
)
parser.add_argument(
'--source-pattern', type=str, required=True,
help='source pattern'
)
parser.add_argument(
'--xml-pattern', type=str, required=True,
help='xml pattern'
)
parser.add_argument(
'--out', type=str, required=True,
help='output csv/tsv file'
)
parser.add_argument(
'--use-relative-paths', action='store_true',
help='create a file list with relative paths (relative to the data path)'
)
add_default_args(parser)
return parser.parse_args(argv)
def save_file_pairs_to_csv(output_path, source_xml_pairs):
mkdirs_if_not_exists(dirname(output_path))
delimiter = csv_delimiter_by_filename(output_path)
mime_type = 'text/tsv' if delimiter == '\t' else 'text/csv'
with open_file(output_path, 'w', mime_type=mime_type) as f:
writer = csv.writer(f, delimiter=text_type(delimiter))
write_csv_rows(writer, [['source_url', 'xml_url']])
write_csv_rows(writer, source_xml_pairs)
LOGGER.info('written results to %s', output_path)
def to_relative_file_pairs(base_path, file_pairs):
return (
(relative_path(base_path, source_url), relative_path(base_path, xml_url))
for source_url, xml_url in file_pairs
)
def run(args):
LOGGER.info('finding file pairs')
source_xml_pairs = find_file_pairs_grouped_by_parent_directory_or_name([
join_if_relative_path(args.data_path, args.source_pattern),
join_if_relative_path(args.data_path, args.xml_pattern)
])
if args.use_relative_paths:
source_xml_pairs = to_relative_file_pairs(args.data_path, source_xml_pairs)
source_xml_pairs = list(source_xml_pairs)
save_file_pairs_to_csv(args.out, source_xml_pairs)
def main(argv=None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == '__main__':
setup_logging()
main() | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/tools/find_file_pairs.py | find_file_pairs.py | 0.327991 | 0.076236 |
import argparse
import logging
import errno
from math import trunc
from random import shuffle
from datetime import datetime
from itertools import chain
from typing import List
from backports import csv # pylint: disable=no-name-in-module
from six import text_type
from sciencebeam_utils.beam_utils.io import open_file
from sciencebeam_utils.utils.csv import (
csv_delimiter_by_filename,
write_csv_rows
)
from sciencebeam_utils.utils.file_path import (
strip_ext,
get_ext
)
from sciencebeam_utils.tools.tool_utils import (
setup_logging,
add_default_args,
process_default_args
)
LOGGER = logging.getLogger(__name__)
Row = List[str]
def extract_proportions_from_args(args):
digits = 3
proportions = [
(name, round(p, digits))
for name, p in [
('train', args.train),
('test', args.test),
('validation', args.validation)
]
if p and p > 0
]
if sum(p for _, p in proportions) > 1.0:
raise ValueError('proportions add up to more than 1.0')
if not args.test:
proportions.append(('test', 1.0 - sum(p for _, p in proportions)))
elif not args.validation:
proportions.append(('validation', round(1.0 - sum(p for _, p in proportions), digits)))
proportions = [(name, p) for name, p in proportions if p > 0]
return proportions
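# Illustrative sketch: with args.train=0.6, args.test=0.3 and no validation proportion,
# the remainder is assigned to the validation set:
#   extract_proportions_from_args(args) -> [('train', 0.6), ('test', 0.3), ('validation', 0.1)]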
def get_chunk_size_list(size, percentages, fill=False):
chunk_size_list = [int(trunc(p * size)) for p in percentages]
if fill:
chunk_size_list[-1] = size - sum(chunk_size_list[:-1])
return chunk_size_list
def split_row_chunks(rows, chunk_size_list):
chunk_offset_list = [0]
for chunk_size in chunk_size_list[0:-1]:
chunk_offset_list.append(chunk_offset_list[-1] + chunk_size)
LOGGER.debug('chunk_offset_list: %s', chunk_offset_list)
LOGGER.debug('chunk_size_list: %s', chunk_size_list)
return [
rows[chunk_offset:chunk_offset + chunk_size]
for chunk_offset, chunk_size in zip(chunk_offset_list, chunk_size_list)
]
def _to_hashable(value):
return tuple(value)
def _to_row_set(rows):
return {_to_hashable(row) for row in rows}
def _split_rows_without_existing_split(rows, percentages, fill=False):
chunk_size_list = get_chunk_size_list(len(rows), percentages, fill=fill)
return split_row_chunks(rows, chunk_size_list)
def _substract_list(list1, list2):
return [a - b for a, b in zip(list1, list2)]
def split_rows(
rows: List[Row],
percentages: List[float],
fill: bool = False,
existing_split: List[List[Row]] = None) -> List[List[Row]]:
if not existing_split:
return _split_rows_without_existing_split(rows, percentages, fill=fill)
LOGGER.debug('existing_split: %s', existing_split)
all_current_rows = _to_row_set(rows)
all_existing_rows = _to_row_set(chain(*existing_split))
not_existing_rows = all_existing_rows - all_current_rows
if not_existing_rows:
LOGGER.warning(
'some rows (%d of %d) from the existing split do not exist'
' in the source list and will be removed, e.g.: %s',
len(not_existing_rows), len(all_existing_rows), list(not_existing_rows)[:3]
)
existing_split = [
[row for row in existing_rows if _to_hashable(row) in all_current_rows]
for existing_rows in existing_split
]
remaining_rows = [row for row in rows if _to_hashable(row) not in all_existing_rows]
chunk_size_list = get_chunk_size_list(len(rows), percentages, fill=fill)
existing_chunk_size_list = [len(existing_rows) for existing_rows in existing_split]
remaining_chunk_size_list = _substract_list(chunk_size_list, existing_chunk_size_list)
return [
existing_rows + new_split
for existing_rows, new_split in zip(
existing_split, split_row_chunks(remaining_rows, remaining_chunk_size_list)
)
]
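# Illustrative sketch: with 10 rows and percentages [0.5, 0.25, 0.25] the chunk sizes are
# [5, 2, 2] (one row stays unassigned); fill=True gives the remainder to the last set:
#   split_rows(rows, [0.5, 0.25, 0.25])            -> [rows[0:5], rows[5:7], rows[7:9]]
#   split_rows(rows, [0.5, 0.25, 0.25], fill=True) -> [rows[0:5], rows[5:7], rows[7:10]]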
def output_filenames_for_names(names, prefix, ext):
return [
prefix + ('' if prefix.endswith('/') else '-') + name + ext
for name in names
]
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input', type=str, required=True,
help='input csv/tsv file'
)
parser.add_argument(
'--train', type=float, required=True,
help='Train dataset proportion'
)
parser.add_argument(
'--test', type=float, required=False,
help='Test dataset proportion '
'(if not specified it is assumed to be the remaining percentage)'
)
parser.add_argument(
'--validation', type=float, required=False,
help='Validation dataset proportion (requires test-proportion)'
)
parser.add_argument(
'--random', action='store_true', default=False,
help='randomise samples before doing the split'
)
parser.add_argument(
'--fill', action='store_true', default=False,
help='use up all of the remaining data rows for the last set'
)
parser.add_argument(
'--no-extend-existing', action='store_true', default=False,
help='do not extend and preserve the existing split (by default, new entries are added to the existing split)'
)
parser.add_argument(
'--no-header', action='store_true', default=False,
help='input file does not contain a header'
)
parser.add_argument(
'--out', type=str, required=False,
help='output csv/tsv file prefix or directory (if ending with slash)'
' will use input file name by default'
)
add_default_args(parser)
return parser.parse_args(argv)
def process_args(args):
if not args.out:
args.out = strip_ext(args.input)
def read_csv_with_header(input_filename, delimiter, no_header):
with open_file(input_filename, 'r') as f:
reader = csv.reader(f, delimiter=text_type(delimiter))
header_row = None if no_header else next(reader)
data_rows = list(reader)
return header_row, data_rows
def read_csv_data(input_filename, delimiter, no_header):
_, data_rows = read_csv_with_header(input_filename, delimiter, no_header)
return data_rows
def load_file_sets(filenames, delimiter, no_header):
return [
read_csv_data(filename, delimiter, no_header)
for filename in filenames
]
def load_file_sets_or_none(filenames, delimiter, no_header):
try:
return load_file_sets(filenames, delimiter, no_header)
except IOError as e:
if e.errno == errno.ENOENT:
return None
raise e
def save_file_set(output_filename, delimiter, header_row, set_data_rows):
mime_type = 'text/tsv' if delimiter == '\t' else 'text/csv'
with open_file(output_filename, 'w', mime_type=mime_type) as f:
writer = csv.writer(f, delimiter=text_type(delimiter))
if header_row:
write_csv_rows(writer, [header_row])
write_csv_rows(writer, set_data_rows)
def save_file_sets(output_filenames, delimiter, header_row, data_rows_by_set):
for output_filename, set_data_rows in zip(output_filenames, data_rows_by_set):
LOGGER.info('set size: %d (%s)', len(set_data_rows), output_filename)
save_file_set(output_filename, delimiter, header_row, set_data_rows)
def get_backup_file_suffix():
return '.backup-%s' % datetime.utcnow().strftime(r'%Y%m%d-%H%M%S')
def run(args):
LOGGER.debug('args: %s', args)
process_args(args)
ext = get_ext(args.input)
proportions = extract_proportions_from_args(args)
output_filenames = output_filenames_for_names(
[name for name, _ in proportions],
args.out,
ext
)
LOGGER.info('proportions: %s', proportions)
LOGGER.info('output_filenames: %s', output_filenames)
delimiter = csv_delimiter_by_filename(args.input)
header_row, data_rows = read_csv_with_header(args.input, delimiter, args.no_header)
LOGGER.info('number of rows: %d', len(data_rows))
if args.random:
shuffle(data_rows)
existing_file_sets = load_file_sets_or_none(output_filenames, delimiter, args.no_header)
data_rows_by_set = split_rows(
data_rows,
[p for _, p in proportions],
fill=args.fill,
existing_split=existing_file_sets if not args.no_extend_existing else None
)
if existing_file_sets:
backup_suffix = get_backup_file_suffix()
save_file_sets(
[s + backup_suffix for s in output_filenames],
delimiter,
header_row,
existing_file_sets
)
save_file_sets(
output_filenames,
delimiter,
header_row,
data_rows_by_set
)
def main(argv=None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == '__main__':
setup_logging()
main() | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/tools/split_csv_dataset.py | split_csv_dataset.py | 0.499512 | 0.259914 |
from __future__ import division
import argparse
import logging
from concurrent.futures import ThreadPoolExecutor
from apache_beam.io.filesystems import FileSystems
from sciencebeam_utils.utils.file_list import (
load_file_list
)
from sciencebeam_utils.tools.tool_utils import (
setup_logging,
add_limit_args,
add_default_args,
process_default_args
)
LOGGER = logging.getLogger(__name__)
DEFAULT_EXAMPLE_COUNT = 3
def parse_args(argv=None):
parser = argparse.ArgumentParser(
description='Check file list'
)
source = parser.add_argument_group('source')
source.add_argument(
'--file-list', type=str, required=True,
help='path to source file list (tsv/csv/lst)'
)
source.add_argument(
'--file-column', type=str, required=False,
default='url',
help='csv/tsv column (ignored for plain file list)'
)
parser.add_argument(
'--example-count', type=int, required=False,
default=DEFAULT_EXAMPLE_COUNT,
help='number of missing examples to display'
)
add_limit_args(parser)
add_default_args(parser)
return parser.parse_args(argv)
def map_file_list_to_file_exists(file_list):
with ThreadPoolExecutor(max_workers=50) as executor:
return list(executor.map(FileSystems.exists, file_list))
def format_file_list(file_list):
return str(file_list)
def format_file_exists_results(
file_exists,
file_list,
example_count: int = DEFAULT_EXAMPLE_COUNT
):
if not file_exists:
return 'empty file list'
file_exists_count = sum(file_exists)
file_missing_count = len(file_exists) - file_exists_count
files_missing = [s for s, exists in zip(file_list, file_exists) if not exists]
return (
'files exist: %d (%.0f%%), files missing: %d (%.0f%%)%s' % (
file_exists_count, 100.0 * file_exists_count / len(file_exists),
file_missing_count, 100.0 * file_missing_count / len(file_exists),
(
' (example missing: %s)' % format_file_list(files_missing[:example_count])
if files_missing
else ''
)
)
)
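# Illustrative sketch of the summary string built above:
#   format_file_exists_results([True, True, False], ['a.pdf', 'b.pdf', 'c.pdf'])
#   -> "files exist: 2 (67%), files missing: 1 (33%) (example missing: ['c.pdf'])"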
def check_files_and_report_result(
file_list,
example_count: int = DEFAULT_EXAMPLE_COUNT
):
file_exists = map_file_list_to_file_exists(file_list)
LOGGER.info(
'%s', format_file_exists_results(file_exists, file_list, example_count=example_count)
)
assert sum(file_exists) > 0
def run(opt):
file_list = load_file_list(
opt.file_list,
column=opt.file_column,
limit=opt.limit
)
check_files_and_report_result(file_list, example_count=opt.example_count)
def main(argv=None):
args = parse_args(argv)
process_default_args(args)
run(args)
if __name__ == '__main__':
setup_logging()
main() | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/tools/check_file_list.py | check_file_list.py | 0.446495 | 0.108898 |
import argparse
import logging
from sciencebeam_utils.utils.file_list import (
load_file_list,
save_file_list,
to_relative_file_list
)
from sciencebeam_utils.utils.file_path import (
join_if_relative_path,
get_or_validate_base_path,
get_output_file
)
from sciencebeam_utils.tools.check_file_list import (
DEFAULT_EXAMPLE_COUNT,
check_files_and_report_result
)
from sciencebeam_utils.tools.tool_utils import (
setup_logging,
add_limit_args,
add_default_args,
process_default_args
)
LOGGER = logging.getLogger(__name__)
def parse_args(argv=None):
parser = argparse.ArgumentParser(
description='Get output files based on source files and suffix.'
)
source = parser.add_argument_group('source')
source.add_argument(
'--source-file-list', type=str, required=True,
help='path to source file list (tsv/csv/lst)'
)
source.add_argument(
'--source-file-column', type=str, required=False,
default='url',
help='csv/tsv column (ignored for plain file list)'
)
source.add_argument(
'--source-base-path', type=str, required=False,
help='base data path for source file urls'
)
output = parser.add_argument_group('output')
output.add_argument(
'--output-file-list', type=str, required=True,
help='path to output file list (tsv/csv/lst)'
)
output.add_argument(
'--output-file-column', type=str, required=False,
default='url',
help='csv/tsv column (ignored for plain file list)'
)
output.add_argument(
'--output-file-suffix', type=str, required=False,
help='file suffix (will be added to source urls after removing ext)'
)
output.add_argument(
'--output-base-path', type=str, required=False,
help='base output path (by default the source base path with a "-results" suffix)'
)
output.add_argument(
'--use-relative-paths', action='store_true',
help='create a file list with relative paths (relative to the output data path)'
)
add_limit_args(parser)
parser.add_argument(
'--check', action='store_true', default=False,
help='check whether the output files exist'
)
parser.add_argument(
'--check-limit', type=int, required=False,
help='limit the files to check'
)
parser.add_argument(
'--example-count', type=int, required=False,
default=DEFAULT_EXAMPLE_COUNT,
help='number of missing examples to display'
)
add_default_args(parser)
return parser.parse_args(argv)
def get_output_file_list(file_list, source_base_path, output_base_path, output_file_suffix):
return [
get_output_file(filename, source_base_path, output_base_path, output_file_suffix)
for filename in file_list
]
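# Illustrative sketch (assumes get_output_file, defined in sciencebeam_utils.utils.file_path,
# swaps the base path and replaces the extension with the given suffix):
#   get_output_file_list(['/data/sub/1.pdf'], '/data', '/data-results', '.xml')
#   would return ['/data-results/sub/1.xml'].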
def run(opt):
source_file_list = load_file_list(
join_if_relative_path(
opt.source_base_path,
opt.source_file_list
),
column=opt.source_file_column,
limit=opt.limit
)
source_base_path = get_or_validate_base_path(
source_file_list, opt.source_base_path
)
target_file_list = get_output_file_list(
source_file_list, source_base_path, opt.output_base_path, opt.output_file_suffix
)
if opt.check:
check_file_list = (
target_file_list[:opt.check_limit] if opt.check_limit
else target_file_list
)
LOGGER.info(
'checking %d (out of %d) files...',
len(check_file_list), len(target_file_list)
)
check_files_and_report_result(
check_file_list,
example_count=opt.example_count
)
if opt.use_relative_paths:
target_file_list = to_relative_file_list(opt.output_base_path, target_file_list)
LOGGER.info(
'saving file list (with %d files) to: %s',
len(target_file_list), opt.output_file_list
)
save_file_list(
opt.output_file_list,
target_file_list,
column=opt.output_file_column
)
def process_args(args):
if not args.output_base_path:
args.output_base_path = args.source_base_path + '-results'
def main(argv=None):
args = parse_args(argv)
process_default_args(args)
process_args(args)
run(args)
if __name__ == '__main__':
setup_logging()
main() | sciencebeam-utils | /sciencebeam_utils-0.1.5.tar.gz/sciencebeam_utils-0.1.5/sciencebeam_utils/tools/get_output_files.py | get_output_files.py | 0.396419 | 0.088387 |
[![image](https://zenodo.org/badge/81351748.svg)](https://zenodo.org/badge/latestdoi/81351748)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/scivision/sciencedates.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/scivision/sciencedates/context:python)
[![Actions Status](https://github.com/scivision/sciencedates/workflows/ci/badge.svg)](https://github.com/scivision/sciencedates/actions)
[![Python versions (PyPI)](https://img.shields.io/pypi/pyversions/sciencedates.svg)](https://pypi.python.org/pypi/sciencedates)
[![PyPi Download stats](http://pepy.tech/badge/sciencedates)](http://pepy.tech/project/sciencedates)
# Science Dates & Times
Date & time conversions used in the sciences.
The assumption is that datetimes are **timezone-naive**, as this is required in Numpy *et al* for `numpy.datetime64`.
## Install
```sh
python -m pip install sciencedates
```
## Usage
### Datetime => Year, DayOfYear
```python
import sciencedates as sd
T = '2013-07-02T12'
yeardoy, utsec = sd.datetime2yd(T)
```
Results in year and day of year, plus the UTC fraction of the day in seconds:
> (2013102, 72000.0)
## Julia
Julia [examples](./julia) are provided
## Matlab / GNU Octave
Matlab / GNU Octave [examples](./matlab) are provided
## Fortran
Fortran [examples](./fortran) are provided.
For Python-like modern Fortran datetime, see
[Datetime-Fortran](https://github.com/wavebitscientific/datetime-fortran).
| sciencedates | /sciencedates-1.5.0.tar.gz/sciencedates-1.5.0/README.md | README.md | 0.335569 | 0.955026 |
### Science in Data Dates
A simple library for generating dates in common scenarios for data analysis.
### Usage
from scienceindata_dates import scienceindata_dates
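For example, a minimal usage sketch based on the functions listed below (most return formatted strings by default, or `datetime` objects when `date_object=True`):
```python
from scienceindata_dates import scienceindata_dates

print(scienceindata_dates.get_today())                            # e.g. '20240131'
print(scienceindata_dates.get_yesterday(date_format='%Y-%m-%d'))  # e.g. '2024-01-30'
print(scienceindata_dates.get_last_month(date_object=True))       # first day of last month as a datetime
```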
### Functions
create_date_from_str: Create a Datetime object from a string with specific date_format.
<br>get_today: Create a Date Object or String of Today.
<br>get_yesterday: Create a Date Object or String of Yesterday.
<br>get_current_month: Return current month.
<br>get_last_month: Return last month date.
<br>get_previous_nmonth: Return last n-month.
<br>get_same_day_last_week: Return the same day of the week of last week.
<br>get_same_day_last_month: Return the same day of last month. | scienceindata-dates | /scienceindata_dates-0.0.3.tar.gz/scienceindata_dates-0.0.3/README.md | README.md | 0.694406 | 0.742375 |
# In[1]:
from datetime import datetime, timedelta
# In[2]:
def create_date_from_str(date_str, date_format='%Y%m%d'):
'''
Create a Datetime object from a string with specific date_format.
date_str: a date string (required).
date_format: the date format of date_str. Default is %Y%m%d.
'''
return datetime.strptime(date_str, date_format)
# In[3]:
def get_today(date_format='%Y%m%d', date_object=False):
'''
Create a Date Object or String of Today.
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = datetime.today()
tody = datetime(t.year, t.month, t.day)
if (date_object == True):
return tody
else:
return datetime.strftime(tody, date_format)
# In[4]:
def get_yesterday(date_format='%Y%m%d', date_object=False):
'''
Create a Date Object or String of Yesterday.
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = datetime.today() - timedelta(days=1)
yesterday = datetime(t.year, t.month, t.day)
if (date_object == True):
return yesterday
else:
return datetime.strftime(yesterday, date_format)
# In[5]:
def get_current_month(date_format='%Y%m', date_object=False):
'''
Return current month.
date_format: desired string format. Default is %Y%m.
date_object: If true, returns a Datetime object with day = 1.
'''
t = datetime.today()
if (date_object == True):
return datetime(t.year, t.month, 1)
else:
return datetime.strftime(t, date_format)
# In[6]:
def get_last_month(date_format='%Y%m', date_object=False):
'''
Return last month date.
date_format: desired string format. Default is %Y%m.
date_object: If true, returns a Datetime object with day = 1.
'''
current_month = get_current_month(date_object=True)
t = current_month - timedelta(days=current_month.day)
if (date_object == True):
return datetime(t.year, t.month, 1)
else:
return datetime.strftime(t, date_format)
# In[7]:
def get_previous_nmonth(n, date_format='%Y%m', date_object=False):
'''
Return last n-month.
n: number of previous month, n >= 0. (Required)
date_format: desired string format. Default is %Y%m.
date_object: If true, returns a Datetime object with day = 1.
'''
t = get_current_month(date_object=True)
for i in range(n):
t = t - timedelta(days=t.day)
if (date_object == True):
return datetime(t.year, t.month, 1)
else:
return datetime.strftime(t, date_format)
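# Illustrative sketch: if today falls in July 2021,
#   get_previous_nmonth(0) -> '202107' and get_previous_nmonth(2) -> '202105'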
# In[16]:
def get_same_day_last_week(date, date_format='%Y%m%d', date_object=False):
'''
Return the same day of the week of last week.
date: a date object. (Required)
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = date - timedelta(days=7)
if (date_object == True):
return t
else:
return datetime.strftime(t, date_format)
# In[9]:
def get_same_day_last_month(date, date_format='%Y%m%d', date_object=False):
'''
Return the same day of last month.
date: a date object. (Required)
date_format: desired date format of the output. Default is %Y%m%d.
date_object: If true, returns a datetime object. If false, return a string with date_format format.
'''
t = date - timedelta(days=date.day)
# clamp to the length of the previous month to avoid a ValueError (e.g. 31 March -> 28/29 February)
t = t.replace(day=min(date.day, t.day))
if (date_object == True):
return t
else:
return datetime.strftime(t, date_format)
# In[ ]:
#!jupyter nbconvert --to script human_date.ipynb | scienceindata-dates | /scienceindata_dates-0.0.3.tar.gz/scienceindata_dates-0.0.3/src/scienceindata_dates/scienceindata_dates.py | scienceindata_dates.py | 0.789356 | 0.374076 |
# Packaging and uploading to pypi
These steps are derived from the offical [Packaging Python Projects article](https://packaging.python.org/tutorials/packaging-projects/).
0. Remember to update the version number tuple in `scienceio/__init__.py`.
1. [Create a release](https://docs.github.com/en/github/administering-a-repository/releasing-projects-on-github/managing-releases-in-a-repository) in our GitHub repo.
2. Clone this repo at the point of the newly-created release into a working directory.
3. Create and activate a virtual environment in the repo root directory.
4. Install `build`: `python3 -m pip install --upgrade build`
5. In the repo root directory, run: `python3 -m build`
6. Verify that a `dist/` subdirectory is created, and that it contains both a `*.tar.gz` and a `*.whl` file matching the new package version.
7. Create a TestPyPI account if needed, as described [here](https://packaging.python.org/tutorials/packaging-projects/#uploading-the-distribution-archives), saving the API key.
8. Install `twine`: `python3 -m pip install --upgrade twine`
9. Upload package to TestPyPI, using `__token__` and your TestPyPI API key for the username and password, respectively: `python3 -m twine upload --repository testpypi dist/*`
10. Create a new demo project to test installation of the new package.
11. Create and activate a demo virtualenv.
12. Install the package from TestPyPI in the demo virtualenv: `python3 -m pip install --index-url https://test.pypi.org/simple/ --no-deps scienceio`
13. Test that the package can be imported and the version number is present: `python -c 'import scienceio; print(scienceio.__version__)'`
14. Create a PyPI account if needed, as described [here](https://packaging.python.org/tutorials/packaging-projects/#uploading-the-distribution-archives), saving the API key.
15. Upload package to PyPI, using `__token__` and your PyPI API key for the username and password, respectively: `python3 -m twine upload dist/*`
16. Done!
### Important files
- `pyproject.toml` tells build tools (like pip and build) what is required to build your project
- `setup.cfg` is the configuration file for setuptools. Content in this file is considered static metadata and should be preferred.
- `setup.py` is the build script for setuptools. It holds dynamic metadata and should be used only when absolutely necessary: for items that are dynamic or determined at install time, as well as extension modules or extensions to setuptools.
**Help:** https://packaging.python.org/tutorials/packaging-projects/
| scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/PUBLISHING.md | PUBLISHING.md | 0.730578 | 0.658706 |
# ScienceIO
The official ScienceIO Python SDK for the ScienceIO API.
This package is available via [Pypi:scienceIO](https://pypi.org/project/scienceio/). View full documentation at [ScienceIO Docs](https://docs.science.io/docs).
## Usage and examples
1. Create a ScienceIO API key. Both the API key ID and the key secret will be
needed in order to set up the ScienceIO Python SDK for use.
2. Install the ScienceIO Python SDK.
```python
pip install scienceio
```
3. Create a directory `.scienceio` in your home directory (for Mac users, your home directory is `/Users/{username}`; for Linux users, `/home/{username}`; for Windows users, `C:\\Users\{username}`). Inside this directory, create a text file called `config` containing your credentials:
```
[SETTINGS]
KEY_ID={your ScienceIO API key ID}
KEY_SECRET={your ScienceIO API key secret}
```
4. Use the ScienceIO Python SDK to annotate text:
```python
scio = ScienceIO()
input_text = (
'The COVID-19 pandemic has shown a markedly low proportion of '
'cases among children 1–4. Age disparities in observed cases could be '
'explained by children having lower susceptibility to infection, lower '
'propensity to show clinical symptoms or both.'
)
results = scio.structure(input_text)
print(results)
```
| scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/README.md | README.md | 0.772959 | 0.90261 |
# ScienceIO API Demo
In this demo, we'll:
- Set up the ScienceIO client
- Make our first request
- Put the results in a pandas DataFrame
```
import pandas as pd
from IPython.display import display, JSON
from scienceio import ScienceIO
```
## Make a request to the API
```
# instantiate the API
scio = ScienceIO()
# Now we'll submit a query
query_text = (
"""
Welcome to ScienceIO Annotate.
In this onboarding, we will cover the basic of ScienceIO Annotate,
and how it enables you to extract biomedical information from documents
for downstream use.
The ScienceIO biomedical language platform can identify and extract over 9 million terms,
related to healthcare and biomedical sciences.
These include:
Medical conditions, such as ALS, triple negative cancer, cardiac hyptertrophy, MERS, or liver cirrhosis
Medical procedures, such as magnetic resonance imaging, fMRI, spinal fusion, stress tests, or organ transplants
Medical devices, such as suture, artificial limb, or ventricular assist device
Chemicals and drugs, such as acetaminophen, pembrolizumab, statins
Genes and mutations, such as MET, TRK, PPARa, or EGFR T790M
Species and viruses, such as patients or humans, rats, monkeys, E.coli, or SARS-CoV-2
Anatomy and physiology, such as liver, lung, fibroblasts, T-cells, heart rate, or cardiac output
Cell biology, such as DNA, RNA, GPCRs, or ribosomes
Context for other information, such as when a drug is delivered by injection or orally
The patient was admitted with chest pains and shortness of breath.
Elevated blood pressure and presence of troponin-T and troponin-I in the blood
indicated a STEMI or myocardial infarction. They were administered
statins and put on saline drip. A stent was placed in the
descending coronary artery. Troponin-t and Troponin-I levels
stabilized. Chest pains abated and blood pressure normalized.
"""
)
response_query = scio.annotate(text=query_text)
df = pd.DataFrame(response_query['annotations'])
display(df)
```
| scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/example-annotate-onboarding-text.ipynb | example-annotate-onboarding-text.ipynb | 0.398406 | 0.940844 |
# ScienceIO API Demo
In this demo, we'll:
- Initialize the client with our user account
- Make our first request
- Put the results in a pandas DataFrame and analyze them
```
import pandas as pd
import yaml
from IPython.display import display, JSON
from analytics import *
from scienceio import ScienceIO
```
## Initialize client
```
scio = ScienceIO()
```
The `ScienceIO()` client picks up your API credentials from the `~/.scienceio/config` file described in the README, so no explicit login step is needed here.
## Make a request to the API
```
query_text = (
"""
The patient is a 21-day-old Caucasian male here for 2 days of congestion -
mom has been suctioning yellow discharge from the patient's nares, plus she has noticed
some mild problems with his breathing while feeding (but negative for any perioral cyanosis or retractions).
One day ago, mom also noticed a tactile temperature and gave the patient Tylenol. Baby also has
had some decreased p.o. intake. His normal breast-feeding is down from 20 minutes q.2h.
to 5 to 10 minutes secondary to his respiratory congestion. He sleeps well, but has been more tired
and has been fussy over the past 2 days. The parents noticed no improvement with albuterol treatments given
in the ER. His urine output has also decreased; normally he has 8 to 10 wet and 5 dirty diapers per 24 hours,
now he has down to 4 wet diapers per 24 hours. Mom denies any diarrhea. His bowel movements are yellow colored and soft in nature.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `spans` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
display(df)
report(df)
# This is the text we'll send to the API
query_text = (
"""
The COVID-19 pandemic has shown a markedly low proportion of
cases among children. Age disparities in observed COVID-19 cases could be
explained by children having lower susceptibility to infection, lower
propensity to show clinical symptoms or both. COVID-19 has shown to induce
Kawasaki syndrome in some children. Kawasaki (KD), or Mucocutaneous Lymph Node Syndrome
as it is often called, is an acute illness that affects mostly children 6 months to 5 years
old—median age in the KD cohort was 25 months—and features inflammation of small and medium
blood vessels. Although the cause of KD is unknown, it is believed to occur in genetically
predisposed children after exposure to an environmental trigger such as an infection.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `spans` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
display(df)
```
## Viewing the results
```
report(df)
# top mentions
get_top(df, "text")
# top concepts
get_top(df, "concept_name")
# top concept types
get_top(df, "concept_type")
```
| scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/example-analytics-2.ipynb | example-analytics-2.ipynb | 0.479747 | 0.902395 |
import argparse
import pandas as pd
from convert_data_model import convert_data_model
def count_text(df) -> int:
"""len(df) = # of text spans"""
return len(df)
def count_text_unique(df) -> int:
"""unique text spans (no correction for caps/lower/etc.)"""
return df.text.nunique()
def count_concepts_unique(df) -> int:
"""unique biomedical concepts"""
return df.concept_id.nunique()
def count_types_unique(df) -> int:
"""unique concept types"""
return df.concept_type.nunique()
def quantizer(score):
"""
Quantize a numeric score into a labeled range.
Run with:
df[col] = df[col].apply(lambda x: quantizer(x))
to transform a column into quantized values (or assign the result to a new column)
"""
if score >= 0.99:
return "Very High"
elif score >= 0.9:
return "High"
elif score >= 0.7:
return "Moderate"
elif score >= 0.5:
return "Low"
else:
return "Very Low"
def quantize_scores(df):
"""Quantize the scores in the dataframe"""
df["score_id"] = df["score_id"].apply(lambda x: quantizer(x))
df["score_type"] = df["score_type"].apply(lambda x: quantizer(x))
return df
def get_score_counts(df, col="score_id"):
"""Returns counts by score"""
return (
df.groupby(col)
.count()["pos_start"]
.reset_index()
.sort_values(by="pos_start", ascending=False)
.rename(columns={"pos_start": "mentions"})
.reset_index(drop=True)
)
def get_score_dict(df, col="score_id"):
"""Returns a dict of counts by score_id"""
# get counts
conf = get_score_counts(df, col)
# zip two columns to create dict
conf_dict = dict(zip(conf[col], conf["mentions"]))
# add zero values
for k in ["Very High", "High", "Moderate", "Low", "Very Low"]:
if k not in conf_dict:
conf_dict[k] = 0
return conf_dict
def get_top(df, col="concept_name", N: int = 10):
"""get top N values by count"""
return (
df.groupby(col)
.count()["pos_start"]
.reset_index()
.sort_values(by="pos_start", ascending=False)
.rename(columns={"pos_start": "mentions"})
.reset_index(drop=True)
.head(n=N)
)
def get_top_dict(df, col="concept_name", N: int = 10):
"""Get top values as dict with ordered lists"""
return get_top(df, col, N).to_dict("list")
def report(df):
"""get report of basic summary stats"""
print(
f"Found {count_text(df)} mentions of healthcare information ({count_text_unique(df)} unique)."
)
print(
f"Found {count_concepts_unique(df)} unique concepts, spanning {count_types_unique(df)} categories."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str, default=None, help="File to analyze")
args = parser.parse_args()
# read file
df = pd.read_csv(args.filename, sep=None, engine="python")
# convert and quantize
df = convert_data_model(df)
df = quantize_scores(df) | scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/analytics.py | analytics.py |
import argparse
import pandas as pd
from convert_data_model import convert_data_model
def count_text(df) -> int:
"""len(df) = # of text spans"""
return len(df)
def count_text_unique(df) -> int:
"""unique text spans (no correction for caps/lower/etc.)"""
return df.text.nunique()
def count_concepts_unique(df) -> int:
"""unique biomedical concepts"""
return df.concept_id.nunique()
def count_types_unique(df) -> int:
"""unique concept types"""
return df.concept_type.nunique()
def quantizer(score):
"""
Quantizes scores with desired range
Run with:
df[col] = df[col].apply(lambda x: quantizer(x))
to transform column into quantized values (or set to new column)
"""
if score >= 0.99:
return "Very High"
elif score >= 0.9:
return "High"
elif score >= 0.7:
return "Moderate"
elif score >= 0.5:
return "Low"
else:
return "Very Low"
def quantize_scores(df):
"""Quantize the scores in the dataframe"""
df["score_id"] = df["score_id"].apply(lambda x: quantizer(x))
df["score_type"] = df["score_type"].apply(lambda x: quantizer(x))
return df
def get_score_counts(df, col="score_id"):
"""Returns counts by score"""
return (
df.groupby(col)
.count()["pos_start"]
.reset_index()
.sort_values(by="pos_start", ascending=False)
.rename(columns={"pos_start": "mentions"})
.reset_index(drop=True)
)
def get_score_dict(df, col="score_id"):
"""Returns a dict of counts by score_id"""
# get counts
conf = get_score_counts(df, col)
# zip two columns to create dict
conf_dict = dict(zip(conf[col], conf["mentions"]))
# add zero values
for k in ["Very High", "High", "Moderate", "Low", "Very Low"]:
if not k in conf_dict:
conf_dict[k] = 0
return conf_dict
def get_top(df, col="concept_name", N: int = 10):
"""get top N values by count"""
return (
df.groupby(col)
.count()["pos_start"]
.reset_index()
.sort_values(by="pos_start", ascending=False)
.rename(columns={"pos_start": "mentions"})
.reset_index(drop=True)
.head(n=N)
)
def get_top_dict(df, col="concept_name", N: int = 10):
"""Get top values as dict with ordered lists"""
return get_top(df, col, N).to_dict("list")
def report(df):
"""get report of basic summary stats"""
print(
f"Found {count_text(df)} mentions of healthcare information ({count_text_unique(df)} unique)."
)
print(
f"Found {count_concepts_unique(df)} unique concepts, spanning {count_types_unique(df)} categories."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str, default=None, help="File to analyze")
args = parser.parse_args()
# read file
df = pd.read_csv(args.filename, sep=None, engine="python")
# convert and quantize
df = convert_data_model(df)
df = quantize_scores(df) | 0.758242 | 0.430506 |
# ScienceIO API Analytics
In this demo, we'll:
- Log in with our user account
- Make our first request
- Put the response in a pandas dataframe and analyze it
```
import pandas as pd
import yaml
from IPython.display import display, JSON
from analytics import *
from scienceio import ScienceIO
```
## Initialize client
```
scio = ScienceIO()
```
If the account already exists (based on the login), you will be notified and can proceed to logging in.
## Make a request to the API
```
# This is the text we'll send to the API
query_text = (
"""
The COVID-19 pandemic has shown a markedly low proportion of
cases among children. Age disparities in observed COVID-19 cases could be
explained by children having lower susceptibility to infection, lower
propensity to show clinical symptoms or both. COVID-19 has shown to induce
Kawasaki syndrome in some children. Kawasaki (KD), or Mucocutaneous Lymph Node Syndrome
as it is often called, is an acute illness that affects mostly children 6 months to 5 years
old—median age in the KD cohort was 25 months—and features inflammation of small and medium
blood vessels. Although the cause of KD is unknown, it is believed to occur in genetically
predisposed children after exposure to an environmental trigger such as an infection.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `annotations` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
df = quantize_scores(df)
display(df.head())
```
## Viewing the results
```
report(df)
# top mentions
get_top_dict(df, "text")
# top concepts
get_top_dict(df, "concept_name")
# top concept types
get_top_dict(df, "concept_type")
# count for score_id
get_score_dict(df)
```
| scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/example-analytics-1.ipynb | example-analytics-1.ipynb | import pandas as pd
import yaml
from IPython.display import display, JSON
from analytics import *
from scienceio import ScienceIO
scio = ScienceIO()
# This is the text we'll send to the API
query_text = (
"""
The COVID-19 pandemic has shown a markedly low proportion of
cases among children. Age disparities in observed COVID-19 cases could be
explained by children having lower susceptibility to infection, lower
propensity to show clinical symptoms or both. COVID-19 has shown to induce
Kawasaki syndrome in some children. Kawasaki (KD), or Mucocutaneous Lymph Node Syndrome
as it is often called, is an acute illness that affects mostly children 6 months to 5 years
old—median age in the KD cohort was 25 months—and features inflammation of small and medium
blood vessels. Although the cause of KD is unknown, it is believed to occur in genetically
predisposed children after exposure to an environmental trigger such as an infection.
"""
)
# Make a request
response_query = scio.annotate(text=query_text)
# We can pass `annotations` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
df = quantize_scores(df)
display(df.head())
report(df)
# top mentions
get_top_dict(df, "text")
# top concepts
get_top_dict(df, "concept_name")
# top concept types
get_top_dict(df, "concept_type")
# count for score_id
get_score_dict(df) | 0.537041 | 0.886273 |
import argparse
import pandas as pd
def beta_mapper():
"""For converting previous data models to beta model"""
return {
"text": "text",
"start": "pos_start",
"end": "pos_end",
"text_norm": "concept_id",
"entity": "concept_id",
"canonical_name": "concept_name",
"entity_p": "score_id",
"tag": "concept_type",
"entity_type": "concept_type",
"entity_type_p": "score_type",
"entity_subtype": "concept_subtype",
}
def remap_concept_types(df):
"""Convert legacy concept types to beta types"""
type_mapper = {
"Activity": "Context",
"Anatomy": "Anatomy & Physiology",
"Boolean": "Context",
"Cardinal": "Context",
"Cell": "Cell Biology",
"Cell Component": "Cell Biology",
"CellLine": "Cell Biology",
"Chemical": "Chemicals & Drugs",
"Concept": "Context",
"Device": "Medical Devices",
"Disease": "Medical Conditions",
"Gene": "Genetics",
"Geography": "Context",
"Group": "Context",
"Mutation": "Genetics",
"None": "None",
"Nucleic Acid, Nucleoside, or Nucleotide": "Genetics",
"Object": "Context",
"Occupation": "Context",
"Organization": "Context",
"Phenomenon": "Anatomy & Physiology",
"Physiology": "Anatomy & Physiology",
"Procedure": "Medical Procedures",
"Species": "Species & Viruses",
}
if "concept_type" in df.columns:
return df.replace({"concept_type": type_mapper})
else:
return df.replace({"entity_type": type_mapper})
def convert_data_model(df):
"""Remap column names and change types"""
df = df.rename(columns=beta_mapper())
df = remap_concept_types(df)
return df
def save_dataframe(df, filename: str = "input.tsv", sep: str = "\t"):
"""Save locally"""
if "." in filename:
filename = filename.split(".")[0] + "_REMAPPED.tsv"
df.to_csv(filename, sep=sep, index=False)
def main(filename):
"""Convert file to proper data model"""
print(f"Reading: {filename}")
df = pd.read_csv(filename, sep=None, engine="python")
df_remapped = convert_data_model(df)
save_dataframe(df_remapped, filename)
print(f"Saved to: {filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"filename", type=str, default=None, help="File to have data model remapped"
)
args = parser.parse_args()
main(filename=args.filename) | scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/convert_data_model.py | convert_data_model.py |
import argparse
import pandas as pd
def beta_mapper():
"""For converting previous data models to beta model"""
return {
"text": "text",
"start": "pos_start",
"end": "pos_end",
"text_norm": "concept_id",
"entity": "concept_id",
"canonical_name": "concept_name",
"entity_p": "score_id",
"tag": "concept_type",
"entity_type": "concept_type",
"entity_type_p": "score_type",
"entity_subtype": "concept_subtype",
}
def remap_concept_types(df):
"""Convert legacy concept types to beta types"""
type_mapper = {
"Activity": "Context",
"Anatomy": "Anatomy & Physiology",
"Boolean": "Context",
"Cardinal": "Context",
"Cell": "Cell Biology",
"Cell Component": "Cell Biology",
"CellLine": "Cell Biology",
"Chemical": "Chemicals & Drugs",
"Concept": "Context",
"Device": "Medical Devices",
"Disease": "Medical Conditions",
"Gene": "Genetics",
"Geography": "Context",
"Group": "Context",
"Mutation": "Genetics",
"None": "None",
"Nucleic Acid, Nucleoside, or Nucleotide": "Genetics",
"Object": "Context",
"Occupation": "Context",
"Organization": "Context",
"Phenomenon": "Anatomy & Physiology",
"Physiology": "Anatomy & Physiology",
"Procedure": "Medical Procedures",
"Species": "Species & Viruses",
}
if "concept_type" in df.columns:
return df.replace({"concept_type": type_mapper})
else:
return df.replace({"entity_type": type_mapper})
def convert_data_model(df):
"""Remap column names and change types"""
df = df.rename(columns=beta_mapper())
df = remap_concept_types(df)
return df
def save_dataframe(df, filename: str = "input.tsv", sep: str = "\t"):
"""Save locally"""
if "." in filename:
filename = filename.split(".")[0] + "_REMAPPED.tsv"
df.to_csv(filename, sep=sep, index=False)
def main(filename):
"""Convert file to proper data model"""
print(f"Reading: {filename}")
df = pd.read_csv(filename, sep=None, engine="python")
df_remapped = convert_data_model(df)
save_dataframe(df_remapped, filename)
print(f"Saved to: {filename}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"filename", type=str, default=None, help="File to have data model remapped"
)
args = parser.parse_args()
main(filename=args.filename) | 0.580471 | 0.417568 |
# ScienceIO API Setup
In this demo, we'll:
- Set up a user account
- Make our first request
- Put the response in a pandas dataframe
```
from scienceio import ScienceIO
import pandas as pd
from IPython.display import display, JSON
```
## Set up an account
```
# Register an account
ScienceIO.register(
first_name="Demo",
last_name="User",
email='[email protected]'
)
```
You will be sent an email asking you to verify your email address.
If the account already exists (based on the email), you will be notified and can proceed to logging in.
## Create a config file
Create `~/.scio/config` with the following content:
```
[SETTINGS]
[email protected]
```
## Make a request to the API
```
# instantiate the API with your credentials
scio = ScienceIO()
# Now we'll submit a query
query_text = (
'The COVID-19 pandemic has shown a markedly low proportion of '
'cases among children 1–4. Age disparities in observed cases could be '
'explained by children having lower susceptibility to infection, lower '
'propensity to show clinical symptoms or both.'
)
response_query = scio.annotate(text=query_text)
# Let's check the keys
list(response_query.keys())
```
- `text` is the original text submitted
- `request_id` is the unique ID for this request
- `characters` is the number of characters in text
- `billed_characters` is the number of characters billed (min: 300)
- `annotations` is a list where each item is a biomedical concept identified by the model
## Viewing the results
```
try:
import pandas as pd
except ImportError:
!pip install pandas
import pandas as pd
# We can pass `annotations` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
display(df)
```
| scienceio | /scienceio-2.2.0.tar.gz/scienceio-2.2.0/examples/example-onboarding.ipynb | example-onboarding.ipynb | from scienceio import ScienceIO
import pandas as pd
from IPython.display import display, JSON
# Register an account
ScienceIO.register(
first_name="Demo",
last_name="User",
email='[email protected]'
)
[SETTINGS]
[email protected]
# instantiate the API with your credentials
scio = ScienceIO()
# Now we'll submit a query
query_text = (
'The COVID-19 pandemic has shown a markedly low proportion of '
'cases among children 1–4. Age disparities in observed cases could be '
'explained by children having lower susceptibility to infection, lower '
'propensity to show clinical symptoms or both.'
)
response_query = scio.annotate(text=query_text)
# Let's check the keys
list(response_query.keys())
try:
import pandas as pd
except:
!pip install pandas
import pandas as pd
# We can pass `spans` directly to a dataframe
df = pd.DataFrame(response_query['annotations'])
display(df) | 0.354657 | 0.824179 |
============================================================
ScienceLogic - a Python library for the ScienceLogic EM7 API
============================================================
.. image:: https://img.shields.io/pypi/v/sciencelogic.svg
:target: https://pypi.python.org/pypi/sciencelogic
.. image:: https://img.shields.io/travis/tonybaloney/sciencelogic.svg
:target: https://travis-ci.org/tonybaloney/sciencelogic
.. image:: https://coveralls.io/repos/github/tonybaloney/sciencelogic/badge.svg?branch=master
:target: https://coveralls.io/github/tonybaloney/sciencelogic?branch=master
.. image:: https://readthedocs.org/projects/sciencelogic/badge/?version=latest
:target: https://readthedocs.org/projects/sciencelogic/?badge=latest
:alt: Documentation Status
Client library for sciencelogic EM7
* Free software: MIT license
* Documentation: https://sciencelogic.readthedocs.org.
Usage
--------
To use Python EM7 in a project
.. code-block:: python
from sciencelogic.client import Client
c = Client('jazz', 'hands!', 'https://au-monitoring.mcp-services.net/')
# API details
print(c.sysinfo)
Credits
---------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
Thanks to Georgi Dimitrov (@georgijd) for his contributions and testing
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| sciencelogic | /sciencelogic-0.6.0.tar.gz/sciencelogic-0.6.0/README.rst | README.rst | ============================================================
ScienceLogic - a Python library for the ScienceLogic EM7 API
============================================================
.. image:: https://img.shields.io/pypi/v/sciencelogic.svg
:target: https://pypi.python.org/pypi/sciencelogic
.. image:: https://img.shields.io/travis/tonybaloney/sciencelogic.svg
:target: https://travis-ci.org/tonybaloney/sciencelogic
.. image:: https://coveralls.io/repos/github/tonybaloney/sciencelogic/badge.svg?branch=master
:target: https://coveralls.io/github/tonybaloney/sciencelogic?branch=master
.. image:: https://readthedocs.org/projects/sciencelogic/badge/?version=latest
:target: https://readthedocs.org/projects/sciencelogic/?badge=latest
:alt: Documentation Status
Client library for sciencelogic EM7
* Free software: MIT license
* Documentation: https://sciencelogic.readthedocs.org.
Usage
--------
To use Python EM7 in a project
.. code-block:: python
from sciencelogic.client import Client
c = Client('jazz', 'hands!', 'https://au-monitoring.mcp-services.net/')
# API details
print(c.sysinfo)
Credits
---------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
Thanks to Georgi Dimitrov (@georgijd) for his contributions and testing
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| 0.801431 | 0.396886 |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/tonybaloney/sciencelogic/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
PythonEM7 could always use more documentation, whether as part of the
official PythonEM7 docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/tonybaloney/sciencelogic/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `sciencelogic` for local development.
1. Fork the `sciencelogic` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/sciencelogic.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ virtualenv sciencelogic
$ cd sciencelogic/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 sciencelogic tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
https://travis-ci.org/tonybaloney/sciencelogic/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_sciencelogic
| sciencelogic | /sciencelogic-0.6.0.tar.gz/sciencelogic-0.6.0/CONTRIBUTING.rst | CONTRIBUTING.rst | .. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/tonybaloney/sciencelogic/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
PythonEM7 could always use more documentation, whether as part of the
official PythonEM7 docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/tonybaloney/sciencelogic/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `sciencelogic` for local development.
1. Fork the `sciencelogic` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/sciencelogic.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ virtualenv sciencelogic
$ cd sciencelogic/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 sciencelogic tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check
https://travis-ci.org/tonybaloney/sciencelogic/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_sciencelogic
| 0.591959 | 0.444324 |
.. sciencelogic documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to PythonEM7's documentation!
======================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
api
contributing
authors
history
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| sciencelogic | /sciencelogic-0.6.0.tar.gz/sciencelogic-0.6.0/docs/index.rst | index.rst | .. sciencelogic documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to PythonEM7's documentation!
======================================
Contents:
.. toctree::
:maxdepth: 2
readme
installation
usage
api
contributing
authors
history
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 0.524882 | 0.132178 |
<div id="top"></div>
<h1 align="center">
<br>
Sciencer Toolkit
</h1>
<h4 align="center">A smarter way to find articles.</h4>
<p align="center">
<a href="https://pypi.org/project/sciencer/">
<img src="https://img.shields.io/pypi/status/sciencer.svg?style=flat-square"
alt="PyPi Package Version"></a>
<a href="https://github.com/SciencerIO/sciencer-toolkit/issues">
<img src="https://img.shields.io/github/issues-raw/SciencerIO/sciencer-toolkit.svg?style=flat-square&logo=github&logoColor=white"
alt="GitHub issues"></a>
<a href="https://github.com/SciencerIO/sciencer-toolkit/pulls">
<img src="https://img.shields.io/github/issues-pr-raw/SciencerIO/sciencer-toolkit.svg?style=flat-square&logo=github&logoColor=white"
alt="GitHub pull requests"></a>
<a href="https://github.com/SciencerIO/sciencer-toolkit/LICENSE">
<img src="https://img.shields.io/github/license/SciencerIO/sciencer-toolkit.svg?style=flat-square"
alt="License: MIT License"></a>
</p>
<p align="center">
<a href="#about">About</a> -
<a href="#usage">Usage</a> -
<a href="#roadmap">Roadmap</a> -
<a href="#contributing">Contributing</a>
</p>
<p align="center">
<a href="#collectors">Collectors</a> -
<a href="#expanders">Expanders</a> -
<a href="#filters">Filters</a> -
<a href="#providers">Providers</a>
</p>
---
## About
Sciencer Toolkit enables researchers to **programmatically conduct a literature review** using an intuitive yet flexible interface.
At its core, Sciencer collects sets of papers.
The initial set of papers is created through the use of **Collectors** (e.g. paper doi, author name).
Then, Sciencer iteratively finds new papers using **Expanders** (e.g. authors, citations, references).
Finally, newly found papers need to satisfy a series of **Filters** in order to be accepted into the current set.
Being iterative in nature, Sciencer allows you to repeat the above steps as many times as you'd like.
This project was motivated by the absence of tools to automate systematic reviews using clear and well-defined criteria.
Still, for literature reviews that do not need to follow specific criteria, there are several tools that can help discover new papers.
## Usage
```python
# Create the Sciencer Core Component
sciencer = Sciencer()
# Define provider
sciencer.add_provider(SemanticScholarProvider())
# Define collectors
## this collector will gather all known papers authored by "John Doe" into the set
sciencer.add_collector(sciencer.collectors.CollectByAuthorID("John Doe"))
## this collector will collect the paper with DOI "1234567890" into the set
sciencer.add_collector(sciencer.collectors.CollectByDOI("1234567890"))
## this collector will collect the papers matching the given terms
sciencer.add_collector(sciencer.collectors.CollectByTerms(["Term 1", "Term 2", "Term 3"]))
# Define expanders
## this expander will gather all known papers written by authors in the current set.
sciencer.add_expander(sciencer.expanders.ExpandByAuthors())
## this expander will gather all the referenced papers
sciencer.add_expander(sciencer.expanders.ExpandByReferences())
## this expander will gather all the cited papers
sciencer.add_expander(sciencer.expanders.ExpandByCitations())
# Define filters
## this filter will reject papers that were published before 2010 and after 2030
sciencer.add_filter(sciencer.filters.FilterByYear(min_year=2010, max_year=2030))
## this filter will reject all the papers that do not have the word social on the abstract
sciencer.add_filter(sciencer.filters.FilterByAbstract("social"))
## this filter will reject all the papers that do not have the field of study Computer Science
sciencer.add_filter(sciencer.filters.FilterByFieldOfStudy("Computer Science"))
# Run one iteration
results = sciencer.iterate()
```
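Because the process is iterative, you can call `iterate()` repeatedly; each call expands and filters the current set of papers again (a small sketch using only the API shown above):

```python
# Run three expansion/filtering passes over the growing set of papers
for _ in range(3):
    results = sciencer.iterate()
```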
For more examples on how to use the Sciencer toolkit, please check the directory `examples/`.
<p align="right">(<a href="#top">back to top</a>)</p>
## Collectors
| Name | Description | Parameters |
| --------- | :------------------------------------------- | :---------------------------------------- |
| Author ID | Collects all the papers written by an author  | Author's SemanticScholar ID               |
| Paper DOI | Collects a paper by its DOI | Paper's DOI |
| Terms | Collects papers by terms | Query Terms <br> Maximum Number of Papers |
<p align="right">(<a href="#top">back to top</a>)</p>
## Expanders
| Name | Description |
| ---------- | :-------------------------------- |
| Authors | Expands a paper by its authors |
| References | Expands a paper by its references |
| Citations  | Expands a paper by its citations  |
<p align="right">(<a href="#top">back to top</a>)</p>
## Filters
| Name | Description | Parameters |
| ----------------- | :------------------------------------ | ----------------------------------------------------------------------------------- |
| By Year | Filters a paper by its year | The lowest acceptable year (inclusive) <br> The highest acceptable year (inclusive) |
| By Abstract Words | Filters a paper by its abstract | The collection of words the abstract should include (at least one) |
| By Field Of Study | Filters a paper by its field of study | The field of study the paper should have |
<p align="right">(<a href="#top">back to top</a>)</p>
## Providers
| Name | Provider | Features |
| :--------------: | :--------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------- |
| Semantic Scholar | [Semantic Scholar Academic Graph API](https://www.semanticscholar.org/product/api) | **Search by Author** (Name, S2ID) <br> **Search By Paper ID** (S2ID, DOI, ArXiv, MAG, ACL, PubMed, Corpus) |
| DBLP | [DBLP Search API](https://dblp.org/faq/How+to+use+the+dblp+search+API.html) | *Work in Progress* |
<p align="right">(<a href="#top">back to top</a>)</p>
## Roadmap
- [ ] Create Paper's and Author's Cache
- [x] Add Bulk Expanders (to avoid redundancy)
- [ ] Add support for multithreading
- [ ] Add Collectors
- [ ] Add Collect by Venue/Proceedings
- [ ] Add Expanders
- [x] Add Expand by Citations
- [x] Add Expand by References
- [ ] Add Expand by Venue/Proceedings
- [ ] Add Filters
- [ ] Add Filter by Number of Citations
- [x] Add Filter by Topic
- [ ] Add Filter by Keywords
- [ ] Add Compound Filters
- [x] Add utility to write results to a *.csv
See the [open issues](https://github.com/SciencerIO/sciencer-toolkit/issues) for a full list of proposed features (and known issues).
<p align="right">(<a href="#top">back to top</a>)</p>
## Contributing
Want to **add a new provider, filter or expander**?
Looking to improve **the core functionality of the sciencer toolkit**?
We look forward to including your contributions in the toolkit!
If you have a suggestion that would improve the toolkit, just send us a Pull Request!
If you are looking for an additional collector/filter/expander/provider or just want to report a bug, you can also simply open an issue with the tag "enhancement" or "bug", respectively.
<p align="right">(<a href="#top">back to top</a>)</p>
| sciencer | /sciencer-0.1.3.tar.gz/sciencer-0.1.3/README.md | README.md | # Create the Sciencer Core Component
sciencer = Sciencer()
# Define provider
sciencer.add_provider(SemanticScholarProvider())
# Define collectors
## this collector will gather all known papers authored by "John Doe" into de set
sciencer.add_collector(sciencer.collectors.CollectByAuthorID("John Doe"))
## this collector will collect the paper with DOI "1234567890" into the set
sciencer.add_collector(sciencer.collectors.CollectByDOI("1234567890"))
## this collector will collect the papers with
sciencer.add_collector(sciencer.collectors.CollectByTerms(["Term 1", "Term 2", "Term 3"]))
# Define expanders
## this expander will gather all known papers written by authors in the current set.
sciencer.add_expander(sciencer.expanders.ExpandByAuthors())
## this expander will gather all the referenced papers
sciencer.add_expander(sciencer.expanders.ExpandByReferences())
## this expander will gather all the cited papers
sciencer.add_expander(sciencer.expanders.ExpandByCitations())
# Define filters
## this filter will reject papers that were published before 2010 and after 2030
sciencer.add_filter(sciencer.filters.FilterByYear(min_year=2010, max_year=2030))
## this filter will reject all the papers that do not have the word social on the abstract
sciencer.add_filter(sciencer.filters.FilterByAbstract("social"))
## this filter will reject all the papers that do not have the field of study Computer Science
sciencer.add_filter(sciencer.filters.FilterByFieldOfStudy("Computer Science"))
# Run one iteration
results = sciencer.iterate()
| 0.528777 | 0.817538 |
const path = require("path");
const webpack = require("webpack");
const HtmlWebpackPlugin = require("html-webpack-plugin");
const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin;
// const ModuleFederationPlugin = require("webpack/lib/container/ModuleFederationPlugin");
// const deps = require("./package.json").dependencies;
const shimJS = path.resolve(__dirname, "src", "emptyshim.js");
function shim(regExp) {
return new webpack.NormalModuleReplacementPlugin(regExp, shimJS);
}
const JUPYTER_HOST = 'http://localhost:8686';
const IS_PRODUCTION = process.argv.indexOf('--mode=production') > -1;
const mode = IS_PRODUCTION ? "production" : "development";
// inline-source-map
const devtool = IS_PRODUCTION ? false : "inline-cheap-source-map";
const minimize = IS_PRODUCTION ? true : false;
module.exports = {
entry: "./src/Example",
mode: mode,
devServer: {
port: 3063,
client: { overlay: false },
historyApiFallback: true,
// static: path.join(__dirname, "dist"),
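// Proxy Jupyter API traffic (REST + WebSocket) and the plotly.js asset from the webpack dev server to the local Jupyter server defined by JUPYTER_HOST.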
proxy: {
'/api/jupyter': {
target: JUPYTER_HOST,
ws: true,
secure: false,
changeOrigin: true,
},
'/plotly.js': {
target: JUPYTER_HOST + '/api/jupyter/pool/react',
ws: false,
secure: false,
changeOrigin: true,
},
},
},
watchOptions: {
aggregateTimeout: 300,
poll: 2000, // Seems to stabilise HMR file change detection
ignored: "/node_modules/"
},
devtool,
optimization: {
minimize,
// usedExports: true,
},
output: {
publicPath: "http://localhost:3063/",
filename: '[name].[contenthash].datalayerIo.js',
},
resolve: {
extensions: [".ts", ".tsx", ".js", ".jsx"],
alias: {
path: "path-browserify",
stream: "stream-browserify",
},
},
module: {
rules: [
/*
{
test: /bootstrap\.tsx$/,
loader: "bundle-loader",
options: {
lazy: true,
},
},
*/
{
test: /\.tsx?$/,
loader: "babel-loader",
options: {
plugins: [
"@babel/plugin-proposal-class-properties",
],
presets: [
["@babel/preset-react", {
runtime: 'automatic',
/* importSource: '@emotion/react' */
},
],
"@babel/preset-typescript",
],
cacheDirectory: true
},
exclude: /node_modules/,
},
{
test: /\.m?js$/,
resolve: {
fullySpecified: false,
},
},
{
test: /\.jsx?$/,
loader: "babel-loader",
options: {
presets: ["@babel/preset-react"],
cacheDirectory: true
}
},
{
test: /\.css?$/i,
use: ['style-loader', 'css-loader'],
},
{
// In .css files, svg is loaded as a data URI.
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
issuer: /\.css$/,
use: {
loader: 'svg-url-loader',
options: { encoding: 'none', limit: 10000 }
}
},
{
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
issuer: /\.tsx$/,
use: [
'@svgr/webpack'
],
},
{
// In .ts and .tsx files (both of which compile to .js), svg files
// must be loaded as a raw string instead of data URIs.
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
issuer: /\.js$/,
use: {
loader: 'raw-loader'
}
},
{
test: /\.(png|jpg|jpeg|gif|ttf|woff|woff2|eot)(\?v=[0-9]\.[0-9]\.[0-9])?$/,
use: [{ loader: 'url-loader', options: { limit: 10000 } }],
},
]
},
plugins: [
!IS_PRODUCTION ?
new webpack.ProvidePlugin({
process: 'process/browser'
})
:
new webpack.ProvidePlugin({
process: 'process/browser'
}),
new BundleAnalyzerPlugin({
analyzerMode: IS_PRODUCTION ? "static" : "disabled", // server, static, json, disabled.
openAnalyzer: false,
generateStatsFile: false,
}),
/*
shim(/@fortawesome/),
shim(/moment/),
shim(/react-jvectormap/),
shim(/react-slick/),
shim(/react-tagsinput/),
*/
new HtmlWebpackPlugin({
template: "./public/index.html",
}),
],
}; | sciences | /sciences-0.0.2.tar.gz/sciences-0.0.2/webpack.config.js | webpack.config.js | const path = require("path");
const webpack = require("webpack");
const HtmlWebpackPlugin = require("html-webpack-plugin");
const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin;
// const ModuleFederationPlugin = require("webpack/lib/container/ModuleFederationPlugin");
// const deps = require("./package.json").dependencies;
const shimJS = path.resolve(__dirname, "src", "emptyshim.js");
function shim(regExp) {
return new webpack.NormalModuleReplacementPlugin(regExp, shimJS);
}
const JUPYTER_HOST = 'http://localhost:8686';
const IS_PRODUCTION = process.argv.indexOf('--mode=production') > -1;
const mode = IS_PRODUCTION ? "production" : "development";
// inline-source-map
const devtool = IS_PRODUCTION ? false : "inline-cheap-source-map";
const minimize = IS_PRODUCTION ? true : false;
module.exports = {
entry: "./src/Example",
mode: mode,
devServer: {
port: 3063,
client: { overlay: false },
historyApiFallback: true,
// static: path.join(__dirname, "dist"),
proxy: {
'/api/jupyter': {
target: JUPYTER_HOST,
ws: true,
secure: false,
changeOrigin: true,
},
'/plotly.js': {
target: JUPYTER_HOST + '/api/jupyter/pool/react',
ws: false,
secure: false,
changeOrigin: true,
},
},
},
watchOptions: {
aggregateTimeout: 300,
poll: 2000, // Seems to stabilise HMR file change detection
ignored: "/node_modules/"
},
devtool,
optimization: {
minimize,
// usedExports: true,
},
output: {
publicPath: "http://localhost:3063/",
filename: '[name].[contenthash].datalayerIo.js',
},
resolve: {
extensions: [".ts", ".tsx", ".js", ".jsx"],
alias: {
path: "path-browserify",
stream: "stream-browserify",
},
},
module: {
rules: [
/*
{
test: /bootstrap\.tsx$/,
loader: "bundle-loader",
options: {
lazy: true,
},
},
*/
{
test: /\.tsx?$/,
loader: "babel-loader",
options: {
plugins: [
"@babel/plugin-proposal-class-properties",
],
presets: [
["@babel/preset-react", {
runtime: 'automatic',
/* importSource: '@emotion/react' */
},
],
"@babel/preset-typescript",
],
cacheDirectory: true
},
exclude: /node_modules/,
},
{
test: /\.m?js$/,
resolve: {
fullySpecified: false,
},
},
{
test: /\.jsx?$/,
loader: "babel-loader",
options: {
presets: ["@babel/preset-react"],
cacheDirectory: true
}
},
{
test: /\.css?$/i,
use: ['style-loader', 'css-loader'],
},
{
// In .css files, svg is loaded as a data URI.
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
issuer: /\.css$/,
use: {
loader: 'svg-url-loader',
options: { encoding: 'none', limit: 10000 }
}
},
{
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
issuer: /\.tsx$/,
use: [
'@svgr/webpack'
],
},
{
// In .ts and .tsx files (both of which compile to .js), svg files
// must be loaded as a raw string instead of data URIs.
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
issuer: /\.js$/,
use: {
loader: 'raw-loader'
}
},
{
test: /\.(png|jpg|jpeg|gif|ttf|woff|woff2|eot)(\?v=[0-9]\.[0-9]\.[0-9])?$/,
use: [{ loader: 'url-loader', options: { limit: 10000 } }],
},
]
},
plugins: [
!IS_PRODUCTION ?
new webpack.ProvidePlugin({
process: 'process/browser'
})
:
new webpack.ProvidePlugin({
process: 'process/browser'
}),
new BundleAnalyzerPlugin({
analyzerMode: IS_PRODUCTION ? "static" : "disabled", // server, static, json, disabled.
openAnalyzer: false,
generateStatsFile: false,
}),
/*
shim(/@fortawesome/),
shim(/moment/),
shim(/react-jvectormap/),
shim(/react-slick/),
shim(/react-tagsinput/),
*/
new HtmlWebpackPlugin({
template: "./public/index.html",
}),
],
}; | 0.356447 | 0.113826 |
# Making a new release of datalayer
The extension can be published to `PyPI` and `npm` manually or using the [Jupyter Releaser](https://github.com/jupyter-server/jupyter_releaser).
## Manual release
### Python package
This extension can be distributed as Python
packages. All of the Python
packaging instructions are in the `pyproject.toml` file to wrap your extension in a
Python package. Before generating a package, we first need to install `build`.
```bash
pip install build twine hatch
```
Bump the version using `hatch`. By default this will create a tag.
See the docs on [hatch-nodejs-version](https://github.com/agoose77/hatch-nodejs-version#semver) for details.
```bash
hatch version <new-version>
```
To create a Python source package (`.tar.gz`) and the binary package (`.whl`) in the `dist/` directory, do:
```bash
python -m build
```
> `python setup.py sdist bdist_wheel` is deprecated and will not work for this package.
Then to upload the package to PyPI, do:
```bash
twine upload dist/*
```
### NPM package
To publish the frontend part of the extension as a NPM package, do:
```bash
npm login
npm publish --access public
```
## Automated releases with the Jupyter Releaser
The extension repository should already be compatible with the Jupyter Releaser.
Check out the [workflow documentation](https://github.com/jupyter-server/jupyter_releaser#typical-workflow) for more information.
Here is a summary of the steps to cut a new release:
- Fork the [`jupyter-releaser` repo](https://github.com/jupyter-server/jupyter_releaser)
- Add `ADMIN_GITHUB_TOKEN`, `PYPI_TOKEN` and `NPM_TOKEN` to the Github Secrets in the fork
- Go to the Actions panel
- Run the "Draft Changelog" workflow
- Merge the Changelog PR
- Run the "Draft Release" workflow
- Run the "Publish Release" workflow
## Publishing to `conda-forge`
If the package is not on conda forge yet, check the documentation to learn how to add it: https://conda-forge.org/docs/maintainer/adding_pkgs.html
Otherwise a bot should pick up the new version published to PyPI, and open a new PR on the feedstock repository automatically.
| sciences | /sciences-0.0.2.tar.gz/sciences-0.0.2/RELEASE.md | RELEASE.md | pip install build twine hatch
hatch version <new-version>
python -m build
twine upload dist/*
npm login
npm publish --access public | 0.405449 | 0.928018 |
import { JupyterFrontEnd, JupyterFrontEndPlugin } from '@jupyterlab/application';
import { ISettingRegistry } from '@jupyterlab/settingregistry';
import { MainAreaWidget, ICommandPalette } from '@jupyterlab/apputils';
import { ILauncher } from '@jupyterlab/launcher';
import { reactIcon } from '@jupyterlab/ui-components';
import { requestAPI } from './handler';
import { DatalayerWidget } from './widget';
import '../style/index.css';
/**
* The command IDs used by the react-widget plugin.
*/
namespace CommandIDs {
export const create = 'create-react-widget';
}
/**
* Initialization data for the @datalayer/sciences extension.
*/
const plugin: JupyterFrontEndPlugin<void> = {
id: '@datalayer/sciences:plugin',
autoStart: true,
requires: [ICommandPalette],
optional: [ISettingRegistry, ILauncher],
activate: (
app: JupyterFrontEnd,
palette: ICommandPalette,
settingRegistry: ISettingRegistry | null,
launcher: ILauncher
) => {
const { commands } = app;
const command = CommandIDs.create;
commands.addCommand(command, {
caption: 'Show Sciences',
label: 'Sciences',
icon: (args: any) => reactIcon,
execute: () => {
const content = new DatalayerWidget();
const widget = new MainAreaWidget<DatalayerWidget>({ content });
widget.title.label = 'Sciences';
widget.title.icon = reactIcon;
app.shell.add(widget, 'main');
}
});
const category = 'Sciences';
palette.addItem({ command, category, args: { origin: 'from palette' } });
if (launcher) {
launcher.add({
command
});
}
console.log('JupyterLab extension @datalayer/sciences is activated!');
if (settingRegistry) {
settingRegistry
.load(plugin.id)
.then(settings => {
console.log('@datalayer/sciences settings loaded:', settings.composite);
})
.catch(reason => {
console.error('Failed to load settings for @datalayer/sciences.', reason);
});
}
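// Call the server extension's `get_example` endpoint to verify that the Python backend is installed and reachable.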
requestAPI<any>('get_example')
.then(data => {
console.log(data);
})
.catch(reason => {
console.error(
`The datalayer server extension appears to be missing.\n${reason}`
);
});
}
};
export default plugin; | sciences | /sciences-0.0.2.tar.gz/sciences-0.0.2/src/index.ts | index.ts | import { JupyterFrontEnd, JupyterFrontEndPlugin } from '@jupyterlab/application';
import { ISettingRegistry } from '@jupyterlab/settingregistry';
import { MainAreaWidget, ICommandPalette } from '@jupyterlab/apputils';
import { ILauncher } from '@jupyterlab/launcher';
import { reactIcon } from '@jupyterlab/ui-components';
import { requestAPI } from './handler';
import { DatalayerWidget } from './widget';
import '../style/index.css';
/**
* The command IDs used by the react-widget plugin.
*/
namespace CommandIDs {
export const create = 'create-react-widget';
}
/**
* Initialization data for the @datalayer/sciences extension.
*/
const plugin: JupyterFrontEndPlugin<void> = {
id: '@datalayer/sciences:plugin',
autoStart: true,
requires: [ICommandPalette],
optional: [ISettingRegistry, ILauncher],
activate: (
app: JupyterFrontEnd,
palette: ICommandPalette,
settingRegistry: ISettingRegistry | null,
launcher: ILauncher
) => {
const { commands } = app;
const command = CommandIDs.create;
commands.addCommand(command, {
caption: 'Show Sciences',
label: 'Sciences',
icon: (args: any) => reactIcon,
execute: () => {
const content = new DatalayerWidget();
const widget = new MainAreaWidget<DatalayerWidget>({ content });
widget.title.label = 'Sciences';
widget.title.icon = reactIcon;
app.shell.add(widget, 'main');
}
});
const category = 'Sciences';
palette.addItem({ command, category, args: { origin: 'from palette' } });
if (launcher) {
launcher.add({
command
});
}
console.log('JupyterLab extension @datalayer/sciences is activated!');
if (settingRegistry) {
settingRegistry
.load(plugin.id)
.then(settings => {
console.log('@datalayer/sciences settings loaded:', settings.composite);
})
.catch(reason => {
console.error('Failed to load settings for @datalayer/sciences.', reason);
});
}
requestAPI<any>('get_example')
.then(data => {
console.log(data);
})
.catch(reason => {
console.error(
`The datalayer server extension appears to be missing.\n${reason}`
);
});
}
};
export default plugin; | 0.496338 | 0.065009 |
<h1 align="center">
ScienceWorld
</h1>
<p align="center">
<!-- Version badge using shields.io -->
<a href="https://github.com/allenai/ScienceWorld/releases">
<img src="https://img.shields.io/github/v/release/allenai/ScienceWorld">
</a>
<!-- Link to tutorials badge using shields.io -->
<a href="https://huggingface.co/spaces/MarcCote/ScienceWorld">
<img src="https://img.shields.io/badge/🤗-Demo-yellow">
</a>
<!-- Follow on twitter badge using shields.io -->
<a href="https://sciworld.apps.allenai.org">
<img src="https://img.shields.io/badge/Website-green">
</a>
</p>
ScienceWorld is a text-based virtual environment centered around accomplishing tasks from the standardized elementary science curriculum. This code accompanies the paper [ScienceWorld: Is your Textual Agent Smarter than a 5th grader?](https://arxiv.org/abs/2203.07540).
<h3 align="center"><img src="https://github.com/allenai/ScienceWorld/blob/main/media/scienceworld_environment.png" width="75%"/></h3>
### Demo and examples
You can try ScienceWorld yourself via our [HuggingFace Space](https://huggingface.co/spaces/MarcCote/ScienceWorld) or read some of the [playthrough transcripts](https://sciworld.apps.allenai.org/explore).
### Citation
```
@misc{scienceworld2022,
title={ScienceWorld: Is your Agent Smarter than a 5th Grader?},
author={Ruoyao Wang and Peter Jansen and Marc-Alexandre C{\^o}t{\'e} and Prithviraj Ammanabrolu},
year={2022},
eprint={2203.07540},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2203.07540}
}
```
# Quickstart
**Before running:** You will have to have `Java 1.8+` installed on your system (shipped with most linux distributions).
Install with pip:
```bash
conda create --name scienceworld python=3.8
conda activate scienceworld
pip install scienceworld
```
Run an example random agent, on task 13 (classification: place a non-living thing in a box), for 5 episodes:
> python examples/random_agent.py --task-num=13 --num-episodes=5 --simplifications-preset easy
Run a user console where you can interact with the environment, on task 3 (change of state: melting):
> python examples/human.py --task-num=3 --num-episodes=5
# Web Server Demo
A web server demo is also available, that allows running a ScienceWorld user console that can be interacted with in a web browser.
<h3 align="center"><img src="https://github.com/allenai/ScienceWorld/blob/main/media/web_demo_screenshot.png" width="75%"/></h3>
To run the web server demo:
```bash
conda create --name scienceworld python=3.8
conda activate scienceworld
pip install scienceworld[webserver]
```
Run the web server:
> python examples/scienceworld-web-server-example.py
Point your web browser to:
`localhost:8080`
# ScienceWorld Design
ScienceWorld is written in Scala (2.12.9), and compiles using `sbt` into a JAR file that is run with Java. For convenience, a Python API is provided (Python >= 3.7), which interfaces using the `py4j` package.
# Tasks
The tasks are listed in the table below along with their number of variations. Either the task ID or its name can be used to load a task with `env.load()`; a minimal usage sketch follows the table.
| Task ID | Task Name | # Variations |
|-------|----------------------------------------------------|------|
| 1-1 | boil | 30 |
| 1-2 | melt | 30 |
| 1-3 | freeze | 30 |
| 1-4 | change-the-state-of-matter-of | 30 |
| 2-1 | use-thermometer | 540 |
| 2-2 | measure-melting-point-known-substance | 436 |
| 2-3 | measure-melting-point-unknown-substance | 300 |
| 3-1 | power-component | 20 |
| 3-2 | power-component-renewable-vs-nonrenewable-energy | 20 |
| 3-3 | test-conductivity | 900 |
| 3-4 | test-conductivity-of-unknown-substances | 600 |
| 4-1 | find-living-thing | 300 |
| 4-2 | find-non-living-thing | 300 |
| 4-3 | find-plant | 300 |
| 4-4 | find-animal | 300 |
| 5-1 | grow-plant | 126 |
| 5-2 | grow-fruit | 126 |
| 6-1 | chemistry-mix | 32 |
| 6-2 | chemistry-mix-paint-secondary-color | 36 |
| 6-3 | chemistry-mix-paint-tertiary-color | 36 |
| 7-1 | lifespan-longest-lived | 125 |
| 7-2 | lifespan-shortest-lived | 125 |
| 7-3 | lifespan-longest-lived-then-shortest-lived | 125 |
| 8-1 | identify-life-stages-1 | 14 |
| 8-2 | identify-life-stages-2 | 10 |
| 9-1 | inclined-plane-determine-angle | 168 |
| 9-2 | inclined-plane-friction-named-surfaces | 1386 |
| 9-3 | inclined-plane-friction-unnamed-surfaces | 162 |
| 10-1 | mendelian-genetics-known-plant | 120 |
| 10-2 | mendelian-genetics-unknown-plant | 480 |
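As mentioned above, here is a minimal interaction sketch using the Python API. The method names follow the bundled examples (`examples/random_agent.py`, `examples/human.py`), but exact constructor arguments and return signatures may differ between releases, so treat this as a sketch rather than a reference:

```python
from scienceworld import ScienceWorldEnv

# Start the Java-backed simulator (a sketch; constructor arguments may vary by version)
env = ScienceWorldEnv("", envStepLimit=100)

# Load task 1-2 ("melt"), variation 0, with the "easy" simplification preset
env.load("melt", 0, "easy")

obs, info = env.reset()
obs, reward, done, info = env.step("look around")
print(obs)
print(f"reward={reward}, done={done}")
```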
# Baseline Agents
**DRRN:** https://github.com/cognitiveailab/drrn-scienceworld
**KG-A2C:** https://github.com/cognitiveailab/kga2c-scienceworld
**CALM:** https://github.com/cognitiveailab/calm-scienceworld
**Behavior Cloning and Decision Transformer:** https://github.com/cognitiveailab/t5-scienceworld
| scienceworld | /scienceworld-1.1.3.tar.gz/scienceworld-1.1.3/README.md | README.md | @misc{scienceworld2022,
title={ScienceWorld: Is your Agent Smarter than a 5th Grader?},
author={Ruoyao Wang and Peter Jansen and Marc-Alexandre C{\^o}t{\'e} and Prithviraj Ammanabrolu},
year={2022},
eprint={2203.07540},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2203.07540}
}
conda create --name scienceworld python=3.8
conda activate scienceworld
pip install scienceworld
conda create --name scienceworld python=3.8
conda activate scienceworld
pip install scienceworld[webserver] | 0.512449 | 0.93744 |
Scienco
=======
[![pipeline status][pipeline]][homepage]
[![coverage report][coverage]][homepage]
[![latest version][version]][pypi]
[![python requires][pyversions]][pypi]
Calculate the readability of text using one of a variety of computed indexes including:
- Flesch-Kincaid score
- Automated readability index
- Coleman-Liau index
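For reference, the classic formulations behind these indexes are shown below (scienco reports them as `flesch_reading_ease_score`, `automated_readability_index` and `coleman_liau_index`; the coefficients actually applied, in particular for Russian text via `is_russian` in the usage example, may differ from these English-language definitions):

```
Flesch reading ease     FRES = 206.835 - 1.015 * (words / sentences) - 84.6 * (syllables / words)
Automated readability   ARI  = 4.71 * (letters / words) + 0.5 * (words / sentences) - 21.43
Coleman-Liau            CLI  = 0.0588 * L - 0.296 * S - 15.8
                        (L = letters per 100 words, S = sentences per 100 words)
```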
Requirements
------------
Python 3.8+
Installation
------------
```
$ pip install scienco
```
Usage
-----
```python
>>> import scienco
>>> metrics = scienco.compute_metrics("Lorem ipsum dolor sit amet ...")
>>> metrics
Metrics(sentences=32, words=250, letters=1329, syllables=489, is_russian=False)
>>> indexes = scienco.compute_indexes(sentences=32, words=250, letters=1329, syllables=489, is_russian=False)
>>> indexes
Indexes(flesch_reading_ease_score=33.43, automated_readability_index=7.51, coleman_liau_index=11.67)
```
Distribution
------------
This project is licensed under the terms of the [MIT License](LICENSE).
Links
-----
- Documentation: <https://amalchuk.gitlab.io/scienco>
- Code: <https://gitlab.com/amalchuk/scienco>
- GitHub mirror: <https://github.com/amalchuk/scienco>
[homepage]: <https://gitlab.com/amalchuk/scienco>
[pypi]: <https://pypi.org/project/scienco>
[pipeline]: <https://gitlab.com/amalchuk/scienco/badges/master/pipeline.svg?style=flat-square>
[coverage]: <https://gitlab.com/amalchuk/scienco/badges/master/coverage.svg?style=flat-square>
[version]: <https://img.shields.io/pypi/v/scienco?color=blue&style=flat-square>
[pyversions]: <https://img.shields.io/pypi/pyversions/scienco?color=blue&style=flat-square>
| scienco | /scienco-1.0.1.tar.gz/scienco-1.0.1/README.md | README.md | $ pip install scienco
>>> import scienco
>>> metrics = scienco.compute_metrics("Lorem ipsum dolor sit amet ...")
>>> metrics
Metrics(sentences=32, words=250, letters=1329, syllables=489, is_russian=False)
>>> indexes = scienco.compute_indexes(sentences=32, words=250, letters=1329, syllables=489, is_russian=False)
>>> indexes
Indexes(flesch_reading_ease_score=33.43, automated_readability_index=7.51, coleman_liau_index=11.67) | 0.532668 | 0.688881 |
# sciengdox
A python package for creating scientific and engineering documents
via [`pandoc`](https://pandoc.org/) including inline-executable Python code.
## Key Features
1. [Pandoc filter](https://pandoc.org/filters.html) for converting pandoc
markdown to other formats (especially HTML and PDF).
2. Codeblock execution
3. Helper functions for generating tables, SVG
[matplotlib](https://matplotlib.org/) plots, etc.
# Motivation
This is inspired by [`pweave`](http://mpastell.com/pweave/),
[`codebraid`](https://github.com/gpoore/codebraid),
[`knitr`](https://yihui.name/knitr/), and cousins, but I always seemed to have
to do some pre/post-processing to get things the way I want them. I already use
other pandoc filters (e.g. pandoc-citeproc, pandoc-crossref), so why not simply
have another pandoc filter that will execute inline code and insert the results?
Another key is getting quality diagrams from scientific python code. For
example, pweave automatically inserts generated images, but there doesn't seem
to be a way to get SVG images without, again, pre- and post-processing in
another script. SVG plots are, obviously, scalable and work much better for web
and PDF outputs.
# Development
Use [`poetry`](https://python-poetry.org/) for local environment management.
After cloning the repository:
```shell
$ cd <project-repo>
$ poetry install
$ poetry shell
```
To package and release:
```shell
$ poetry build
$ poetry publish
```
Be sure to
[configure your credentials](https://python-poetry.org/docs/repositories/#configuring-credentials)
prior to publishing.
See also [this page](https://packaging.python.org/tutorials/packaging-projects/).
# Use and Example
An example Pandoc markdown file can be found in `example`. To process this
file, you need to have [`pandoc`](https://pandoc.org/) installed and in your
path. You also need to install the Pandoc filters
[pandoc-crossref](https://github.com/lierdakil/pandoc-crossref) and
[pandoc-citeproc](https://github.com/jgm/pandoc-citeproc) which provide nice
cross-referencing and reference/bibliography handling.
## Installation
When working with macOS or Linux or
[Linux on Windows via WSL](https://gist.github.com/gbingersoll/9e18afb9f4c3acd8674f5595c7e010f5)
`pandoc` and the filters can be installed via [Homebrew](https://brew.sh/). (On
Linux/WSL, install [linuxbrew](https://docs.brew.sh/Homebrew-on-Linux).) Then
simply run:
```shell
$ brew install pandoc
$ brew install pandoc-crossref
$ brew install pandoc-citeproc
$ brew install librsvg
```
Then, of course, you need to install this filter and some other helpers for the
example. The example helpers can be installed into your Python virtual
environment by running:
```shell
$ poetry install -E examples
```
### Windows-specific Install
To set up an environment for Windows from scratch including terminals, editors,
Python, etc., see
[this gist](https://gist.github.com/gbingersoll/c3033f8cb41c3eb865563c0711a30545).
Additional installation steps to use this library include installing `pandoc`
and additional filters and utilities.
Install `pandoc` by [downloading the installer](https://pandoc.org/installing.html)
and following the standard instructions. This should also get you
`pandoc-citeproc.exe` for managing citations.
Install `pandoc-crossref` (for managing intra-document cross-references) by
[downloading](https://github.com/lierdakil/pandoc-crossref/releases) the zipped
Windows release. Unzip it, and move `pandoc-crossref.exe` to a location that is
on your system path. For example, you can move it next to `pandoc-citeproc.exe`
in `C:\Program Files\Pandoc`.
Finally, to handle embedding SVG images in PDF documents, this library relies on
`rsvg-convert`. This can be installed via
[Chocolatey](https://chocolatey.org/). Install the Chocolatey package manager
if you do not already have it, and then run:
```shell
$ choco install rsvg-convert
```
Instead of (or in addition to) Chocolatey, you can also install the
[Scoop](https://scoop.sh/) installer. Scoop does not currently have a formula
for `rsvg-convert`, but it can also be installed from
[SourceForge](https://sourceforge.net/projects/tumagcc/files/rsvg-convert-dll-2.40.16.7z/download?use_mirror=phoenixnap)
if you do not want to use Chocolatey.
#### UTF-8 Note
The underlying Pandoc filter for executing Python code embedded in your
documents relies on inter-process communication with a Python REPL behind the
scenes. The default inter-process character encoding for Python on Windows is
[CP-1252](https://en.wikipedia.org/wiki/Windows-1252), and this can cause
problems if your Python scripts generate output with special characters (and if
you are doing any scientific or engineering writing, they definitely will).
Fortunately, this is easily worked around by setting a Windows environment
variable `PYTHONIOENCODING` to `utf-8`. After setting this, be sure to restart
any open terminal windows for the change to take effect.
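One way to set the variable persistently is from a Command Prompt or PowerShell window (remember to restart any open terminals afterwards):
```shell
setx PYTHONIOENCODING utf-8
```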
#### Matplotlib Note
If you use `matplotlib` for generating plots in inline Python code in your
document, you should explicitly set the `Agg` backend early in your document (see
the `example/example.md` in this repo). Without this, document conversion can
hang when the `svg_figure` helper function is called.
Somewhere near the top of your Markdown document, add an executable Python code
block (without `.echo` so it won't appear in the output) that includes:
```python
import matplotlib
matplotlib.use('Agg')
```
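Later in the document, a plot-producing executable block can then hand its figure to the `svg_figure` helper mentioned above; the import path and call signature in this sketch are assumptions for illustration only (see `example/example.md` for the actual usage):
```python
import matplotlib.pyplot as plt
# NOTE: the module path and signature of svg_figure are assumed here, not taken from the docs
from sciengdox.figures import svg_figure
fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
ax.set_xlabel("x")
ax.set_ylabel("x squared")
svg_figure(fig, "x-squared")  # hypothetical call: emit the plot as an SVG for the document
```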
#### Panflute Version Note
This plugin relies on the
[`panflute`](https://github.com/sergiocorreia/panflute) Python package as a
bridge between Python and `pandoc`'s Haskell. The `panflute`
[README](https://github.com/sergiocorreia/panflute#supported-pandoc-versions)
lists API compatibility requirements between versions of `panflute` and versions
of `pandoc`. Double-check this if you run into errors that mention `panflute`
when compiling a document.
If you are running an older version of `pandoc` (e.g. 2.9.2) and start a new
project, you will need to explicitly install the compatible `panflute` version
in your environment with e.g. `poetry add [email protected]`. Or
alternatively install a `pandoc` version 2.11.x or later.
### PDF Generation
To generate PDF files through Pandoc, you need to have `xelatex` installed.
On Linux/WSL:
```shell
$ sudo apt-get install texlive-xetex
```
On macOS:
```shell
$ brew install --cask mactex
```
On Windows (without WSL):
[Download the MikTeX installer](https://miktex.org/download) and install as
usual. Then ensure that the binary folder is in your path (e.g.
`C:\Users\<username>\AppData\Local\Programs\MiKTeX 2.9\miktex\bin\x64\`). Note
that the first time you generate a document, MiKTeX will prompt you to install a
lot of packages, so watch for a MikTeX window popping up (possibly behind other
windows) and follow the prompts.
### Fonts
The example templates rely on having a few fonts installed.
The fonts to get are the Google
[Source Sans Pro](https://fonts.google.com/specimen/Source+Sans+Pro),
[Source Code Pro](https://fonts.google.com/specimen/Source+Code+Pro), and
[Source Serif Pro](https://fonts.google.com/specimen/Source+Serif+Pro) families.
On macOS or Windows (without WSL), these can simply be downloaded and installed
as you would any other font. On Linux via WSL, you can install these normally
on the Windows side and then synchronize the Windows font folder to the Linux
side. To do this, edit (using `sudo`) `/etc/fonts/local.conf` and add:
```xml
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<dir>/mnt/c/Windows/Fonts</dir>
</fontconfig>
```
Then update the font cache on the Linux side:
```shell
$ sudo fc-cache -fv
```
### Stylesheets
The example file uses an HTML template that includes a CSS stylesheet that is
generated from [SCSS](https://sass-lang.com/documentation/syntax). To compile
this automatically, you need to have
[SASS installed](https://sass-lang.com/install).
On macOS, this can be installed via Homebrew:
```shell
$ brew install sass/sass/sass
```
On macOS/Linux/WSL/Windows it can be installed as a Node.js package (assuming
you already have [Node.js/npm](https://nodejs.org/) installed):
```shell
$ npm install -g sass
```
## Building
This Python library provides a script, `compiledoc`, that will appear in your
`poetry` or `pipenv` virtual environment's path (or globally) once the library
is installed. In general, you provide an output directory and an input markdown
file, and it will build an HTML output when the `--html` flag is used (and also
by default).
```shell
$ compiledoc -o output --html mydoc.md
```
To build a PDF (via `xelatex`):
```shell
$ compiledoc -o output --pdf mydoc.md
```
To build a Markdown file with executable Python output included (e.g. for
debugging purposes), specify `--md`. This will generate a file in the output
directory with (perhaps confusingly) the same name as the input:
```shell
$ compiledoc -o output --md mydoc.md
```
To build everything, specify `--all`:
```shell
$ compiledoc -o output --all mydoc.md
```
To see all available command line options (for specifying templates, paths to
required external executables, static files like images and bibliography files,
etc.):
```shell
$ compiledoc --help
```
## Building the Example
Once everything is setup, compile the example HTML file by running:
```shell
$ cd example
$ compiledoc -o output example.md
```
Open `example/output/example.html` in your browser or use e.g. the [Live
Server](https://marketplace.visualstudio.com/items?itemName=ritwickdey.LiveServer)
plugin for VS Code.
## Auto Regen
To autoregenerate the document (e.g. the HTML version, the output of which is
watched by the
[Live Server](https://marketplace.visualstudio.com/items?itemName=ritwickdey.LiveServer)
), you can use [Watchman](https://facebook.github.io/watchman/).
To create a trigger on a particular directory (`doc/` in this example) with a
`notebook.md` file (change this to suit your purposes), copy the following into
a temporary `trigger.json` file:
```json
[
"trigger",
"doc/",
{
"name": "build_html",
"expression": [
"anyof",
[
"match",
"notebook.md",
"wholename"
]
],
"command": [
"poetry",
"run",
"compiledoc",
"-o",
"output",
"--html",
"notebook.md"
]
}
]
```
Then from your project root directory run:
```shell
watchman -j < trigger.json
rm trigger.json
```
It is also recommended that you add a `.watchmanconfig` file to the watched
directory (e.g. `doc/`; also add `.watchmanconfig` to your `.gitignore`) with
the following contents:
```json
{
"settle": 3000
}
```
The settle parameter is in milliseconds.
To turn off watchman:
```shell
watchman shutdown-server
```
To turn it back on:
```shell
cd <project-root>
watchman watch doc/
```
To watch the Watchman:
```shell
tail -f /usr/local/var/run/watchman/<username>-state/log
```
(Note that on Windows/WSL, to get `tail` to work the way you expect, you need to
add `---disable-inotify` to the command; and yes, that's three `-` for some
reason.)
## Older pandoc Versions
For `pandoc` 2.9 and earlier, the citation manager `pandoc-citeproc` was a
separate filter that gets added to the compilation pipeline. The path to this
filter can be specified on the command line to `compiledoc` with the
`--pandoc-citeproc PATH` flag.
In newer versions of `pandoc` (2.11 and beyond), the citeproc filter is built-in
to pandoc and is run by adding `--citeproc` to the `pandoc` command-line. The
`compiledoc` script adds this by default unless the flag `--use-pandoc-citeproc`
is added, in which case the older filter will be used.
If you do not wish to run `citeproc` at all, you can pass the `--no-citeproc` flag to
`compiledoc` to skip citation processing altogether.
| sciengdox | /sciengdox-0.11.0.tar.gz/sciengdox-0.11.0/README.md | README.md | $ cd <project-repo>
$ poetry install
$ poetry shell
$ poetry build
$ poetry publish
$ brew install pandoc
$ brew install pandoc-crossref
$ brew install pandoc-citeproc
$ brew install librsvg
$ poetry install -E examples
$ choco install rsvg-convert
import matplotlib
matplotlib.use('Agg')
$ sudo apt-get install texlive-xetex
$ brew install --cask mactex
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<dir>/mnt/c/Windows/Fonts</dir>
</fontconfig>
$ sudo fc-cache -fv
$ brew install sass/sass/sass
$ npm install -g sass
$ compiledoc -o output --html mydoc.md
$ compiledoc -o output --pdf mydoc.md
$ compiledoc -o output --md mydoc.md
$ compiledoc -o output --all mydoc.md
$ compiledoc --help
$ cd example
$ compiledoc -o output example.md
[
"trigger",
"doc/",
{
"name": "build_html",
"expression": [
"anyof",
[
"match",
"notebook.md",
"wholename"
]
],
"command": [
"poetry",
"run",
"compiledoc",
"-o",
"output",
"--html",
"notebook.md"
]
}
]
watchman -j < trigger.json
rm trigger.json
{
"settle": 3000
}
watchman shutdown-server
cd <project-root>
watchman watch doc/
tail -f /usr/local/var/run/watchman/<username>-state/log | 0.275422 | 0.985677 |
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.11.0] - 2023-02-06
### Changed
- Updated dependencies
## [0.10.0] - 2022-03-01
### Changed
- Changed development and setup workflow to use [poetry](https://python-poetry.org)
- Changed code style to follow flake8/black
### Fixed
- Quantity Markdown formatting in newer versions of Pint [#22]
- Documentation issues in README
## [0.9.0] - 2021-05-26
### Added
- Allow `pq()` helper function to print bare numbers (i.e. without units).
- Add `pf()` and `pi()` helpers to print floats and integers.
### Fixed
- Fixed test suite for saving figures.
## [0.8.0] - 2021-05-24
### Added
- Document output type is now accessible to executable Python code in the
document via the global variable document_output_format.
[#19](https://github.com/gbingersoll/sciengdox/issues/19)
- Allow incorporating interactive Plotly plots in HTML output and the SVG
version of the same in PDF output.
[#10](https://github.com/gbingersoll/sciengdox/issues/10)
### Fixed
- PythonRunner now uses the same python executable as is used to run the pandoc
filter. This fixes an issue when running in a venv and necessary packages for
your doc aren't installed globally.
[#18](https://github.com/gbingersoll/sciengdox/issues/18)
## [0.7.0] - 2020-12-09
### Changed
- Updated citeproc handling for pandoc >=2.11. Default is now to use the
`--citeproc` flag on the `pandoc` command line internally.
- Various changes in example template for pandoc >=2.11.
### Fixed
- matplotlib is no longer required to build. The example still uses it, but
you don't have to have it to do a basic document build anymore.
[#16](https://github.com/gbingersoll/sciengdox/issues/16)
## [0.6.3] - 2020-05-27
### Fixed
- Blank lines in code listings are no longer dropped in HTML output.
[#14](https://github.com/gbingersoll/sciengdox/issues/14)
## [0.6.2] - 2020-05-27
### Fixed
- Moved `colorama` library to `install_requires` in `setup.py`.
## [0.6.1] - 2020-05-27
### Fixed
- Added `MANIFEST.in` to make sure `sciengdox/units/unit_defs.txt`, providing
custom pint units definitions, is included in the output package.
## [0.6.0] - 2020-05-27
Initial public release
[unreleased]: https://github.com/gbingersoll/sciengdox/compare/v0.11.0...HEAD
[0.11.0]: https://github.com/gbingersoll/sciengdox/compare/v0.10.0...v0.11.0
[0.10.0]: https://github.com/gbingersoll/sciengdox/compare/v0.9.0...v0.10.0
[0.9.0]: https://github.com/gbingersoll/sciengdox/compare/v0.8.0...v0.9.0
[0.8.0]: https://github.com/gbingersoll/sciengdox/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/gbingersoll/sciengdox/compare/v0.6.3...v0.7.0
[0.6.3]: https://github.com/gbingersoll/sciengdox/compare/v0.6.2...v0.6.3
[0.6.2]: https://github.com/gbingersoll/sciengdox/compare/v0.6.1...v0.6.2
[0.6.1]: https://github.com/gbingersoll/sciengdox/compare/v0.6.0...v0.6.1
[0.6.0]: https://github.com/gbingersoll/sciengdox/releases/tag/v0.6.0
| sciengdox | /sciengdox-0.11.0.tar.gz/sciengdox-0.11.0/CHANGELOG.md | CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.11.0] - 2023-02-06
### Changed
- Updated dependencies
## [0.10.0] - 2022-03-01
### Changed
- Changed development and setup workflow to use [poetry](https://python-poetry.org)
- Changed code style to follow flake8/black
### Fixed
- Quantity Markdown formatting in newer versions of Pint [#22]
- Documentation issues in README
## [0.9.0] - 2021-05-26
### Added
- Allow `pq()` helper function to print bare numbers (i.e. without units).
- Add `pf()` and `pi()` helpers to print floats and integers.
### Fixed
- Fixed test suite for saving figures.
## [0.8.0] - 2021-05-24
### Added
- Document output type is now accessible to executable Python code in the
document via the global variable document_output_format.
[#19](https://github.com/gbingersoll/sciengdox/issues/19)
- Allow incorporating interactive Plotly plots in HTML output and the SVG
version of the same in PDF output.
[#10](https://github.com/gbingersoll/sciengdox/issues/10)
### Fixed
- PythonRunner now uses the same python executable as is used to run the pandoc
filter. This fixes an issue when running in a venv and necessary packages for
your doc aren't installed globally.
[#18](https://github.com/gbingersoll/sciengdox/issues/18)
## [0.7.0] - 2020-12-09
### Changed
- Updated citeproc handling for pandoc >=2.11. Default is now to use the
`--citeproc` flag on the `pandoc` command line internally.
- Various changes in example template for pandoc >=2.11.
### Fixed
- matplotlib is no longer required to build. The example still uses it, but
you don't have to have it to do a basic document build anymore.
[#16](https://github.com/gbingersoll/sciengdox/issues/16)
## [0.6.3] - 2020-05-27
### Fixed
- Blank lines in code listings are no longer dropped in HTML output.
[#14](https://github.com/gbingersoll/sciengdox/issues/14)
## [0.6.2] - 2020-05-27
### Fixed
- Moved `colorama` library to `install_requires` in `setup.py`.
## [0.6.1] - 2020-05-27
### Fixed
- Added `MANIFEST.in` to make sure `sciengdox/units/unit_defs.txt`, providing
custom pint units definitions, is included in the output package.
## [0.6.0] - 2020-05-27
Initial public release
[unreleased]: https://github.com/gbingersoll/sciengdox/compare/v0.11.0...HEAD
[0.11.0]: https://github.com/gbingersoll/sciengdox/compare/v0.10.0...v0.11.0
[0.10.0]: https://github.com/gbingersoll/sciengdox/compare/v0.9.0...v0.10.0
[0.9.0]: https://github.com/gbingersoll/sciengdox/compare/v0.8.0...v0.9.0
[0.8.0]: https://github.com/gbingersoll/sciengdox/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/gbingersoll/sciengdox/compare/v0.6.3...v0.7.0
[0.6.3]: https://github.com/gbingersoll/sciengdox/compare/v0.6.2...v0.6.3
[0.6.2]: https://github.com/gbingersoll/sciengdox/compare/v0.6.1...v0.6.2
[0.6.1]: https://github.com/gbingersoll/sciengdox/compare/v0.6.0...v0.6.1
[0.6.0]: https://github.com/gbingersoll/sciengdox/releases/tag/v0.6.0
| 0.872361 | 0.540075 |
# scienlib
[![Build Status](https://img.shields.io/github/stars/dylan14567/scienlib.svg)](https://github.com/dylan14567/scienlib)
[![License](https://img.shields.io/github/license/dylan14567/scienlib.svg)](https://github.com/dylan14567/scienlib/blob/main/.github/LICENSE)
[![dylan14567](https://img.shields.io/badge/author-dylan14567-green.svg)](https://github.com/dylan14567)
[![bug_report](https://img.shields.io/badge/bug-report-red.svg)](https://github.com/dylan14567/scienlib/blob/main/.github/ISSUE_TEMPLATE/bug_report.md)
[![security_policy](https://img.shields.io/badge/security-policy-cyan.svg)](https://github.com/dylan14567/scienlib/blob/main/.github/SECURITY.md)
[![Python](https://img.shields.io/badge/language-Python%20-yellow.svg)](https://www.python.org)
Scienlib is a scientific Python library that provides mathematical and physics functions, as well as cybersecurity utilities and other tools.
## Pre-requirements
To use the library, you need the following Python modules installed:
```
wheel
requests
python-nmap
speedtest-cli
```
## Installation
To install scienlib on Linux, run this command in your terminal:
```shell
pip3 install scienlib
```
Once the command finishes, the installation is complete and scienlib is ready to use.
## Authors
* **Dylan Meca** - *Initial Work* - [dylan14567](https://github.com/dylan14567)
You can also look at the list of all [contributors](https://github.com/dylan14567/scienlib/contributors) who have participated in this project.
## Contributing
Please read [CONTRIBUTING.md](https://github.com/dylan14567/scienlib/blob/main/.github/CONTRIBUTING.md) for details of our code of conduct, and the process for submitting pull requests.
## License
The license for this project is [MIT](https://github.com/dylan14567/scienlib/blob/main/LICENSE)
| scienlib | /scienlib-1.1.tar.gz/scienlib-1.1/README.md | README.md | wheel
requests
python-nmap
speedtest-cli
pip3 install scienlib
| 0.550849 | 0.870212 |
# Security policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 5.1.x | :white_check_mark: |
| 5.0.x | :x: |
| 4.0.x | :white_check_mark: |
| < 4.0 | :x: |
## Report a vulnerability
If you find an error in the scripts, report it at: [https://github.com/dylan14567/configserver/blob/master/.github/ISSUE_TEMPLATE/bug_report.md](https://github.com/dylan14567/scienlib/blob/master/.github/ISSUE_TEMPLATE/bug_report.md)
| scienlib | /scienlib-1.1.tar.gz/scienlib-1.1/.github/SECURITY.md | SECURITY.md | # Security policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 5.1.x | :white_check_mark: |
| 5.0.x | :x: |
| 4.0.x | :white_check_mark: |
| < 4.0 | :x: |
## Report a vulnerability
In case of error, in the scripts, inform us about the error in: [https://github.com/dylan14567/configserver/blob/master/.github/ISSUE_TEMPLATE/bug_report.md](https://github.com/dylan14567/scienlib/blob/master/.github/ISSUE_TEMPLATE/bug_report.md)
| 0.561455 | 0.382257 |
# Contributing
Hello! We are delighted that you want to contribute to the scienlib project. Your help is essential to keep it great.
Following these guidelines helps communicate that you are respecting the time of the developers who manage and develop this open source project. In return, they should reciprocate that respect by addressing your issue, evaluating the changes, and helping you finalize your pull requests.
# How to report a bug
Do you think you found a bug? Check [the list of open issues](https://github.com/dylan14567/scienlib/issues) to see if your bug has already been reported. If not, send a new number.
Here are some tips for writing great bug reports:
* Describe the specific problem (e.g., "The widget does not rotate clockwise" versus "an error appears")
* Include steps to reproduce the error, what you expected to happen, and what happened instead
* Check that you are using the latest version of the project and its dependencies.
* Include what version of the project you are using, as well as any relevant dependencies.
* Only includes one error per problem. If you have discovered two errors, present two problems
* Even if you don't know how to correct the error, including a failed test can help others locate it.
# How to suggest a feature or improvement
Here are some general guidelines for proposing changes:
* Each pull request must implement a function or bug fix. If you want to add or correct more than one thing, submit more than one pull request.
* Do not commit changes to files that are irrelevant to their function or bug fixes.
* Please do not increase the version number in your pull request (it will be changed before launch)
* Write a good commit message
At a high level, the process for proposing changes is:
1. Clone the project
2. Create a new branch: ``` git checkout -b my-branch-name ```
3. Make your change, add tests, and make sure the tests still pass.
4. Submit a pull request.
5. Wait for your pull request to be reviewed and merged.
Interested in submitting your first pull request? It is easy! You can learn how in this free series [How to contribute to a code project open on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github)
# Your first contribution
[Contribute to open source on GitHub](https://guides.github.com/activities/contributing-to-open-source)
[Use pull requests](https://help.github.com/articles/using-pull-requests)
[GitHub Help](https://help.github.com)
| scienlib | /scienlib-1.1.tar.gz/scienlib-1.1/.github/CONTRIBUTING.md | CONTRIBUTING.md | # Contributing
Hello! We are delighted that you want to contribute to the scienlib project. Your help is essential to keep it great.
Following these guidelines helps communicate that you are respecting the time of the developers who manage and develop this open source project. In return, they should reciprocate that respect by addressing your issue, evaluating the changes, and helping you finalize your pull requests.
# How to report a bug
Do you think you found a bug? Check [the list of open issues](https://github.com/dylan14567/scienlib/issues) to see if your bug has already been reported. If not, send a new number.
Here are some tips for writing great bug reports:
* Describe the specific problem (eg, "The widget does not rotate clockwise" versus "an error appears")
* Include steps to reproduce the error, what you expected to happen, and what happened instead
* Check that you are using the latest version of the project and its dependencies.
* Include what version of the project you are using, as well as any relevant dependencies.
* Only includes one error per problem. If you have discovered two errors, present two problems
* Even if you don't know how to correct the error, including a failed test can help others locate it.
# How to suggest a feature or improvement
Here are some general guidelines for proposing changes:
* Each pull request must implement a function or bug fix. If you want to add or correct more than one thing, submit more than one pull request.
* Do not commit changes to files that are irrelevant to their function or bug fixes.
* Please do not increase the version number in your pull request (it will be changed before launch)
* Write a good confirmation message
At a high level, the process for proposing changes is:
1. Clone the project
2. Create a new branch: ``` git checkout -b my-branch-name ```
3. Make your change, add tests, and make sure the tests still pass.
4. Submit a pull request.
5. Wait for your pull request to be reviewed and merged.
Interested in submitting your first pull request? It is easy! You can learn how in this free series [How to contribute to a code project open on GitHub](https://egghead.io/series/how-to-contribute-to-an-open-source-project-on-github)
# Your first contribution
[Contribute to open source on GitHub](https://guides.github.com/activities/contributing-to-open-source)
[Use pull requests](https://help.github.com/articles/using-pull-requests)
[GitHub Help](https://help.github.com)
| 0.454714 | 0.520131 |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
https://dylan1467.github.io/contact.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| scienlib | /scienlib-1.1.tar.gz/scienlib-1.1/.github/CODE_OF_CONDUCT.md | CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
https://dylan1467.github.io/contact.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| 0.58439 | 0.685038 |
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.
| scienlib | /scienlib-1.1.tar.gz/scienlib-1.1/.github/ISSUE_TEMPLATE/bug_report.md | bug_report.md | ---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.
| 0.688678 | 0.38168 |
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| scienlib | /scienlib-1.1.tar.gz/scienlib-1.1/.github/ISSUE_TEMPLATE/feature_request.md | feature_request.md | ---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| 0.661486 | 0.369799 |
# Colormaps in Matplotlib
## Installation
```bash
pip install "git+https://github.com/gwerbin/scientific-colourmaps@master#egg_info=scientific-colormaps&subdirectory=python"
```
## Usage
- *module* `scientific_colourmaps`
- *function* `load_cmap`
- *arg* `cmap_name`: string. Required.
- *kwarg* `cmap_path`: string or `os.PathLike`. Optional; default: `cmap_path=CMAP_DEFAULT_PATH`
- *return*: instance of `matplotlib.colors.LinearSegmentedColormap`.
- *function* `list_cmaps`
- *kwarg* `cmap_path`: string or `os.PathLike`. Optional; default: `cmap_path=CMAP_DEFAULT_PATH`
- *return* set of strings, containing available cmap names
- *variable* `CMAP_DEFAULT_PATH`, instance of `pathlib.Path`
- value: `pathlib.Path('./ScientificColourMaps4.zip')`
### Example
In the shell:
```bash
wget http://www.fabiocrameri.ch/resources/ScientificColourMaps4.zip
```
In Python:
```python
import numpy as np
import matplotlib.pyplot as plt
from scientific_colourmaps import load_cmap
# Make some fake data to plot
plot_data = np.random.randn(50, 50)
# Plot using the "nuuk" colormap
cmap_nuuk = load_cmap('nuuk')
plt.imshow(plot_data, cmap=cmap_nuuk)
plt.colorbar()
plt.show()
```
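To check which colormap names are available in the downloaded archive before loading one, use the `list_cmaps` function described above:
```python
from scientific_colourmaps import list_cmaps
# Set of available colormap names, read from ScientificColourMaps4.zip in the working directory
print(sorted(list_cmaps()))
```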
| scientific-colourmaps | /scientific-colourmaps-1.0b0.tar.gz/scientific-colourmaps-1.0b0/README.md | README.md | pip install "git+https://github.com/gwerbin/scientific-colourmaps@master#egg_info=scientific-colormaps&subdirectory=python"
wget http://www.fabiocrameri.ch/resources/ScientificColourMaps4.zip
import numpy as np
import matplotlib.pyplot as plt
from scientific_colourmaps import load_cmap
# Make some fake data to plot
plot_data = np.randn(50, 50)
# Plot using the "nuuk" colormap
cmap_nuuk = load_cmap('nuuk')
plt.imshow(plot_data, cmap=cmap_nuuk)
plt.colorbar()
plt.show() | 0.649356 | 0.722111 |
# Plotter
A simple-to-use Python package for plotting x,y data. You can set axis ranges, axis titles, figure titles, plotting styles, and colours, and switch axes between linear and log scales.
To use the package, run the plot.py script from your terminal. You will be prompted to select an Excel file with the relevant data, and a GUI will open where you can choose from a range of plotting options. The package is also available on PyPI: https://pypi.org/project/scientific-data-plotter/
| scientific-data-plotter | /scientific-data-plotter-1.0.1.tar.gz/scientific-data-plotter-1.0.1/README.md | README.md | # Plotter
A simple-to-use Python package which allows you to plot x,y data using Python. You can select axes ranges, axes titles, figure titles, plotting styles, colours and also change axes type between linear and log plots.
To use the package, run the plot.py script using your terminal. You will be prompted to select an Excel file with the relevant data and a GUI will open up where you can select a range of plotting options. You can also download this package from PyPi using the following link: https://pypi.org/project/scientific-data-plotter/
| 0.768993 | 0.623606 |
<h2 align="center"><i>SEM</i>: Scientific Experiment Manager</h2>
<p align="center"><i>Streamline IO operations, storage, and retrieval of your scientific results</i></p>
<p align="center">
<a href="https://github.com/nickruggeri/scientific-experiment-manager/blob/main/LICENSE">
<img alt="License: MIT" src="https://img.shields.io/github/license/nickruggeri/scientific-experiment-manager">
</a>
<a href="https://www.python.org/">
<img alt="Made with Python" src="https://img.shields.io/badge/made%20with-python-1f425f.svg">
</a>
<a href="https://github.com/psf/black">
<img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg">
</a>
</p>
SEM helps streamline IO operations and organization of scientific results
in Python. \
At its core, SEM is based on
[regular expressions](https://docs.python.org/3/library/re.html)
and simply creates, parses and manages intricate folder structures containing
experimental results.
<br/><br/>
## Minimal example
Consider the results organized in the `example/example_results` folder. \
These are different directories containing the results of the same experiment, where two
parameters are varied: the random `seed` and a threshold value `eps`. Every one of the
folders contains some output files from the experiment:
```
example_results
│
└───seed=111
│ └───eps_1.3
│ │ └───...
│ └───eps_7.4
│ └───...
│
└───seed=222
│ └───...
│
└───seed=333
│ └───...
│
└───useless_files
```
SEM does not take care of loading and/or saving files. \
Rather, it takes care of the folder structure, leaving the user the freedom to manage the results'
format. \
To retrieve the parameters relative to these results, `ResultManager` parses the folders'
names and only returns the paths of the directories that match.
```python
import re
from pathlib import Path
from sem.manager import ResultManager
example_res = Path("./example_results")
parsers = [re.compile(r"seed=(?P<seed_value>\d+)"), re.compile(r"eps_(?P<eps>\d+.\d+)")]
manager = ResultManager(root_dir=example_res, parsers=parsers)
manager.parse_paths()
```
In the case above, the parser for `seed_value` expects a positive integer, specified by
the regular expression `"\d+"`, while the parser for `eps` expects a float. \
The results are stored in
`manager.df`, a pandas
[DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html), which
contains the parsed parameter values, as well as the paths to the deepest sub-directories
```
__PATH__ seed_value eps
0 example_results/seed=333/eps_1.1 333 1.1
1 example_results/seed=333/eps_0.3 333 0.3
2 example_results/seed=222/eps_7.4 222 7.4
3 example_results/seed=222/eps_2.7 222 2.7
4 example_results/seed=111/eps_1.3 111 1.3
5 example_results/seed=111/eps_7.4 111 7.4
...
```
Directories whose names don't match the patterns are ignored, e.g.
`example_results/useless_files`. \
Notice that, since they are the results of parsing, all the values in the data frame are
strings. \
The conversion to a different data type can be performed after parsing:
```python
manager.df["seed_value"] = manager.df["seed_value"].astype(int)
manager.df["eps"] = manager.df["eps"].astype(float)
```
### Utilizing the parsed directories
Once the directory names have been parsed, the main utility of the manager is to have a
coupling between the parameters and the results. \
For example, one can read and insert the computational time of every experiment in the
data frame:
```python
def read_comp_time(res_dir):
with open(res_dir / "computational_time.txt", "r") as file:
time = float(file.read())
return time
manager.df["time"] = manager.df["__PATH__"].map(read_comp_time)
```
From there, conventional pandas operations can be used. For example, the average
computational time for seed `111` is given by
```python
df = manager.df
times = df["time"].loc[df["seed_value"] == 111]
times.mean()
```
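More generally, any pandas aggregation applies to the parsed frame; for instance, the mean computational time per seed across all `eps` values:
```python
mean_time_per_seed = manager.df.groupby("seed_value")["time"].mean()
print(mean_time_per_seed)
```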
### Loading more complex objects
Pandas data frames can contain arbitrary objects.
For example, one can create a column of numpy arrays from a model:
```python
import numpy as np
def load_mat(path):
return np.load(path / "result_params.npy")
df["mat"] = df["__PATH__"].map(load_mat)
```
<br/><br/>
## Creating default paths
Standardizing result structure reduces the amount of code needed for
simple IO operations, and eases compatibility across machines, e.g. local vs cloud or
cluster results. \
To this end, <i>SEM</i> offers a way to create saving paths
which only depend on the parameters specified by the user. \
For example, the paths of a results directory with three levels and different parameters
can be created as:
```python
root_dir = Path(".") / "tmp"
for param1 in [True, False]:
for param2 in ["a", "b"]:
for param3 in [1, 2, 3]:
values = [
{"param1": param1, "param2": param2},
"results_of_my_experiments",
{"param3": param3},
]
new_path = ResultManager.create_default_path(
root_dir, values, auto_sort=True
)
new_path.mkdir(parents=True)
print(new_path)
```
which produces
```
tmp/param1=True_param2=a/results_of_my_experiments/param3=1
tmp/param1=True_param2=a/results_of_my_experiments/param3=2
tmp/param1=True_param2=a/results_of_my_experiments/param3=3
tmp/param1=True_param2=b/results_of_my_experiments/param3=1
...
tmp/param1=False_param2=a/results_of_my_experiments/param3=1
...
```
If desired, the argument `auto_sort` imposes a uniform order at every directory level.\
For example, using
`{"param2": param2, "param1": param1}` would produce the same paths as above if
`auto_sort=True`. \
Parsing directories with this structure is similarly easy:
```python
manager = ResultManager.from_arguments(
root_dir,
arguments=[
{"param1": "True|False", "param2": "a|b"},
"results_of_my_experiments",
{"param3": r"\d+"},
],
auto_sort=True
)
manager.parse_paths()
```
which yields
```
__PATH__ param1 param2 param3
0 tmp/param1=False_param2=b/results_of_my_experi... False b 1
1 tmp/param1=False_param2=b/results_of_my_experi... False b 3
2 tmp/param1=False_param2=b/results_of_my_experi... False b 2
3 tmp/param1=True_param2=b/results_of_my_experim... True b 1
...
```
<br/><br/>
## Initialization
Notice that the advantage of using the default directory naming, as opposed to a custom
one, is that the `ResultManager` can be initialized as above, by only specifying the
arguments in `ResultManager.from_arguments`. \
A more flexible initialization for custom paths can be performed by giving as input
regular expression patterns. For example, an equivalent initialization to that above is
given by:
```python
parsers = [
re.compile("param1=(?P<param1>True|False)_param2=(?P<param2>a|b)"),
re.compile("results_of_my_experiments"),
re.compile("param3=(?P<param3>\d+)"),
]
manager = ResultManager(root_dir, parsers)
manager.parse_paths()
```
<br/><br/>
## Other utilities and tricks
### Filtering results
Another useful `ResultManager` method is `ResultManager.filter_results`. This method filters the
<i>rows</i> of the results' data frame. Results can be selected by specifying exact
column values or a list of possible values. For example, for a manager whose data frame
has columns
```
__PATH__ param1 param2 param3
0 tmp/param1=False_param2=b/results_of_my_experi... False b 1
1 tmp/param1=False_param2=b/results_of_my_experi... False b 3
2 tmp/param1=False_param2=b/results_of_my_experi... False b 2
3 tmp/param1=True_param2=b/results_of_my_experim... True b 1
...
```
the query
```python
manager.filter_results(
equal={"param1": True},
contained={"param3": [1, 3]}
)
```
yields a filtered data frame
```
__PATH__ param1 param2 param3
3 tmp/param1=True_param2=b/results_of_my_experim... True b 1
4 tmp/param1=True_param2=b/results_of_my_experim... True b 3
9 tmp/param1=True_param2=a/results_of_my_experim... True a 1
10 tmp/param1=True_param2=a/results_of_my_experim... True a 3
```
### Loading fewer results
While results can be filtered a posteriori as just explained, one can also load fewer
results in the first place. \
This is done by specifying a suitably restrictive regular expression
parser when creating the manager.
For example, to select only configurations where
`param1` is equal to `True`, one can write
```python
parsers = [
re.compile("param1=(?P<param1>True)_param2=(?P<param2>a|b)"),
re.compile("results_of_my_experiments"),
re.compile("param3=(?P<param3>\d+)"),
]
manager = ResultManager(root_dir, parsers)
```
In general, any regular expression with named groups is considered valid; check
[the docs](https://docs.python.org/3/library/re.html)
for further details.
### Common parsing patterns
Some common regular expression patterns are available at `sem.re_patterns`. \
These are strings that can be utilized for initializing parsers
```python
from sem.re_patterns import INT_PATTERN
parsers = [
re.compile("param1=(?P<param1>True|False)_param2=(?P<param2>a|b)"),
re.compile("results_of_my_experiments"),
re.compile(f"param3=(?P<param3>{INT_PATTERN})"),
]
manager = ResultManager(root_dir, parsers)
```
or `ResultManager` arguments
```python
manager = ResultManager.from_arguments(
root_dir,
arguments=[
{"param1": "True|False", "param2": "a|b"},
"results_of_my_experiments",
{"param3": INT_PATTERN},
],
)
```
### Common type conversion from string
Some common string-to-type conversion functions are available at `sem.str_to_type`. \
These are useful in combination with the `argparse` package, for command line inputs
```python
from argparse import ArgumentParser
from sem.str_to_type import bool_type, unit_float_or_positive_integer, none_or_type
parser = ArgumentParser()
parser.add_argument("--flag", type=bool_type)
parser.add_argument("--train_size", type=unit_float_or_positive_integer)
parser.add_argument("--K", type=none_or_type(int))
```
Importantly, `bool_type` correctly converts both string inputs `"0"` or `"1"`, as well
as the case-insensitive strings `"true"`, `"True"`, `"False"`, etc.
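For instance, assuming plain booleans are returned, the conversions described above behave as follows:
```python
from sem.str_to_type import bool_type
bool_type("1")      # True
bool_type("FALSE")  # False
```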
Alternatively, these functions can also be used for type conversion inside pandas data
frames
```python
manager = ResultManager(root_dir, parsers)
manager.parse_paths()
manager.df["flag"] = manager.df["flag"].map(bool_type)
```
<br/><br/>
## Installation
You can install this package by downloading the GitHub repository and, from inside the
downloaded folder, running
```
pip install .
```
| scientific-experiment-manager | /scientific-experiment-manager-0.1.0.tar.gz/scientific-experiment-manager-0.1.0/README.md | README.md | example_results
│
└───seed=111
│ └───eps_1.3
│ │ └───...
│ └───eps_7.4
│ └───...
│
└───seed=222
│ └───...
│
└───seed=333
│ └───...
│
└───useless_files
import re
from pathlib import Path
from sem.manager import ResultManager
example_res = Path("./example_results")
parsers = [re.compile(r"seed=(?P<seed_value>\d+)"), re.compile(r"eps_(?P<eps>\d+.\d+)")]
manager = ResultManager(root_dir=example_res, parsers=parsers)
manager.parse_paths()
__PATH__ seed_value eps
0 example_results/seed=333/eps_1.1 333 1.1
1 example_results/seed=333/eps_0.3 333 0.3
2 example_results/seed=222/eps_7.4 222 7.4
3 example_results/seed=222/eps_2.7 222 2.7
4 example_results/seed=111/eps_1.3 111 1.3
5 example_results/seed=111/eps_7.4 111 7.4
...
manager.df["seed_value"] = manager.df["seed_value"].astype(int)
manager.df["eps"] = manager.df["eps"].astype(float)
def read_comp_time(res_dir):
with open(res_dir / "computational_time.txt", "r") as file:
time = float(file.read())
return time
manager.df["time"] = manager.df["__PATH__"].map(read_comp_time)
df = manager.df
times = df["time"].loc[df["seed_value"] == 111]
times.mean()
import numpy as np
def load_mat(path):
return np.load(path / "result_params.npy")
df["mat"] = df["__PATH__"].map(load_mat)
root_dir = Path(".") / "tmp"
for param1 in [True, False]:
for param2 in ["a", "b"]:
for param3 in [1, 2, 3]:
values = [
{"param1": param1, "param2": param2},
"results_of_my_experiments",
{"param3": param3},
]
new_path = ResultManager.create_default_path(
root_dir, values, auto_sort=True
)
new_path.mkdir(parents=True)
print(new_path)
tmp/param1=True_param2=a/results_of_my_experiments/param3=1
tmp/param1=True_param2=a/results_of_my_experiments/param3=2
tmp/param1=True_param2=a/results_of_my_experiments/param3=3
tmp/param1=True_param2=b/results_of_my_experiments/param3=1
...
tmp/param1=False_param2=a/results_of_my_experiments/param3=1
...
manager = ResultManager.from_arguments(
root_dir,
arguments=[
{"param1": "True|False", "param2": "a|b"},
"results_of_my_experiments",
{"param3": r"\d+"},
],
auto_sort=True
)
manager.parse_paths()
__PATH__ param1 param2 param3
0 tmp/param1=False_param2=b/results_of_my_experi... False b 1
1 tmp/param1=False_param2=b/results_of_my_experi... False b 3
2 tmp/param1=False_param2=b/results_of_my_experi... False b 2
3 tmp/param1=True_param2=b/results_of_my_experim... True b 1
...
parsers = [
re.compile("param1=(?P<param1>True|False)_param2=(?P<param2>a|b)"),
re.compile("results_of_my_experiments"),
re.compile("param3=(?P<param3>\d+)"),
]
manager = ResultManager(root_dir, parsers)
manager.parse_paths()
__PATH__ param1 param2 param3
0 tmp/param1=False_param2=b/results_of_my_experi... False b 1
1 tmp/param1=False_param2=b/results_of_my_experi... False b 3
2 tmp/param1=False_param2=b/results_of_my_experi... False b 2
3 tmp/param1=True_param2=b/results_of_my_experim... True b 1
...
manager.filter_results(
equal={"param1": True},
contained={"param3": [1, 3]}
)
__PATH__ param1 param2 param3
3 tmp/param1=True_param2=b/results_of_my_experim... True b 1
4 tmp/param1=True_param2=b/results_of_my_experim... True b 3
9 tmp/param1=True_param2=a/results_of_my_experim... True a 1
10 tmp/param1=True_param2=a/results_of_my_experim... True a 3
parsers = [
re.compile("param1=(?P<param1>True)_param2=(?P<param2>a|b)"),
re.compile("results_of_my_experiments"),
re.compile("param3=(?P<param3>\d+)"),
]
manager = ResultManager(root_dir, parsers)
from sem.re_patterns import INT_PATTERN
parsers = [
re.compile("param1=(?P<param1>True|False)_param2=(?P<param2>a|b)"),
re.compile("results_of_my_experiments"),
re.compile(f"param3=(?P<param3>{INT_PATTERN})"),
]
manager = ResultManager(root_dir, parsers)
manager = ResultManager.from_arguments(
root_dir,
arguments=[
{"param1": "True|False", "param2": "a|b"},
"results_of_my_experiments",
{"param3": INT_PATTERN},
],
)
from argparse import ArgumentParser
from sem.str_to_type import bool_type, unit_float_or_positive_integer, none_or_type
parser = ArgumentParser()
parser.add_argument("--flag", type=bool_type)
parser.add_argument("--train_size", type=unit_float_or_positive_integer)
parser.add_argument("--K", type=none_or_type(int))
manager = ResultManager(root_dir, parsers)
manager.parse_paths()
manager.df["flag"] = manager.df["flag"].map(bool_type)
pip install . | 0.398758 | 0.973011 |
from __future__ import annotations
import re
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from sem.structure_parsing import recursive_folder_parsing
class ResultManager:
"""A manager for experimental results.
It takes care of collecting results organized in different folders.
"""
def __init__(
self,
root_dir: Optional[Path],
parsers: List[re.Pattern],
dataframe: Optional[pd.DataFrame] = None,
):
"""Initialize the ResultManager.
:param root_dir: root directory.
:param parsers: list of regular expression patterns.
:param dataframe: optional dataframe of already parsed arguments.
"""
self.root_dir = root_dir
self.parsers = parsers
self.df = dataframe
@classmethod
def from_arguments(
cls,
root_dir: Path,
arguments: List[Dict[str, str] | str] | Dict[str, str] | str,
auto_sort: bool = False,
) -> ResultManager:
"""Create an instance of ResultManager from the given arguments.
        Each argument can be specified in one of two ways:
        - as a single string. In this case no parsing is performed and only folders
        with exactly that name are matched
        - as a {key: value} dictionary. In this case, every key is the name of a
        parameter, and the value is a string specifying the regular expression used
        for parsing.
        If a list of arguments is provided, it specifies a hierarchical folder
        structure, where each level is parsed according to the corresponding list entry.
If auto_sort is set to True, within every dictionary the arguments are sorted in
a canonical way.
:param root_dir: root directory
:param arguments: arguments, see `help(ResultManager)`
:param auto_sort: whether to sort the arguments in the canonical order
        :return: a ResultManager instance
"""
if not isinstance(arguments, list):
arguments = [arguments]
parsers = [
re.compile(_pattern_from_arguments(arg, auto_sort)) for arg in arguments
]
return ResultManager(root_dir, parsers)
@classmethod
def create_default_path(
cls,
root_dir: str | Path,
values: List[Dict[str, Any] | str] | Dict[str, Any] | str,
auto_sort: bool = False,
) -> Path:
"""Create the default path given
:param root_dir: root directory
:param values: the values specifying the directory structure.
If it is a string, it specifies a simple directory name.
If it is a dictionary, it specifies the {parameter name: value} that the
directory name describes.
            If it is a list, it contains the strings or dictionaries of values at every
            sub-level of root_dir.
:param auto_sort: whether to sort the values specified as dictionaries according
to the canonical order.
:return: the path to a sub-folder with specified names.
"""
root_dir = Path(root_dir)
if not isinstance(values, list):
values = [values]
path = root_dir
for arg in values:
dir_name = (
arg if isinstance(arg, str) else _dirname_from_values(arg, auto_sort)
)
path = path / dir_name
return path
@property
def patterns(self) -> List[str]:
return [parser.pattern for parser in self.parsers]
def parse_paths(self, **kwargs) -> None:
"""Recursively parse the root directory according to the specified parsers."""
records = [
{**{"__PATH__": res_path}, **match_group_args}
for res_path, match_group_args in recursive_folder_parsing(
self.root_dir, self.parsers
)
]
self.df = pd.DataFrame.from_records(records, **kwargs)
def filter_results(
self,
equal: Optional[Dict[str, Any]] = None,
contained: Optional[Dict[str, Any]] = None,
) -> pd.DataFrame:
"""Filter the results in the result dataframe by row.
        The rows whose column values are equal to and/or contained in those specified are
        returned in the form of a new data frame. Notice that this method is different
from pandas DataFrame's filter, which filters along both axes and only by
column or row name.
:param equal: dictionary of {column name: value} pairs.
Rows with column value equal to that specified are returned.
:param contained: dictionary of {column name: iterable of values} pairs:
Rows with column values contained in those specified are returned.
:return: a new data frame according to the specified filters.
"""
filtered = self.df
if equal is not None:
for key, val in equal.items():
mask = filtered[key] == val
filtered = filtered[mask]
if contained is not None:
for key, val in contained.items():
mask = filtered[key].isin(val)
filtered = filtered[mask]
return filtered
# Utility functions specifying how the ResultManager builds the patterns.
# We use the following name convention for the dictionaries of arguments and values:
# - arguments are used to build regular expression patterns. They consist of
#   {parameter name: string} pairs, where the strings are compiled into regular
#   expression patterns
# - values are concrete {parameter name: value} pairs.
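# For example (illustrative values): the argument {"seed": r"\d+"} is turned into the
# pattern "seed=(?P<seed>\d+)" by _argument_pattern, while the value {"seed": 42}
# is turned into the directory name "seed=42" by _value_pattern.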
def _pattern_from_arguments(arguments: Dict[str, str] | str, auto_sort=False) -> str:
if isinstance(arguments, str):
return arguments
keys = _sorted_parameters(arguments) if auto_sort else arguments
pattern = "_".join(_argument_pattern(key, arguments[key]) for key in keys)
return pattern
def _dirname_from_values(values: Dict[str, str] | str, auto_sort=False) -> str:
if isinstance(values, str):
return values
keys = _sorted_parameters(values) if auto_sort else values
dirname = "_".join(_value_pattern(key, values[key]) for key in keys)
return dirname
def _sorted_parameters(*params):
return sorted(*params)
def _argument_pattern(argument: str, expr: str) -> str:
pattern = f"{argument}=(?P<{argument}>{expr})"
return pattern
def _value_pattern(argument: str, expr: Any) -> str:
pattern = f"{argument}={expr}"
    return pattern | scientific-experiment-manager | /scientific-experiment-manager-0.1.0.tar.gz/scientific-experiment-manager-0.1.0/sem/manager.py | manager.py | 0.942122 | 0.608216 |
# Scientific Information Change
Lightweight package for measuring the information matching score (IMS) between two scientific sentences. This is a measure of the similarity of the information contained in the _findings_ of two different scientific sentences. It is useful for research in science communication, e.g. for matching similar findings as they are described across scientific papers, news media, and social media at scale, and for analyzing those findings.
The code and models in this repo come from the following paper:
>Dustin Wright*, Jiaxin Pei*, David Jurgens, and Isabelle Augenstein. 2022. Modeling Information Change in Science Communication with Semantically Matched Paraphrases. In Proceedings of EMNLP 2022. Association for Computational Linguistics.
Please use the following bibtex when referencing this work:
```
@inproceedings{modeling-information-change,
    title={{Modeling Information Change in Science Communication with Semantically Matched Paraphrases}},
    author={Wright, Dustin and Pei, Jiaxin and Jurgens, David and Augenstein, Isabelle},
    booktitle = {Proceedings of EMNLP},
    publisher = {Association for Computational Linguistics},
    year = {2022}
}
```
## Installation
Install directly using `pip`:
```
pip install scientific-information-change
```
### Dependencies
```
python>=3.6.0
torch>=1.10.0
sentence-transformers>=2.2.2
numpy
```
If you wish to use CUDA to accelerate inference, install torch with cuda enabled (see https://pytorch.org/)
## Usage
Import the IMS estimator as follows:
```
from scientific_information_change.estimate_similarity import SimilarityEstimator
```
Create the estimator as follows:
```
estimator = SimilarityEstimator()
```
**Note: During your first usage, the package will download a model file automatically, which is about 500MB.**
The similarity estimator takes the following arguments:
```
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name. Defaults to the best model from Wright et al. 2022.
:param device: Device (like ‘cuda’ / ‘cpu’) that should be used for computation. If None, checks if a GPU can be used.
:param use_auth_token: HuggingFace authentication token to download private models.
:param cache_folder: Path to store models
```
If you create the estimator with no arguments, it will default to the best trained model from our EMNLP 2022 paper (`copenlu/spiced` on Huggingface). This is an SBERT model pretrained on a large corpus of >1B sentence pairs and further fine-tuned on SPICED. The model will be run on the best available device (GPU if available).
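If you want to control these options explicitly, pass them to the constructor. The values below are purely illustrative (any SentenceTransformer-compatible model path, device string, or cache directory can be used):
```
estimator = SimilarityEstimator(
    model_name_or_path='copenlu/spiced',
    device='cuda',
    cache_folder='~/.cache/spiced-models'
)
```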
The estimator has two methods for measuring the IMS between sentences. `estimate_ims` takes a list of sentences $a$ of length $N$ and a list of sentences $b$ of length $M$ and returns a numpy array $S$ of size $N \times M$, where $S_{ij}$ is the IMS between $a_{i}$ and $b_{j}$. For example:
```
estimator.estimate_ims(
a = [
'Higher-income professionals had less tolerance for smartphone use in business meetings.',
'Papers with shorter titles get more citations #science #metascience #sciencemetrics'
],
b = [
'We are intrigued by the result that professionals with higher incomes are less accepting of mobile phone use in meetings.',
'Allowing users to retract recently posted comments may help minimize regret.',
'Our analysis suggests that papers with shorter titles do receive greater numbers of citations.'
]
)
>>> returns: array([[4.370547 , 1.042849 , 1. ],
[1.1089203, 1. , 4.596382 ]], dtype=float32)
```
`estimate_ims_array` takes two lists of sentences of the same length $N$, and returns a list of length $N$ measuring the IMS between corresponding sentences in each list:
```
estimator.estimate_ims_array(
a = [
'Higher-income professionals had less tolerance for smartphone use in business meetings.',
'Papers with shorter titles get more citations #science #metascience #sciencemetrics'
],
b = [
'We are intrigued by the result that professionals with higher incomes are less accepting of mobile phone use in meetings.',
'Our analysis suggests that papers with shorter titles do receive greater numbers of citations.'
]
)
>>> returns: [4.370546817779541, 4.596382141113281]
```
| scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/README.md | README.md | 0.740362 | 0.988119 |
import torch
import random
import numpy as np
import argparse
from sentence_transformers import SentenceTransformer, util
from transformers import DataCollatorWithPadding
from collections import defaultdict
import json
import wandb
import ipdb
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
sns.set()
from utils.data_processor import clean_tweet
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the data to predict on",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--problem_type",
help="The problem type",
type=str, choices=['tweets', 'news'], default='tweets')
parser.add_argument("--output_file", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=2e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
problem_type = args.problem_type
num_labels = 1 if problem_type == 'regression' else 2
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
with open(args.data_loc) as f:
data = [json.loads(l) for l in f]
model = SentenceTransformer(model_name)
predictions = []
if problem_type == 'tweets':
for row in tqdm(data):
tweet_embeddings = model.encode([clean_tweet(t) for t in row['tweets']])
paper_embeddings = model.encode(list(row['paper_sentences']))
# Convert to range [1,5]
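            # (a cosine of 0.0 maps to a score of 1.0, 0.5 to 3.0, and 1.0 to 5.0;
            # negative cosines are clipped to 0 before rescaling)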
scores = (util.cos_sim(tweet_embeddings, paper_embeddings).clip(min=0, max=1) * 4) + 1
for s,tweet in zip(scores, row['full_tweets']):
tweet['paper_sentence_scores'] = s.tolist()
predictions.extend(s.tolist())
elif problem_type == 'news':
for row in tqdm(data):
news_text = [sent['text'] for url in row['news'] for sent in row['news'][url]]
news_embeddings = model.encode(news_text)
paper_text = [p['text'] for p in row['paper']]
paper_embeddings = model.encode(paper_text)
# Convert to range [1,5]
scores = (util.cos_sim(news_embeddings, paper_embeddings).clip(min=0, max=1) * 4) + 1
j = 0
for url in row['news']:
for sent in row['news'][url]:
sent['paper_sentence_scores'] = scores[j].tolist()
predictions.extend(scores[j].tolist())
j += 1
# Get the original data and attach the scores
with open(args.output_file, 'wt') as f:
for d in data:
f.write(json.dumps(d) + '\n')
fig = plt.figure(figsize=(6,5))
sns.kdeplot(predictions)
plt.tight_layout()
    plt.savefig('data/dev-dist.png') | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/predict_similarity_scoring_unlabelled_sbert.py | predict_similarity_scoring_unlabelled_sbert.py | 0.682785 | 0.261312 |
import torch
import random
import numpy as np
import argparse
import pandas as pd
from functools import partial
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification, AutoModel
from transformers import AutoConfig
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
from collections import defaultdict
import json
import wandb
import ipdb
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
from utils.data_processor import read_unlabelled_tweet_dataset
from utils.trainer import CustomTrainer
from utils.metrics import compute_f1, acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the data to predict on",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--problem_type",
help="The problem type",
type=str, choices=['regression', 'single_label_classification'], default='regression')
parser.add_argument("--output_file", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=2e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
problem_type = args.problem_type
num_labels = 1 if problem_type == 'regression' else 2
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Create the tokenizer and model
tk = AutoTokenizer.from_pretrained(model_name)
dataset = read_unlabelled_tweet_dataset(args.data_loc, tk)
collator = DataCollatorWithPadding(tk)
# Get the F1 metric
compute_metric = compute_regression_metrics if problem_type == 'regression' else partial(acc_f1, 'binary')
# Create the training arguments
training_args = TrainingArguments(
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
max_grad_norm=None,
warmup_steps=args.warmup_steps,
num_train_epochs=args.n_epochs,
seed=seed,
output_dir='./output'
)
# Get the dataset
config = AutoConfig.from_pretrained(model_name, num_labels=num_labels, problem_type=problem_type)
model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
trainer = Trainer(
model=model,
args=training_args,
data_collator=collator
)
pred_output = trainer.predict(dataset)
predictions = pred_output.predictions
# Group the scores by tweet ID and sentence
tweet_id_to_scores = defaultdict(list)
for id_,pred in zip(dataset['tweet_id'], predictions):
tweet_id_to_scores[id_].append(float(pred))
# Open the original data
with open(args.data_loc) as f:
data = [json.loads(l) for l in f]
for row in data:
for tweet in row['full_tweets']:
tweet['sentence_scores'] = tweet_id_to_scores[tweet['tweet_id']]
# Get the original data and attach the scores
with open(args.output_file, 'wt') as f:
for d in data:
f.write(json.dumps(d) + '\n')
sns.kdeplot(predictions.squeeze())
    plt.savefig('data/dev-dist.png') | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/predict_similarity_scoring_unlabelled.py | predict_similarity_scoring_unlabelled.py | 0.785144 | 0.242295 |
import torch
import random
import numpy as np
import argparse
import json
import os
from functools import partial
from datasets import load_metric
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification, AutoModel
from transformers import AutoConfig
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
import wandb
import pandas as pd
import ipdb
from utils.data_processor import read_datasets
from utils.trainer import CustomTrainer
from utils.metrics import compute_f1, acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--problem_type",
help="The problem type",
type=str, choices=['regression', 'single_label_classification'], default='regression')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=2e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
parser.add_argument("--train_split", help="Decide which data to train on", type=str, default='all', choices=['all', 'tweets', 'news'])
parser.add_argument("--test_split", help="Decide which test split to use", type=str, default='all', choices=['all', 'tweets', 'news'])
parser.add_argument("--use_context", help="Flag to switch to using the context", action='store_true')
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
problem_type = args.problem_type
test_filter = args.test_filter
use_context = args.use_context
num_labels = 1 if problem_type == 'regression' else 2
train_split = args.train_split
test_split = args.test_split
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
    config = {
        'run_name': args.run_name,
        'seed': seed,
        'model_name': model_name,
        'output_dir': args.output_dir,
        'tags': args.tags,
        'batch_size': args.batch_size,
        'learning_rate': args.learning_rate,
        'weight_decay': args.weight_decay,
        'warmup_steps': args.warmup_steps,
        'epochs': args.n_epochs,
        'problem_type': args.problem_type,
        'test_filter': args.test_filter,
        'use_context': use_context,
        'train_split': train_split,
        'test_split': test_split
    }
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# Create the tokenizer and model
if 'scibert' in model_name or 'citebert' in model_name:
tk = AutoTokenizer.from_pretrained(model_name, model_max_length=512)
else:
tk = AutoTokenizer.from_pretrained(model_name)
dataset = read_datasets(args.data_loc, tk, problem_type, test_filter, train_split, test_split, use_context)
collator = DataCollatorWithPadding(tk)
# Get the F1 metric
compute_metric = compute_regression_metrics if problem_type == 'regression' else partial(acc_f1, 'binary')
# Create the training arguments
training_args = TrainingArguments(
output_dir=args.output_dir,
do_train=True,
do_eval=True,
evaluation_strategy='epoch',
per_device_train_batch_size=args.batch_size,
learning_rate=args.learning_rate,
weight_decay=args.weight_decay,
max_grad_norm=None,
warmup_steps=args.warmup_steps,
num_train_epochs=args.n_epochs,
logging_dir=args.output_dir,
save_strategy='epoch',
seed=seed,
run_name=args.run_name,
load_best_model_at_end=True,
report_to=['tensorboard', 'wandb']
)
# Get the dataset
config = AutoConfig.from_pretrained(model_name, num_labels=num_labels, problem_type=problem_type)
model = AutoModelForSequenceClassification.from_pretrained(model_name, config=config)
if problem_type == 'regression':
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset['train'],
eval_dataset=dataset['validation'],
compute_metrics=compute_metric,
data_collator=collator
)
else:
# Get label weights
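        # (balanced inverse-frequency weighting: n_samples / (n_classes * count(class)) for each of the 2 classes)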
labels = dataset["train"]['label']
weight = torch.tensor(len(labels) / (2 * np.bincount(labels)))
weight = weight.type(torch.FloatTensor)
# Create the trainer and train
trainer = CustomTrainer(
model=model,
args=training_args,
train_dataset=dataset['train'],
eval_dataset=dataset['validation'],
compute_metrics=compute_metric,
data_collator=collator,
weight=weight
)
train_output = trainer.train()
model.save_pretrained(args.output_dir)
tk.save_pretrained(args.output_dir)
pred_output = trainer.predict(dataset['test'])
pred_metrics = pred_output.metrics
wandb.log(pred_metrics)
if test_split == 'all':
# Get tweet performance
tweet_data = dataset['test'].filter(lambda example: example['source'] == 'tweets')
tweet_metrics = trainer.predict(tweet_data, metric_key_prefix='tweet')
pred_metrics.update(tweet_metrics.metrics)
# Rank the errors for analysis
preds = tweet_metrics.predictions.squeeze()
labels = tweet_data['final_score_hard']
AE = np.abs(preds - np.array(labels))
ranking = np.argsort(AE)[::-1]
analysis_dframe = [[tweet_data['News Finding'][r], tweet_data['Paper Finding'][r], preds[r], labels[r]] for r in ranking]
analysis_dframe = pd.DataFrame(analysis_dframe, columns=['Tweet', 'Paper', 'Pred', 'Label'])
analysis_dframe.to_csv(f"{args.metrics_dir}/tweet_errors.csv", index=False)
# Get news performance
news_data = dataset['test'].filter(lambda example: example['source'] == 'news')
news_metrics = trainer.predict(news_data, metric_key_prefix='news')
pred_metrics.update(news_metrics.metrics)
# Iterate through the categories
for cat in categories:
curr_dataset = dataset['test'].filter(lambda example: cat in example['instance_id'])
# Predict
pred_output = trainer.predict(curr_dataset, metric_key_prefix=cat)
pred_metrics.update(pred_output.metrics)
tweet_curr = curr_dataset.filter(lambda example: example['source'] == 'tweets')
pred_output = trainer.predict(tweet_curr, metric_key_prefix=cat + '_tweet')
pred_metrics.update(pred_output.metrics)
news_curr = curr_dataset.filter(lambda example: example['source'] == 'news')
pred_output = trainer.predict(news_curr, metric_key_prefix=cat + '_news')
pred_metrics.update(pred_output.metrics)
wandb.log(pred_metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
        f.write(json.dumps(pred_metrics)) | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/train_supervised.py | train_supervised.py | 0.662687 | 0.22378 |
import argparse
import random
from functools import partial
import wandb
import json
import os
import numpy as np
import torch
import torch.nn.functional as F
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import DataCollatorWithPadding
from transformers import Trainer
from transformers import TrainingArguments
from utils.data_processor import read_datasets
from utils.metrics import acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='coderpotter/adversarial-paraphrasing-detector')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
test_filter = args.test_filter
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'test_filter': test_filter
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Create the tokenizer and model
tk = AutoTokenizer.from_pretrained(model_name)
# config = AutoConfig.from_pretrained(model_name, num_labels=3)
# Initialization warning is apparently normal: https://github.com/huggingface/transformers/issues/5421
model = AutoModelForSequenceClassification.from_pretrained(model_name)
dataset = read_datasets(args.data_loc, tk, test_filter=test_filter)
labels = np.array(dataset['test']['label'])
dataset = dataset.remove_columns("label")
collator = DataCollatorWithPadding(tk)
# Create the training arguments
training_args = TrainingArguments(
output_dir=args.output_dir,
do_train=True,
do_eval=True,
evaluation_strategy='epoch',
max_grad_norm=None,
logging_dir=args.output_dir,
save_strategy='epoch',
seed=seed,
run_name=args.run_name,
load_best_model_at_end=True,
report_to=['tensorboard', 'wandb']
)
# Get the dataset
# Create the trainer and train
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset['train'],
eval_dataset=dataset['validation'],
data_collator=collator
)
pred_output = trainer.predict(dataset['test'])
logits_orig = pred_output.predictions
# Take a softmax over logits
probs = F.softmax(torch.tensor(logits_orig), -1)
similarity = probs[:, 1]
# Convert to range [1,5]
preds = (similarity * 4) + 1
metrics = compute_regression_metrics((preds, labels), prefix='unsupervised_')
tweet_selector = [source == 'tweets' for source in dataset['test']['source']]
tweet_idx = np.where(tweet_selector)
tweet_metrics = compute_regression_metrics((preds[tweet_idx], labels[tweet_idx]),
prefix='tweet_')
metrics.update(tweet_metrics)
news_selector = [source == 'news' for source in dataset['test']['source']]
news_idx = np.where(news_selector)
news_metrics = compute_regression_metrics((preds[news_idx], labels[news_idx]),
prefix='news_')
metrics.update(news_metrics)
for cat in categories:
selector = [cat in instance_id for instance_id in dataset['test']['instance_id']]
# Predict
idx = np.where(selector)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_')
metrics.update(metrics_cat)
tweets = list(np.array(selector) & np.array(tweet_selector))
idx = np.where(tweets)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news = list(np.array(selector) & np.array(news_selector))
idx = np.where(news)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
        f.write(json.dumps(metrics)) | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_unsupervised_paraphrase_detection.py | eval_unsupervised_paraphrase_detection.py | 0.625209 | 0.257479 |
import torch
import random
import numpy as np
import argparse
import wandb
import torch.nn.functional as F
import json
import os
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
from datasets import load_dataset
from functools import partial
import ipdb
from utils.data_processor import read_datasets
from utils.metrics import acc_f1, compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
test_filter = args.test_filter
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'test_filter': test_filter
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
# Create the tokenizer and model
tk = AutoTokenizer.from_pretrained(model_name)
# config = AutoConfig.from_pretrained(model_name, num_labels=3)
# Initialization warning is apparently normal: https://github.com/huggingface/transformers/issues/5421
model = AutoModelForSequenceClassification.from_pretrained(model_name)
dataset = read_datasets(args.data_loc, tk, test_filter=test_filter)
labels = np.array(dataset['test']['label'])
dataset = dataset.remove_columns("label")
collator = DataCollatorWithPadding(tk)
# Create the training arguments
training_args = TrainingArguments(
output_dir=args.output_dir,
evaluation_strategy='epoch',
max_grad_norm=None,
logging_dir=args.output_dir,
save_strategy='epoch',
seed=seed,
run_name=args.run_name,
load_best_model_at_end=True,
report_to=['tensorboard', 'wandb']
)
    # Create the trainer; it is only used here to run batched prediction on the test set (no training is performed)
trainer = Trainer(
model=model,
args=training_args,
data_collator=collator
)
pred_output = trainer.predict(dataset['test'])
logits_orig = pred_output.predictions
# Take a softmax over logits
probs = F.softmax(torch.tensor(logits_orig), -1)
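    # Column 0 of the NLI model's output is treated here as the entailment probability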
similarity = probs[:,0]
# Convert to range [1,5]
preds = (similarity * 4) + 1
metrics = compute_regression_metrics((preds, labels), prefix='unsupervised_')
tweet_selector = [source == 'tweets' for source in dataset['test']['source']]
tweet_idx = np.where(tweet_selector)
tweet_metrics = compute_regression_metrics((preds[tweet_idx], labels[tweet_idx]),
prefix='tweet_')
metrics.update(tweet_metrics)
news_selector = [source == 'news' for source in dataset['test']['source']]
news_idx = np.where(news_selector)
news_metrics = compute_regression_metrics((preds[news_idx], labels[news_idx]),
prefix='news_')
metrics.update(news_metrics)
for cat in categories:
selector = [cat in instance_id for instance_id in dataset['test']['instance_id']]
# Predict
idx = np.where(selector)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_')
metrics.update(metrics_cat)
tweets = list(np.array(selector) & np.array(tweet_selector))
idx = np.where(tweets)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news = list(np.array(selector) & np.array(news_selector))
idx = np.where(news)
metrics_cat = compute_regression_metrics((preds[idx], labels[idx]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
        f.write(json.dumps(metrics)) | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_unsupervised_nli.py | eval_unsupervised_nli.py | 0.646014 | 0.269736 |
import torch
from torch import nn
import random
import numpy as np
import argparse
import json
import os
import torch.nn.functional as F
from sentence_transformers import SentenceTransformer, losses, evaluation, models
from torch.utils.data import DataLoader
import wandb
import pandas as pd
import ipdb
from utils.data_processor import read_datasets_sentence_transformers, read_dataset_raw, filter_data_sentence_transformers
from utils.data_processor import LABEL_COLUMN
from utils.metrics import compute_regression_metrics
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the training data",
type=str, required=True)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='allenai/scibert_scivocab_uncased')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--batch_size", help="The batch size", type=int, default=8)
parser.add_argument("--learning_rate", help="The learning rate", type=float, default=1e-5)
parser.add_argument("--weight_decay", help="Amount of weight decay", type=float, default=0.0)
parser.add_argument("--dropout_prob", help="The dropout probability", type=float, default=0.1)
parser.add_argument("--n_epochs", help="The number of epochs to run", type=int, default=2)
parser.add_argument("--n_gpu", help="The number of gpus to use", type=int, default=1)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--warmup_steps", help="The number of warmup steps", type=int, default=200)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
parser.add_argument("--train_split", help="Decide which data to train on", type=str, default='all',
choices=['all', 'tweets', 'news'])
parser.add_argument("--test_split", help="Decide which test split to use", type=str, default='all',
choices=['all', 'tweets', 'news'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
train_split = args.train_split
test_split = args.test_split
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
config = {
'run_name': args.run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'batch_size': args.batch_size,
'learning_rate': args.learning_rate,
'weight_decay': args.weight_decay,
'warmup_steps': args.warmup_steps,
'epochs': args.n_epochs,
'train_split': train_split,
'test_split': test_split
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# See if CUDA available
device = torch.device("cpu")
if torch.cuda.is_available():
print("Training on GPU")
device = torch.device("cuda:0")
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
# Create the model
model = SentenceTransformer(model_name)
dataset = read_datasets_sentence_transformers(args.data_loc, args.test_filter, train_split, test_split)
train_dataloader = DataLoader(dataset['train'], shuffle=True, batch_size=args.batch_size)
dev_data = read_dataset_raw(f"{args.data_loc}/dev.csv")
dev_sentences1 = list(dev_data['Paper Finding'])
dev_sentences2 = list(dev_data['News Finding'])
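    # Rescale gold scores from [1, 5] to [0, 1] so they are comparable to cosine similarity during evaluation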
dev_scores = [(s-1)/4 for s in dev_data[LABEL_COLUMN]]
evaluator = evaluation.EmbeddingSimilarityEvaluator(dev_sentences1, dev_sentences2, dev_scores)
train_loss = losses.CosineSimilarityLoss(model)
# Same loss used to train mpnet model
#train_loss = losses.MultipleNegativesRankingLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=args.n_epochs,
evaluator=evaluator,
evaluation_steps=len(train_dataloader),
output_path=args.output_dir,
save_best_model=True,
optimizer_params={'lr': args.learning_rate}
)
# Test
test_data = read_dataset_raw(f"{args.data_loc}/test.csv")
test_data = filter_data_sentence_transformers(test_data, args.test_filter, test_split)
if args.test_filter == 'easy':
test_data = test_data[test_data['instance_id'].str.contains('easy')]
elif args.test_filter == 'hard':
test_data = test_data[~test_data['instance_id'].str.contains('easy')]
paper_embeddings = model.encode(list(test_data['Paper Finding']))
news_embeddings = model.encode(list(test_data['News Finding']))
scores = F.cosine_similarity(torch.Tensor(paper_embeddings), torch.Tensor(news_embeddings), dim=1).clip(
min=0).squeeze().cpu().numpy()
    # Map cosine similarity from [0, 1] to the [1, 5] matching scale; negative similarities were clipped to 0 above
preds = (scores * 4) + 1
labels = test_data[LABEL_COLUMN]
metrics = compute_regression_metrics((preds, labels), prefix='test_')
if test_split == 'all':
tweet_selector = test_data['source'] == 'tweets'
tweet_metrics = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix='tweet_')
metrics.update(tweet_metrics)
tweet_data = test_data[tweet_selector]
tweets = list(tweet_data['News Finding'])
paper = list(tweet_data['Paper Finding'])
preds_tweet = preds[tweet_selector]
labels_tweet = list(labels[tweet_selector])
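        # Absolute error per tweet pair, used to rank the worst predictions for qualitative error analysis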
AE = np.abs(preds_tweet - np.array(labels_tweet))
assert len(tweets) == len(paper)
assert len(tweets) == len(preds_tweet)
assert len(tweets) == len(labels_tweet)
ranking = np.argsort(AE)[::-1]
analysis_dframe = [[tweets[r], paper[r], preds_tweet[r], labels_tweet[r]] for r in
ranking]
analysis_dframe = pd.DataFrame(analysis_dframe, columns=['Tweet', 'Paper', 'Pred', 'Label'])
analysis_dframe.to_csv(f"{args.metrics_dir}/tweet_errors.csv", index=False)
news_selector = test_data['source'] == 'news'
news_metrics = compute_regression_metrics((preds[news_selector], labels[news_selector]), prefix='news_')
metrics.update(news_metrics)
wandb.log(metrics)
for cat in categories:
selector = test_data['instance_id'].str.contains(cat)
# Predict
metrics_cat = compute_regression_metrics((preds[selector], labels[selector]), prefix=cat + '_')
metrics.update(metrics_cat)
tweet_selector = selector & (test_data['source'] == 'tweets')
metrics_cat = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news_selector = selector & (test_data['source'] == 'news')
metrics_cat = compute_regression_metrics((preds[news_selector], labels[news_selector]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
        f.write(json.dumps(metrics)) | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/train_supervised_sentence_transformers.py | train_supervised_sentence_transformers.py | 0.744749 | 0.218899 |
import torch
import random
import numpy as np
import argparse
from sentence_transformers import SentenceTransformer
import wandb
import json
import os
import ipdb
import torch.nn.functional as F
from utils.data_processor import read_dataset_raw
from utils.metrics import compute_regression_metrics
from utils.data_processor import LABEL_COLUMN
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the test data",
type=str, required=True)
parser.add_argument("--eval_data_loc",
help="The location of the validation data",
type=str, required=True)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='all-MiniLM-L6-v2')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--test_filter", help="Decide which test samples to test on", type=str, default='none', choices=['none', 'easy', 'hard'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
test_filter = args.test_filter
categories = ['Medicine', 'Biology', 'Psychology', 'Computer_Science']
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'test_filter': args.test_filter
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# Load the data
val_data = read_dataset_raw(args.eval_data_loc)
test_data = read_dataset_raw(args.data_loc)
if args.test_filter == 'easy':
test_data = test_data[test_data['instance_id'].str.contains('easy')]
elif args.test_filter == 'hard':
test_data = test_data[~test_data['instance_id'].str.contains('easy')]
# Load the model
model = SentenceTransformer(model_name)
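    # Encode paper and news findings separately; cosine similarity of the embeddings is the unsupervised matching score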
paper_embeddings = model.encode(list(test_data['Paper Finding']))
news_embeddings = model.encode(list(test_data['News Finding']))
scores = F.cosine_similarity(torch.Tensor(paper_embeddings), torch.Tensor(news_embeddings), dim=1).clip(min=0).squeeze().cpu().numpy()
    # Map cosine similarity from [0, 1] to the [1, 5] matching scale; negative similarities were clipped to 0 above
preds = (scores * 4) + 1
labels = test_data[LABEL_COLUMN]
metrics = compute_regression_metrics((preds,labels), prefix='unsupervised_')
tweet_selector = test_data['source'] == 'tweets'
tweet_metrics = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix='tweet_')
metrics.update(tweet_metrics)
news_selector = test_data['source'] == 'news'
news_metrics = compute_regression_metrics((preds[news_selector], labels[news_selector]), prefix='news_')
metrics.update(news_metrics)
for cat in categories:
selector = test_data['instance_id'].str.contains(cat)
# Predict
metrics_cat = compute_regression_metrics((preds[selector], labels[selector]), prefix=cat + '_')
metrics.update(metrics_cat)
tweet_selector = selector & (test_data['source'] == 'tweets')
metrics_cat = compute_regression_metrics((preds[tweet_selector], labels[tweet_selector]), prefix=cat + '_tweet_')
metrics.update(metrics_cat)
news_selector = selector & (test_data['source'] == 'news')
metrics_cat = compute_regression_metrics((preds[news_selector], labels[news_selector]),
prefix=cat + '_news_')
metrics.update(metrics_cat)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
        f.write(json.dumps(metrics)) | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_unsupervised_sts.py | eval_unsupervised_sts.py | 0.671363 | 0.266406 |
import torch
import random
import numpy as np
import argparse
from sentence_transformers import SentenceTransformer, util
from transformers import AutoModelForSequenceClassification
from transformers import AutoConfig
from transformers import AutoTokenizer
from transformers import Trainer
from transformers import TrainingArguments
from transformers import DataCollatorWithPadding
import pandas as pd
from datasets import Dataset
import wandb
import json
import os
from tqdm import tqdm
from rank_bm25 import BM25Okapi
from datasets import load_dataset
import ipdb
import torch.nn.functional as F
from utils.data_processor import read_covert_dataset, read_covidfact_dataset
from utils.metrics import acc_f1, compute_regression_metrics
from utils.rank_metrics import mean_average_precision, mean_reciprocal_rank
def enforce_reproducibility(seed=1000):
# Sets seed manually for both CPU and CUDA
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# For atomic operations there is currently
# no simple way to enforce determinism, as
# the order of parallel operations is not known.
# CUDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# System based
random.seed(seed)
np.random.seed(seed)
def bm25(claims, evidence):
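    # Lexical baseline: rank the whole evidence corpus for each claim with BM25 and score the ranking with MAP/MRR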
corpus = [e[1].split(" ") for e in evidence]
bm25 = BM25Okapi(corpus)
ranked_lists = []
for claim,ev_id in tqdm(claims):
        # Score every evidence sentence against the tokenized claim with BM25
preds = bm25.get_scores(claim.split(" "))
# Get order
rank = np.argsort(preds)[::-1]
# Get labels
labels = np.zeros(len(evidence))
labels[ev_id] = 1
ranked_lists.append(labels[rank])
return {'MAP': mean_average_precision(ranked_lists), 'MRR': mean_reciprocal_rank(ranked_lists)}
def sts_model(claims, evidence, args):
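    # Bi-encoder baseline: embed claims and evidence with a sentence-transformer and rank evidence by cosine similarity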
#Load the model
model = SentenceTransformer(args.model_name)
evidence_embeddings = model.encode([e[1] for e in evidence])
claim_embeddings = model.encode([c[0] for c in claims])
scores = util.cos_sim(claim_embeddings, evidence_embeddings)
ranked_lists = []
for row_score,(claim, ev_id) in zip(scores, claims):
# Get order
rank = np.argsort(row_score.numpy())[::-1]
# Get labels
labels = np.zeros(len(evidence))
labels[ev_id] = 1
ranked_lists.append(labels[rank])
return {'MAP': mean_average_precision(ranked_lists), 'MRR': mean_reciprocal_rank(ranked_lists)}
def trained_model(claims, evidence, args):
tk = AutoTokenizer.from_pretrained(args.base_model, model_max_length=512)
config = AutoConfig.from_pretrained(args.model_name, num_labels=1)
model = AutoModelForSequenceClassification.from_pretrained(args.model_name, config=config)
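    # The matching model is a cross-encoder with a single regression head (num_labels=1) that scores each claim-evidence pair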
training_args = TrainingArguments(
output_dir=args.output_dir
)
collator = DataCollatorWithPadding(tk)
trainer = Trainer(
model=model,
args=training_args,
data_collator=collator
)
def preprocess(examples):
batch = tk(examples['claim'], text_pair=examples['evidence'], truncation=True)
return batch
# Iterate through each claim and get a ranked list
ranked_lists = []
for claim,ev_id in tqdm(claims):
# Create dataset to pass through model
pairs = [[e[0], claim, e[1]] for e in evidence]
dframe = pd.DataFrame(pairs, columns=['id', 'claim', 'evidence'])
dataset = Dataset.from_pandas(dframe)
dataset = dataset.map(preprocess, batched=True)
pred_output = trainer.predict(dataset)
preds = pred_output.predictions.squeeze()
# Get order
rank = np.argsort(preds)[::-1]
# Get labels
labels = np.zeros(len(evidence))
labels[ev_id] = 1
ranked_lists.append(labels[rank])
return {'MAP': mean_average_precision(ranked_lists), 'MRR': mean_reciprocal_rank(ranked_lists)}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--data_loc",
help="The location of the COVERT data",
type=str, required=True)
parser.add_argument("--seed", type=int, help="Random seed", default=1000)
parser.add_argument("--model_name",
help="The name of the model to train. Can be a directory for a local model",
type=str, default='copenlu/citebert')
parser.add_argument("--base_model",
help="The base model",
type=str, default='copenlu/citebert')
parser.add_argument("--output_dir", help="Top level directory to save the models", required=True, type=str)
parser.add_argument("--dataset",
help="The name of the dataset to use",
type=str, default='covert', choices=['covert', 'covidfact'])
parser.add_argument("--run_name", help="A name for this run", required=True, type=str)
parser.add_argument("--tags", help="Tags to pass to wandb", required=False, type=str, default=[], nargs='+')
parser.add_argument("--metrics_dir", help="Directory to store metrics for making latex tables", required=True, type=str)
parser.add_argument("--eval_type", help="Decide which test samples to test on", type=str, default='ours', choices=['ours', 'sts', 'bm25'])
args = parser.parse_args()
seed = args.seed
model_name = args.model_name
run_name = args.run_name
eval_type = args.eval_type
dataset_name = args.dataset
config = {
'run_name': run_name,
'seed': seed,
'model_name': model_name,
'output_dir': args.output_dir,
'tags': args.tags,
'eval_type': args.eval_type,
'dataset': dataset_name
}
run = wandb.init(
name=args.run_name,
config=config,
reinit=True,
tags=args.tags
)
# Enforce reproducibility
# Always first
enforce_reproducibility(seed)
# Load the data
if dataset_name == 'covert':
claim_label,evidence = read_covert_dataset(args.data_loc)
elif dataset_name == 'covidfact':
claim_label, evidence = read_covidfact_dataset(args.data_loc)
if eval_type == 'ours':
metrics = trained_model(claim_label, evidence, args)
elif eval_type == 'bm25':
metrics = bm25(claim_label, evidence)
elif eval_type == 'sts':
metrics = sts_model(claim_label, evidence, args)
wandb.log(metrics)
if not os.path.exists(f"{args.metrics_dir}"):
os.makedirs(f"{args.metrics_dir}")
with open(f"{args.metrics_dir}/{seed}.json", 'wt') as f:
        f.write(json.dumps(metrics)) | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/eval_evidence_retrieval.py | eval_evidence_retrieval.py | 0.700588 | 0.269416 |
import ipdb
import numpy as np
from datasets import load_dataset
from datasets import Dataset
from functools import partial
import pandas as pd
import json
from sentence_transformers import InputExample
import re
LABEL_COLUMN = 'final_score_hard'
def clean_tweet(text):
no_html = re.sub(r'http\S+', '', text)
return re.sub(r'@([A-Za-z]+[A-Za-z0-9-_]+)', '@username', no_html)
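# Wrap the finding span in {{ }} inside its surrounding context so the model can see which span is the finding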
def prepare_context_sample(text, context):
text_loc = context.find(text)
text_end = text_loc + len(text)
return f"{context[:text_loc]}{{{{{text}}}}}{context[text_end:]}"
def preprocess_matching_data(tk, type, use_context, examples):
if use_context:
paper = [prepare_context_sample(f,c) for f,c in zip(examples['Paper Finding'], examples['Paper Context'])]
news = [prepare_context_sample(n,c) for n,c in zip(examples['News Finding'], examples['News Context'])]
batch = tk(paper, text_pair=news, truncation=True)
else:
batch = tk(examples['Paper Finding'], text_pair=examples['News Finding'], truncation=True)
if LABEL_COLUMN in examples or 'binary_label' in examples:
if type == 'regression':
batch['label'] = [float(l) for l in examples[LABEL_COLUMN]]
else:
batch['label'] = [int(l) for l in examples['binary_label']]
return batch
def filter_data(data, filter, split):
if filter == 'easy':
data = data.filter(lambda example: 'easy' in example['instance_id'])
elif filter == 'hard':
data = data.filter(lambda example: 'easy' not in example['instance_id'])
# TODO: See what the actual fields/names are
if split == 'all':
return data
elif split == 'tweets':
return data.filter(lambda example: example['source'] == 'tweets')
elif split == 'news':
return data.filter(lambda example: example['source'] == 'news')
def read_datasets(data_loc, tokenizer, type='regression', test_filter='none',
train_split='all', test_split='all', use_context=False):
# Something like this
dataset = load_dataset('csv', data_files={'train': f"{data_loc}/train.csv",
'validation': f"{data_loc}/dev.csv",
'test': f"{data_loc}/test.csv"})
dataset['train'] = filter_data(dataset['train'], 'none', train_split)
dataset['validation'] = filter_data(dataset['validation'], test_filter, test_split)
dataset['test'] = filter_data(dataset['test'], test_filter, test_split)
dataset.cleanup_cache_files()
dataset = dataset.map(partial(preprocess_matching_data, tokenizer, type, use_context), batched=True)
return dataset
def read_dataset(data_loc, tokenizer, type='regression'):
# Something like this
dataset = load_dataset('csv', data_files={'data': f"{data_loc}"})
dataset.cleanup_cache_files()
dataset = dataset.map(partial(preprocess_matching_data, tokenizer, type, False), batched=True,
                          remove_columns=dataset['data'].column_names)
return dataset
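# Build a paper-vs-news domain classification set from well-matched pairs (score > 3); label 0 = paper finding, 1 = news finding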
def preprocess_matching_data_domains(tk, examples):
selector = np.array(examples[LABEL_COLUMN]) > 3
batch = tk(list(np.array(examples['Paper Finding'])[selector]) + list(np.array(examples['News Finding'])[selector]),
truncation=True)
batch['label'] = [0] * sum(selector) + [1] * sum(selector)
return batch
def read_datasets_domain(data_loc, tokenizer):
# Something like this
dataset = load_dataset('csv', data_files={'train': f"{data_loc}/train.csv",
'validation': f"{data_loc}/dev.csv",
'test': f"{data_loc}/test.csv"})
dataset.cleanup_cache_files()
dataset = dataset.map(partial(preprocess_matching_data_domains, tokenizer), batched=True,
remove_columns=dataset['train'].column_names)
return dataset
def read_dataset_raw(data_loc):
return pd.read_csv(data_loc).fillna('')
def read_unlabelled_tweet_dataset(data_loc, tokenizer):
with open(data_loc) as f:
data = [json.loads(l) for l in f]
dframe = []
for row in data:
for tweet in row['full_tweets']:
for sent in row['paper_sentences']:
dframe.append([row['doi'], tweet['tweet_id'], clean_tweet(json.loads(tweet['tweet'])['text']), sent])
dframe = pd.DataFrame(dframe, columns=['doi', 'tweet_id', 'News Finding', 'Paper Finding'])
dataset = Dataset.from_pandas(dframe)
    dataset = dataset.map(partial(preprocess_matching_data, tokenizer, 'regression', False), batched=True)  # unlabelled data, so the score type has no effect
return dataset
def filter_data_sentence_transformers(data, filter, split):
if filter == 'easy':
data = data[data['instance_id'].str.contains('easy')]
elif filter == 'hard':
data = data[~data['instance_id'].str.contains('easy')]
# TODO: See what the actual fields/names are
if split == 'all':
return data
elif split == 'tweets':
return data[data['source'] == 'tweets']
elif split == 'news':
return data[data['source'] == 'news']
def read_datasets_sentence_transformers(data_loc, test_filter='none', train_split='all', test_split='all'):
# Something like this
dataset = {}
for split in ['train', 'dev', 'test']:
data = read_dataset_raw(f"{data_loc}/{split}.csv")
if split == 'train':
data = filter_data_sentence_transformers(data, 'none', train_split)
else:
data = filter_data_sentence_transformers(data, test_filter, test_split)
samples = []
for i,row in data.iterrows():
samples.append(InputExample(texts=[row['Paper Finding'], row['News Finding']],
label=(row[LABEL_COLUMN] - 1) / 4)) # Convert score to [0,1]
name = split if split != 'dev' else 'validation'
dataset[name] = samples
return dataset
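# Build (claim, gold evidence ids) pairs and a de-duplicated evidence corpus from the CoVERT fact-checking data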
def read_covert_dataset(data_loc):
with open(data_loc) as f:
all_data = [json.loads(l) for l in f]
# Collect all of the evidence and use that as the corpus
id_ = 0
claim_labels = []
evidence = []
# Because there are repeats
ev_to_id = {}
for row in all_data:
claim = clean_tweet(row['claim'])
labs_curr = []
for ev in row['evidence']:
if ev[0] != 'NOT ENOUGH INFO' and isinstance(ev[2], str):
if ev[2] in ev_to_id:
labs_curr.append(ev_to_id[ev[2]])
else:
labs_curr.append(id_)
evidence.append([id_, ev[2]])
ev_to_id[ev[2]] = id_
id_ += 1
claim_labels.append([claim, labs_curr])
print(len(evidence))
return claim_labels,evidence
def read_covidfact_dataset(data_loc):
with open(data_loc) as f:
all_data = [json.loads(l) for l in f]
# Collect all of the evidence and use that as the corpus
id_ = 0
claim_labels = []
evidence = []
ev_to_id = {}
for row in all_data:
claim = row['claim']
labs_curr = []
for ev in row['evidence']:
if ev in ev_to_id:
labs_curr.append(ev_to_id[ev])
else:
labs_curr.append(id_)
evidence.append([id_, ev])
ev_to_id[ev] = id_
id_ += 1
claim_labels.append([claim, labs_curr])
print(len(evidence))
    return claim_labels,evidence | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/data_processor.py | data_processor.py | 0.275422 | 0.349838
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from typing import List, AnyStr, Tuple, Dict
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr, spearmanr
import ipdb
def accuracy(preds: np.ndarray, labels: np.ndarray) -> float:
return np.sum(preds == labels).astype(np.float32) / float(labels.shape[0])
def acc_f1(averaging, eval_pred) -> Dict:
logits, labels = eval_pred
if len(logits.shape) > 1:
preds = np.argmax(logits, axis=-1)
else:
preds = logits
acc = accuracy(preds, labels)
P, R, F1, _ = precision_recall_fscore_support(labels, preds, average=averaging)
return {'accuracy': acc, 'precision': P, 'recall': R, 'f1': F1}
def compute_regression_metrics(eval_pred, clip_value=(1.0,5.0), prefix=''):
predictions, labels = eval_pred
predictions = np.clip(predictions, clip_value[0], clip_value[1])
mse = mean_squared_error(labels, predictions)
if len(predictions.shape) > 1:
predictions = predictions[:,0]
rho = pearsonr(predictions, labels.squeeze())
psi = spearmanr(predictions, labels.squeeze())
return {f"{prefix}mse": mse, f'{prefix}rho': rho[0], f'{prefix}rho-p': rho[1], f'{prefix}psi': psi[0], f'{prefix}psi-p': psi[1]}
def compute_f1(f1_metric, average, eval_pred):
logits, labels = eval_pred
predictions = np.argmax(logits, axis=-1)
return f1_metric.compute(predictions=predictions, references=labels, average=average)
def compute_rouge(tokenizer, metric, eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
decoded_labels = [lab.strip() for lab in decoded_labels]
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
#result = {"rouge": result["score"]}
    # Flatten the AggregateScore objects returned by the ROUGE metric
    # (low/mid/high bounds, each with precision/recall/fmeasure) into scalars.
    result = {
        f'{rouge_type}_{bound}_{short}': getattr(getattr(result[rouge_type], bound), attr)
        for rouge_type in ('rouge1', 'rouge2', 'rougeL', 'rougeLsum')
        for bound in ('low', 'mid', 'high')
        for short, attr in (('p', 'precision'), ('r', 'recall'), ('fmeasure', 'fmeasure'))
    }
#prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
#result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 6) for k, v in result.items()}
    return result | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/metrics.py | metrics.py | 0.825343 | 0.645357
import torch
from torch import nn
from torch.optim import SGD
from transformers import AutoModel
from tqdm import tqdm
import torch.nn.functional as F
import ipdb
class GradientReversal(torch.autograd.Function):
"""
Basic layer for doing gradient reversal
"""
lambd = 1.0
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_output):
return GradientReversal.lambd * grad_output.neg()
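# GradientReversal is used as GradientReversal.apply(features): the forward pass
# is the identity, while the backward pass multiplies incoming gradients by
# -lambd, so the encoder below is pushed to unlearn domain information while the
# domain classifier on top is trained to predict it.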
class DomainAdversarialModel(nn.Module):
"""
A really basic wrapper around BERT
"""
def __init__(self, model: AutoModel, n_classes: int = 2, **kwargs):
super(DomainAdversarialModel, self).__init__()
self.model = AutoModel.from_pretrained(model)
self.domain_classifier = nn.Linear(self.model.config.hidden_size, n_classes)
def forward(
self,
input_ids: torch.LongTensor,
attention_mask: torch.LongTensor,
labels: torch.LongTensor = None,
**kwargs
):
# 1) Get the CLS representation from BERT
outputs = self.model(
input_ids,
attention_mask=attention_mask
)
# (b x n_classes)
cls_hidden_state = outputs.pooler_output
adv_input = GradientReversal.apply(cls_hidden_state)
adv_logits = self.domain_classifier(adv_input)
outputs['logits'] = adv_logits
loss_fn = nn.CrossEntropyLoss()
if labels is not None:
loss = loss_fn(adv_logits, labels)
outputs['loss'] = loss
return outputs
def save_pretrained(self, output_dir: str):
self.model.save_pretrained(output_dir)
# Optimize the softmax temperature to minimize the negative log likelihood
class Temp(nn.Module):
def __init__(self):
super().__init__()
self.T = nn.Parameter(torch.ones(1))
def forward(self, logits):
return logits / self.T
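# Temperature scaling: the logits are divided by a single learned scalar T
# (T > 1 softens the softmax, T < 1 sharpens it). T is fitted below with SGD by
# minimising the negative log likelihood on a held-out loader while the weights
# of the underlying model are left untouched.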
def calculate_log_likelihood(model, loader, T, device):
# loader = torch.utils.data.DataLoader(dset, batch_size=32,
# num_workers=2)
with torch.no_grad():
labels_all = []
preds_all = []
for i, batch in enumerate(tqdm(loader), 0):
# get the inputs; data is a list of [inputs, labels]
for b in batch:
batch[b] = batch[b].to(device)
labels = batch.pop('labels')
# forward + backward + optimize
outputs = model(**batch)
logits = outputs['logits']
logits /= T
preds = F.log_softmax(logits, dim=-1)
labels_all.append(labels.detach())
preds_all.append(preds.detach())
nll = F.nll_loss(torch.concat(preds_all), torch.concat(labels_all), reduction='mean')
return nll.item()
def calibrate_temperature(model, loader, device):
# loader = torch.utils.data.DataLoader(dset, batch_size=32,
# num_workers=2)
T = Temp().to(device)
optim = SGD(T.parameters(), lr=1e-3)
patience = 10
c = 0
eps = 1e-5
t_curr = 1.0
done = False
print(f"NLL before calibration: {calculate_log_likelihood(model, loader, t_curr, device)}")
for epoch in range(3): # loop over the dataset multiple times
for i, batch in enumerate(tqdm(loader), 0):
# get the inputs; data is a list of [inputs, labels]
for b in batch:
batch[b] = batch[b].to(device)
labels = batch.pop('labels')
# zero the parameter gradients
optim.zero_grad()
# forward + backward + optimize
outputs = model(**batch)
logits = outputs['logits']
logits = T(logits)
preds = F.log_softmax(logits, dim=-1)
nll = F.nll_loss(preds, labels, reduction='mean')
nll.backward()
optim.step()
if abs(t_curr - T.T.item()) > eps:
c = 0
else:
c += 1
if c == patience:
done = True
break
t_curr = T.T.item()
if done:
break
print(f"NLL after calibration: {calculate_log_likelihood(model, loader, t_curr, device)}")
    return t_curr | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/model.py | model.py | 0.93528 | 0.508971
import numpy as np
def mean_reciprocal_rank(rs):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.61111111111111105
>>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> mean_reciprocal_rank(rs)
0.5
>>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]
>>> mean_reciprocal_rank(rs)
0.75
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
rs = (np.asarray(r).nonzero()[0] for r in rs)
return np.mean([1. / (r[0] + 1) if r.size else 0. for r in rs])
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> r_precision(r)
0.33333333333333331
>>> r = [0, 1, 0]
>>> r_precision(r)
0.5
>>> r = [1, 0, 0]
>>> r_precision(r)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
def precision_at_k(r, k):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
>>> r = [0, 0, 1]
>>> precision_at_k(r, 1)
0.0
>>> precision_at_k(r, 2)
0.0
>>> precision_at_k(r, 3)
0.33333333333333331
>>> precision_at_k(r, 4)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: Relevance score length < k
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return np.mean(r)
def average_precision(r):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
>>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
>>> delta_r = 1. / sum(r)
>>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])
0.7833333333333333
>>> average_precision(r)
0.78333333333333333
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Average precision
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1) for k in range(r.size) if r[k]]
if not out:
return 0.
return np.mean(out)
def mean_average_precision(rs):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]
>>> mean_average_precision(rs)
0.78333333333333333
>>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]
>>> mean_average_precision(rs)
0.39166666666666666
Args:
rs: Iterator of relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean average precision
"""
return np.mean([average_precision(r) for r in rs])
def dcg_at_k(r, k, method=0):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> dcg_at_k(r, 1)
3.0
>>> dcg_at_k(r, 1, method=1)
3.0
>>> dcg_at_k(r, 2)
5.0
>>> dcg_at_k(r, 2, method=1)
4.2618595071429155
>>> dcg_at_k(r, 10)
9.6051177391888114
>>> dcg_at_k(r, 11)
9.6051177391888114
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Discounted cumulative gain
"""
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Example from
http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf
>>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]
>>> ndcg_at_k(r, 1)
1.0
>>> r = [2, 1, 2, 0]
>>> ndcg_at_k(r, 4)
0.9203032077642922
>>> ndcg_at_k(r, 4, method=1)
0.96519546960144276
>>> ndcg_at_k([0], 1)
0.0
>>> ndcg_at_k([1], 2)
1.0
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
k: Number of results to consider
method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]
If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
if __name__ == "__main__":
import doctest
    doctest.testmod() | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/matching_experiments/utils/rank_metrics.py | rank_metrics.py | 0.923842 | 0.822688
from sentence_transformers import SentenceTransformer, util
from typing import Optional, AnyStr, List
import numpy as np
import torch
import torch.nn.functional as F
class SimilarityEstimator(object):
"""
Estimator of information matching score (IMS) between two scientific sentences
"""
def __init__(
self,
model_name_or_path: Optional[AnyStr] = 'copenlu/spiced',
device: Optional[AnyStr] = None,
use_auth_token: Optional[bool] = False,
cache_folder: Optional[AnyStr] = None
):
"""
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name. Defaults to the best model from Wright et al. 2022.
:param device: Device (like ‘cuda’ / ‘cpu’) that should be used for computation. If None, checks if a GPU can be used.
:param use_auth_token: HuggingFace authentication token to download private models.
:param cache_folder: Path to store models
"""
self.model_name_or_path = model_name_or_path
self.device = device
self.use_auth_token = use_auth_token
self.cache_folder = cache_folder
self.model = SentenceTransformer(
model_name_or_path=model_name_or_path,
device=device,
use_auth_token=use_auth_token,
cache_folder=cache_folder
)
def estimate_ims(
self,
a: List[AnyStr],
b: List[AnyStr]
) -> np.ndarray:
"""
        Estimate the information matching score between all sentences in 'a' and all sentences in 'b'. The score will be a scalar between 1 and 5, where 1 means no information similarity and 5 means the information is exactly the same between the two sentences.
:param a: A list of sentences
:param b: Second list of sentences
:return: A matrix S of size $N$x$M$ where $N$ is the length of list $a$, $M$ is the length of list $b$, and entry $S_{ij}$ is the information matching score between sentence $a_{i}$ and $b_{j}$
"""
sentence1_embedding = self.model.encode(a)
sentence2_embedding = self.model.encode(b)
S = (util.cos_sim(sentence1_embedding, sentence2_embedding).clip(min=0, max=1) * 4) + 1
return S.detach().numpy()
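    # Minimal usage sketch (illustrative, not part of the packaged tests):
    #   estimator = SimilarityEstimator()  # downloads 'copenlu/spiced' by default
    #   matrix = estimator.estimate_ims(["Paper finding."], ["News finding."])
    #   scores = estimator.estimate_ims_array(["Paper finding."], ["News finding."])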
def estimate_ims_array(
self,
a: List[AnyStr],
b: List[AnyStr]
) -> List:
"""
        Estimate the information matching score between each sentence in $a$ and its corresponding $b$ (i.e. $a_{i}$ and $b_{i}$). The score will be a scalar between 1 and 5, where 1 means no information similarity and 5 means the information is exactly the same between the two sentences.
:param a: A list of sentences
:param b: Second list of sentences of the same size as $a$
:return: A list $s$ of size $N$ where $N$ is the length of both list $a$ and list $b$ and entry $s_{i}$ is the information matching score between $a_{i}$ and $b_{i}$
"""
assert len(a) == len(b), f"len(a) != len(b), lists of sentences must be equal length. len(a) == {len(a)}, len(b) == {len(b)}"
sentence1_embedding = self.model.encode(a)
sentence2_embedding = self.model.encode(b)
scores = F.cosine_similarity(torch.Tensor(sentence1_embedding), torch.Tensor(sentence2_embedding), dim=1).clip(
min=0).squeeze().cpu().numpy()
# Convert to range [1,5], assume anything below 0 is 0
s = (scores * 4) + 1
        return s.tolist() | scientific-information-change | /scientific-information-change-1.0.0.tar.gz/scientific-information-change-1.0.0/scientific_information_change/estimate_similarity.py | estimate_similarity.py | 0.949494 | 0.651036
# scientific-paper-matplotlib
<!--[![Contributors][contributors-shield]][contributors-url]-->
<!--[![Forks][forks-shield]][forks-url] -->
<!--[![Issues][issues-shield]][issues-url]-->
<!--[![MIT License][license-shield]][license-url]-->
[![LinkedInKhaled][linkedin-khaled-shield]][linkedin-khaled-url]
<!-- TABLE OF CONTENTS -->
<details open="open">
<summary>Table of Contents</summary>
<ol>
    <li><a href="#about-the-project">About The Project</a></li>
<li><a href="#installation">Installation</a></li>
<li><a href="#Dependencies">Dependencies</a></li>
<li><a href="#license">License</a></li>
<li><a href="#contact">Contact</a></li>
<li><a href="#acknowledgements">Acknowledgements</a></li>
</ol>
</details>
<!-- ABOUT THE PROJECT -->
## About The Project
This package is for publishers who want a ready-to-use plot configuration (label font sizes, datetime formatting, figure sizes, and other details) so that the plotted figures can be included directly in a paper.
## Dependencies
- [Python3.6](https://www.python.org/downloads/)
- [matplotlib](https://matplotlib.org/)
- [Numpy](https://numpy.org/)
- [Pandas](https://pandas.pydata.org/)
## Installation
`pip install .`
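A minimal usage sketch (illustrative; the exact import path depends on the installed package layout):
```python
from plot import plot  # assumed module name; adjust to the installed package
# Save a publication-ready 1D line plot with the package defaults
plot([0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0], "x", "y", "example.pdf")
```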
<!-- CONTRIBUTING -->
## Contributing
1. Fork the Project
2. Commit your Changes
3. Push to the Branch
4. Open a Pull Request
<!-- LICENSE -->
## License
Distributed under the MIT License. See `LICENSE` for more information.
<!-- CONTACT -->
## Contact
Khaled Alamin - [@LinkedIn](https://www.linkedin.com/in/khaled-alamin/)
<!--Project Link: [https://github.com/your_username/repo_name](https://github.com/your_username/repo_name)-->
<!-- ACKNOWLEDGEMENTS -->
## Acknowledgements
* [Udacity data science nanodegree](https://classroom.udacity.com/nanodegrees/nd025/dashboard/overview)
* [GitHub Emoji Cheat Sheet](https://www.webpagefx.com/tools/emoji-cheat-sheet)
* [Img Shields](https://shields.io)
* [Choose an Open Source License](https://choosealicense.com)
* [GitHub Pages](https://pages.github.com)
* [Animate.css](https://daneden.github.io/animate.css)
* [Loaders.css](https://connoratherton.com/loaders)
* [Slick Carousel](https://kenwheeler.github.io/slick)
* [Smooth Scroll](https://github.com/cferdinandi/smooth-scroll)
* [Sticky Kit](http://leafo.net/sticky-kit)
* [JVectorMap](http://jvectormap.com)
* [Font Awesome](https://fontawesome.com)
<!-- MARKDOWN LINKS & IMAGES -->
<!-- https://www.markdownguide.org/basic-syntax/#reference-style-links -->
[contributors-shield]: https://img.shields.io/github/contributors/othneildrew/Best-README-Template.svg?style=for-the-badge
[contributors-url]: https://github.com/KhaledAlamin/Huawei_embedded_dnn/graphs/contributors
[forks-shield]: https://img.shields.io/github/forks/othneildrew/Best-README-Template.svg?style=for-the-badge
[forks-url]: https://github.com/KhaledAlamin/Huawei_embedded_dnn/network/members
[stars-shield]: https://img.shields.io/github/stars/othneildrew/Best-README-Template.svg?style=for-the-badge
[stars-url]: https://github.com/KhaledAlamin/Huawei_embedded_dnn/stargazers
[issues-shield]: https://img.shields.io/github/issues/othneildrew/Best-README-Template.svg?style=for-the-badge
[issues-url]: https://github.com/KhaledAlamin/Huawei_embedded_dnn/issues
[license-shield]: https://img.shields.io/github/license/othneildrew/Best-README-Template.svg?style=for-the-badge
[license-url]: https://github.com/KhaledAlamin/Huawei_embedded_dnn/blob/main/LICENSE.txt
[linkedin-khaled-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-khaled-url]: https://www.linkedin.com/in/khaled-alamin/
[linkedin-eyas-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-eyas-url]: https://www.linkedin.com/in/EyasAli/
| scientific-paper-matplotlib | /scientific_paper_matplotlib-0.0.6.tar.gz/scientific_paper_matplotlib-0.0.6/README.md | README.md | 0.565059 | 0.345147
from __future__ import annotations
from pathlib import Path
from functools import wraps
from typing import TypeVar, List, Tuple, Union, Callable, Optional
from warnings import warn, filterwarnings, catch_warnings
from textwrap import dedent
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from numpy import amin, amax
from .plot_settings import apply_styles, rwth_cycle
from .types_ import Vector, Matrix
mpl.use("Agg")
In = TypeVar("In", List[float], Tuple[float],
Vector)
In2D = TypeVar("In2D", list[list[float]], list[Vector], tuple[Vector],
Matrix)
def fix_inputs(input_1: In, input_2: In)\
-> tuple[Vector, Vector]:
"""
Remove nans and infinities from the input vectors.
Parameters
---------
input_1, input_2:
X/Y-axis data of plot
Returns
------
New vectors x and y with nans removed.
"""
if len(input_1) != len(input_2):
raise ValueError(
"The sizes of the input vectors are not the same.")
nan_count = np.count_nonzero(np.isnan(input_2))
inf_count = np.count_nonzero(np.isinf(input_2))
if nan_count != 0 or inf_count != 0:
new_input_1 = np.empty(len(input_1)-nan_count-inf_count)
new_input_2 = np.empty(len(new_input_1))
position = 0
for x_input, y_input in zip(input_1, input_2):
            if not (np.isnan(y_input) or np.isinf(y_input)):
new_input_1[position] = x_input
new_input_2[position] = y_input
position += 1
return new_input_1, new_input_2
return np.array(input_1), np.array(input_2)
def check_inputs(input_1: In, input_2: In, label_1: str, label_2: str)\
-> bool:
"""
Check the input vectors to see, if they are large enough.
Parameters
---------
input_1, input_2:
X/Y-axis data of plot
label_1, label_2:
Labels of the X/Y axis
Returns
------
True, if the plot can be created.
"""
    if len(input_1) <= 1 or len(input_2) <= 1:
        warn(
            "There are not enough points in the following plot: "
            f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
        return False
    if min(input_1) == max(input_1):
        warn(
            "The range of the x-axis is not large enough in the following plot: "
            f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
        return False
    if min(input_2) == max(input_2):
        warn(
            "The range of the y-axis is not large enough in the following plot: "
            f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
        return False
infinity = np.isinf(input_1).any() or np.isinf(input_2).any()
if infinity:
warn(dedent(f"""There are infinities in the data of the following plot:
label1: {label_1}, label2: {label_2}. It cannot be drawn."""),
RuntimeWarning)
return False
nan = np.isnan(input_1).any() or np.isnan(input_2).any()
if nan:
warn(dedent(f"""There are nans in the data of the following plot:
label1: {label_1}, label2: {label_2}. It cannot be drawn."""),
RuntimeWarning)
return False
return True
@apply_styles
def plot_fit(X: In, Y: In,
fit_function: Callable[..., float],
xlabel: str, ylabel: str, filename: Union[str, Path], *,
args: Optional[Tuple[float]] = None,
logscale: bool = False) -> None:
"""Creates a plot of data and a fit and saves it to 'filename'."""
X, Y = fix_inputs(X, Y) # type: ignore
if not check_inputs(
X, Y, xlabel, ylabel):
return
n_fit = 1000
_fit_function: Callable[[float], float]
if args is not None:
@wraps(fit_function)
def _fit_function(x: float) -> float:
"""This is the function, which has been fitted"""
            return fit_function(x, *args)
else:
_fit_function = fit_function
plt.plot(X, Y, label="data")
X_fit = [min(X) + (max(X) - min(X)) * i / (n_fit - 1)
for i in range(n_fit)]
Y_fit = [_fit_function(x) for x in X_fit]
plt.plot(X_fit, Y_fit, label="fit")
if logscale:
plt.xscale("log")
plt.yscale("log")
plt.xlim(min(X), max(X))
if logscale:
plt.ylim(min(Y) * 0.97, max(Y) * 1.02)
else:
plt.ylim(
min(Y) - (max(Y) - min(Y)) * 0.02,
max(Y) + (max(Y) - min(Y)) * 0.02
)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
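# Example (illustrative; xs, ys and the coefficients are placeholders):
#   plot_fit(xs, ys, lambda x, a, b: a * x + b, "$x$", "$y$", "fit.pdf", args=(2.0, 1.0))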
@apply_styles(three_d=True)
def plot_surface(X: In2D, Y: In2D, Z: In2D,
xlabel: str, ylabel: str, zlabel: str,
filename: Union[str, Path], *,
log_scale: bool = False,
set_z_lim: bool = True,
colorscheme: str = "rwth_gradient",
figsize: tuple[float, float] = (4.33, 3.5),
labelpad: Optional[float] = None,
nbins: Optional[int] = None) -> None:
"""create a 2D surface plot of meshgrid-like valued Xs, Ys and Zs"""
if not check_inputs(
np.array(X).flatten(),
np.array(Z).flatten(), xlabel, zlabel):
return
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
fig.subplots_adjust(left=-0.02, right=0.75, bottom=0.15, top=0.98)
ax.plot_surface(X, Y, Z, cmap=colorscheme)
ax.set_box_aspect(aspect=None, zoom=.8)
if labelpad is None:
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel, rotation=90)
else:
ax.set_xlabel(xlabel, labelpad=labelpad)
ax.set_ylabel(ylabel, labelpad=labelpad)
ax.set_zlabel(zlabel, rotation=90, labelpad=labelpad)
assert ax.zaxis is not None
ax.set_xlim(amin(X), amax(X)) # type: ignore
ax.set_ylim(amin(Y), amax(Y)) # type: ignore
if set_z_lim:
if not log_scale:
ax.set_zlim(
amin(Z) - (amax(Z) - amin(Z)) * 0.02, # type: ignore
amax(Z) + (amax(Z) - amin(Z)) * 0.02 # type: ignore
)
else:
ax.set_zlim(
amin(Z) * 0.97, amax(Z) * 1.02) # type: ignore
if log_scale:
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_zscale("log")
for spine in ax.spines.values():
spine.set_visible(False)
ax.xaxis.pane.set_alpha(0.3)
ax.yaxis.pane.set_alpha(0.3)
ax.zaxis.pane.set_alpha(0.3)
if nbins is not None:
ax.xaxis.set_major_locator(
MaxNLocator(nbins)
)
ax.yaxis.set_major_locator(
MaxNLocator(nbins)
)
fig.set_size_inches(*figsize)
with catch_warnings():
filterwarnings("ignore", message=".*Tight layout")
plt.tight_layout()
plt.savefig(filename)
plt.close()
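# Example (illustrative): surface plot of z = sin(x) * cos(y) on a meshgrid.
#   x, y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
#   plot_surface(x, y, np.sin(x) * np.cos(y), "$x$", "$y$", "$z$", "surface.pdf")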
@apply_styles
def plot(X: In, Y: In, xlabel: str, ylabel: str,
filename: Union[Path, str], *, logscale: bool = False,
ylim: Optional[tuple[float, float]] = None,
yticks: bool = True, cycler: int = 0) -> None:
"""Create a simple 1D plot"""
X, Y = fix_inputs(X, Y) # type: ignore
if not check_inputs(
X, Y, xlabel, ylabel):
return
if len(X) <= 1 or len(Y) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
if cycler > 0:
for _ in range(cycler):
plt.plot([], [])
plt.plot(X, Y, linestyle="-")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if logscale:
plt.xscale("log")
plt.yscale("log")
if ylim is None:
plt.ylim(min(Y) * 0.97, max(Y) * 1.02)
elif ylim is None:
plt.ylim(
min(Y) - (max(Y) - min(Y)) * 0.02,
max(Y) + (max(Y) - min(Y)) * 0.02
)
if ylim is not None:
plt.ylim(*ylim)
plt.xlim(min(X), max(X))
if not yticks:
plt.yticks([])
plt.tight_layout()
plt.savefig(filename)
plt.close()
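# Example (illustrative): a simple quadratic on double-logarithmic axes.
#   xs = list(range(1, 100))
#   plot(xs, [x ** 2 for x in xs], "$x$", "$x^2$", "square.pdf", logscale=True)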
@apply_styles
def two_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
xlabel: str, ylabel: str,
filename: Union[Path, str], *,
logscale: bool = False, cycle: int = 0,
color: tuple[int, int] = (0, 1),
outer: bool = False) -> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot and a single y-axis.
Keyword arguments:
cycle -- skip this many colours in the colour-wheel before plotting
    color -- use these indices in the colour-wheel when creating a plot
outer -- use the outer limits on the x-axis rather than the inner limit
"""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
if not (
check_inputs(x1, y1, xlabel, label1)
or check_inputs(x2, y2, xlabel, label2)):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
if cycle > 0:
color = (color[0] + cycle, color[1] + cycle)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
plt.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
plt.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
min_ = min(min(y1), min(y2))
max_ = max(max(y1), max(y2))
if not logscale:
plt.ylim(
min_ - (max_ - min_) * 0.02,
max_ + (max_ - min_) * 0.02
)
else:
plt.xscale("log")
plt.yscale("log")
plt.ylim(
min_ * 0.97, max_ * 1.02)
if outer:
plt.xlim(min(min(x1), min(x2)),
max(max(x1), max(x2)))
else:
plt.xlim(max(min(x1), min(x2)),
min(max(x1), max(x2)))
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def three_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
x3: In, y3: In, label3: str,
xlabel: str, ylabel: str,
filename: Union[Path, str], *,
logscale: bool = False,
xmin: Optional[float] = None,
xmax: Optional[float] = None) -> None:
"""Create a simple 1D plot with three different graphs inside of a single
plot and a single y-axis."""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
x3, y3 = fix_inputs(x3, y3) # type: ignore
if not (
check_inputs(x1, y1, xlabel, label1)
            or check_inputs(x2, y2, xlabel, label2)
or check_inputs(x3, y3, xlabel, label3)):
return
if any(len(x) <= 1 for x in (x1, x2, y1, y2, x3, y3)):
raise ValueError(
f"The data for plot {filename} contains empty rows!")
plt.plot(x1, y1, label=label1)
plt.plot(x2, y2, label=label2, linestyle="dashed")
plt.plot(x3, y3, label=label3, linestyle="dotted")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
min_ = min(min(y1), min(y2), min(y3))
max_ = max(max(y1), max(y2), max(y3))
if not logscale:
plt.ylim(
min_ - (max_ - min_) * 0.02,
max_ + (max_ - min_) * 0.02
)
else:
plt.xscale("log")
plt.yscale("log")
plt.ylim(
min_ * 0.97, max_ * 1.02)
if xmin is not None and xmax is not None:
plt.xlim(xmin, xmax)
else:
plt.xlim(min(x1), max(x1))
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def two_axis_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
xlabel: str, ylabel: str,
ylabel2: str,
filename: Union[Path, str], *,
ticks: Optional[tuple[list[float], list[str]]] = None,
xlim: Optional[tuple[float, float]] = None,
color: tuple[int, int] = (0, 1))\
-> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot with two y-axis.
The variable "ticks" sets costum y-ticks on the second y-axis. The first
argument gives the position of the ticks and the second argument gives the
values to be shown.
Color selects the indeces of the chosen color-wheel, which should be taken
for the different plots. The default is (1,2)."""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
if not check_inputs(
y1, y2, label1, label2):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
fig = plt.figure()
ax1 = fig.add_subplot(111)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
# first plot
lines = ax1.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_ylim(
min(y1) - (max(y1) - min(y1)) * 0.02,
max(y1) + (max(y1) - min(y1)) * 0.02
)
# second plot
ax2 = ax1.twinx()
lines += ax2.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
ax2.set_ylabel(ylabel2)
ax2.set_ylim(
min(y2) - (max(y2) - min(y2)) * 0.02,
max(y2) + (max(y2) - min(y2)) * 0.02
)
# general settings
if xlim is None:
plt.xlim(min(x1), max(x1))
else:
plt.xlim(*xlim)
labels = [line.get_label() for line in lines]
plt.legend(lines, labels)
# ticks
if ticks is not None:
ax2.set_yticks(ticks[0])
ax2.set_yticklabels(ticks[1])
plt.tight_layout()
plt.savefig(filename)
plt.close()
def make_invisible(ax: plt.Axes) -> None:
"""Make all patch spines invisible."""
ax.set_frame_on(True)
ax.patch.set_visible(False)
for spine in ax.spines.values():
spine.set_visible(False)
@apply_styles
def three_axis_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
x3: In, y3: In, label3: str,
xlabel: str, ylabel: str,
ylabel2: str, ylabel3: str,
filename: Union[Path, str], *,
ticks: Optional[tuple[list[float], list[str]]] = None,
xlim: Optional[tuple[float, float]] = None,
color: tuple[int, int, int] = (0, 1, 2),
legend: bool = True)\
-> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot with two y-axis.
The variable "ticks" sets costum y-ticks on the second y-axis. The first
argument gives the position of the ticks and the second argument gives the
values to be shown.
Color selects the indeces of the chosen color-wheel, which should be taken
for the different plots. The default is (1,2)."""
# pylint: disable=R0915
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
x3, y3 = fix_inputs(x3, y3) # type: ignore
if not check_inputs(
y1, y2, label1, label2):
return
if not check_inputs(
x3, y3, xlabel, label3):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
assert len(color) == 3
fig, ax1 = plt.subplots()
fig.subplots_adjust(right=0.75)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
# first plot
lines = ax1.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_ylim(
min(y1) - (max(y1) - min(y1)) * 0.02,
max(y1) + (max(y1) - min(y1)) * 0.02
)
ax1.yaxis.label.set_color(colors[color[0]])
ax1.tick_params(axis="y", colors=colors[color[0]])
# second plot
ax2 = ax1.twinx()
lines += ax2.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
ax2.set_ylabel(ylabel2)
ax2.set_ylim(
min(y2) - (max(y2) - min(y2)) * 0.02,
max(y2) + (max(y2) - min(y2)) * 0.02
)
ax2.yaxis.label.set_color(colors[color[1]])
ax2.tick_params(axis="y", colors=colors[color[1]])
# third plot
ax3 = ax1.twinx()
make_invisible(ax3)
ax3.spines["right"].set_position(("axes", 1.25))
ax3.spines["right"].set_visible(True)
lines += ax3.plot(x3, y3, label=label3,
color=colors[color[2]],
linestyle=linestyle[2])
ax3.set_ylabel(ylabel3)
ax3.set_ylim(
min(y3) - (max(y3) - min(y3)) * 0.02,
max(y3) + (max(y3) - min(y3)) * 0.02
)
ax3.yaxis.label.set_color(colors[color[2]])
ax3.tick_params(axis="y", colors=colors[color[2]])
# general settings
if xlim is None:
plt.xlim(min(x1), max(x1))
else:
plt.xlim(*xlim)
labels = [line.get_label() for line in lines]
if legend:
plt.legend(lines, labels)
# ticks
if ticks is not None:
ax2.set_yticks(ticks[0])
ax2.set_yticklabels(ticks[1])
plt.tight_layout()
plt.savefig(filename)
plt.close() | scientific-plots | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/default_plots.py | default_plots.py | from __future__ import annotations
from pathlib import Path
from functools import wraps
from typing import TypeVar, List, Tuple, Union, Callable, Optional
from warnings import warn, filterwarnings, catch_warnings
from textwrap import dedent
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
from numpy import amin, amax
from .plot_settings import apply_styles, rwth_cycle
from .types_ import Vector, Matrix
mpl.use("Agg")
In = TypeVar("In", List[float], Tuple[float],
Vector)
In2D = TypeVar("In2D", list[list[float]], list[Vector], tuple[Vector],
Matrix)
def fix_inputs(input_1: In, input_2: In)\
-> tuple[Vector, Vector]:
"""
Remove nans and infinities from the input vectors.
Parameters
---------
input_1, input_2:
X/Y-axis data of plot
Returns
------
New vectors x and y with nans removed.
"""
if len(input_1) != len(input_2):
raise ValueError(
"The sizes of the input vectors are not the same.")
nan_count = np.count_nonzero(np.isnan(input_2))
inf_count = np.count_nonzero(np.isinf(input_2))
if nan_count != 0 or inf_count != 0:
new_input_1 = np.empty(len(input_1)-nan_count-inf_count)
new_input_2 = np.empty(len(new_input_1))
position = 0
for x_input, y_input in zip(input_1, input_2):
if not np.isnan(y_input) or np.isinf(y_input):
new_input_1[position] = x_input
new_input_2[position] = y_input
position += 1
return new_input_1, new_input_2
return np.array(input_1), np.array(input_2)
def check_inputs(input_1: In, input_2: In, label_1: str, label_2: str)\
-> bool:
"""
Check the input vectors to see, if they are large enough.
Parameters
---------
input_1, input_2:
X/Y-axis data of plot
label_1, label_2:
Labels of the X/Y axis
Returns
------
True, if the plot can be created.
"""
if len(input_1) <= 1 or len(input_2) <= 1:
warn(
"There are not enough points in the following plots:"
f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
return False
if min(input_1) == max(input_1):
warn(
"The area of the x-axis is not large enough in the following plot:"
f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
return False
if min(input_2) == max(input_2):
warn(
"The area of the y-axis is not large enough in the following plot:"
f"label1: {label_1} label2: {label_2}. It cannot be drawn.")
return False
infinity = np.isinf(input_1).any() or np.isinf(input_2).any()
if infinity:
warn(dedent(f"""There are infinities in the data of the following plot:
label1: {label_1}, label2: {label_2}. It cannot be drawn."""),
RuntimeWarning)
return False
nan = np.isnan(input_1).any() or np.isnan(input_2).any()
if nan:
warn(dedent(f"""There are nans in the data of the following plot:
label1: {label_1}, label2: {label_2}. It cannot be drawn."""),
RuntimeWarning)
return False
return True
@apply_styles
def plot_fit(X: In, Y: In,
fit_function: Callable[..., float],
xlabel: str, ylabel: str, filename: Union[str, Path], *,
args: Optional[Tuple[float]] = None,
logscale: bool = False) -> None:
"""Creates a plot of data and a fit and saves it to 'filename'."""
X, Y = fix_inputs(X, Y) # type: ignore
if not check_inputs(
X, Y, xlabel, ylabel):
return
n_fit = 1000
_fit_function: Callable[[float], float]
if args is not None:
@wraps(fit_function)
def _fit_function(x: float) -> float:
"""This is the function, which has been fitted"""
return _fit_function(x, *args)
else:
_fit_function = fit_function
plt.plot(X, Y, label="data")
X_fit = [min(X) + (max(X) - min(X)) * i / (n_fit - 1)
for i in range(n_fit)]
Y_fit = [_fit_function(x) for x in X_fit]
plt.plot(X_fit, Y_fit, label="fit")
if logscale:
plt.xscale("log")
plt.yscale("log")
plt.xlim(min(X), max(X))
if logscale:
plt.ylim(min(Y) * 0.97, max(Y) * 1.02)
else:
plt.ylim(
min(Y) - (max(Y) - min(Y)) * 0.02,
max(Y) + (max(Y) - min(Y)) * 0.02
)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles(three_d=True)
def plot_surface(X: In2D, Y: In2D, Z: In2D,
xlabel: str, ylabel: str, zlabel: str,
filename: Union[str, Path], *,
log_scale: bool = False,
set_z_lim: bool = True,
colorscheme: str = "rwth_gradient",
figsize: tuple[float, float] = (4.33, 3.5),
labelpad: Optional[float] = None,
nbins: Optional[int] = None) -> None:
"""create a 2D surface plot of meshgrid-like valued Xs, Ys and Zs"""
if not check_inputs(
np.array(X).flatten(),
np.array(Z).flatten(), xlabel, zlabel):
return
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
fig.subplots_adjust(left=-0.02, right=0.75, bottom=0.15, top=0.98)
ax.plot_surface(X, Y, Z, cmap=colorscheme)
ax.set_box_aspect(aspect=None, zoom=.8)
if labelpad is None:
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel, rotation=90)
else:
ax.set_xlabel(xlabel, labelpad=labelpad)
ax.set_ylabel(ylabel, labelpad=labelpad)
ax.set_zlabel(zlabel, rotation=90, labelpad=labelpad)
assert ax.zaxis is not None
ax.set_xlim(amin(X), amax(X)) # type: ignore
ax.set_ylim(amin(Y), amax(Y)) # type: ignore
if set_z_lim:
if not log_scale:
ax.set_zlim(
amin(Z) - (amax(Z) - amin(Z)) * 0.02, # type: ignore
amax(Z) + (amax(Z) - amin(Z)) * 0.02 # type: ignore
)
else:
ax.set_zlim(
amin(Z) * 0.97, amax(Z) * 1.02) # type: ignore
if log_scale:
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_zscale("log")
for spine in ax.spines.values():
spine.set_visible(False)
ax.xaxis.pane.set_alpha(0.3)
ax.yaxis.pane.set_alpha(0.3)
ax.zaxis.pane.set_alpha(0.3)
if nbins is not None:
ax.xaxis.set_major_locator(
MaxNLocator(nbins)
)
ax.yaxis.set_major_locator(
MaxNLocator(nbins)
)
fig.set_size_inches(*figsize)
with catch_warnings():
filterwarnings("ignore", message=".*Tight layout")
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def plot(X: In, Y: In, xlabel: str, ylabel: str,
filename: Union[Path, str], *, logscale: bool = False,
ylim: Optional[tuple[float, float]] = None,
yticks: bool = True, cycler: int = 0) -> None:
"""Create a simple 1D plot"""
X, Y = fix_inputs(X, Y) # type: ignore
if not check_inputs(
X, Y, xlabel, ylabel):
return
if len(X) <= 1 or len(Y) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
if cycler > 0:
for _ in range(cycler):
plt.plot([], [])
plt.plot(X, Y, linestyle="-")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if logscale:
plt.xscale("log")
plt.yscale("log")
if ylim is None:
plt.ylim(min(Y) * 0.97, max(Y) * 1.02)
elif ylim is None:
plt.ylim(
min(Y) - (max(Y) - min(Y)) * 0.02,
max(Y) + (max(Y) - min(Y)) * 0.02
)
if ylim is not None:
plt.ylim(*ylim)
plt.xlim(min(X), max(X))
if not yticks:
plt.yticks([])
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def two_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
xlabel: str, ylabel: str,
filename: Union[Path, str], *,
logscale: bool = False, cycle: int = 0,
color: tuple[int, int] = (0, 1),
outer: bool = False) -> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot and a single y-axis.
Keyword arguments:
cycle -- skip this many colours in the colour-wheel before plotting
color -- use these indeces in the colour-wheel when creating a plot
outer -- use the outer limits on the x-axis rather than the inner limit
"""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
if not (
check_inputs(x1, y1, xlabel, label1)
or check_inputs(x2, y2, xlabel, label2)):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
if cycle > 0:
color = (color[0] + cycle, color[1] + cycle)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
plt.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
plt.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
min_ = min(min(y1), min(y2))
max_ = max(max(y1), max(y2))
if not logscale:
plt.ylim(
min_ - (max_ - min_) * 0.02,
max_ + (max_ - min_) * 0.02
)
else:
plt.xscale("log")
plt.yscale("log")
plt.ylim(
min_ * 0.97, max_ * 1.02)
if outer:
plt.xlim(min(min(x1), min(x2)),
max(max(x1), max(x2)))
else:
plt.xlim(max(min(x1), min(x2)),
min(max(x1), max(x2)))
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def three_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
x3: In, y3: In, label3: str,
xlabel: str, ylabel: str,
filename: Union[Path, str], *,
logscale: bool = False,
xmin: Optional[float] = None,
xmax: Optional[float] = None) -> None:
"""Create a simple 1D plot with three different graphs inside of a single
plot and a single y-axis."""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
x3, y3 = fix_inputs(x3, y3) # type: ignore
if not (
check_inputs(x1, y1, xlabel, label1)
or check_inputs(x2, y3, xlabel, label1)
or check_inputs(x3, y3, xlabel, label3)):
return
if any(len(x) <= 1 for x in (x1, x2, y1, y2, x3, y3)):
raise ValueError(
f"The data for plot {filename} contains empty rows!")
plt.plot(x1, y1, label=label1)
plt.plot(x2, y2, label=label2, linestyle="dashed")
plt.plot(x3, y3, label=label3, linestyle="dotted")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
min_ = min(min(y1), min(y2), min(y3))
max_ = max(max(y1), max(y2), max(y3))
if not logscale:
plt.ylim(
min_ - (max_ - min_) * 0.02,
max_ + (max_ - min_) * 0.02
)
else:
plt.xscale("log")
plt.yscale("log")
plt.ylim(
min_ * 0.97, max_ * 1.02)
if xmin is not None and xmax is not None:
plt.xlim(xmin, xmax)
else:
plt.xlim(min(x1), max(x1))
plt.legend()
plt.tight_layout()
plt.savefig(filename)
plt.close()
@apply_styles
def two_axis_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
xlabel: str, ylabel: str,
ylabel2: str,
filename: Union[Path, str], *,
ticks: Optional[tuple[list[float], list[str]]] = None,
xlim: Optional[tuple[float, float]] = None,
color: tuple[int, int] = (0, 1))\
-> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot with two y-axis.
The variable "ticks" sets costum y-ticks on the second y-axis. The first
argument gives the position of the ticks and the second argument gives the
values to be shown.
Color selects the indeces of the chosen color-wheel, which should be taken
for the different plots. The default is (1,2)."""
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
if not check_inputs(
y1, y2, label1, label2):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
fig = plt.figure()
ax1 = fig.add_subplot(111)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
# first plot
lines = ax1.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_ylim(
min(y1) - (max(y1) - min(y1)) * 0.02,
max(y1) + (max(y1) - min(y1)) * 0.02
)
# second plot
ax2 = ax1.twinx()
lines += ax2.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
ax2.set_ylabel(ylabel2)
ax2.set_ylim(
min(y2) - (max(y2) - min(y2)) * 0.02,
max(y2) + (max(y2) - min(y2)) * 0.02
)
# general settings
if xlim is None:
plt.xlim(min(x1), max(x1))
else:
plt.xlim(*xlim)
labels = [line.get_label() for line in lines]
plt.legend(lines, labels)
# ticks
if ticks is not None:
ax2.set_yticks(ticks[0])
ax2.set_yticklabels(ticks[1])
plt.tight_layout()
plt.savefig(filename)
plt.close()
def make_invisible(ax: plt.Axes) -> None:
"""Make all patch spines invisible."""
ax.set_frame_on(True)
ax.patch.set_visible(False)
for spine in ax.spines.values():
spine.set_visible(False)
@apply_styles
def three_axis_plots(x1: In, y1: In, label1: str,
x2: In, y2: In, label2: str,
x3: In, y3: In, label3: str,
xlabel: str, ylabel: str,
ylabel2: str, ylabel3: str,
filename: Union[Path, str], *,
ticks: Optional[tuple[list[float], list[str]]] = None,
xlim: Optional[tuple[float, float]] = None,
color: tuple[int, int, int] = (0, 1, 2),
legend: bool = True)\
-> None:
"""Create a simple 1D plot with two different graphs inside of a single
plot with two y-axis.
The variable "ticks" sets costum y-ticks on the second y-axis. The first
argument gives the position of the ticks and the second argument gives the
values to be shown.
Color selects the indeces of the chosen color-wheel, which should be taken
for the different plots. The default is (1,2)."""
# pylint: disable=R0915
x1, y1 = fix_inputs(x1, y1) # type: ignore
x2, y2 = fix_inputs(x2, y2) # type: ignore
x3, y3 = fix_inputs(x3, y3) # type: ignore
if not check_inputs(
y1, y2, label1, label2):
return
if not check_inputs(
x3, y3, xlabel, label3):
return
if len(x1) <= 1 or len(y1) <= 1 or len(y2) <= 1 or len(x2) <= 1:
raise ValueError(
f"The data for plot {filename} contains empty rows!")
assert len(color) == 3
fig, ax1 = plt.subplots()
fig.subplots_adjust(right=0.75)
# access colour
prop_cycle = plt.rcParams["axes.prop_cycle"]
try:
linestyle = prop_cycle.by_key()["linestyle"]
except KeyError:
linestyle = rwth_cycle.by_key()["linestyle"]
colors = prop_cycle.by_key()["color"]
if max(color) >= len(colors):
colors += colors
linestyle += linestyle
# first plot
lines = ax1.plot(x1, y1, label=label1,
color=colors[color[0]],
linestyle=linestyle[0])
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
ax1.set_ylim(
min(y1) - (max(y1) - min(y1)) * 0.02,
max(y1) + (max(y1) - min(y1)) * 0.02
)
ax1.yaxis.label.set_color(colors[color[0]])
ax1.tick_params(axis="y", colors=colors[color[0]])
# second plot
ax2 = ax1.twinx()
lines += ax2.plot(x2, y2, label=label2,
color=colors[color[1]],
linestyle=linestyle[1])
ax2.set_ylabel(ylabel2)
ax2.set_ylim(
min(y2) - (max(y2) - min(y2)) * 0.02,
max(y2) + (max(y2) - min(y2)) * 0.02
)
ax2.yaxis.label.set_color(colors[color[1]])
ax2.tick_params(axis="y", colors=colors[color[1]])
# third plot
ax3 = ax1.twinx()
make_invisible(ax3)
ax3.spines["right"].set_position(("axes", 1.25))
ax3.spines["right"].set_visible(True)
lines += ax3.plot(x3, y3, label=label3,
color=colors[color[2]],
linestyle=linestyle[2])
ax3.set_ylabel(ylabel3)
ax3.set_ylim(
min(y3) - (max(y3) - min(y3)) * 0.02,
max(y3) + (max(y3) - min(y3)) * 0.02
)
ax3.yaxis.label.set_color(colors[color[2]])
ax3.tick_params(axis="y", colors=colors[color[2]])
# general settings
if xlim is None:
plt.xlim(min(x1), max(x1))
else:
plt.xlim(*xlim)
labels = [line.get_label() for line in lines]
if legend:
plt.legend(lines, labels)
# ticks
if ticks is not None:
ax2.set_yticks(ticks[0])
ax2.set_yticklabels(ticks[1])
plt.tight_layout()
plt.savefig(filename)
plt.close() | 0.945951 | 0.587795 |
from __future__ import annotations
import csv
import locale
from contextlib import contextmanager
from copy import copy, deepcopy
from functools import wraps
from typing import (
Generator, Optional, Union, Callable, Any, overload)
from pathlib import Path
from warnings import warn, catch_warnings, simplefilter
from textwrap import dedent
import mpl_toolkits
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pyplot import Axes
from matplotlib import colors
from cycler import cycler
import numpy as np
from .utilities import translate
from .types_ import Vector
mpl.use("Agg")
plt.rcParams["axes.unicode_minus"] = False
SPINE_COLOR = "black"
FIGSIZE = (3.15, 2.35)
FIGSIZE_SLIM = (3.15, 2.1)
FIGSIZE_SMALL = (2.2, 2.1)
_savefig = copy(plt.savefig) # backup the old save-function
def linestyles() -> Generator[str, None, None]:
"""get the line-stiles as an iterator"""
yield "-"
yield "dotted"
yield "--"
yield "-."
rwth_colorlist: list[tuple[int, int, int]] = [(0, 84, 159), (246, 168, 0),
(161, 16, 53), (0, 97, 101)]
rwth_cmap = colors.ListedColormap(rwth_colorlist, name="rwth_list")
mpl.colormaps.register(rwth_cmap)
rwth_hex_colors = ["#00549F", "#F6A800", "#A11035", "#006165",
"#57AB27", "#E30066"]
rwth_cycle = (
cycler(color=rwth_hex_colors)
+ cycler(linestyle=["-", "--", "-.", "dotted",
(0, (3, 1, 1, 1, 1, 1)),
(0, (3, 5, 1, 5))]))
rwth_gradient: dict[str, tuple[tuple[float, float, float],
tuple[float, float, float]]] = {
"red": ((0.0, 0.0, 0.0), (1.0, 142 / 255, 142 / 255)),
"green": ((0.0, 84 / 255.0, 84 / 255), (1.0, 186 / 255, 186 / 255)),
"blue": ((0.0, 159 / 255, 159 / 255), (1.0, 229 / 255, 229 / 255)),
}
def make_colormap(seq: list[tuple[tuple[Optional[float], ...],
float,
tuple[Optional[float], ...]]],
name: str = "rwth_gradient")\
-> colors.LinearSegmentedColormap:
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
cdict: dict[str, list[tuple[float,
Optional[float],
Optional[float]
]
]] =\
{"red": [], "green": [], "blue": []}
for item in seq:
red_1, green_1, blue_1 = item[0]
red_2, green_2, blue_2 = item[2]
cdict["red"].append((item[1], red_1, red_2))
cdict["green"].append((item[1], green_1, green_2))
cdict["blue"].append((item[1], blue_1, blue_2))
return colors.LinearSegmentedColormap(name, cdict)
def partial_rgb(*x: float) -> tuple[float, ...]:
"""return the rgb value as a fraction of 1"""
return tuple(v / 255.0 for v in x)
hks_44 = partial_rgb(0.0, 84.0, 159.0)
hks_44_75 = partial_rgb(64.0, 127.0, 183.0)
rwth_orange = partial_rgb(246.0, 168.0, 0.0)
rwth_orange_75 = partial_rgb(250.0, 190.0, 80.0)
rwth_gelb = partial_rgb(255.0, 237.0, 0.0)
rwth_magenta = partial_rgb(227.0, 0.0, 102.0)
rwth_bordeux = partial_rgb(161.0, 16.0, 53.0)
rwth_gradient_map = make_colormap(
[
((None, None, None), 0., hks_44),
(hks_44_75, 0.33, hks_44_75),
(rwth_orange_75, 0.66, rwth_orange),
(rwth_bordeux, 1., (None, None, None))
]
)
mpl.colormaps.register(rwth_gradient_map)
def _germanify(ax: Axes, reverse: bool = False) -> None:
"""
translate a figure from english to german.
The direction can be reversed, if reverse it set to True
Use the decorator instead
"""
for axi in ax.figure.axes:
try:
axi.ticklabel_format(
useLocale=True)
except AttributeError:
pass
items = [
axi.xaxis.label,
axi.yaxis.label,
*axi.get_xticklabels(),
*axi.get_yticklabels(),
]
try:
if axi.zaxis is not None:
items.append(axi.zaxis.label)
items += [*axi.get_zticklabels()]
except AttributeError:
pass
if axi.get_legend():
items += [*axi.get_legend().texts]
for item in items:
item.set_text(translate(item.get_text(),
reverse=reverse))
try:
plt.tight_layout()
except IndexError:
pass
@contextmanager
def germanify(ax: Axes,
reverse: bool = False) -> Generator[None, None, None]:
"""
Translate the plot to german and reverse
the translation in the other direction. If reverse is set to false, no
reversal of the translation will be applied.
"""
old_locale = locale.getlocale(locale.LC_NUMERIC)
try:
try:
locale.setlocale(locale.LC_ALL, "de_DE")
locale.setlocale(locale.LC_NUMERIC, "de_DE")
except locale.Error:
# locale not available
pass
plt.rcParams["axes.formatter.use_locale"] = True
_germanify(ax)
yield
except Exception as e:
print("Translation of the plot has failed")
print(e)
raise
finally:
try:
locale.setlocale(locale.LC_ALL, old_locale)
locale.setlocale(locale.LC_ALL, old_locale)
except locale.Error:
pass
plt.rcParams["axes.formatter.use_locale"] = False
if reverse:
_germanify(ax, reverse=True)
def data_plot(filename: Union[str, Path]) -> None:
"""
Write the data, which is to be plotted, into a txt-file in csv-format.
"""
# pylint: disable=W0613
if isinstance(filename, str):
file_ = Path(filename)
else:
file_ = filename
file_ = file_.parent / (file_.stem + ".csv")
ax = plt.gca()
try:
with open(file_, "w", encoding="utf-8", newline="") as data_file:
writer = csv.writer(data_file)
for line in ax.get_lines():
writer.writerow(
[line.get_label(), ax.get_ylabel(), ax.get_xlabel()])
writer.writerow(line.get_xdata())
writer.writerow(line.get_ydata())
except PermissionError as e:
print(f"Data-file could not be written for {filename}.")
print(e)
def read_data_plot(filename: Union[str, Path])\
-> dict[str, tuple[Vector, Vector]]:
"""Read and parse the csv-data-files, which have been generated by the
'data_plot'-function."""
data: dict[str, tuple[Vector, Vector]] = {}
with open(filename, "r", newline="", encoding="utf-8") as file_:
reader = csv.reader(file_)
title: str
x_data: Vector
for i, row in enumerate(reader):
if i % 3 == 0:
title = row[0]
elif i % 3 == 1:
x_data = np.array(row, dtype=float)
else:
y_data: Vector
y_data = np.array(row, dtype=float)
data[title] = (x_data, y_data)
return data
@contextmanager
def presentation_figure(figsize: tuple[float, float] = (4, 3)) ->\
Generator[Axes, None, None]:
"""context manager to open an close the file.
default seaborn-like plot"""
fig, ax = plt.subplots(figsize=figsize)
mpl.rcParams["text.latex.preamble"] = [
r"\usepackage{helvet}", # set the normal font here
r"\usepackage{sansmath}", # load up the sansmath so that math
# -> helvet
r"\sansmath", # <- tricky! -- gotta actually tell tex to use!
]
mpl.rc("font", family="sans-serif")
mpl.rc("text", usetex=False)
font = {"size": 30}
mpl.rc("font", **font)
plt.set_cmap("rwth_list")
try:
yield ax
except Exception as e:
print("creation of plot failed")
print(e)
raise
finally:
plt.close(fig)
plt.close("all")
mpl.rcParams.update(mpl.rcParamsDefault)
plt.style.use("default")
old_save = plt.savefig
def alternative_save(
filename: Path,
dpi: Optional[int] = None,
bbox_inches: Optional[Union[str, tuple[float, float]]] = None,
figsize: tuple[float, float] = FIGSIZE,
subfolder: str = "small") -> None:
"""
Create additional saves of the given figsize and save these new figures
into subfolder of given names. This function can be used to create
additional plots of different sizes without a large overhead.
"""
fig = deepcopy(plt.gcf())
fig.set_size_inches(*figsize)
with catch_warnings(record=True) as warning:
simplefilter("always")
fig.tight_layout()
if warning:
if issubclass(warning[-1].category, UserWarning):
plt.close(fig)
return
folder = filename.parent / subfolder
folder.mkdir(exist_ok=True)
try:
fig.savefig(
folder
/ filename.name, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
fig.savefig(
folder
/ (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
plt.close(fig)
def try_save(filename: Path,
dpi: Optional[int] = None,
bbox_inches: Optional[Union[str, tuple[float, float]]] = None, *,
small: bool = False,
slim: bool = False) -> None:
"""Try to save the current figure to the given path, if it is not possible,
try to save it under a different name.
If small is set to true, also create
a smaller version of the given plot.
If slim is set to true, a slightly slimmer version
of the plot is created."""
def alternative_save(
figsize: tuple[float, float] = FIGSIZE,
subfolder: str = "small") -> None:
"""
Create additional saves of the given figsize and save these new figures
into subfolder of given names. This function can be used to create
additional plots of different sizes without a large overhead.
"""
fig = deepcopy(plt.gcf())
fig.set_size_inches(*figsize)
with catch_warnings(record=True) as warning:
simplefilter("always")
fig.tight_layout()
if warning:
if issubclass(warning[-1].category, UserWarning):
plt.close(fig)
return
folder = filename.parent / subfolder
folder.mkdir(exist_ok=True)
try:
fig.savefig(
folder
/ filename.name, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
fig.savefig(
folder
/ (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
plt.close(fig)
try:
old_save(filename, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
old_save(filename.parent / (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
if small:
alternative_save(
figsize=FIGSIZE_SMALL,
subfolder="small")
if slim:
alternative_save(
figsize=FIGSIZE_SLIM,
subfolder="slim")
def new_save_simple(subfolder: Union[str, Path] = "", suffix: str = "", *,
german: bool = False, png: bool = True,
pdf: bool = True, small: bool = False,
slim: bool = False)\
-> Callable[..., None]:
"""
Return a new save function, which saves the file to a new given name in pdf
format, and also creates a png version.
If the argument "german" is set to true, also create German language
version of the plots.
"""
@wraps(old_save)
def savefig_(filename: Union[Path, str],
dpi: Optional[int] = None,
bbox_inches: Optional[
Union[tuple[float, float], str]] = None) -> None:
"""Save the plot to this location as pdf and png."""
if isinstance(filename, str):
filename = Path(filename)
if filename.parent == Path("."):
warn(
f"The filename {filename} in 'savefig' does "
f"not contain a subfolder (i.e. 'subfolder/{filename})! "
"Many files might be created onto the top level.")
if subfolder:
(filename.parent / subfolder).mkdir(exist_ok=True)
new_path_pdf = filename.parent / subfolder / (
filename.stem + suffix + ".pdf")
new_path_png = filename.parent / subfolder / (
filename.stem + suffix + ".png")
else:
new_path_pdf = filename.parent / (
filename.stem + suffix + ".pdf")
new_path_png = filename.parent / (
filename.stem + suffix + ".png")
# save the data
data_path = filename.parent / (
filename.stem + ".dat")
if not data_path.exists():
data_plot(data_path)
try:
plt.tight_layout()
except IndexError:
pass
# save the figure
if pdf:
try_save(new_path_pdf, bbox_inches=bbox_inches,
small=small, slim=slim)
if png:
try_save(new_path_png, bbox_inches=bbox_inches,
dpi=dpi, small=small, slim=slim)
if german:
with germanify(plt.gca()):
if pdf:
try_save(
new_path_pdf.parent
/ (new_path_pdf.stem + "_german.pdf"),
bbox_inches=bbox_inches, small=small,
slim=slim)
if png:
try_save(
new_path_png.parent
/ (new_path_png.stem + "_german.png"),
bbox_inches=bbox_inches, dpi=dpi, small=small,
slim=slim)
return savefig_
def presentation_settings() -> None:
"""Change the settings of rcParams for presentations."""
# increase size
fig = plt.gcf()
fig.set_size_inches(8, 6)
mpl.rcParams["font.size"] = 24
mpl.rcParams["axes.titlesize"] = 24
mpl.rcParams["axes.labelsize"] = 24
# mpl.rcParams["axes.location"] = "left"
mpl.rcParams["lines.linewidth"] = 3
mpl.rcParams["lines.markersize"] = 10
mpl.rcParams["xtick.labelsize"] = 18
mpl.rcParams["ytick.labelsize"] = 18
mpl.rcParams["figure.figsize"] = (10, 6)
mpl.rcParams["figure.titlesize"] = 24
mpl.rcParams["font.family"] = "sans-serif"
def set_rwth_colors(three_d: bool = False) -> None:
"""Apply the RWTH CD colors to matplotlib."""
mpl.rcParams["text.usetex"] = False
mpl.rcParams["axes.prop_cycle"] = rwth_cycle
if three_d:
plt.set_cmap("rwth_gradient")
else:
plt.set_cmap("rwth_list")
def set_serif() -> None:
"""Set the plot to use a style with serifs."""
mpl.rcParams["font.family"] = "serif"
mpl.rcParams["font.serif"] = [
"cmr10", "stix", "Times New Roman"]
mpl.rcParams["mathtext.fontset"] = "cm"
def set_sans_serif() -> None:
"""Set matplotlib to use a sans-serif font."""
mpl.rcParams["font.family"] = "sans-serif"
mpl.rcParams["font.sans-serif"] = [
"Arial", "Helvetica", "DejaVu Sans"]
class ThreeDPlotException(Exception):
"""This exception is called when a 3D plot is drawn. This is used to exit
the plotting function with the science-style."""
class FallBackException(Exception):
"""This is excaption is thrown when the fallback-style is selected.
Only for debug purposes."""
def check_3d(three_d: bool) -> None:
"""This function checks if the current plot is a 3d plot. In that case, an
exception is thrown, which can be used to stop the creation of the default
plot."""
if three_d:
raise ThreeDPlotException
if isinstance(plt.gca(), mpl_toolkits.mplot3d.axes3d.Axes3D):
raise ThreeDPlotException
PlotFunction = Callable[..., None]
@overload
def apply_styles(plot_function: PlotFunction, *,
three_d: bool = False,
_fallback: bool = False) -> PlotFunction:
...
@overload
def apply_styles(plot_function: None, *, three_d: bool = False,
_fallback: bool = False)\
-> Callable[[PlotFunction], PlotFunction]:
...
@overload
def apply_styles(*, three_d: bool = False,
_fallback: bool = False)\
-> Callable[[PlotFunction], PlotFunction]:
...
def apply_styles(plot_function: Optional[PlotFunction] = None, *,
three_d: bool = False, _fallback: bool = False)\
-> Union[Callable[[PlotFunction], PlotFunction], PlotFunction]:
"""
Apply the newly defined styles to a function, which creates a plot.
The new plots are saved into different subdirectories and multiple
variants of every plot will be created.
Arguments
--------
three_d: Create a use this option for 3D-plots
fallback: switch directly to the fallback-style (for debug)
"""
# pylint: disable=too-many-statements
def _decorator(_plot_function: PlotFunction) -> PlotFunction:
"""This is the actual decorator. Thus, the outer function
'apply_styles' is actually a decorator-factory."""
@wraps(_plot_function)
def new_plot_function(*args: Any, **kwargs: Any) -> None:
"""
New plotting function, with applied styles.
"""
# default plot
plt.set_cmap("rwth_list")
plt.savefig = new_save_simple(png=False)
_plot_function(*args, **kwargs)
errors = (OSError, FileNotFoundError, ThreeDPlotException,
FallBackException)
def journal() -> None:
"""Create a plot for journals."""
set_rwth_colors(three_d)
set_serif()
plt.savefig = new_save_simple("journal", png=False,
small=not three_d)
_plot_function(*args, **kwargs)
plt.close("all")
def sans_serif() -> None:
"""
Create a plot for journals with sans-serif-fonts.
"""
set_rwth_colors(three_d)
set_sans_serif()
plt.savefig = new_save_simple("sans_serif", german=True,
small=not three_d)
_plot_function(*args, **kwargs)
plt.close("all")
def grayscale() -> None:
"""
Create a plot in grayscales for disserations.
"""
mpl.rcParams["text.usetex"] = False
set_serif()
if three_d:
plt.set_cmap("Greys")
new_kwargs = copy(kwargs)
new_kwargs["colorscheme"] = "Greys"
else:
new_kwargs = kwargs
plt.savefig = new_save_simple("grayscale", png=False,
small=not three_d,
slim=not three_d)
_plot_function(*args, **new_kwargs)
plt.close("all")
def presentation() -> None:
"""
Create a plot for presentations.
"""
if three_d:
new_kwargs = copy(kwargs)
new_kwargs["figsize"] = (9, 7)
new_kwargs["labelpad"] = 20
new_kwargs["nbins"] = 5
else:
new_kwargs = kwargs
set_rwth_colors(three_d)
presentation_settings()
set_sans_serif()
plt.savefig = new_save_simple("presentation",
german=True, pdf=False)
_plot_function(*args, **new_kwargs)
plt.close("all")
try:
plt.close("all")
check_3d(three_d)
if _fallback:
raise FallBackException
plt.close("all")
# journal
with plt.style.context(["science", "ieee"]):
journal()
# sans-serif
with plt.style.context(["science", "ieee", "nature"]):
sans_serif()
# grayscale
with plt.style.context(["science", "ieee", "grayscale"]):
grayscale()
# presentation
with plt.style.context(["science", "ieee"]):
presentation()
except errors:
if not three_d:
warn(dedent(""""Could not found style 'science'.
The package was probably installed incorrectly.
Using a fallback-style."""), ImportWarning)
plt.close("all")
# journal
with plt.style.context("fast"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
journal()
# sans-serif
with plt.style.context("fast"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
sans_serif()
# grayscale
with plt.style.context("grayscale"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
grayscale()
# presentation
with plt.style.context("fast"):
presentation()
plt.savefig = old_save
return new_plot_function
if plot_function is not None:
return _decorator(plot_function)
return _decorator | scientific-plots | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/plot_settings.py | plot_settings.py | from __future__ import annotations
import csv
import locale
from contextlib import contextmanager
from copy import copy, deepcopy
from functools import wraps
from typing import (
Generator, Optional, Union, Callable, Any, overload)
from pathlib import Path
from warnings import warn, catch_warnings, simplefilter
from textwrap import dedent
import mpl_toolkits
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pyplot import Axes
from matplotlib import colors
from cycler import cycler
import numpy as np
from .utilities import translate
from .types_ import Vector
mpl.use("Agg")
plt.rcParams["axes.unicode_minus"] = False
SPINE_COLOR = "black"
FIGSIZE = (3.15, 2.35)
FIGSIZE_SLIM = (3.15, 2.1)
FIGSIZE_SMALL = (2.2, 2.1)
_savefig = copy(plt.savefig) # backup the old save-function
def linestyles() -> Generator[str, None, None]:
"""get the line-stiles as an iterator"""
yield "-"
yield "dotted"
yield "--"
yield "-."
rwth_colorlist: list[tuple[int, int, int]] = [(0, 84, 159), (246, 168, 0),
(161, 16, 53), (0, 97, 101)]
rwth_cmap = colors.ListedColormap(rwth_colorlist, name="rwth_list")
mpl.colormaps.register(rwth_cmap)
rwth_hex_colors = ["#00549F", "#F6A800", "#A11035", "#006165",
"#57AB27", "#E30066"]
rwth_cycle = (
cycler(color=rwth_hex_colors)
+ cycler(linestyle=["-", "--", "-.", "dotted",
(0, (3, 1, 1, 1, 1, 1)),
(0, (3, 5, 1, 5))]))
rwth_gradient: dict[str, tuple[tuple[float, float, float],
tuple[float, float, float]]] = {
"red": ((0.0, 0.0, 0.0), (1.0, 142 / 255, 142 / 255)),
"green": ((0.0, 84 / 255.0, 84 / 255), (1.0, 186 / 255, 186 / 255)),
"blue": ((0.0, 159 / 255, 159 / 255), (1.0, 229 / 255, 229 / 255)),
}
def make_colormap(seq: list[tuple[tuple[Optional[float], ...],
float,
tuple[Optional[float], ...]]],
name: str = "rwth_gradient")\
-> colors.LinearSegmentedColormap:
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
cdict: dict[str, list[tuple[float,
Optional[float],
Optional[float]
]
]] =\
{"red": [], "green": [], "blue": []}
for item in seq:
red_1, green_1, blue_1 = item[0]
red_2, green_2, blue_2 = item[2]
cdict["red"].append((item[1], red_1, red_2))
cdict["green"].append((item[1], green_1, green_2))
cdict["blue"].append((item[1], blue_1, blue_2))
return colors.LinearSegmentedColormap(name, cdict)
def partial_rgb(*x: float) -> tuple[float, ...]:
"""return the rgb value as a fraction of 1"""
return tuple(v / 255.0 for v in x)
hks_44 = partial_rgb(0.0, 84.0, 159.0)
hks_44_75 = partial_rgb(64.0, 127.0, 183.0)
rwth_orange = partial_rgb(246.0, 168.0, 0.0)
rwth_orange_75 = partial_rgb(250.0, 190.0, 80.0)
rwth_gelb = partial_rgb(255.0, 237.0, 0.0)
rwth_magenta = partial_rgb(227.0, 0.0, 102.0)
rwth_bordeux = partial_rgb(161.0, 16.0, 53.0)
rwth_gradient_map = make_colormap(
[
((None, None, None), 0., hks_44),
(hks_44_75, 0.33, hks_44_75),
(rwth_orange_75, 0.66, rwth_orange),
(rwth_bordeux, 1., (None, None, None))
]
)
mpl.colormaps.register(rwth_gradient_map)
def _germanify(ax: Axes, reverse: bool = False) -> None:
"""
translate a figure from english to german.
The direction can be reversed, if reverse it set to True
Use the decorator instead
"""
for axi in ax.figure.axes:
try:
axi.ticklabel_format(
useLocale=True)
except AttributeError:
pass
items = [
axi.xaxis.label,
axi.yaxis.label,
*axi.get_xticklabels(),
*axi.get_yticklabels(),
]
try:
if axi.zaxis is not None:
items.append(axi.zaxis.label)
items += [*axi.get_zticklabels()]
except AttributeError:
pass
if axi.get_legend():
items += [*axi.get_legend().texts]
for item in items:
item.set_text(translate(item.get_text(),
reverse=reverse))
try:
plt.tight_layout()
except IndexError:
pass
@contextmanager
def germanify(ax: Axes,
reverse: bool = False) -> Generator[None, None, None]:
"""
Translate the plot to german and reverse
the translation in the other direction. If reverse is set to false, no
reversal of the translation will be applied.
"""
old_locale = locale.getlocale(locale.LC_NUMERIC)
try:
try:
locale.setlocale(locale.LC_ALL, "de_DE")
locale.setlocale(locale.LC_NUMERIC, "de_DE")
except locale.Error:
# locale not available
pass
plt.rcParams["axes.formatter.use_locale"] = True
_germanify(ax)
yield
except Exception as e:
print("Translation of the plot has failed")
print(e)
raise
finally:
try:
locale.setlocale(locale.LC_ALL, old_locale)
locale.setlocale(locale.LC_ALL, old_locale)
except locale.Error:
pass
plt.rcParams["axes.formatter.use_locale"] = False
if reverse:
_germanify(ax, reverse=True)
def data_plot(filename: Union[str, Path]) -> None:
"""
Write the data, which is to be plotted, into a txt-file in csv-format.
"""
# pylint: disable=W0613
if isinstance(filename, str):
file_ = Path(filename)
else:
file_ = filename
file_ = file_.parent / (file_.stem + ".csv")
ax = plt.gca()
try:
with open(file_, "w", encoding="utf-8", newline="") as data_file:
writer = csv.writer(data_file)
for line in ax.get_lines():
writer.writerow(
[line.get_label(), ax.get_ylabel(), ax.get_xlabel()])
writer.writerow(line.get_xdata())
writer.writerow(line.get_ydata())
except PermissionError as e:
print(f"Data-file could not be written for {filename}.")
print(e)
def read_data_plot(filename: Union[str, Path])\
-> dict[str, tuple[Vector, Vector]]:
"""Read and parse the csv-data-files, which have been generated by the
'data_plot'-function."""
data: dict[str, tuple[Vector, Vector]] = {}
with open(filename, "r", newline="", encoding="utf-8") as file_:
reader = csv.reader(file_)
title: str
x_data: Vector
for i, row in enumerate(reader):
if i % 3 == 0:
title = row[0]
elif i % 3 == 1:
x_data = np.array(row, dtype=float)
else:
y_data: Vector
y_data = np.array(row, dtype=float)
data[title] = (x_data, y_data)
return data
@contextmanager
def presentation_figure(figsize: tuple[float, float] = (4, 3)) ->\
Generator[Axes, None, None]:
"""context manager to open an close the file.
default seaborn-like plot"""
fig, ax = plt.subplots(figsize=figsize)
mpl.rcParams["text.latex.preamble"] = [
r"\usepackage{helvet}", # set the normal font here
r"\usepackage{sansmath}", # load up the sansmath so that math
# -> helvet
r"\sansmath", # <- tricky! -- gotta actually tell tex to use!
]
mpl.rc("font", family="sans-serif")
mpl.rc("text", usetex=False)
font = {"size": 30}
mpl.rc("font", **font)
plt.set_cmap("rwth_list")
try:
yield ax
except Exception as e:
print("creation of plot failed")
print(e)
raise
finally:
plt.close(fig)
plt.close("all")
mpl.rcParams.update(mpl.rcParamsDefault)
plt.style.use("default")
old_save = plt.savefig
def alternative_save(
filename: Path,
dpi: Optional[int] = None,
bbox_inches: Optional[Union[str, tuple[float, float]]] = None,
figsize: tuple[float, float] = FIGSIZE,
subfolder: str = "small") -> None:
"""
Create additional saves of the given figsize and save these new figures
into subfolder of given names. This function can be used to create
additional plots of different sizes without a large overhead.
"""
fig = deepcopy(plt.gcf())
fig.set_size_inches(*figsize)
with catch_warnings(record=True) as warning:
simplefilter("always")
fig.tight_layout()
if warning:
if issubclass(warning[-1].category, UserWarning):
plt.close(fig)
return
folder = filename.parent / subfolder
folder.mkdir(exist_ok=True)
try:
fig.savefig(
folder
/ filename.name, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
fig.savefig(
folder
/ (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
plt.close(fig)
def try_save(filename: Path,
dpi: Optional[int] = None,
bbox_inches: Optional[Union[str, tuple[float, float]]] = None, *,
small: bool = False,
slim: bool = False) -> None:
"""Try to save the current figure to the given path, if it is not possible,
try to save it under a different name.
If small is set to true, also create
a smaller version of the given plot.
If slim is set to true, a slightly slimmer version
of the plot is created."""
def alternative_save(
figsize: tuple[float, float] = FIGSIZE,
subfolder: str = "small") -> None:
"""
Create additional saves of the given figsize and save these new figures
into subfolder of given names. This function can be used to create
additional plots of different sizes without a large overhead.
"""
fig = deepcopy(plt.gcf())
fig.set_size_inches(*figsize)
with catch_warnings(record=True) as warning:
simplefilter("always")
fig.tight_layout()
if warning:
if issubclass(warning[-1].category, UserWarning):
plt.close(fig)
return
folder = filename.parent / subfolder
folder.mkdir(exist_ok=True)
try:
fig.savefig(
folder
/ filename.name, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
fig.savefig(
folder
/ (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
plt.close(fig)
try:
old_save(filename, dpi=dpi, bbox_inches=bbox_inches)
except PermissionError:
old_save(filename.parent / (filename.stem + "_" + filename.suffix),
dpi=dpi, bbox_inches=bbox_inches)
if small:
alternative_save(
figsize=FIGSIZE_SMALL,
subfolder="small")
if slim:
alternative_save(
figsize=FIGSIZE_SLIM,
subfolder="slim")
def new_save_simple(subfolder: Union[str, Path] = "", suffix: str = "", *,
german: bool = False, png: bool = True,
pdf: bool = True, small: bool = False,
slim: bool = False)\
-> Callable[..., None]:
"""
Return a new save function, which saves the file to a new given name in pdf
format, and also creates a png version.
If the argument "german" is set to true, also create German language
version of the plots.
"""
@wraps(old_save)
def savefig_(filename: Union[Path, str],
dpi: Optional[int] = None,
bbox_inches: Optional[
Union[tuple[float, float], str]] = None) -> None:
"""Save the plot to this location as pdf and png."""
if isinstance(filename, str):
filename = Path(filename)
if filename.parent == Path("."):
warn(
f"The filename {filename} in 'savefig' does "
f"not contain a subfolder (i.e. 'subfolder/{filename})! "
"Many files might be created onto the top level.")
if subfolder:
(filename.parent / subfolder).mkdir(exist_ok=True)
new_path_pdf = filename.parent / subfolder / (
filename.stem + suffix + ".pdf")
new_path_png = filename.parent / subfolder / (
filename.stem + suffix + ".png")
else:
new_path_pdf = filename.parent / (
filename.stem + suffix + ".pdf")
new_path_png = filename.parent / (
filename.stem + suffix + ".png")
# save the data
data_path = filename.parent / (
filename.stem + ".dat")
if not data_path.exists():
data_plot(data_path)
try:
plt.tight_layout()
except IndexError:
pass
# save the figure
if pdf:
try_save(new_path_pdf, bbox_inches=bbox_inches,
small=small, slim=slim)
if png:
try_save(new_path_png, bbox_inches=bbox_inches,
dpi=dpi, small=small, slim=slim)
if german:
with germanify(plt.gca()):
if pdf:
try_save(
new_path_pdf.parent
/ (new_path_pdf.stem + "_german.pdf"),
bbox_inches=bbox_inches, small=small,
slim=slim)
if png:
try_save(
new_path_png.parent
/ (new_path_png.stem + "_german.png"),
bbox_inches=bbox_inches, dpi=dpi, small=small,
slim=slim)
return savefig_
def presentation_settings() -> None:
"""Change the settings of rcParams for presentations."""
# increase size
fig = plt.gcf()
fig.set_size_inches(8, 6)
mpl.rcParams["font.size"] = 24
mpl.rcParams["axes.titlesize"] = 24
mpl.rcParams["axes.labelsize"] = 24
# mpl.rcParams["axes.location"] = "left"
mpl.rcParams["lines.linewidth"] = 3
mpl.rcParams["lines.markersize"] = 10
mpl.rcParams["xtick.labelsize"] = 18
mpl.rcParams["ytick.labelsize"] = 18
mpl.rcParams["figure.figsize"] = (10, 6)
mpl.rcParams["figure.titlesize"] = 24
mpl.rcParams["font.family"] = "sans-serif"
def set_rwth_colors(three_d: bool = False) -> None:
"""Apply the RWTH CD colors to matplotlib."""
mpl.rcParams["text.usetex"] = False
mpl.rcParams["axes.prop_cycle"] = rwth_cycle
if three_d:
plt.set_cmap("rwth_gradient")
else:
plt.set_cmap("rwth_list")
def set_serif() -> None:
"""Set the plot to use a style with serifs."""
mpl.rcParams["font.family"] = "serif"
mpl.rcParams["font.serif"] = [
"cmr10", "stix", "Times New Roman"]
mpl.rcParams["mathtext.fontset"] = "cm"
def set_sans_serif() -> None:
"""Set matplotlib to use a sans-serif font."""
mpl.rcParams["font.family"] = "sans-serif"
mpl.rcParams["font.sans-serif"] = [
"Arial", "Helvetica", "DejaVu Sans"]
class ThreeDPlotException(Exception):
"""This exception is called when a 3D plot is drawn. This is used to exit
the plotting function with the science-style."""
class FallBackException(Exception):
"""This is excaption is thrown when the fallback-style is selected.
Only for debug purposes."""
def check_3d(three_d: bool) -> None:
"""This function checks if the current plot is a 3d plot. In that case, an
exception is thrown, which can be used to stop the creation of the default
plot."""
if three_d:
raise ThreeDPlotException
if isinstance(plt.gca(), mpl_toolkits.mplot3d.axes3d.Axes3D):
raise ThreeDPlotException
PlotFunction = Callable[..., None]
@overload
def apply_styles(plot_function: PlotFunction, *,
three_d: bool = False,
_fallback: bool = False) -> PlotFunction:
...
@overload
def apply_styles(plot_function: None, *, three_d: bool = False,
_fallback: bool = False)\
-> Callable[[PlotFunction], PlotFunction]:
...
@overload
def apply_styles(*, three_d: bool = False,
_fallback: bool = False)\
-> Callable[[PlotFunction], PlotFunction]:
...
def apply_styles(plot_function: Optional[PlotFunction] = None, *,
three_d: bool = False, _fallback: bool = False)\
-> Union[Callable[[PlotFunction], PlotFunction], PlotFunction]:
"""
Apply the newly defined styles to a function, which creates a plot.
The new plots are saved into different subdirectories and multiple
variants of every plot will be created.
Arguments
--------
three_d: Create a use this option for 3D-plots
fallback: switch directly to the fallback-style (for debug)
"""
# pylint: disable=too-many-statements
def _decorator(_plot_function: PlotFunction) -> PlotFunction:
"""This is the actual decorator. Thus, the outer function
'apply_styles' is actually a decorator-factory."""
@wraps(_plot_function)
def new_plot_function(*args: Any, **kwargs: Any) -> None:
"""
New plotting function, with applied styles.
"""
# default plot
plt.set_cmap("rwth_list")
plt.savefig = new_save_simple(png=False)
_plot_function(*args, **kwargs)
errors = (OSError, FileNotFoundError, ThreeDPlotException,
FallBackException)
def journal() -> None:
"""Create a plot for journals."""
set_rwth_colors(three_d)
set_serif()
plt.savefig = new_save_simple("journal", png=False,
small=not three_d)
_plot_function(*args, **kwargs)
plt.close("all")
def sans_serif() -> None:
"""
Create a plot for journals with sans-serif-fonts.
"""
set_rwth_colors(three_d)
set_sans_serif()
plt.savefig = new_save_simple("sans_serif", german=True,
small=not three_d)
_plot_function(*args, **kwargs)
plt.close("all")
def grayscale() -> None:
"""
Create a plot in grayscales for disserations.
"""
mpl.rcParams["text.usetex"] = False
set_serif()
if three_d:
plt.set_cmap("Greys")
new_kwargs = copy(kwargs)
new_kwargs["colorscheme"] = "Greys"
else:
new_kwargs = kwargs
plt.savefig = new_save_simple("grayscale", png=False,
small=not three_d,
slim=not three_d)
_plot_function(*args, **new_kwargs)
plt.close("all")
def presentation() -> None:
"""
Create a plot for presentations.
"""
if three_d:
new_kwargs = copy(kwargs)
new_kwargs["figsize"] = (9, 7)
new_kwargs["labelpad"] = 20
new_kwargs["nbins"] = 5
else:
new_kwargs = kwargs
set_rwth_colors(three_d)
presentation_settings()
set_sans_serif()
plt.savefig = new_save_simple("presentation",
german=True, pdf=False)
_plot_function(*args, **new_kwargs)
plt.close("all")
try:
plt.close("all")
check_3d(three_d)
if _fallback:
raise FallBackException
plt.close("all")
# journal
with plt.style.context(["science", "ieee"]):
journal()
# sans-serif
with plt.style.context(["science", "ieee", "nature"]):
sans_serif()
# grayscale
with plt.style.context(["science", "ieee", "grayscale"]):
grayscale()
# presentation
with plt.style.context(["science", "ieee"]):
presentation()
except errors:
if not three_d:
warn(dedent(""""Could not found style 'science'.
The package was probably installed incorrectly.
Using a fallback-style."""), ImportWarning)
plt.close("all")
# journal
with plt.style.context("fast"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
journal()
# sans-serif
with plt.style.context("fast"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
sans_serif()
# grayscale
with plt.style.context("grayscale"):
if not three_d:
mpl.rcParams["figure.figsize"] = FIGSIZE
mpl.rcParams["font.size"] = 8
grayscale()
# presentation
with plt.style.context("fast"):
presentation()
plt.savefig = old_save
return new_plot_function
if plot_function is not None:
return _decorator(plot_function)
return _decorator | 0.744656 | 0.243817 |
from __future__ import print_function
import re
from functools import wraps
from subprocess import Popen, PIPE
from sys import __stdout__
from os import mkdir
from os.path import dirname, exists
from typing import Iterable, Optional, List, Callable, TypeVar, Union, Any
from pathlib import Path
from collections import OrderedDict
from urllib3.exceptions import MaxRetryError
from python_translator import Translator
def running_average(X: List[float], n: int) -> List[float]:
"""creates a running average of X with n entries in both dircetions"""
X_new = []
for i in range(n):
X_new += [sum(X[0: i + 1]) / (i + 1)]
for i in range(n, len(X) - n):
X_new += [sum(X[i - n: i + n + 1]) / (2 * n + 1)]
for i in range(len(X) - n, len(X)):
X_new += [sum(X[2 * i - len(X) + 1:]) / ((len(X) - (i + 1)) * 2 + 1)]
return X_new
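# Usage sketch (illustrative, kept as a comment so nothing runs on import):
# with n=1 the edges use one-sided, shrinking windows.
#     >>> running_average([1.0, 3.0, 2.0, 4.0], 1)
#     [1.0, 2.0, 3.0, 4.0]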
def quality_of_fit(X: List[float], Y: List[float]) -> float:
"""calculates the quality of a fit 'bestimmtheitsmass'"""
mean_x = sum(X) / len(X)
return sum(x ** 2 - mean_x ** 2 for x in X)\
/ sum(x ** 2 - mean_x ** 2 for x in Y)
Return = TypeVar("Return")
def print_redirect(f: Callable[..., Return]) -> Callable[..., Return]:
"""
    wraps print to print to both stdout and __stdout__.
    The reason for doing this is that abaqus replaces stdout and prints
    everything to abaqus; this way everything can be seen both in the
    command line and in abaqus.
"""
@wraps(f)
def inner_function(*args: Any, **argv: Any) -> Return:
        # first print to the (possibly redirected) stdout, e.g. the one
        # installed by abaqus
        f(*args, **argv)
        # then repeat the call on the original __stdout__; drop any
        # user-supplied 'file' argument so that it is not passed twice
        argv.pop("file", None)
        argv["file"] = __stdout__
        try:
            return f(*args, **argv)
        except TypeError:
            print("The argument 'file' is twice in a print statement")
            raise
return inner_function
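# Usage sketch (illustrative): rebind print inside an abaqus script so that
# every message appears both in the abaqus log and on the original terminal.
#     redirected_print = print_redirect(print)
#     redirected_print("starting the simulation")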
def filter_function(x: str) -> bool:
"""filter function in order to remove unused
arguments from this script before parsing"""
if ":" in x or "rwth" in x:
return False
analysed_item = x.replace("--", ".")
analysed_item = analysed_item.replace("-", "!")
if len(analysed_item) > 2\
and "." not in analysed_item\
and "!" in analysed_item:
return False
return True
def filter_argv(X: list[str]) -> list[str]:
"""removes unessecery entries from
argv which have been generated by abaqus"""
Y: list[str] = list(X)
for i, x in enumerate(X):
if not filter_function(x):
del Y[i]
if i < len(Y):
del Y[i + 1]
return Y
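# Usage sketch (illustrative): single-dash flags are dropped together with
# the value that follows them; '--options' and plain entries are kept.
#     >>> filter_argv(["-port", "1234", "--script", "run.py"])
#     ['--script', 'run.py']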
def dumb_plot(X: list[float], Y: list[float],
title: str = "title", log: bool = False) -> None:
"""plot X and Y using dumb gnuplot"""
try:
with Popen(["gnuplot"], stdin=PIPE) as gnuplot:
assert gnuplot, """Gnuplot could not be started."""
assert gnuplot.stdin, """Input to Gnuplot could not be found."""
gnuplot.stdin.write(bytes("set term dumb 79 25\n", "utf-8"))
if log:
gnuplot.stdin.write(bytes("set logscale xy\n", "utf-8"))
gnuplot.stdin.write(
bytes(
f"""plot '-' using 1:2 title '{title}'
with linespoints \n""",
"utf-8"
)
)
for x, y in zip(X, Y):
gnuplot.stdin.write(bytes(f"{x} {y}\n", "utf-8"))
gnuplot.stdin.write(bytes("e\n", "utf-8"))
gnuplot.stdin.flush()
except FileNotFoundError:
print("There is no installed instance of gnuplot")
return
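# Usage sketch (illustrative): quick ASCII preview in the terminal, useful on
# clusters without a display; requires gnuplot to be installed.
#     dumb_plot([1.0, 10.0, 100.0], [1.0, 4.0, 9.0], title="test", log=True)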
def read_file(filename: Union[str, Path],
header: int = 0,
type_: type = float) -> tuple[list[float], ...]:
"""read a file with given filename into
a python-tuple. Skip n='header' lines in the beginning
of the document"""
# reads a given file and returns lists
lists: list[list[float]] = []
with open(filename, "r", encoding="utf-8") as input_file:
for i, line in enumerate(input_file):
if i < header:
continue
lists += [[type_(v) for v in line.split()]]
return tuple(list(v) for v in zip(*lists))
def write_file(filename: Union[Path, str],
*args: Iterable[float],
title: Optional[str] = None) -> None:
"""write the rows given in 'arga' to a file"""
# write data to a file
if not any(args[0]):
raise Exception("Tried to write an empty row to a file")
if isinstance(filename, str):
filename = Path(filename)
with open(filename, "w", encoding="utf-8") as output_file:
if title is not None:
output_file.write(title + "\n")
for row in zip(*args):
output_file.write(" ".join(str(r) for r in row) + "\n")
def mkdir_p(foldername: str) -> None:
"""creates a new folder if the folder does not exist"""
try:
if exists(foldername):
return
mkdir(foldername)
except IOError as e:
# recursive creation of needed folders
if e.errno != 2:
raise
mkdir_p(dirname(foldername))
mkdir_p(foldername)
def trash_remover(func: Callable[..., tuple[Return, ...]])\
-> Callable[..., Return]:
"""only keeps the first output of a given function"""
@wraps(func)
def inner_function(*args: Any, **kwargs: Any) -> Return:
result = func(*args, **kwargs)
return result[0]
return inner_function
def use_translator(string: str, lang1: str, lang2: str) -> str:
"""
Translate the given input string from lang2 to lang1 using google
translate.
"""
# one letter strings
if len(string) <= 1:
return string
if r"\times" in string:
return string
try:
translator = Translator()
result: str
result = translator.translate(string, lang1, lang2)
except (OSError, MaxRetryError):
return string
return result
def translate(string: str, reverse: bool = False) -> str:
"""translates a string from english to german
@input string any string wich contains specific english words
@input reverse translate ger->en if set to true
@return the german translation of the same string"""
if reverse:
string = re.sub(r'(\d+),(\d+)', r'\1\.\2', string)
else:
string = re.sub(r'(\d+)\.(\d+)', r'\1,\2', string)
_dict: dict[str, str] = OrderedDict({
"leakage": "Leckage",
"contact pressure": "Kontaktdruck",
"fluid pressure": "Fluiddruck",
"pressure": "Druck",
"density": "Dichte",
"roundness": "Rundheit",
"eccentricity": r"Exzentrizit\"at",
"contact area": r"Kontaktoberfl\"ache",
"area": r"Fl\"ache",
"maximal": "Maximale",
"time": "Zeit",
"normal-force": "Normalkraft",
"normal force": "Normalkraft",
"total force": "Gesamtkraft",
"force": "Kraft",
"distance": "Abstand",
"position": "Position",
"contact broadness": "Kontaktbreite",
"broadness": "Breite",
"contact": "Kontakt-",
"seat": "Sitz",
"ball": "Kugel",
"high": "hoch",
"low": "tief",
"elastic": "elastisch",
"plastic": "plastisch",
"angle": "Winkel",
"degree": "Grad",
"deg": "Grad",
"hard": "hart",
"soft": "weich",
"fit": "Fit",
"data": "Messwerte",
"velocity": "Geschwindigkeit",
"measurement": "Messung",
"experiment": "Experiment",
"simulation": "Simulation",
"analytical": "analytisch",
"signal": "Signal",
"valve control": "Ventilansteuerung",
"off": "zu",
"on": "auf",
"status": "Zustand",
"valve": "Ventil",
"relative pressure": "Relativdruck",
"absolute pressure": "Absolutdruck",
"relative": "relativ",
"absolute": "absolut",
"plot": "Graph"
})
if not reverse and r"\times" not in string:
for key, value in _dict.items():
if key in string.lower():
string = re.sub(key, value, string, flags=re.IGNORECASE)
break
else:
string = use_translator(string, "german", "english")
else:
for key, value in _dict.items():
if value.lower() in string.lower():
string = string.lower().replace(value.lower(), key.lower())
break
else:
string = use_translator(string, "english", "german")
return string | scientific-plots | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/utilities.py | utilities.py | from __future__ import print_function
import re
from functools import wraps
from subprocess import Popen, PIPE
from sys import __stdout__
from os import mkdir
from os.path import dirname, exists
from typing import Iterable, Optional, List, Callable, TypeVar, Union, Any
from pathlib import Path
from collections import OrderedDict
from urllib3.exceptions import MaxRetryError
from python_translator import Translator
def running_average(X: List[float], n: int) -> List[float]:
"""creates a running average of X with n entries in both dircetions"""
X_new = []
for i in range(n):
X_new += [sum(X[0: i + 1]) / (i + 1)]
for i in range(n, len(X) - n):
X_new += [sum(X[i - n: i + n + 1]) / (2 * n + 1)]
for i in range(len(X) - n, len(X)):
X_new += [sum(X[2 * i - len(X) + 1:]) / ((len(X) - (i + 1)) * 2 + 1)]
return X_new
def quality_of_fit(X: List[float], Y: List[float]) -> float:
"""calculates the quality of a fit 'bestimmtheitsmass'"""
mean_x = sum(X) / len(X)
return sum(x ** 2 - mean_x ** 2 for x in X)\
/ sum(x ** 2 - mean_x ** 2 for x in Y)
Return = TypeVar("Return")
def print_redirect(f: Callable[..., Return]) -> Callable[..., Return]:
"""
wraps print to print to both stdout and __stdout__
the reason for doing that is that abaqus replaces
stdout and prints everything to abaqus
this way everything can be seen both in the command line and in abaqus
"""
@wraps(f)
def inner_function(*args: Any, **argv: Any) -> Return:
f(*args, **argv)
for i, arg in enumerate(args):
if isinstance(type, arg):
del arg[i]
for _, val in argv.items():
if isinstance(type, val):
del val
argv["file"] = __stdout__
f(*args, **argv)
try:
return f(*args, **argv)
except TypeError:
print("The argument 'file' is twice in a print statement")
raise
return inner_function
def filter_function(x: str) -> bool:
"""filter function in order to remove unused
arguments from this script before parsing"""
if ":" in x or "rwth" in x:
return False
analysed_item = x.replace("--", ".")
analysed_item = analysed_item.replace("-", "!")
if len(analysed_item) > 2\
and "." not in analysed_item\
and "!" in analysed_item:
return False
return True
def filter_argv(X: list[str]) -> list[str]:
"""removes unessecery entries from
argv which have been generated by abaqus"""
Y: list[str] = list(X)
for i, x in enumerate(X):
if not filter_function(x):
del Y[i]
if i < len(Y):
del Y[i + 1]
return Y
def dumb_plot(X: list[float], Y: list[float],
title: str = "title", log: bool = False) -> None:
"""plot X and Y using dumb gnuplot"""
try:
with Popen(["gnuplot"], stdin=PIPE) as gnuplot:
assert gnuplot, """Gnuplot could not be started."""
assert gnuplot.stdin, """Input to Gnuplot could not be found."""
gnuplot.stdin.write(bytes("set term dumb 79 25\n", "utf-8"))
if log:
gnuplot.stdin.write(bytes("set logscale xy\n", "utf-8"))
gnuplot.stdin.write(
bytes(
f"""plot '-' using 1:2 title '{title}'
with linespoints \n""",
"utf-8"
)
)
for x, y in zip(X, Y):
gnuplot.stdin.write(bytes(f"{x} {y}\n", "utf-8"))
gnuplot.stdin.write(bytes("e\n", "utf-8"))
gnuplot.stdin.flush()
except FileNotFoundError:
print("There is no installed instance of gnuplot")
return
def read_file(filename: Union[str, Path],
header: int = 0,
type_: type = float) -> tuple[list[float], ...]:
"""read a file with given filename into
a python-tuple. Skip n='header' lines in the beginning
of the document"""
# reads a given file and returns lists
lists: list[list[float]] = []
with open(filename, "r", encoding="utf-8") as input_file:
for i, line in enumerate(input_file):
if i < header:
continue
lists += [[type_(v) for v in line.split()]]
return tuple(list(v) for v in zip(*lists))
def write_file(filename: Union[Path, str],
*args: Iterable[float],
title: Optional[str] = None) -> None:
"""write the rows given in 'arga' to a file"""
# write data to a file
if not any(args[0]):
raise Exception("Tried to write an empty row to a file")
if isinstance(filename, str):
filename = Path(filename)
with open(filename, "w", encoding="utf-8") as output_file:
if title is not None:
output_file.write(title + "\n")
for row in zip(*args):
output_file.write(" ".join(str(r) for r in row) + "\n")
def mkdir_p(foldername: str) -> None:
"""creates a new folder if the folder does not exist"""
try:
if exists(foldername):
return
mkdir(foldername)
except IOError as e:
# recursive creation of needed folders
if e.errno != 2:
raise
mkdir_p(dirname(foldername))
mkdir_p(foldername)
def trash_remover(func: Callable[..., tuple[Return, ...]])\
-> Callable[..., Return]:
"""only keeps the first output of a given function"""
@wraps(func)
def inner_function(*args: Any, **kwargs: Any) -> Return:
result = func(*args, **kwargs)
return result[0]
return inner_function
def use_translator(string: str, lang1: str, lang2: str) -> str:
"""
Translate the given input string from lang2 to lang1 using google
translate.
"""
# one letter strings
if len(string) <= 1:
return string
if r"\times" in string:
return string
try:
translator = Translator()
result: str
result = translator.translate(string, lang1, lang2)
except (OSError, MaxRetryError):
return string
return result
def translate(string: str, reverse: bool = False) -> str:
"""translates a string from english to german
@input string any string wich contains specific english words
@input reverse translate ger->en if set to true
@return the german translation of the same string"""
if reverse:
string = re.sub(r'(\d+),(\d+)', r'\1\.\2', string)
else:
string = re.sub(r'(\d+)\.(\d+)', r'\1,\2', string)
_dict: dict[str, str] = OrderedDict({
"leakage": "Leckage",
"contact pressure": "Kontaktdruck",
"fluid pressure": "Fluiddruck",
"pressure": "Druck",
"density": "Dichte",
"roundness": "Rundheit",
"eccentricity": r"Exzentrizit\"at",
"contact area": r"Kontaktoberfl\"ache",
"area": r"Fl\"ache",
"maximal": "Maximale",
"time": "Zeit",
"normal-force": "Normalkraft",
"normal force": "Normalkraft",
"total force": "Gesamtkraft",
"force": "Kraft",
"distance": "Abstand",
"position": "Position",
"contact broadness": "Kontaktbreite",
"broadness": "Breite",
"contact": "Kontakt-",
"seat": "Sitz",
"ball": "Kugel",
"high": "hoch",
"low": "tief",
"elastic": "elastisch",
"plastic": "plastisch",
"angle": "Winkel",
"degree": "Grad",
"deg": "Grad",
"hard": "hart",
"soft": "weich",
"fit": "Fit",
"data": "Messwerte",
"velocity": "Geschwindigkeit",
"measurement": "Messung",
"experiment": "Experiment",
"simulation": "Simulation",
"analytical": "analytisch",
"signal": "Signal",
"valve control": "Ventilansteuerung",
"off": "zu",
"on": "auf",
"status": "Zustand",
"valve": "Ventil",
"relative pressure": "Relativdruck",
"absolute pressure": "Absolutdruck",
"relative": "relativ",
"absolute": "absolut",
"plot": "Graph"
})
if not reverse and r"\times" not in string:
for key, value in _dict.items():
if key in string.lower():
string = re.sub(key, value, string, flags=re.IGNORECASE)
break
else:
string = use_translator(string, "german", "english")
else:
for key, value in _dict.items():
if value.lower() in string.lower():
string = string.lower().replace(value.lower(), key.lower())
break
else:
string = use_translator(string, "english", "german")
return string | 0.551815 | 0.288379 |
from __future__ import annotations
from os.path import join
from math import pi
from queue import Queue
from threading import Thread
from subprocess import check_output
from typing import (
List, Tuple, TypeVar, Union, Iterable, Any, Optional)
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import array
from .plot_settings import apply_styles, rwth_gradient_map
from .types_ import Vector
mpl.use("Agg")
SURFACEFOLDER = join("simulation", "surface_data")
In = TypeVar("In", List[float], Tuple[float],
Vector)
@apply_styles
def create_two_d_scatter_plot(
X: In, Y: In, Z: In,
folder: Union[str, Path],
plot_title: str,
xlabel: Optional[str], ylabel: Optional[str], zlabel: str)\
-> None:
"""create two_d_plots"""
# rearrange x, y, z to fit the shape needed for the plot
fig = plt.figure()
plt.set_cmap("jet")
ax = fig.add_subplot(projection="3d")
ax.scatter(Y, X, Z, cmap=rwth_gradient_map)
if xlabel:
ax.set_ylabel(xlabel)
if ylabel:
ax.set_xlabel(ylabel)
ax.set_zlabel(zlabel)
plt.tight_layout()
plt.savefig(join(folder, plot_title.replace(" ", "_") + ".pdf"))
@apply_styles
def create_two_d_surface_plot(
X: In, Y: In, Z: In,
folder: Union[str, Path],
plot_title: str,
xlabel: Optional[str], ylabel: Optional[str], zlabel: str)\
-> None:
"""create two_d_plots"""
# rearrange x, y, z to fit the shape needed for the plot
fig = plt.figure()
plt.set_cmap("jet")
Z_flat: Vector = array(Z)
# X_two_d, Y_two_d=meshgrid(X_flat, Y_flat)
ax = fig.add_subplot(projection="3d")
# ax.plot_surface(X_two_d, Y_two_d, Z_flat, cmap=rwth_gradient_map)
ax.plot_trisurf(Y, X, Z, cmap=rwth_gradient_map)
if ylabel:
ax.set_ylabel(ylabel)
if xlabel:
ax.set_xlabel(xlabel)
ax.set_zlabel(zlabel)
ax.set_zlim(min(Z_flat) * 0.98, max(Z_flat) * 1.05)
ax.set_xlim(min(Y), max(Y))
ax.set_ylim(min(X), max(X))
plt.tight_layout()
plt.savefig(join(folder, plot_title.replace(" ", "_") + ".pdf"))
def get_leakage(data: Iterable[Any], var: str = "density",
surface_file: Optional[str] = None) -> list[float]:
"""calculate the leakage for a given set of data
@param data enumerable set of valve-objects
which allow the determination of the leakage
@return list of the same dimension for the leakage"""
if surface_file is None:
surface_path = join(SURFACEFOLDER, "c_q.dat")
leakage_bin = join(".", "subroutines", "bin", "test_leakage")
Y: list[float] = []
X: list[float] = []
q: Queue[Any] = Queue()
# run every call of the fortran-code in parallel
for d in data: # put the data into the
# queue to access them later as needed
q.put(d)
def work_of_queue() -> None:
nonlocal X
nonlocal Y
while True:
d = q.get()
if d is None:
return # last data-point
pressure = max(d.short.p)
print(pressure)
print(d.angle, d.wobble)
C = float(check_output([leakage_bin, "-i", surface_path, "-P",
f"{pressure}"]))
# A=d.short.unroundness2
A = d.short.sigma
R = d.valve.seat.radius
delta_p = d.dictionary["fluid-pressure"]["value"]
Y += [delta_p * 2.0 * pi * R / A * C]
X += [getattr(d, var)]
threads = [Thread(target=work_of_queue) for i in range(16)]
for thread in threads: # start all threads
thread.start()
q.put(None)
for thread in threads: # wait for all threads to finish
thread.join()
return Y
def plot_2d_surface(
data: Iterable[Any],
folder: str = "simulation",
var1: str = "angle",
var2: str = "wobble",
xlabel1: Optional[str] = None,
xlabel2: Optional[str] = None,
surface_file: Optional[str] = None,
) -> None:
"""create the two d surface plots of two given variables"""
X = [getattr(d, var1) for d in data]
Y = [getattr(d, var2) for d in data]
pressure = [max(d.short.p) for d in data]
A = [d.short.unroundness for d in data]
leakage = get_leakage(data, surface_file=surface_file)
create_two_d_scatter_plot(
X, Y, pressure, folder, "two d pressure",
xlabel1, xlabel2, "maximal pressure [MPa]"
)
create_two_d_scatter_plot(
X, Y, A, folder, "two d area", xlabel1, xlabel2, "contact area [mm]"
)
create_two_d_scatter_plot(
X, Y, leakage, folder,
"two d leakage", xlabel2, xlabel2, "leakage [ml/s]"
)
create_two_d_surface_plot(
X,
Y,
pressure,
folder,
"two d pressure surface",
xlabel1,
xlabel2,
"maximal pressure [MPa]",
)
create_two_d_surface_plot(
X, Y, A, folder, "two d area surface",
xlabel1, xlabel2, "contact area [mm]"
)
create_two_d_surface_plot(
X,
Y,
pressure,
folder,
"two d pressure surface",
xlabel1,
xlabel2,
"maximal pressure [MPa]",
)
create_two_d_surface_plot(
X, Y, leakage, folder, "two d leakage surface",
xlabel2, xlabel2, "leakage [ml/s]"
) | scientific-plots | /scientific_plots-1.7.2-py3-none-any.whl/scientific_plots/two_d_plot.py | two_d_plot.py | from __future__ import annotations
from os.path import join
from math import pi
from queue import Queue
from threading import Thread
from subprocess import check_output
from typing import (
List, Tuple, TypeVar, Union, Iterable, Any, Optional)
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
from numpy import array
from .plot_settings import apply_styles, rwth_gradient_map
from .types_ import Vector
mpl.use("Agg")
SURFACEFOLDER = join("simulation", "surface_data")
In = TypeVar("In", List[float], Tuple[float],
Vector)
@apply_styles
def create_two_d_scatter_plot(
X: In, Y: In, Z: In,
folder: Union[str, Path],
plot_title: str,
xlabel: Optional[str], ylabel: Optional[str], zlabel: str)\
-> None:
"""create two_d_plots"""
# rearrange x, y, z to fit the shape needed for the plot
fig = plt.figure()
plt.set_cmap("jet")
ax = fig.add_subplot(projection="3d")
ax.scatter(Y, X, Z, cmap=rwth_gradient_map)
if xlabel:
ax.set_ylabel(xlabel)
if ylabel:
ax.set_xlabel(ylabel)
ax.set_zlabel(zlabel)
plt.tight_layout()
plt.savefig(join(folder, plot_title.replace(" ", "_") + ".pdf"))
@apply_styles
def create_two_d_surface_plot(
X: In, Y: In, Z: In,
folder: Union[str, Path],
plot_title: str,
xlabel: Optional[str], ylabel: Optional[str], zlabel: str)\
-> None:
"""create two_d_plots"""
# rearrange x, y, z to fit the shape needed for the plot
fig = plt.figure()
plt.set_cmap("jet")
Z_flat: Vector = array(Z)
# X_two_d, Y_two_d=meshgrid(X_flat, Y_flat)
ax = fig.add_subplot(projection="3d")
# ax.plot_surface(X_two_d, Y_two_d, Z_flat, cmap=rwth_gradient_map)
ax.plot_trisurf(Y, X, Z, cmap=rwth_gradient_map)
if ylabel:
ax.set_ylabel(ylabel)
if xlabel:
ax.set_xlabel(xlabel)
ax.set_zlabel(zlabel)
ax.set_zlim(min(Z_flat) * 0.98, max(Z_flat) * 1.05)
ax.set_xlim(min(Y), max(Y))
ax.set_ylim(min(X), max(X))
plt.tight_layout()
plt.savefig(join(folder, plot_title.replace(" ", "_") + ".pdf"))
def get_leakage(data: Iterable[Any], var: str = "density",
surface_file: Optional[str] = None) -> list[float]:
"""calculate the leakage for a given set of data
@param data enumerable set of valve-objects
which allow the determination of the leakage
@return list of the same dimension for the leakage"""
if surface_file is None:
surface_path = join(SURFACEFOLDER, "c_q.dat")
leakage_bin = join(".", "subroutines", "bin", "test_leakage")
Y: list[float] = []
X: list[float] = []
q: Queue[Any] = Queue()
# run every call of the fortran-code in parallel
for d in data: # put the data into the
# queue to access them later as needed
q.put(d)
def work_of_queue() -> None:
nonlocal X
nonlocal Y
while True:
d = q.get()
if d is None:
return # last data-point
pressure = max(d.short.p)
print(pressure)
print(d.angle, d.wobble)
C = float(check_output([leakage_bin, "-i", surface_path, "-P",
f"{pressure}"]))
# A=d.short.unroundness2
A = d.short.sigma
R = d.valve.seat.radius
delta_p = d.dictionary["fluid-pressure"]["value"]
Y += [delta_p * 2.0 * pi * R / A * C]
X += [getattr(d, var)]
threads = [Thread(target=work_of_queue) for i in range(16)]
for thread in threads: # start all threads
thread.start()
q.put(None)
for thread in threads: # wait for all threads to finish
thread.join()
return Y
def plot_2d_surface(
data: Iterable[Any],
folder: str = "simulation",
var1: str = "angle",
var2: str = "wobble",
xlabel1: Optional[str] = None,
xlabel2: Optional[str] = None,
surface_file: Optional[str] = None,
) -> None:
"""create the two d surface plots of two given variables"""
X = [getattr(d, var1) for d in data]
Y = [getattr(d, var2) for d in data]
pressure = [max(d.short.p) for d in data]
A = [d.short.unroundness for d in data]
leakage = get_leakage(data, surface_file=surface_file)
create_two_d_scatter_plot(
X, Y, pressure, folder, "two d pressure",
xlabel1, xlabel2, "maximal pressure [MPa]"
)
create_two_d_scatter_plot(
X, Y, A, folder, "two d area", xlabel1, xlabel2, "contact area [mm]"
)
create_two_d_scatter_plot(
X, Y, leakage, folder,
"two d leakage", xlabel2, xlabel2, "leakage [ml/s]"
)
create_two_d_surface_plot(
X,
Y,
pressure,
folder,
"two d pressure surface",
xlabel1,
xlabel2,
"maximal pressure [MPa]",
)
create_two_d_surface_plot(
X, Y, A, folder, "two d area surface",
xlabel1, xlabel2, "contact area [mm]"
)
create_two_d_surface_plot(
X,
Y,
pressure,
folder,
"two d pressure surface",
xlabel1,
xlabel2,
"maximal pressure [MPa]",
)
create_two_d_surface_plot(
X, Y, leakage, folder, "two d leakage surface",
xlabel2, xlabel2, "leakage [ml/s]"
) | 0.901004 | 0.486392 |
import re
from typing import Any, Callable, Iterable, List, Optional
# ========================================= What can be exported? =========================================
__all__ = ['strings_to_', 'strings_to_integers', 'strings_to_floats', 'string_to_float', 'match_one_string',
'match_one_pattern', 'all_strings']
def strings_to_(strings: Iterable[str], f: Callable) -> Iterable[Any]:
"""
Convert a list of strings to a list of certain form, specified by *f*.
:param strings: a list of string
:param f: a function that converts your string
:return: type undefined, but specified by `to_type`
.. doctest::
>>> strings_to_(['0.333', '0.667', '0.250'], float)
[0.333, 0.667, 0.25]
"""
if not all_strings(strings):
raise TypeError('All have to be strings!')
# ``type(strs)`` is the container of *strs*.
return type(strings)(map(f, strings))
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
"""
Convert a list of strings to a list of integers.
:param strings: a list of string
:return: a list of converted integers
.. doctest::
>>> strings_to_integers(['1', '1.0', '-0.2'])
[1, 1, ValueError('-0.2 cannot be converted to an integer')]
>>> strings_to_integers(['1', '1.0', '-0.'])
[1, 1, 0]
"""
return strings_to_(strings, lambda x: int(float(x)) if float(x).is_integer() else ValueError(
"{} cannot be converted to an integer".format(x)))
def strings_to_floats(strings: Iterable[str]) -> Iterable[float]:
"""
Convert a list of strings to a list of floats.
:param strings: a list of string
:return: a list of converted floats
.. doctest::
>>> strings_to_floats(['1', '1.0', '-0.2'])
[1.0, 1.0, -0.2]
"""
return strings_to_(strings, string_to_float)
def string_to_float(s: str) -> float:
"""
Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float
by Python ``float`` function, so I wrote this function to help conversion. For example,
:param s: a string denoting a double precision number
:return: a Python floating point number
.. doctest::
>>> string_to_float('1d-82')
1e-82
>>> string_to_float('-1.0D-82')
-1e-82
>>> string_to_float('+0.8D234')
8e+233
>>> string_to_float('.8d234')
8e+233
>>> string_to_float('+1.0D-5')
1e-05
>>> string_to_float('-0.00001')
-1e-05
>>> string_to_float('.8e234')
8e+233
>>> string_to_float('.1')
0.1
"""
return float(re.sub('d', 'e', s, flags=re.IGNORECASE))
def match_one_string(pattern: str, s: str, *args):
"""
Make sure you know only none or one string will be matched! If you are not sure, use `match_one_pattern` instead.
:param pattern:
:param s:
:param args:
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def"
>>> match_one_string(p, s, int)
123
>>> print(match_one_string(p, "abc"))
Pattern "\d+" not found, or more than one found in string abc!
None
>>> print(match_one_string(p, "abc 123 def 456"))
Pattern "\d+" not found, or more than one found in string abc 123 def 456!
None
"""
try:
# `match` is either an empty list or a list of string.
match, = re.findall(pattern, s)
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return wrapper(match)
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
except ValueError:
print("Pattern \"{0}\" not found, or more than one found in string {1}!".format(
pattern, s))
def match_one_pattern(pattern: str, s: str, *args: Callable, **flags):
"""
Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no
wrapper is given, return the pure matched string. If no match is found, return None.
:param pattern: a pattern, can be a string or a regular expression
:param s: a string
:param args: at most 1 argument can be given
:param flags: the same flags as ``re.findall``'s
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def 456"
>>> match_one_pattern(p, s)
['123', '456']
>>> match_one_pattern(p, s, int)
[123, 456]
>>> match_one_pattern(p, "abc 123 def")
['123']
>>> print(match_one_pattern('s', 'abc'))
Pattern "s" not found in string abc!
None
>>> match_one_pattern('s', 'Ssa', flags=re.IGNORECASE)
['S', 's']
"""
match: Optional[List[str]] = re.findall(pattern, s,
**flags) # `match` is either an empty list or a list of strings.
if match:
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return [wrapper(m) for m in match]
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
else: # If no match is found
print("Pattern \"{0}\" not found in string {1}!".format(pattern, s))
return None
def all_strings(iterable: Iterable[object]) -> bool:
"""
If any element of an iterable is not a string, return `True`.
:param iterable: Can be a set, a tuple, a list, etc.
:return: Whether any element of an iterable is not a string.
.. doctest::
>>> all_strings(['a', 'b', 'c', 3])
False
>>> all_strings(('a', 'b', 'c', 'd'))
True
"""
return all(isinstance(_, str) for _ in iterable) | scientific-string | /scientific_string-0.1.0-py3-none-any.whl/scientific_string/__init__.py | __init__.py |
import re
from typing import Any, Callable, Iterable, List, Optional
# ========================================= What can be exported? =========================================
__all__ = ['strings_to_', 'strings_to_integers', 'strings_to_floats', 'string_to_float', 'match_one_string',
'match_one_pattern', 'all_strings']
def strings_to_(strings: Iterable[str], f: Callable) -> Iterable[Any]:
"""
Convert a list of strings to a list of certain form, specified by *f*.
:param strings: a list of string
:param f: a function that converts your string
:return: type undefined, but specified by `to_type`
.. doctest::
>>> strings_to_(['0.333', '0.667', '0.250'], float)
[0.333, 0.667, 0.25]
"""
if not all_strings(strings):
raise TypeError('All have to be strings!')
# ``type(strs)`` is the container of *strs*.
return type(strings)(map(f, strings))
def strings_to_integers(strings: Iterable[str]) -> Iterable[int]:
"""
Convert a list of strings to a list of integers.
:param strings: a list of string
:return: a list of converted integers
.. doctest::
>>> strings_to_integers(['1', '1.0', '-0.2'])
[1, 1, ValueError('-0.2 cannot be converted to an integer')]
>>> strings_to_integers(['1', '1.0', '-0.'])
[1, 1, 0]
"""
return strings_to_(strings, lambda x: int(float(x)) if float(x).is_integer() else ValueError(
"{} cannot be converted to an integer".format(x)))
def strings_to_floats(strings: Iterable[str]) -> Iterable[float]:
"""
Convert a list of strings to a list of floats.
:param strings: a list of string
:return: a list of converted floats
.. doctest::
>>> strings_to_floats(['1', '1.0', '-0.2'])
[1.0, 1.0, -0.2]
"""
return strings_to_(strings, string_to_float)
def string_to_float(s: str) -> float:
"""
Double precision float in Fortran file will have form 'x.ydz' or 'x.yDz', this cannot be convert directly to float
by Python ``float`` function, so I wrote this function to help conversion. For example,
:param s: a string denoting a double precision number
:return: a Python floating point number
.. doctest::
>>> string_to_float('1d-82')
1e-82
>>> string_to_float('-1.0D-82')
-1e-82
>>> string_to_float('+0.8D234')
8e+233
>>> string_to_float('.8d234')
8e+233
>>> string_to_float('+1.0D-5')
1e-05
>>> string_to_float('-0.00001')
-1e-05
>>> string_to_float('.8e234')
8e+233
>>> string_to_float('.1')
0.1
"""
return float(re.sub('d', 'e', s, flags=re.IGNORECASE))
def match_one_string(pattern: str, s: str, *args):
"""
Make sure you know only none or one string will be matched! If you are not sure, use `match_one_pattern` instead.
:param pattern:
:param s:
:param args:
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def"
>>> match_one_string(p, s, int)
123
>>> print(match_one_string(p, "abc"))
Pattern "\d+" not found, or more than one found in string abc!
None
>>> print(match_one_string(p, "abc 123 def 456"))
Pattern "\d+" not found, or more than one found in string abc 123 def 456!
None
"""
try:
# `match` is either an empty list or a list of string.
match, = re.findall(pattern, s)
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return wrapper(match)
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
except ValueError:
print("Pattern \"{0}\" not found, or more than one found in string {1}!".format(
pattern, s))
def match_one_pattern(pattern: str, s: str, *args: Callable, **flags):
"""
Find a pattern in a certain string. If found and a wrapper is given, then return the wrapped matched-string; if no
wrapper is given, return the pure matched string. If no match is found, return None.
:param pattern: a pattern, can be a string or a regular expression
:param s: a string
:param args: at most 1 argument can be given
:param flags: the same flags as ``re.findall``'s
:return:
.. doctest::
>>> p = "\d+"
>>> s = "abc 123 def 456"
>>> match_one_pattern(p, s)
['123', '456']
>>> match_one_pattern(p, s, int)
[123, 456]
>>> match_one_pattern(p, "abc 123 def")
['123']
>>> print(match_one_pattern('s', 'abc'))
Pattern "s" not found in string abc!
None
>>> match_one_pattern('s', 'Ssa', flags=re.IGNORECASE)
['S', 's']
"""
match: Optional[List[str]] = re.findall(pattern, s,
**flags) # `match` is either an empty list or a list of strings.
if match:
if len(args) == 0: # If no wrapper argument is given, return directly the matched string
return match
elif len(args) == 1: # If wrapper argument is given, i.e., not empty, then apply wrapper to the match
wrapper, = args
return [wrapper(m) for m in match]
else:
raise TypeError(
'Multiple wrappers are given! Only one should be given!')
else: # If no match is found
print("Pattern \"{0}\" not found in string {1}!".format(pattern, s))
return None
def all_strings(iterable: Iterable[object]) -> bool:
"""
If any element of an iterable is not a string, return `True`.
:param iterable: Can be a set, a tuple, a list, etc.
:return: Whether any element of an iterable is not a string.
.. doctest::
>>> all_strings(['a', 'b', 'c', 3])
False
>>> all_strings(('a', 'b', 'c', 'd'))
True
"""
return all(isinstance(_, str) for _ in iterable) | 0.86852 | 0.540621 |
import matplotlib.pyplot as plt
import numpy as np
from scientific_tools.graphics.function_graphs import plot_2Dfunction
import scientific_tools.physics.uncertainty as uncertainty
def plot_uncertainty_function(f, u_f, min_x, max_x, values_number, args_before_x=[], args_after_x=[], title="", xlabel="", ylabel="", function_label="f(x)", uncertainty_label="f(x)±u(f(x))", function_color='red', uncertainty_color='blue', function_linestyle="-", uncertainty_linestyle="-", **kwargs) :
"""Draw a graph with f, f + u_f and f - u_f
Draw an uncertainty graph with the function f, the function plus its uncertainty (f + u_f) and the fonction minus its uncertainty (f - u_f).
f is a function that take at least one argument x that varies from min_x to max_x by taking values_number values.
u_f calculate the uncertainty of f. It take at least one argument : x. (To add other arguments, see plot_2Dfunction documentation. N.B. f and u_f must have the same arguments)
title is the graph title
xlabel and ylabel are texts to put on the axes
function_label is the text display in the legend about function curve
uncertainty_label is the text display in the legend about curves that represent f ± u_f
function_color is color of function curve
uncertainty_color is the color of curves that represent f ± u_f
function_linestyle & uncertainty_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
f_plus_u = lambda *args, **kwargs : f(*args, **kwargs) + u_f(*args, **kwargs)
plot_2Dfunction(f_plus_u, min_x, max_x, values_number, args_before_x, args_after_x, color=uncertainty_color, linestyle =uncertainty_linestyle, function_label=uncertainty_label, **kwargs)#draw f+u_f
f_minus_u = lambda *args, **kwargs : f(*args, **kwargs) - u_f(*args, **kwargs)
plot_2Dfunction(f_minus_u, min_x, max_x, values_number, args_before_x, args_after_x, color=uncertainty_color, linestyle =uncertainty_linestyle, **kwargs)#draw f-u_f
plot_2Dfunction(f, min_x, max_x, values_number, args_before_x, args_after_x, title=title, xlabel=xlabel, ylabel=ylabel, function_label=function_label, color=function_color, linestyle =function_linestyle, **kwargs)#draw f (this is the last function drawing else title and axes labels haven't been displayed)
plt.legend()#show function & uncertainty labels
def plot_uncertainty_points(x, y, u_x, u_y, title="Experimental values with error bar", xlabel="", ylabel="") :
"""Draw experimental values with error bar
x is the list of x coordinates, y is the list of y coordinates
u_x is the list of x uncertainties, u_y is the list of y uncertainties
xlabel is the text to display with the x ax
ylabel is the text to display with the y ax
"""
plt.errorbar(x, y, xerr=u_x, yerr=u_y, fmt='bo', label='Mesures')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def null_function(*args, **kwargs) :
"""Return 0 for all value of 'value'.
It's can use as an uncertainty calculator when the function is a reference function. (see the documentation of plot_z_score_graph).
"""
return 0
def plot_z_score_graph(f1, u_f1, f2, u_f2, min_x, max_x, values_nb, args_f1_before_x=[], args_f1_after_x=[], kwargs_f1={}, args_f2_before_x=[], args_f2_after_x=[], kwargs_f2={}, z_score_limit=2, title="", xlabel="", ylabel="", limit_label="Limits of z-score validity", z_score_label="Z-score", limit_color='red', z_score_color='blue', limit_linestyle="-", z_score_linestyle="-",) :
"""Trace the z-score between two functions
f1 is the first function & f2 is the second one.
u_f1 is the function that calculate the f1 uncertainty & u_f2 calculate f2 uncertainty.
Those four functions takes at least one argument x that varies from min_x to max_x by taking values_nb values.
f1 and u_f1 take same args and kwargs. args_f1_before_x is the list of f1 positional arguments before the x position
args_f1_after_x is the list of f1 positional arguments after the x position
kwargs_f1 is a dictionary with f1 kwargs
(Idem for f2)
If a function is a function reference, u_f must be null_function (define in this module).
z_score_limit is the validity limit for the z-score (usually, it's 2)
limit_color is color of lines that represents limits of z_score validity
title is the graph title
xlabel and ylabel are texts to put on the axes
limit_label is the text display in the legend about lines that represents limits of z_score validity
z_score_label is the text display in the legend about the z-score curve
z_score_color is the color of the z_score curve
limit_linestyle & z_score_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
x_values = np.linspace(min_x, max_x, values_nb)
#calculate values for f1 & f2
f1_values = []
u_f1_values = []
f2_values = []
u_f2_values = []
for x in x_values :
f1_values.append(f1(*args_f1_before_x, x, *args_f1_after_x, **kwargs_f1))
if u_f1 is not null_function :
u_f1_values.append(u_f1(*args_f1_before_x, x, *args_f1_after_x, **kwargs_f1))
f2_values.append(f2(*args_f2_before_x, x, *args_f2_after_x, **kwargs_f2))
if u_f2 is not null_function :
u_f2_values.append(u_f2(*args_f2_before_x, x, *args_f2_after_x, **kwargs_f2))
z_score_values = []
#calculate z_score
if u_f1 is null_function :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score_ref(f2_values[i], f1_values[i], u_f2_values[i]))
elif u_f2 is null_function :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score_ref(f1_values[i], f2_values[i], u_f1_values[i]))
else :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score(f1_values[i], u_f1_values[i], f2_values[i], u_f2_values[i]))
#displaying
plt.plot(x_values, z_score_values, color=z_score_color, linestyle=z_score_linestyle, label=z_score_label)
plt.plot([np.min(x_values), np.max(x_values)], [z_score_limit, z_score_limit], color=limit_color,linestyle=limit_linestyle, label=limit_label)
plt.plot([np.min(x_values), np.max(x_values)], [-z_score_limit, -z_score_limit], color=limit_color,linestyle=limit_linestyle)
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_z_score_points_graph(x, y1, u_y1, y2, u_y2, z_score_limit=2, title="", xlabel="", ylabel="", limit_label="Limits of z-score validity", z_score_label="Z-score", limit_color='red', z_score_color='blue', limit_linestyle="-", z_score_linestyle="-") :
"""Trace the z-score between two lists of points
x is the list of point abscissa
y1 is the first list of values & f2 is the second one.
u_y1 is the list of uncertainties of y1 points & u_y2 is the list for y2 points uncertainties. If a list of points is a reference, u_y be a list of zero
title is the graph title
xlabel and ylabel are texts to put on the axes
limit_label is the text display in the legend about lines that represents limits of z_score validity
z_score_label is the text display in the legend about the z-score curve
z_score_limit is the validity limit for the z-score (usually, it's 2)
limit_color is color of lines that represents limits of z_score validity
z_score_color is the color of the z_score curve
limit_linestyle & z_score_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
z_score_values = []
#calculate z_score
for i in range(len(x)) :
z_score_values.append(uncertainty.z_score(y1[i], u_y1[i], y2[i], u_y2[i]))
#displaying
plt.plot(x, z_score_values, color=z_score_color, linestyle=z_score_linestyle, label=z_score_label)
plt.plot([np.min(x), np.max(x)], [z_score_limit, z_score_limit], color=limit_color,linestyle=limit_linestyle, label=limit_label)
plt.plot([np.min(x), np.max(x)], [-z_score_limit, -z_score_limit], color=limit_color,linestyle=limit_linestyle)
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel) | scientific-tools | /scientific_tools-0.0.0a17-py3-none-any.whl/scientific_tools/graphics/uncertainty_graphs.py | uncertainty_graphs.py |
import matplotlib.pyplot as plt
import numpy as np
from scientific_tools.graphics.function_graphs import plot_2Dfunction
import scientific_tools.physics.uncertainty as uncertainty
def plot_uncertainty_function(f, u_f, min_x, max_x, values_number, args_before_x=[], args_after_x=[], title="", xlabel="", ylabel="", function_label="f(x)", uncertainty_label="f(x)±u(f(x))", function_color='red', uncertainty_color='blue', function_linestyle="-", uncertainty_linestyle="-", **kwargs) :
"""Draw a graph with f, f + u_f and f - u_f
Draw an uncertainty graph with the function f, the function plus its uncertainty (f + u_f) and the fonction minus its uncertainty (f - u_f).
f is a function that take at least one argument x that varies from min_x to max_x by taking values_number values.
u_f calculate the uncertainty of f. It take at least one argument : x. (To add other arguments, see plot_2Dfunction documentation. N.B. f and u_f must have the same arguments)
title is the graph title
xlabel and ylabel are texts to put on the axes
function_label is the text display in the legend about function curve
uncertainty_label is the text display in the legend about curves that represent f ± u_f
function_color is color of function curve
uncertainty_color is the color of curves that represent f ± u_f
function_linestyle & uncertainty_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
f_plus_u = lambda *args, **kwargs : f(*args, **kwargs) + u_f(*args, **kwargs)
plot_2Dfunction(f_plus_u, min_x, max_x, values_number, args_before_x, args_after_x, color=uncertainty_color, linestyle =uncertainty_linestyle, function_label=uncertainty_label, **kwargs)#draw f+u_f
f_minus_u = lambda *args, **kwargs : f(*args, **kwargs) - u_f(*args, **kwargs)
plot_2Dfunction(f_minus_u, min_x, max_x, values_number, args_before_x, args_after_x, color=uncertainty_color, linestyle =uncertainty_linestyle, **kwargs)#draw f-u_f
plot_2Dfunction(f, min_x, max_x, values_number, args_before_x, args_after_x, title=title, xlabel=xlabel, ylabel=ylabel, function_label=function_label, color=function_color, linestyle =function_linestyle, **kwargs)#draw f (this is the last function drawing else title and axes labels haven't been displayed)
plt.legend()#show function & uncertainty labels
def plot_uncertainty_points(x, y, u_x, u_y, title="Experimental values with error bar", xlabel="", ylabel="") :
"""Draw experimental values with error bar
x is the list of x coordinates, y is the list of y coordinates
u_x is the list of x uncertainties, u_y is the list of y uncertainties
xlabel is the text to display with the x ax
ylabel is the text to display with the y ax
"""
plt.errorbar(x, y, xerr=u_x, yerr=u_y, fmt='bo', label='Mesures')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def null_function(*args, **kwargs) :
"""Return 0 for all value of 'value'.
It's can use as an uncertainty calculator when the function is a reference function. (see the documentation of plot_z_score_graph).
"""
return 0
def plot_z_score_graph(f1, u_f1, f2, u_f2, min_x, max_x, values_nb, args_f1_before_x=[], args_f1_after_x=[], kwargs_f1={}, args_f2_before_x=[], args_f2_after_x=[], kwargs_f2={}, z_score_limit=2, title="", xlabel="", ylabel="", limit_label="Limits of z-score validity", z_score_label="Z-score", limit_color='red', z_score_color='blue', limit_linestyle="-", z_score_linestyle="-",) :
"""Trace the z-score between two functions
f1 is the first function & f2 is the second one.
u_f1 is the function that calculate the f1 uncertainty & u_f2 calculate f2 uncertainty.
Those four functions takes at least one argument x that varies from min_x to max_x by taking values_nb values.
f1 and u_f1 take same args and kwargs. args_f1_before_x is the list of f1 positional arguments before the x position
args_f1_after_x is the list of f1 positional arguments after the x position
kwargs_f1 is a dictionary with f1 kwargs
(Idem for f2)
If a function is a function reference, u_f must be null_function (define in this module).
z_score_limit is the validity limit for the z-score (usually, it's 2)
limit_color is color of lines that represents limits of z_score validity
title is the graph title
xlabel and ylabel are texts to put on the axes
limit_label is the text display in the legend about lines that represents limits of z_score validity
z_score_label is the text display in the legend about the z-score curve
z_score_color is the color of the z_score curve
limit_linestyle & z_score_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
x_values = np.linspace(min_x, max_x, values_nb)
#calculate values for f1 & f2
f1_values = []
u_f1_values = []
f2_values = []
u_f2_values = []
for x in x_values :
f1_values.append(f1(*args_f1_before_x, x, *args_f1_after_x, **kwargs_f1))
if u_f1 is not null_function :
u_f1_values.append(u_f1(*args_f1_before_x, x, *args_f1_after_x, **kwargs_f1))
f2_values.append(f2(*args_f2_before_x, x, *args_f2_after_x, **kwargs_f2))
if u_f2 is not null_function :
u_f2_values.append(u_f2(*args_f2_before_x, x, *args_f2_after_x, **kwargs_f2))
z_score_values = []
#calculate z_score
if u_f1 is null_function :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score_ref(f2_values[i], f1_values[i], u_f2_values[i]))
elif u_f2 is null_function :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score_ref(f1_values[i], f2_values[i], u_f1_values[i]))
else :
for i in range(values_nb) :
z_score_values.append(uncertainty.z_score(f1_values[i], u_f1_values[i], f2_values[i], u_f2_values[i]))
#displaying
plt.plot(x_values, z_score_values, color=z_score_color, linestyle=z_score_linestyle, label=z_score_label)
plt.plot([np.min(x_values), np.max(x_values)], [z_score_limit, z_score_limit], color=limit_color,linestyle=limit_linestyle, label=limit_label)
plt.plot([np.min(x_values), np.max(x_values)], [-z_score_limit, -z_score_limit], color=limit_color,linestyle=limit_linestyle)
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_z_score_points_graph(x, y1, u_y1, y2, u_y2, z_score_limit=2, title="", xlabel="", ylabel="", limit_label="Limits of z-score validity", z_score_label="Z-score", limit_color='red', z_score_color='blue', limit_linestyle="-", z_score_linestyle="-") :
"""Trace the z-score between two lists of points
x is the list of point abscissa
y1 is the first list of values & f2 is the second one.
u_y1 is the list of uncertainties of y1 points & u_y2 is the list for y2 points uncertainties. If a list of points is a reference, u_y be a list of zero
title is the graph title
xlabel and ylabel are texts to put on the axes
limit_label is the text display in the legend about lines that represents limits of z_score validity
z_score_label is the text display in the legend about the z-score curve
z_score_limit is the validity limit for the z-score (usually, it's 2)
limit_color is color of lines that represents limits of z_score validity
z_score_color is the color of the z_score curve
limit_linestyle & z_score_linestyle are the line style of each curve (cf Matplotlib docs for futher information)
"""
z_score_values = []
#calculate z_score
for i in range(len(x)) :
z_score_values.append(uncertainty.z_score(y1[i], u_y1[i], y2[i], u_y2[i]))
#displaying
plt.plot(x, z_score_values, color=z_score_color, linestyle=z_score_linestyle, label=z_score_label)
plt.plot([np.min(x), np.max(x)], [z_score_limit, z_score_limit], color=limit_color,linestyle=limit_linestyle, label=limit_label)
plt.plot([np.min(x), np.max(x)], [-z_score_limit, -z_score_limit], color=limit_color,linestyle=limit_linestyle)
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel) | 0.777469 | 0.784484 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_2Dfunction(function, min_x, max_x, values_number, args_before_x=[], args_after_x=[], title="", xlabel="", ylabel="", function_label="", color="blue", linestyle ="-", **kwargs) :
"""Trace the 2D graphic of the function "function"
function is a function with at least one argument x
args_before_x is the list of positional arguments before the variable argument's position
args_after_x is the list of positional arguments after the variable argument's position
The value of the variable argument x varies from min_x to max_variable by taking values_number values
title is the graph title
xlabel and ylabel are texts to put on the axes
function_label is the label of the function. (Doesn't show it if you doesn't call plt.legend() after this plot_2Dfunction.)
color is the line color
linestyle is the line style (cf Matplotlib docs for futher information)
You can add after keywords arguments for the function "function"
"""
variable_list = np.linspace(min_x, max_x, values_number)
results_list = []
for variable in variable_list :
results_list.append(function(*args_before_x, variable, *args_after_x, **kwargs))
#displaying
plt.plot(variable_list, results_list, color=color, linestyle=linestyle, label=function_label)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_3Dfunction(function, min_x, max_x, values_x, min_y, max_y, values_y, args_before_variables=[], args_between_variables=[], args_after_variables=[], x_before_y = True, title="", xlabel ="", ylabel="", zlabel="", colormap=cm.RdYlGn, **kwargs) :
"""Trace the 3D graphic of the function "function"
function is a function with at least two arguments
args_before_variable is the list of positional arguments before the first variable argument's position
args_between_variables is the list of positional arguments between positions of the first and the second variable
args_after_variables is the list of positional arguments after the second variable argument's position
x_before_x is true if x variable is the first variable (in the function arguments order)
The value of the "x" variable varies from min_x to max_x by taking values_x values
Idem for "y" variable
title is the graph title
xlabel, ylabel and zlabel are texts to put on the axes
colormap is the colormap used for displaying
You can add after keywords arguments for the function "function"
"""
line = np.linspace(min_x, max_x, values_x)
array_x = np.array([line for i in range(values_y) ], dtype=float)
#create an array with x values
column = np.linspace(min_y, max_y, values_y)
array_y = np.array([[column[j]]*values_x for j in range(values_y)], dtype=float)
#create an array with y values
results = []#a array like object with values of function
for i in range(values_y) :
results_line = []
for j in range(values_x) :
variable1 = array_x[i][j]
variable2 = array_y[i][j]
if x_before_y is False :
variable1, variable2 = variable2, variable1
results_line.append(function(*args_before_variables, variable1, *args_between_variables, variable2, *args_after_variables, **kwargs))
results.append(results_line)
array_z = np.array(results, dtype=float)
linewidth = (max_x - min_x+ max_y - min_y)/20#to trace around 10 lines
#displaying
ax = plt.axes(projection='3d')#3D diplaying
ax.plot_surface(array_x, array_y, array_z, cmap=colormap, linewidth=linewidth)#linewidth : distance between two lines
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel) | scientific-tools | /scientific_tools-0.0.0a17-py3-none-any.whl/scientific_tools/graphics/function_graphs.py | function_graphs.py |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot_2Dfunction(function, min_x, max_x, values_number, args_before_x=[], args_after_x=[], title="", xlabel="", ylabel="", function_label="", color="blue", linestyle ="-", **kwargs) :
"""Trace the 2D graphic of the function "function"
function is a function with at least one argument x
args_before_x is the list of positional arguments before the variable argument's position
args_after_x is the list of positional arguments after the variable argument's position
The value of the variable argument x varies from min_x to max_variable by taking values_number values
title is the graph title
xlabel and ylabel are texts to put on the axes
function_label is the label of the function. (Doesn't show it if you doesn't call plt.legend() after this plot_2Dfunction.)
color is the line color
linestyle is the line style (cf Matplotlib docs for futher information)
You can add after keywords arguments for the function "function"
"""
variable_list = np.linspace(min_x, max_x, values_number)
results_list = []
for variable in variable_list :
results_list.append(function(*args_before_x, variable, *args_after_x, **kwargs))
#displaying
plt.plot(variable_list, results_list, color=color, linestyle=linestyle, label=function_label)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_3Dfunction(function, min_x, max_x, values_x, min_y, max_y, values_y, args_before_variables=[], args_between_variables=[], args_after_variables=[], x_before_y = True, title="", xlabel ="", ylabel="", zlabel="", colormap=cm.RdYlGn, **kwargs) :
"""Trace the 3D graphic of the function "function"
function is a function with at least two arguments
args_before_variable is the list of positional arguments before the first variable argument's position
args_between_variables is the list of positional arguments between positions of the first and the second variable
args_after_variables is the list of positional arguments after the second variable argument's position
x_before_x is true if x variable is the first variable (in the function arguments order)
The value of the "x" variable varies from min_x to max_x by taking values_x values
Idem for "y" variable
title is the graph title
xlabel, ylabel and zlabel are texts to put on the axes
colormap is the colormap used for displaying
You can add after keywords arguments for the function "function"
"""
line = np.linspace(min_x, max_x, values_x)
array_x = np.array([line for i in range(values_y) ], dtype=float)
#create an array with x values
column = np.linspace(min_y, max_y, values_y)
array_y = np.array([[column[j]]*values_x for j in range(values_y)], dtype=float)
#create an array with y values
results = []#a array like object with values of function
for i in range(values_y) :
results_line = []
for j in range(values_x) :
variable1 = array_x[i][j]
variable2 = array_y[i][j]
if x_before_y is False :
variable1, variable2 = variable2, variable1
results_line.append(function(*args_before_variables, variable1, *args_between_variables, variable2, *args_after_variables, **kwargs))
results.append(results_line)
array_z = np.array(results, dtype=float)
linewidth = (max_x - min_x+ max_y - min_y)/20#to trace around 10 lines
#displaying
ax = plt.axes(projection='3d')#3D diplaying
ax.plot_surface(array_x, array_y, array_z, cmap=colormap, linewidth=linewidth)#linewidth : distance between two lines
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel) | 0.562537 | 0.73053 |
"""Calculate standard uncertainty (standart uncertainty mainly)"""
from warnings import WarningMessage
import numpy as np
def standard_uncertainty(u_x, u_y, dz_dx, dz_dy) :
"""Calculate the standard uncertainty of z with the general formule."""
return np.sqrt((u_x*dz_dx)**2+(u_y*dz_dy)**2)
def standard_uncertainty_addition(u_x, u_y, a=1, b=1) :
"""Calculate the standard uncertainty of z = ax + by (a & b const).
a and b are constants define with no uncertainty
"""
return np.sqrt((a**2)*(u_x**2)+(b**2)*(u_y)**2)
def relative_uncertainty_multiplication(ur_x, ur_y, a=1, b=1, c=1) :
"""Calculate the relative uncertainty of z= c*x^a*y^b (a, b, c const)
a, b and c are constants define with no uncertainty
c have no influance on the result
ur(x) = u(x)/x. Idem for y.
"""
return np.sqrt((a*ur_x)**2+(b*ur_y)**2)
def relative_uncertainty_multiplications(k=1, *args) :
"""Calculate the relative uncertainty of z = k*Π(xi^ni) (ni & k are const)
ni & k are constants define with no uncertainty
Arguments are tuples (or lists) of with those elements (in this order) : relative uncertainty and power (optionnal, default is 1)
k have no influance on the result
"""
u_r2 = 0#relative uncertainty**2
for arg in args :
if not isinstance(arg, (list, tuple)) :
raise TypeError("args must be tuples or lists")
if len(arg) < 1 :
raise ValueError("args must have at least one element : relative uncertainty and power (optionnal, default is 1)")
if len(arg) > 2 :
raise WarningMessage("args must have at most two elements : relative uncertainty and power (optionnal, default is 1)")
u_r2 += (arg[1]*arg[0])**2
return np.sqrt(u_r2)
def standard_uncertainty_multiplication(x, u_x, y, u_y, a=1, b=1, c=1) :
"""Calculate the standard uncertainty of z= c*x^a*y^b (a, b, c const)
a, b and c are constants define with no uncertainty
"""
z = c*(x**a)*(y**b)
return relative_uncertainty_multiplication(u_x/x, u_y/y, a, b, c)*abs(z)
def standard_uncertainty_multiplications(k=1, *args) :
"""Calculate the standard uncertainty of z = k*Π(xi^ni) (ni & k are const)
ni & k are constants define with no uncertainty
Arguments are tuples (or lists) of with those elements (in this order) : value, standard uncertainty and power (optionnal, default is 1)
"""
z=k
u_r2 = 0#relative uncertainty**2
for arg in args :
if not isinstance(arg, (list, tuple)) :
raise TypeError("args must be tuples or lists")
if len(arg) < 2 :
raise ValueError("args must have at least two elements : value, standard uncertainty and power (optionnal, default is 1)")
if len(arg) >3 :
raise WarningMessage("args must have at most three elements : value, standard uncertainty and power (optionnal, default is 1)")
z *= arg[0]**arg[2]
u_r2 += (arg[2]*arg[1]/arg[0])**2
return abs(z)*np.sqrt(u_r2)
def z_score_ref(x, x_ref, u_x):
"""Calculate the z-score between a measured value and a reference value.
x is the measured value, x_ref the reference value and u_x the uncertainty
"""
#this function is similar to z_score(x, u_x, x_ref, 0)
#but avoid to calculate square and square root
return abs((x-x_ref)/u_x)
def z_score(x1, u_x1, x2, u_x2) :
"""Calculate the z-score between two measured values
x1 is the first value, x2 the second
u_x1 is the uncertainty for x1, u_x2 for x2
"""
return abs(x1-x2)/np.sqrt(u_x1**2 + u_x2**2) | scientific-tools | /scientific_tools-0.0.0a17-py3-none-any.whl/scientific_tools/physics/uncertainty.py | uncertainty.py | """Calculate standard uncertainty (standart uncertainty mainly)"""
from warnings import WarningMessage
import numpy as np
def standard_uncertainty(u_x, u_y, dz_dx, dz_dy) :
"""Calculate the standard uncertainty of z with the general formule."""
return np.sqrt((u_x*dz_dx)**2+(u_y*dz_dy)**2)
def standard_uncertainty_addition(u_x, u_y, a=1, b=1) :
"""Calculate the standard uncertainty of z = ax + by (a & b const).
    a and b are constants defined with no uncertainty.
"""
return np.sqrt((a**2)*(u_x**2)+(b**2)*(u_y)**2)
def relative_uncertainty_multiplication(ur_x, ur_y, a=1, b=1, c=1) :
"""Calculate the relative uncertainty of z= c*x^a*y^b (a, b, c const)
a, b and c are constants define with no uncertainty
c have no influance on the result
ur(x) = u(x)/x. Idem for y.
"""
return np.sqrt((a*ur_x)**2+(b*ur_y)**2)
def relative_uncertainty_multiplications(k=1, *args) :
"""Calculate the relative uncertainty of z = k*Π(xi^ni) (ni & k are const)
    ni & k are constants defined with no uncertainty.
    Arguments are tuples (or lists) with these elements (in this order): relative uncertainty and power (optional, default is 1).
    k has no influence on the result.
"""
    u_r2 = 0  # squared relative uncertainty
    for arg in args :
        if not isinstance(arg, (list, tuple)) :
            raise TypeError("args must be tuples or lists")
        if len(arg) < 1 :
            raise ValueError("args must have at least one element : relative uncertainty and power (optional, default is 1)")
        if len(arg) > 2 :
            warnings.warn("args should have at most two elements : relative uncertainty and power (optional, default is 1); extra elements are ignored")
        power = arg[1] if len(arg) > 1 else 1
        u_r2 += (power*arg[0])**2
    return np.sqrt(u_r2)
def standard_uncertainty_multiplication(x, u_x, y, u_y, a=1, b=1, c=1) :
"""Calculate the standard uncertainty of z= c*x^a*y^b (a, b, c const)
a, b and c are constants define with no uncertainty
"""
z = c*(x**a)*(y**b)
return relative_uncertainty_multiplication(u_x/x, u_y/y, a, b, c)*abs(z)
def standard_uncertainty_multiplications(k=1, *args) :
"""Calculate the standard uncertainty of z = k*Π(xi^ni) (ni & k are const)
    ni & k are constants defined with no uncertainty.
    Arguments are tuples (or lists) with these elements (in this order): value, standard uncertainty and power (optional, default is 1).
"""
    z = k
    u_r2 = 0  # squared relative uncertainty
    for arg in args :
        if not isinstance(arg, (list, tuple)) :
            raise TypeError("args must be tuples or lists")
        if len(arg) < 2 :
            raise ValueError("args must have at least two elements : value, standard uncertainty and power (optional, default is 1)")
        if len(arg) > 3 :
            warnings.warn("args should have at most three elements : value, standard uncertainty and power (optional, default is 1); extra elements are ignored")
        power = arg[2] if len(arg) > 2 else 1
        z *= arg[0]**power
        u_r2 += (power*arg[1]/arg[0])**2
    return abs(z)*np.sqrt(u_r2)
def z_score_ref(x, x_ref, u_x):
"""Calculate the z-score between a measured value and a reference value.
x is the measured value, x_ref the reference value and u_x the uncertainty
"""
    # this function is similar to z_score(x, u_x, x_ref, 0)
    # but avoids computing a square and a square root
return abs((x-x_ref)/u_x)
def z_score(x1, u_x1, x2, u_x2) :
"""Calculate the z-score between two measured values
x1 is the first value, x2 the second
u_x1 is the uncertainty for x1, u_x2 for x2
"""
return abs(x1-x2)/np.sqrt(u_x1**2 + u_x2**2) | 0.888414 | 0.783947 |
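For illustration, here is a minimal usage sketch of the helpers above; the module path is taken from the package layout shown in this row, and the measurement values are invented for the example:

```python
# Minimal usage sketch for the uncertainty helpers defined above.
# The measurements and uncertainties are invented for illustration only.
from scientific_tools.physics.uncertainty import (
    standard_uncertainty_multiplications,
    z_score,
)

# z = x**2 / y with x = 10.0 +/- 0.2 and y = 4.0 +/- 0.1
u_z = standard_uncertainty_multiplications(1, (10.0, 0.2, 2), (4.0, 0.1, -1))
print(u_z)

# compare two measurements of the same quantity; a z-score below 2 is usually read as agreement
print(z_score(5.02, 0.03, 4.95, 0.04))
```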
# Roboy ScientIO
## About
Roboy ScientIO (from Lat. scientia - knowledge and Input/Output) - a Knowledge Graph Engine to organise and query complex data.
## Dependencies
To use ScientIO, you will need to have one of its supported back-ends installed. Currently, the only supported back-end is Neo4j, which may be run in a number of ways
(if you don't have a remote instance available). We recommend simply running it through Docker, like this:
```bash
docker run \
--publish=7474:7474 --publish=7687:7687 \
--volume=$HOME/neo4j/data:/data \
--volume=$HOME/neo4j/logs:/logs \
neo4j:3.0
```
## Installation
### Via PIP
The easiest way to install ScientIO is through pip:
```bash
pip install scientio
```
### For developers
First, install dependencies:
```bash
pip install -r requirements.txt
```
Then, you may open the repository in any IDE, and mark the
`src` folder as a sources root.
## Basic ScientIO use-cases
### Supplying an ontology description
The ontology description is a collection of named entity types, where each type may declare a specific set of properties and relationships like this:
```yaml
# my_ontology.yml
!OType
entity: Alien # The name of the ontology type
---
!OType
entity: Vulcan # Declare a more specific Alien type
meta: [Alien]
properties: # Allowed properties for every Vulcan
- name
- homeworld
- ear_pointiness
---
!OType
entity: Human # Declare a more specific Alien type
meta: [Alien]
properties: # Allowed properties for every Human
- name
- homeworld
relationships: [captain_of] # Allowed relationships for every Human
```
### Creating some nodes
```python
from scientio.ontology.ontology import Ontology
from scientio.session import Session
from scientio.ontology.node import Node
# Load the ontology from a yaml file
onto = Ontology(path_to_yaml="my_ontology.yml")
# Create a session (with default Neo4j backend)
sess = Session(
ontology=onto,
neo4j_address="bolt://localhost:7687",
neo4j_username="neo4j",
neo4j_password="test")
# Get human/vulcan types from ontology
human_type = onto.get_type("Human")
vulcan_type = onto.get_type("Vulcan")
# Create a transient human named "Kirk"
kirk = Node(metatype=human_type)
kirk.set_name("Kirk")
# Create a transient vulcan named "Spock"
spock = Node(metatype=vulcan_type)
spock.set_name("Spock")
# Persist kirk and spock
sess.create(kirk)
sess.create(spock)
```
### Add a relationship between your nodes
```python
from scientio.ontology.ontology import Ontology
from scientio.session import Session
from scientio.ontology.node import Node
# Load the ontology from a yaml file
onto = Ontology(path_to_yaml="my_ontology.yml")
# Create a session (with default Neo4j backend)
sess = Session(
ontology=onto,
neo4j_address="bolt://localhost:7687",
neo4j_username="neo4j",
neo4j_password="test")
# Get human/vulcan types from ontology
human_type = onto.get_type("Human")
vulcan_type = onto.get_type("Vulcan")
# Create query templates to get the actual kirk/spock
kirk = Node(metatype=human_type)
spock = Node(metatype=vulcan_type)
# Query Kirk and Spock from the database, using
# the query nodes we created previously. We're just
# gonna assume that the first human is Kirk, and the first
# vulcan is Spock.
kirk = sess.retrieve(request=kirk)[0]
spock = sess.retrieve(request=spock)[0]
# Add a relationship between Kirk and Spock
kirk.add_relationships({"captain_of": {spock.get_id()}})
# Make sure that the new relationship is persisted
sess.update(kirk)
```
| scientio | /scientio-0.9.1rc2.tar.gz/scientio-0.9.1rc2/README.md | README.md | docker run \
--publish=7474:7474 --publish=7687:7687 \
--volume=$HOME/neo4j/data:/data \
--volume=$HOME/neo4j/logs:/logs \
neo4j:3.0
pip install -r requirements.txt
# my_ontology.yml
!OType
entity: Alien # The name of the ontology type
---
!OType
entity: Vulcan # Declare a more specific Alien type
meta: [Alien]
properties: # Allowed properties for every Vulcan
- name
- homeworld
- ear_pointiness
---
!OType
entity: Human # Declare a more specific Alien type
meta: [Alien]
properties: # Allowed properties for every Human
- name
- homeworld
relationships: [captain_of] # Allowed relationships for every Human
from scientio.ontology.ontology import Ontology
from scientio.session import Session
from scientio.ontology.node import Node
# Load the ontology from a yaml file
onto = Ontology(path_to_yaml="my_ontology.yml")
# Create a session (with default Neo4j backend)
sess = Session(
ontology=onto,
neo4j_address="bolt://localhost:7687",
neo4j_username="neo4j",
neo4j_password="test")
# Get human/vulcan types from ontology
human_type = onto.get_type("Human")
vulcan_type = onto.get_type("Vulcan")
# Create a transient human named "Kirk"
kirk = Node(metatype=human_type)
kirk.set_name("Kirk")
# Create a transient vulcan named "Spock"
spock = Node(metatype=vulcan_type)
spock.set_name("Spock")
# Persist kirk and spock
sess.create(kirk)
sess.create(spock)
from scientio.ontology.ontology import Ontology
from scientio.session import Session
from scientio.ontology.node import Node
# Load the ontology from a yaml file
onto = Ontology(path_to_yaml="my_ontology.yml")
# Create a session (with default Neo4j backend)
sess = Session(
ontology=onto,
neo4j_address="bolt://localhost:7687",
neo4j_username="neo4j",
neo4j_password="test")
# Get human/vulcan types from ontology
human_type = onto.get_type("Human")
vulcan_type = onto.get_type("Vulcan")
# Create query templates to get the actual kirk/spock
kirk = Node(metatype=human_type)
spock = Node(metatype=vulcan_type)
# Query Kirk and Spock from the database, using
# the query nodes we created previously. We're just
# gonna assume that the first human is Kirk, and the first
# vulcan is Spock.
kirk = sess.retrieve(request=kirk)[0]
spock = sess.retrieve(request=spock)[0]
# Add a relationship between Kirk and Spock
kirk.add_relationships({"captain_of": {spock.get_id()}})
# Make sure that the new relationship is persisted
sess.update(kirk) | 0.545528 | 0.854156 |
import sys
import time
from scientisst import *
from scientisst import __version__
from threading import Timer
from threading import Event
from sense_src.arg_parser import ArgParser
from sense_src.custom_script import get_custom_script, CustomScript
from sense_src.device_picker import DevicePicker
from sense_src.file_writer import *
def run_scheduled_task(duration, stop_event):
def stop(stop_event):
stop_event.set()
timer = Timer(duration, stop, [stop_event])
timer.start()
return timer
def main():
arg_parser = ArgParser()
args = arg_parser.args
if args.version:
sys.stdout.write("sense.py version {}\n".format(__version__))
sys.exit(0)
if args.address:
address = args.address
else:
if args.mode == COM_MODE_BT:
address = DevicePicker().select_device()
if not address:
arg_parser.error("No paired device found")
else:
arg_parser.error("No address provided")
args.channels = sorted(map(int, args.channels.split(",")))
scientisst = ScientISST(address, com_mode=args.mode, log=args.log)
try:
if args.output:
firmware_version = scientisst.version_and_adc_chars(print=False)
file_writer = FileWriter(
args.output,
address,
args.fs,
args.channels,
args.convert,
__version__,
firmware_version,
)
if args.stream:
from sense_src.stream_lsl import StreamLSL
lsl = StreamLSL(
args.channels,
args.fs,
address,
)
if args.script:
script = get_custom_script(args.script)
stop_event = Event()
scientisst.start(args.fs, args.channels)
sys.stdout.write("Start acquisition\n")
if args.output:
file_writer.start()
if args.stream:
lsl.start()
if args.script:
script.start()
timer = None
if args.duration > 0:
timer = run_scheduled_task(args.duration, stop_event)
try:
if args.verbose:
header = "\t".join(get_header(args.channels, args.convert)) + "\n"
sys.stdout.write(header)
while not stop_event.is_set():
frames = scientisst.read(convert=args.convert)
if args.output:
file_writer.put(frames)
if args.stream:
lsl.put(frames)
if args.script:
script.put(frames)
if args.verbose:
sys.stdout.write("{}\n".format(frames[0]))
except KeyboardInterrupt:
if args.duration and timer:
timer.cancel()
pass
scientisst.stop()
        # let the acquisition stop before stopping the other threads
time.sleep(0.25)
sys.stdout.write("Stop acquisition\n")
if args.output:
file_writer.stop()
if args.stream:
lsl.stop()
if args.script:
script.stop()
finally:
scientisst.disconnect()
sys.exit(0)
if __name__ == "__main__":
main() | scientisst-sense | /scientisst_sense-1.1.0-py3-none-any.whl/sense.py | sense.py | import sys
import time
from scientisst import *
from scientisst import __version__
from threading import Timer
from threading import Event
from sense_src.arg_parser import ArgParser
from sense_src.custom_script import get_custom_script, CustomScript
from sense_src.device_picker import DevicePicker
from sense_src.file_writer import *
def run_scheduled_task(duration, stop_event):
def stop(stop_event):
stop_event.set()
timer = Timer(duration, stop, [stop_event])
timer.start()
return timer
def main():
arg_parser = ArgParser()
args = arg_parser.args
if args.version:
sys.stdout.write("sense.py version {}\n".format(__version__))
sys.exit(0)
if args.address:
address = args.address
else:
if args.mode == COM_MODE_BT:
address = DevicePicker().select_device()
if not address:
arg_parser.error("No paired device found")
else:
arg_parser.error("No address provided")
args.channels = sorted(map(int, args.channels.split(",")))
scientisst = ScientISST(address, com_mode=args.mode, log=args.log)
try:
if args.output:
firmware_version = scientisst.version_and_adc_chars(print=False)
file_writer = FileWriter(
args.output,
address,
args.fs,
args.channels,
args.convert,
__version__,
firmware_version,
)
if args.stream:
from sense_src.stream_lsl import StreamLSL
lsl = StreamLSL(
args.channels,
args.fs,
address,
)
if args.script:
script = get_custom_script(args.script)
stop_event = Event()
scientisst.start(args.fs, args.channels)
sys.stdout.write("Start acquisition\n")
if args.output:
file_writer.start()
if args.stream:
lsl.start()
if args.script:
script.start()
timer = None
if args.duration > 0:
timer = run_scheduled_task(args.duration, stop_event)
try:
if args.verbose:
header = "\t".join(get_header(args.channels, args.convert)) + "\n"
sys.stdout.write(header)
while not stop_event.is_set():
frames = scientisst.read(convert=args.convert)
if args.output:
file_writer.put(frames)
if args.stream:
lsl.put(frames)
if args.script:
script.put(frames)
if args.verbose:
sys.stdout.write("{}\n".format(frames[0]))
except KeyboardInterrupt:
if args.duration and timer:
timer.cancel()
pass
scientisst.stop()
        # let the acquisition stop before stopping the other threads
time.sleep(0.25)
sys.stdout.write("Stop acquisition\n")
if args.output:
file_writer.stop()
if args.stream:
lsl.stop()
if args.script:
script.stop()
finally:
scientisst.disconnect()
sys.exit(0)
if __name__ == "__main__":
main() | 0.290377 | 0.08438 |
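For context, here is a stripped-down sketch of the acquisition loop that sense.py wraps, using only the calls visible above; the Bluetooth address, sampling rate and channel list are placeholders:

```python
# Hypothetical minimal acquisition mirroring the calls used in main() above.
# The address, sampling rate and channels are placeholders, not real settings.
import time
from scientisst import ScientISST

dev = ScientISST("XX:XX:XX:XX:XX:XX")    # address as reported by the device picker
dev.start(1000, [1, 2])                  # 1000 Hz on analog channels 1 and 2
try:
    for _ in range(100):
        frames = dev.read(convert=True)  # frames converted to mV, as in sense.py
        print(frames[0])
finally:
    dev.stop()
    time.sleep(0.25)                     # let the acquisition stop, as done above
    dev.disconnect()
```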
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from scientisst.constants import *
class ArgParser:
class MyParser(ArgumentParser):
def error(self, message):
sys.stderr.write("error: %s\n\n" % message)
self.print_help()
sys.exit(2)
def __init__(self):
usage = "%(prog)s [args] address"
description = "description: The program connects to the ScientISST Sense device and starts an acquisition, providing the option to store the received data in a .csv file."
self.parser = self.MyParser(
usage=usage, description=description, formatter_class=RawTextHelpFormatter
)
self.parser.add_argument(
"address",
nargs="?",
type=str,
help="For BTH communication:\n\tLinux: BTH MAC address\n\tMac: serial port address\n\tWindows: BTH serial COM port\nFor TCP/UDP communication:\n\tAll plataforms: server port.",
)
self.parser.add_argument(
"-f",
"--frequency",
dest="fs",
help="sampling frequency, default: 1000",
type=int,
default=1000,
)
self.parser.add_argument(
"-c",
"--channels",
dest="channels",
type=str,
help="analog channels, default: 1,2,3,4,5,6",
default="1,2,3,4,5,6",
)
self.parser.add_argument(
"-d",
"--duration",
dest="duration",
help="duration in seconds, default: unlimited",
type=int,
default=0,
)
self.parser.add_argument(
"-o",
"--output",
dest="output",
help="write report to output file, default: None",
type=str,
default=None,
)
self.parser.add_argument(
"-r",
"--raw",
action="store_false",
dest="convert",
default=True,
help="do not convert from raw to mV",
)
self.parser.add_argument(
"-s",
"--lsl",
dest="stream",
action="store_true",
default=False,
help="stream data using Lab Streaming Layer protocol. Use `python -m pylsl.examples.ReceiveAndPlot` to view stream",
)
self.parser.add_argument(
"--script",
dest="script",
help="send the received frames to a script that inherits the CustomScript class",
type=str,
default=None,
)
self.parser.add_argument(
"-q",
"--quiet",
action="store_false",
dest="verbose",
default=True,
help="don't print ScientISST frames",
)
self.parser.add_argument(
"-v",
"--version",
dest="version",
action="store_true",
default=False,
help="show sense.py version",
)
self.parser.add_argument(
"--verbose",
dest="log",
action="store_true",
default=False,
help="log sent/received bytes",
)
self.parser.add_argument(
"-m",
"--mode",
dest="mode",
type=str,
default=COM_MODE_BT,
help="The communication mode. Currently supported modes: "
+ ", ".join(COM_MODE_LIST)
+ ". Default: "
+ COM_MODE_BT,
)
self.args = self.parser.parse_args()
def error(self, value):
self.parser.error(value) | scientisst-sense | /scientisst_sense-1.1.0-py3-none-any.whl/sense_src/arg_parser.py | arg_parser.py | import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from scientisst.constants import *
class ArgParser:
class MyParser(ArgumentParser):
def error(self, message):
sys.stderr.write("error: %s\n\n" % message)
self.print_help()
sys.exit(2)
def __init__(self):
usage = "%(prog)s [args] address"
description = "description: The program connects to the ScientISST Sense device and starts an acquisition, providing the option to store the received data in a .csv file."
self.parser = self.MyParser(
usage=usage, description=description, formatter_class=RawTextHelpFormatter
)
self.parser.add_argument(
"address",
nargs="?",
type=str,
help="For BTH communication:\n\tLinux: BTH MAC address\n\tMac: serial port address\n\tWindows: BTH serial COM port\nFor TCP/UDP communication:\n\tAll plataforms: server port.",
)
self.parser.add_argument(
"-f",
"--frequency",
dest="fs",
help="sampling frequency, default: 1000",
type=int,
default=1000,
)
self.parser.add_argument(
"-c",
"--channels",
dest="channels",
type=str,
help="analog channels, default: 1,2,3,4,5,6",
default="1,2,3,4,5,6",
)
self.parser.add_argument(
"-d",
"--duration",
dest="duration",
help="duration in seconds, default: unlimited",
type=int,
default=0,
)
self.parser.add_argument(
"-o",
"--output",
dest="output",
help="write report to output file, default: None",
type=str,
default=None,
)
self.parser.add_argument(
"-r",
"--raw",
action="store_false",
dest="convert",
default=True,
help="do not convert from raw to mV",
)
self.parser.add_argument(
"-s",
"--lsl",
dest="stream",
action="store_true",
default=False,
help="stream data using Lab Streaming Layer protocol. Use `python -m pylsl.examples.ReceiveAndPlot` to view stream",
)
self.parser.add_argument(
"--script",
dest="script",
help="send the received frames to a script that inherits the CustomScript class",
type=str,
default=None,
)
self.parser.add_argument(
"-q",
"--quiet",
action="store_false",
dest="verbose",
default=True,
help="don't print ScientISST frames",
)
self.parser.add_argument(
"-v",
"--version",
dest="version",
action="store_true",
default=False,
help="show sense.py version",
)
self.parser.add_argument(
"--verbose",
dest="log",
action="store_true",
default=False,
help="log sent/received bytes",
)
self.parser.add_argument(
"-m",
"--mode",
dest="mode",
type=str,
default=COM_MODE_BT,
help="The communication mode. Currently supported modes: "
+ ", ".join(COM_MODE_LIST)
+ ". Default: "
+ COM_MODE_BT,
)
self.args = self.parser.parse_args()
def error(self, value):
self.parser.error(value) | 0.463444 | 0.206834 |
import sys
from scientisst.scientisst import AX1, AX2
from sense_src.thread_builder import ThreadBuilder
from datetime import datetime
class FileWriter(ThreadBuilder):
def __init__(
self, filename, address, fs, channels, mv, api_version, firmware_version
):
super().__init__()
self.filename = filename
self.mv = mv
self.channels = channels
self.metadata = self.__get_metadata(
address, fs, channels, api_version, firmware_version
)
def start(self):
self.__init_file()
super().start()
def stop(self):
super().stop()
if self.f:
self.f.close()
def thread_method(self, frames):
self.f.write("\n".join(map(str, frames)) + "\n")
def __init_file(
self,
):
self.f = open(self.filename, "w")
sys.stdout.write("Saving data to {}\n".format(self.filename))
header = "\t".join(self.metadata["Header"])
self.f.write("#{}\n".format(self.metadata))
self.f.write("#{}\n".format(header))
def __get_metadata(self, address, fs, channels, api_version, firmware_version):
timestamp = datetime.now()
metadata = {
"API version": api_version,
"Channels": channels,
"Channels labels": get_channel_labels(channels, self.mv),
"Device": address,
"Firmware version": firmware_version,
"Header": get_header(channels, self.mv),
"Resolution (bits)": [4, 1, 1, 1, 1] + self.__get_channel_resolutions(),
"Sampling rate (Hz)": fs,
"Timestamp": timestamp.timestamp(),
"ISO 8601": timestamp.isoformat(),
}
if self.mv:
metadata["Channels indexes raw"] = list(
map(lambda x: (x - 1) * 2 + 5, channels)
)
metadata["Channels indexes mV"] = list(
map(lambda x: (x - 1) * 2 + 6, channels)
)
else:
metadata["Channels indexes"] = list(map(lambda x: x + 5, channels))
sorted_metadata = {}
for key in sorted(metadata):
sorted_metadata[key] = metadata[key]
return sorted_metadata
def __get_channel_resolutions(self):
channel_resolutions = []
for ch in self.channels:
if ch == AX1 or ch == AX2:
channel_resolutions += [24]
else:
channel_resolutions += [12]
return channel_resolutions
def __get_channel_resolutions_mv(self):
channel_resolutions = []
for ch in self.channels:
if ch == AX1 or ch == AX2:
channel_resolutions += [0.4]
else:
channel_resolutions += [0.8]
return channel_resolutions
def get_channel_labels(channels, mv):
channel_labels = []
for ch in channels:
if not mv:
if ch == AX1 or ch == AX2:
channel_labels += ["AX{}".format(ch)]
else:
channel_labels += ["AI{}".format(ch)]
else:
if ch == AX1 or ch == AX2:
channel_labels += ["AX{}_raw".format(ch)]
channel_labels += ["AX{}_mv".format(ch)]
else:
channel_labels += ["AI{}_raw".format(ch)]
channel_labels += ["AI{}_mv".format(ch)]
return channel_labels
def get_header(channels, mv):
header = ["NSeq", "I1", "I2", "O1", "O2"]
header += get_channel_labels(channels, mv)
return header | scientisst-sense | /scientisst_sense-1.1.0-py3-none-any.whl/sense_src/file_writer.py | file_writer.py | import sys
from scientisst.scientisst import AX1, AX2
from sense_src.thread_builder import ThreadBuilder
from datetime import datetime
class FileWriter(ThreadBuilder):
def __init__(
self, filename, address, fs, channels, mv, api_version, firmware_version
):
super().__init__()
self.filename = filename
self.mv = mv
self.channels = channels
self.metadata = self.__get_metadata(
address, fs, channels, api_version, firmware_version
)
def start(self):
self.__init_file()
super().start()
def stop(self):
super().stop()
if self.f:
self.f.close()
def thread_method(self, frames):
self.f.write("\n".join(map(str, frames)) + "\n")
def __init_file(
self,
):
self.f = open(self.filename, "w")
sys.stdout.write("Saving data to {}\n".format(self.filename))
header = "\t".join(self.metadata["Header"])
self.f.write("#{}\n".format(self.metadata))
self.f.write("#{}\n".format(header))
def __get_metadata(self, address, fs, channels, api_version, firmware_version):
timestamp = datetime.now()
metadata = {
"API version": api_version,
"Channels": channels,
"Channels labels": get_channel_labels(channels, self.mv),
"Device": address,
"Firmware version": firmware_version,
"Header": get_header(channels, self.mv),
"Resolution (bits)": [4, 1, 1, 1, 1] + self.__get_channel_resolutions(),
"Sampling rate (Hz)": fs,
"Timestamp": timestamp.timestamp(),
"ISO 8601": timestamp.isoformat(),
}
if self.mv:
metadata["Channels indexes raw"] = list(
map(lambda x: (x - 1) * 2 + 5, channels)
)
metadata["Channels indexes mV"] = list(
map(lambda x: (x - 1) * 2 + 6, channels)
)
else:
metadata["Channels indexes"] = list(map(lambda x: x + 5, channels))
sorted_metadata = {}
for key in sorted(metadata):
sorted_metadata[key] = metadata[key]
return sorted_metadata
def __get_channel_resolutions(self):
channel_resolutions = []
for ch in self.channels:
if ch == AX1 or ch == AX2:
channel_resolutions += [24]
else:
channel_resolutions += [12]
return channel_resolutions
def __get_channel_resolutions_mv(self):
channel_resolutions = []
for ch in self.channels:
if ch == AX1 or ch == AX2:
channel_resolutions += [0.4]
else:
channel_resolutions += [0.8]
return channel_resolutions
def get_channel_labels(channels, mv):
channel_labels = []
for ch in channels:
if not mv:
if ch == AX1 or ch == AX2:
channel_labels += ["AX{}".format(ch)]
else:
channel_labels += ["AI{}".format(ch)]
else:
if ch == AX1 or ch == AX2:
channel_labels += ["AX{}_raw".format(ch)]
channel_labels += ["AX{}_mv".format(ch)]
else:
channel_labels += ["AI{}_raw".format(ch)]
channel_labels += ["AI{}_mv".format(ch)]
return channel_labels
def get_header(channels, mv):
header = ["NSeq", "I1", "I2", "O1", "O2"]
header += get_channel_labels(channels, mv)
return header | 0.345105 | 0.201892 |
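As a quick illustration of the header layout produced by get_header above, assuming channels 1 and 2 are regular analog inputs (not the AX channels) and mV conversion is enabled:

```python
# Illustrative only: header columns for two analog channels with mV conversion on.
from sense_src.file_writer import get_header

header = get_header([1, 2], mv=True)
# expected: ['NSeq', 'I1', 'I2', 'O1', 'O2', 'AI1_raw', 'AI1_mv', 'AI2_raw', 'AI2_mv']
print("\t".join(header))
```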
import sys
class DevicePicker:
def select_device(self):
options, labels = self.__get_device_options()
if len(options) > 0:
sys.stdout.write("ScientISST devices:\n")
label_index = 1
for label in labels:
sys.stdout.write("[{}] {}\n".format(label_index, label))
label_index += 1
selected_index = 0
while selected_index == 0:
user_input = input("Connect to: ")
try:
selected_index = int(user_input)
if selected_index > len(options):
selected_index = 0
raise ValueError()
except ValueError:
sys.stderr.write('"{}" is not a valid index\n'.format(user_input))
return options[selected_index - 1]
def __get_device_options(self):
if sys.platform == "linux":
options = self.__get_linux_bth_devices()
return list(map(lambda option: option["addr"], options)), list(
map(
lambda option: "{} - {}".format(
option["name"] if "name" in option else "unnamed",
option["addr"],
),
options,
)
)
else:
import serial.tools.list_ports
ports = serial.tools.list_ports.comports()
options = []
labels = []
for port, desc, hwid in sorted(ports):
if "scientisst" in port.lower():
options += [port]
label = ""
if desc != "n/a":
label += "{} - ".format(desc)
labels += [label + port]
return options, labels
def __get_linux_bth_devices(self):
import pydbus
bt_devices = {}
bus = pydbus.SystemBus()
mngr = bus.get("org.bluez", "/")
mngd_objs = mngr.GetManagedObjects()
for path in mngd_objs:
addr = mngd_objs[path].get("org.bluez.Device1", {}).get("Address")
name = mngd_objs[path].get("org.bluez.Device1", {}).get("Name")
if name and "scientisst" in name.lower() and addr not in bt_devices:
bt_devices[addr] = {
"name": name,
"addr": addr,
}
return bt_devices.values() | scientisst-sense | /scientisst_sense-1.1.0-py3-none-any.whl/sense_src/device_picker.py | device_picker.py | import sys
class DevicePicker:
def select_device(self):
options, labels = self.__get_device_options()
if len(options) > 0:
sys.stdout.write("ScientISST devices:\n")
label_index = 1
for label in labels:
sys.stdout.write("[{}] {}\n".format(label_index, label))
label_index += 1
selected_index = 0
while selected_index == 0:
user_input = input("Connect to: ")
try:
selected_index = int(user_input)
if selected_index > len(options):
selected_index = 0
raise ValueError()
except ValueError:
sys.stderr.write('"{}" is not a valid index\n'.format(user_input))
return options[selected_index - 1]
def __get_device_options(self):
if sys.platform == "linux":
options = self.__get_linux_bth_devices()
return list(map(lambda option: option["addr"], options)), list(
map(
lambda option: "{} - {}".format(
option["name"] if "name" in option else "unnamed",
option["addr"],
),
options,
)
)
else:
import serial.tools.list_ports
ports = serial.tools.list_ports.comports()
options = []
labels = []
for port, desc, hwid in sorted(ports):
if "scientisst" in port.lower():
options += [port]
label = ""
if desc != "n/a":
label += "{} - ".format(desc)
labels += [label + port]
return options, labels
def __get_linux_bth_devices(self):
import pydbus
bt_devices = {}
bus = pydbus.SystemBus()
mngr = bus.get("org.bluez", "/")
mngd_objs = mngr.GetManagedObjects()
for path in mngd_objs:
addr = mngd_objs[path].get("org.bluez.Device1", {}).get("Address")
name = mngd_objs[path].get("org.bluez.Device1", {}).get("Name")
if name and "scientisst" in name.lower() and addr not in bt_devices:
bt_devices[addr] = {
"name": name,
"addr": addr,
}
return bt_devices.values() | 0.258139 | 0.279196 |
class InvalidAddressError(Exception):
"""
The specified address is invalid.
"""
def __init__(self):
super().__init__("The specified address is invalid.")
class BTAdapterNotFoundError(Exception):
"""
No Bluetooth adapter was found.
"""
def __init__(self):
super().__init__("No Bluetooth adapter was found.")
class DeviceNotFoundError(Exception):
"""
The device could not be found.
"""
def __init__(self):
super().__init__("The device could not be found.")
class ContactingDeviceError(Exception):
"""
The computer lost communication with the device.
"""
def __init__(self):
super().__init__("The computer lost communication with the device.")
class PortCouldNotBeOpenedError(Exception):
"""
The communication port does not exist or it is already being used.
"""
def __init__(self):
super().__init__(
"The communication port does not exist or it is already being used."
)
class PortInitializationError(Exception):
"""
The communication port could not be initialized.
"""
def __init__(self):
super().__init__("The communication port could not be initialized.")
class DeviceNotIdleError(Exception):
"""
The device is not idle.
"""
def __init__(self):
super().__init__("The device is not idle.")
class DeviceNotInAcquisitionError(Exception):
"""
The device is not in acquisition mode.
"""
def __init__(self):
super().__init__("The device is not in acquisition mode.")
class InvalidParameterError(Exception):
"""
Invalid parameter.
"""
def __init__(self):
super().__init__("Invalid parameter.")
class NotSupportedError(Exception):
"""
Operation not supported by the device.
"""
def __init__(self):
super().__init__("Operation not supported by the device.")
class UnknownError(Exception):
"""
Unknown error: `message`.
"""
def __init__(self, message=""):
super().__init__("Unknown error: {}".format(message)) | scientisst-sense | /scientisst_sense-1.1.0-py3-none-any.whl/scientisst/exceptions.py | exceptions.py | class InvalidAddressError(Exception):
"""
The specified address is invalid.
"""
def __init__(self):
super().__init__("The specified address is invalid.")
class BTAdapterNotFoundError(Exception):
"""
No Bluetooth adapter was found.
"""
def __init__(self):
super().__init__("No Bluetooth adapter was found.")
class DeviceNotFoundError(Exception):
"""
The device could not be found.
"""
def __init__(self):
super().__init__("The device could not be found.")
class ContactingDeviceError(Exception):
"""
The computer lost communication with the device.
"""
def __init__(self):
super().__init__("The computer lost communication with the device.")
class PortCouldNotBeOpenedError(Exception):
"""
The communication port does not exist or it is already being used.
"""
def __init__(self):
super().__init__(
"The communication port does not exist or it is already being used."
)
class PortInitializationError(Exception):
"""
The communication port could not be initialized.
"""
def __init__(self):
super().__init__("The communication port could not be initialized.")
class DeviceNotIdleError(Exception):
"""
The device is not idle.
"""
def __init__(self):
super().__init__("The device is not idle.")
class DeviceNotInAcquisitionError(Exception):
"""
The device is not in acquisition mode.
"""
def __init__(self):
super().__init__("The device is not in acquisition mode.")
class InvalidParameterError(Exception):
"""
Invalid parameter.
"""
def __init__(self):
super().__init__("Invalid parameter.")
class NotSupportedError(Exception):
"""
Operation not supported by the device.
"""
def __init__(self):
super().__init__("Operation not supported by the device.")
class UnknownError(Exception):
"""
Unknown error: `message`.
"""
def __init__(self, message=""):
super().__init__("Unknown error: {}".format(message)) | 0.793346 | 0.191933 |
# scientistmetrics
## About scientistmetrics
**scientistmetrics** is a `Python` package for calculating correlation amongst categorical variables.
## Why scientistmetrics?
The main function can compute one of six measures of association between two nominal variables from the data given in a two-way contingency table:
* Pearson's chi-squared test : https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test
* Phi coefficient : https://en.wikipedia.org/wiki/Phi_coefficient
* G-test: https://en.wikipedia.org/wiki/G-test
* Cramer's V : https://en.wikipedia.org/wiki/Cramer's_V
* Tschuprow's T : https://en.wikipedia.org/wiki/Tschuprow's_T
* Pearson contingency coefficient : https://www.statisticshowto.com/contingency-coefficient/
A notebook is available.
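To make these measures concrete, here is a small hand-rolled sketch of Cramér's V using pandas and scipy only; it does not go through scientistmetrics' own API (which is not shown here), and the example data frame is invented:

```
# Illustrative computation of Cramer's V for two categorical columns.
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency

df = pd.DataFrame({
    "smoker": ["yes", "no", "no", "yes", "no", "yes"],
    "region": ["north", "south", "north", "south", "south", "north"],
})
table = pd.crosstab(df["smoker"], df["region"])
chi2, p, dof, expected = chi2_contingency(table, correction=False)
n = table.to_numpy().sum()
cramers_v = np.sqrt(chi2 / (n * (min(table.shape) - 1)))
print(cramers_v)
```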
## Installation
### Dependencies
scientistmetrics requires :
```
Python >=3.10
Numpy >=1.23.5
Pandas >=1.5.3
Plotnine >=0.10.1
Scipy >=1.10.1
```
### User installation
You can install scientistmetrics using `pip` :
```
pip install scientistmetrics
```
## Author
Duvérier DJIFACK ZEBAZE | scientistmetrics | /scientistmetrics-0.0.2.tar.gz/scientistmetrics-0.0.2/README.md | README.md | Python >=3.10
Numpy >=1.23.5
Pandas >=1.5.3
Plotnine >=0.10.1
Scipy >=1.10.1
pip install scientistmetrics | 0.575469 | 0.942612 |
# Powerset model
[linear regression dataset](https://www.telusinternational.com/insights/ai-data/article/10-open-datasets-for-linear-regression)
[logistic regression dataset](https://sushtend.com/machine-learning/datasets-for-practicing-logistic-regression/)
[Machine learning with R datasets](https://github.com/stedy/Machine-Learning-with-R-datasets)
## Linear regression
```
# Set repository
import os
os.chdir("D:/Bureau/PythonProject/packages/scientistmetrics/data/")
# warnings message
import warnings
warnings.filterwarnings("ignore")
# Load dataset
import pandas as pd
import numpy as np
insurance = pd.read_csv("insurance.csv",sep=",")
insurance.head()
insurance.info()
# Powerset
from scientistmetrics.model import powersetmodel
```
## [Insurance dataset](https://www.kaggle.com/datasets/mirichoi0218/insurance)
```
# Powerset model
ols_res = powersetmodel(DTrain=insurance,target="charges")
ols_model = ols_res[0]
ols_metrics = ols_res[1]
print(ols_metrics)
```
## Logistic regression
## Diabetes
```
# Load datasets
diabetes = pd.read_csv("diabetes.csv",sep=",")
diabetes.info()
glm_res = powersetmodel(DTrain=diabetes,split_data=False,target="Outcome",model_type = "logistic",num_from=2,num_to=3)
glm_model = glm_res[0]
glm_metrics = glm_res[1]
print(glm_metrics)
```
| scientistmetrics | /scientistmetrics-0.0.2.tar.gz/scientistmetrics-0.0.2/model.ipynb | model.ipynb | # Set repository
import os
os.chdir("D:/Bureau/PythonProject/packages/scientistmetrics/data/")
# warnings message
import warnings
warnings.filterwarnings("ignore")
# Load dataset
import pandas as pd
import numpy as np
insurance = pd.read_csv("insurance.csv",sep=",")
insurance.head()
insurance.info()
# Powerset
from scientistmetrics.model import powersetmodel
# Powerset model
ols_res = powersetmodel(DTrain=insurance,target="charges")
ols_model = ols_res[0]
ols_metrics = ols_res[1]
print(ols_metrics)
# Load datasets
diabetes = pd.read_csv("diabetes.csv",sep=",")
diabetes.info()
glm_res = powersetmodel(DTrain=diabetes,split_data=False,target="Outcome",model_type = "logistic",num_from=2,num_to=3)
glm_model = glm_res[0]
glm_metrics = glm_res[1]
print(glm_metrics) | 0.395835 | 0.884937 |
# scientisttools : Python library for multidimensional analysis
## About scientisttools
**scientisttools** is a `Python` package dedicated to multivariate Exploratory Data Analysis.
## Why use scientisttools?
* It performs **classical principal component methods** :
* Principal Components Analysis (PCA)
* Principal Components Analysis with partial correlation matrix (PPCA)
* Weighted Principal Components Analysis (WPCA)
* Expectation-Maximization Principal Components Analysis (EMPCA)
* Exploratory Factor Analysis (EFA)
* Classical Multidimensional Scaling (CMDSCALE)
* Metric and Non - Metric Multidimensional Scaling (MDS)
* Correspondence Analysis (CA)
* Multiple Correspondence Analysis (MCA)
* Factor Analysis of Mixed Data (FAMD)
* Multiple Factor Analysis (MFA)
* In some methods, it allows **adding supplementary information** such as supplementary individuals and/or variables.
* It provides a geometrical point of view, a lot of graphical outputs.
* It provides efficient implementations, using a scikit-learn API.
Those statistical methods can be used in two ways :
* as descriptive methods ("datamining approach")
* as reduction methods in scikit-learn pipelines ("machine learning approach")
`scientisttools` also implements other methods such as `clustering analysis` and `discriminant analysis`.
* **Clustering analysis**:
* Hierarchical Clustering on Principal Components (HCPC)
* Variables Hierarchical Clustering Analysis (VARHCA)
* Variables Hierarchical Clustering Analysis on Principal Components (VARHCPC)
* Categorical Variables Hierarchical Clustering Analysis (CATVARHCA)
* **Discriminant Analysis**
* Canonical Discriminant Analysis (CANDISC)
* Linear Discriminant Analysis (LDA)
* Discriminant with qualitatives variables (DISQUAL)
* Discriminant Correspondence Analysis (DISCA)
* Discriminant with mixed data (DISMIX)
* Stepwise Discriminant Analysis (STEPDISC) (only `backward` elimination is available).
Notebooks are available.
## Installation
### Dependencies
scientisttools requires
```
Python >=3.10
Numpy >= 1.23.5
Matplotlib >= 3.5.3
Scikit-learn >= 1.2.2
Pandas >= 1.5.3
mapply >= 0.1.21
Plotnine >= 0.10.1
Plydata >= 0.4.3
```
### User installation
You can install scientisttools using `pip` :
```
pip install scientisttools
```
Tutorials are available:
````
https://github.com/enfantbenidedieu/scientisttools/blob/master/ca_example2.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/classic_mds.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/efa_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/famd_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/ggcorrplot.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/mca_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/mds_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/partial_pca.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/pca_example.ipynb
````
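As a quick taste of the API, here is a minimal sketch of a Multiple Correspondence Analysis, mirroring the calls used in the tutorials above; the small data frame is invented, and the assumption that `get_eigenvalue` works on MCA objects as it does elsewhere is illustrative only:

```
# Minimal sketch: MCA on a toy categorical table (invented data).
import pandas as pd
from scientisttools.decomposition import MCA
from scientisttools.extractfactor import get_eigenvalue

df = pd.DataFrame({
    "color": ["red", "blue", "red", "green", "blue", "red"],
    "size": ["S", "M", "M", "L", "S", "L"],
    "shape": ["round", "square", "round", "round", "square", "square"],
})

my_mca = MCA(n_components=2,
             row_labels=df.index,
             var_labels=list(df.columns),
             matrix_type="completed",
             parallelize=False)
my_mca.fit(df)
print(get_eigenvalue(my_mca))
```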
## Author
Duvérier DJIFACK ZEBAZE ([[email protected]]([email protected]))
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/README.md | README.md | Python >=3.10
Numpy >= 1.23.5
Matplotlib >= 3.5.3
Scikit-learn >= 1.2.2
Pandas >= 1.5.3
mapply >= 0.1.21
Plotnine >= 0.10.1
Plydata >= 0.4.3
pip install scientisttools
https://github.com/enfantbenidedieu/scientisttools/blob/master/ca_example2.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/classic_mds.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/efa_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/famd_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/ggcorrplot.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/mca_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/mds_example.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/partial_pca.ipynb
https://github.com/enfantbenidedieu/scientisttools/blob/master/pca_example.ipynb | 0.693161 | 0.990102 |
# Linear Discriminant Analysis (LDA)
```
# Load the dataset
import numpy as np
import pandas as pd
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
DTrain = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DATA_2_TRAIN",header=0)
DTrain.info()
from scientisttools.discriminant_analysis import LDA
lda = LDA(features_labels=list(DTrain.columns[1:]),
target=["TYPE"],
row_labels=DTrain.index,
parallelize=False)
# Instantiation
lda.fit(DTrain)
# Prior probabilities
lda.priors_
# Total covariance matrix
lda.tcov_
# Conditional (per-group) covariance matrices
lda.gcov_
# Between covariance matrix
lda.bcov_
# Within covariance matrix
lda.wcov_
# Conditional means
lda.gmean_
# Coefficients of the score functions
lda.coef_
# Intercepts of the score functions
lda.intercept_
# Statistical evaluation
se = lda.statistical_evaluation_
se
DTest = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DATA_2_TEST",header=0)
DTest.head()
XTest = DTest[DTest.columns[1:]]
yTest = DTest[DTest.columns[0]]
XTest.head()
# Scores of the individuals
lda.decision_function(XTest).head()
pred = lda.predict(XTest)
pred.head()
yTest.head()
# Accuracy score
lda.score(XTest,yTest)
lda.predict_proba(XTest).head()
lda.squared_mdist_
```
## Backward procedure
```
from scientisttools.discriminant_analysis import STEPDISC
stepdisc = STEPDISC(method="backward",
alpha=0.01,
model_train=True,
verbose=True,
parallelize=False)
stepdisc.fit(lda)
# Final model
lda_reduit = stepdisc.train_model_
# Statistical evaluation
lda_reduit.statistical_evaluation_
lda_reduit.coef_
lda_reduit.intercept_
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/lda_example.ipynb | lda_example.ipynb | # Chargement de la base
import numpy as np
import pandas as pd
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
DTrain = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DATA_2_TRAIN",header=0)
DTrain.info()
from scientisttools.discriminant_analysis import LDA
lda = LDA(features_labels=list(DTrain.columns[1:]),
target=["TYPE"],
row_labels=DTrain.index,
parallelize=False)
# Instanciation
lda.fit(DTrain)
# Prabilité à priori
lda.priors_
# Matrice de covariance totale
lda.tcov_
# Matrice de covariance conditionnelle
lda.gcov_
# Between covariance matrix
lda.bcov_
# Within covariance matrix
lda.wcov_
# Moyenne conditionnelle
lda.gmean_
# Coeffcients des fonctions de score
lda.coef_
# Constance des fonctions de score
lda.intercept_
# Evaluation statistique
se = lda.statistical_evaluation_
se
DTest = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DATA_2_TEST",header=0)
DTest.head()
XTest = DTest[DTest.columns[1:]]
yTest = DTest[DTest.columns[0]]
XTest.head()
#Scores des individus
lda.decision_function(XTest).head()
pred = lda.predict(XTest)
pred.head()
yTest.head()
# Accurary score
lda.score(XTest,yTest)
lda.predict_proba(XTest).head()
lda.squared_mdist_
from scientisttools.discriminant_analysis import STEPDISC
stepdisc = STEPDISC(method="backward",
alpha=0.01,
model_train=True,
verbose=True,
parallelize=False)
stepdisc.fit(lda)
# Model Final
lda_reduit = stepdisc.train_model_
# Statistical evaluation
lda_reduit.statistical_evaluation_
lda_reduit.coef_
lda_reduit.intercept_ | 0.323487 | 0.599866 |
# HCPC with MCA
```
# Set environment
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
# load datasets
import pandas as pd
tea = pd.read_excel("tea.xlsx")
tea.info()
```
## MCA on tea
```
# MCA
from scientisttools.decomposition import MCA
mca = MCA(n_components=20,
matrix_type="completed",
row_labels=tea.index,
var_labels=list(tea.columns[:18]),
quanti_sup_labels=["age"],
quali_sup_labels=tea.columns[19:],
parallelize=True)
mca.fit(tea)
```
## HCPC
```
from scientisttools.clustering import HCPC
hcpc = HCPC(n_clusters=3,
metric="euclidean",
method="ward",
parallelize=True)
hcpc.fit(mca)
hcpc.cluster_infos_
from scientisttools.pyplot import plot_dendrogram
import matplotlib.pyplot as plt
plot_dendrogram(hcpc)
hcpc.cluster_centers_
from scientisttools.pyplot import plotHCPC
fig, axe = plt.subplots(figsize=(10,10))
#plotHCPC(hcpc,ax=axe)
#plt.show()
hcpc.desc_axes_correlation_ratio_
hcpc.desc_axes_infos_
hcpc.desc_var_quali_["chi2"]
hcpc.desc_var_quali_sup_
hcpc.var_quali_infos_
hcpc.desc_var_category_.loc["cluster_1",]
hcpc.desc_var_quanti_sup_
```
## Characterisation by individuals
### Close individuals
```
hcpc.disto_near_
# Distant individuals
hcpc.disto_far_
# Paragons
hcpc.parangons_
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/hcpc_mca_example.ipynb | hcpc_mca_example.ipynb | # Set environment
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
# load datasets
import pandas as pd
tea = pd.read_excel("tea.xlsx")
tea.info()
# MCA
from scientisttools.decomposition import MCA
mca = MCA(n_components=20,
matrix_type="completed",
row_labels=tea.index,
var_labels=list(tea.columns[:18]),
quanti_sup_labels=["age"],
quali_sup_labels=tea.columns[19:],
parallelize=True)
mca.fit(tea)
from scientisttools.clustering import HCPC
hcpc = HCPC(n_clusters=3,
metric="euclidean",
method="ward",
parallelize=True)
hcpc.fit(mca)
hcpc.cluster_infos_
from scientisttools.pyplot import plot_dendrogram
import matplotlib.pyplot as plt
plot_dendrogram(hcpc)
hcpc.cluster_centers_
from scientisttools.pyplot import plotHCPC
fig, axe = plt.subplots(figsize=(10,10))
#plotHCPC(hcpc,ax=axe)
#plt.show()
hcpc.desc_axes_correlation_ratio_
hcpc.desc_axes_infos_
hcpc.desc_var_quali_["chi2"]
hcpc.desc_var_quali_sup_
hcpc.var_quali_infos_
hcpc.desc_var_category_.loc["cluster_1",]
hcpc.desc_var_quanti_sup_
hcpc.disto_near_
# Individus loin
hcpc.disto_far_
# Parangons
hcpc.parangons_ | 0.360377 | 0.653286 |
# Canonical discriminant analysis
```
# Load the data
import pandas as pd
import numpy as np
from plydata import *
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
wine = pd.read_excel("wine_quality.xls",index_col=1)
display(wine >> head())
from scientisttools.discriminant_analysis import CANDISC
my_cda = CANDISC(n_components=2,
target=["Qualite"],
row_labels=wine.index,
features_labels=["Temperature","Soleil","Chaleur","Pluie"],
parallelize=False)
my_cda.fit(wine)
my_cda.classes_
my_cda.summary_information_
my_cda.class_level_information_
my_cda.squared_mdist_
my_cda.univariate_test_statistis_
anova = my_cda.anova_
anova
my_cda.correlation_ratio_
print(my_cda.manova_)
tukey = my_cda.tukey_
print(tukey["Temperature"])
bonf_test = my_cda.bonferroni_correction_
print(bonf_test["Temperature"])
sidak = my_cda.sidak_
print(sidak["Temperature"])
my_cda.tcov_
my_cda.tcorr_
my_cda.gcov_
my_cda.wcov_
my_cda.wcorr_
my_cda.bcorr_
my_cda.eig_.T
my_cda.intercept_
my_cda.coef_
from scientisttools.extractfactor import get_candisc
row_coord = get_candisc(my_cda)
row_coord["coord"].head()
my_cda.global_performance_
my_cda.likelihood_test_
transform = my_cda.transform(wine.drop(columns=["Qualite","Obs."]))
transform.iloc[:5,:]
Xtest = pd.DataFrame(np.array([3000,1100,20,300]).reshape(1,4),index=['1958'],
columns = my_cda.features_labels_)
Xtest
my_cda.transform(Xtest)
X = wine[my_cda.features_labels_]
predict_proba = my_cda.predict_proba(X)
predict_proba.head().round(4)
my_cda.decision_function(Xtest)
predict_proba = my_cda.predict_proba(X)
predict_proba.head()
my_cda.predict(Xtest)
my_cda.score(X,wine["Qualite"])
from scientisttools.extractfactor import get_eigenvalue,summaryCANDISC,get_candisc_coef
eig = get_eigenvalue(my_cda)
eig
coef = get_candisc_coef(my_cda,choice="absolute")
coef
coef = get_candisc_coef(my_cda,choice="score")
coef
summaryCANDISC(my_cda,to_markdown=True)
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/candisc_example.ipynb | candisc_example.ipynb | # Chargement des donnees
import pandas as pd
import numpy as np
from plydata import *
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
wine = pd.read_excel("wine_quality.xls",index_col=1)
display(wine >> head())
from scientisttools.discriminant_analysis import CANDISC
my_cda = CANDISC(n_components=2,
target=["Qualite"],
row_labels=wine.index,
features_labels=["Temperature","Soleil","Chaleur","Pluie"],
parallelize=False)
my_cda.fit(wine)
my_cda.classes_
my_cda.summary_information_
my_cda.class_level_information_
my_cda.squared_mdist_
my_cda.univariate_test_statistis_
anova = my_cda.anova_
anova
my_cda.correlation_ratio_
print(my_cda.manova_)
tukey = my_cda.tukey_
print(tukey["Temperature"])
bonf_test = my_cda.bonferroni_correction_
print(bonf_test["Temperature"])
sidak = my_cda.sidak_
print(sidak["Temperature"])
my_cda.tcov_
my_cda.tcorr_
my_cda.gcov_
my_cda.wcov_
my_cda.wcorr_
my_cda.bcorr_
my_cda.eig_.T
my_cda.intercept_
my_cda.coef_
from scientisttools.extractfactor import get_candisc
row_coord = get_candisc(my_cda)
row_coord["coord"].head()
my_cda.global_performance_
my_cda.likelihood_test_
transform = my_cda.transform(wine.drop(columns=["Qualite","Obs."]))
transform.iloc[:5,:]
Xtest = pd.DataFrame(np.array([3000,1100,20,300]).reshape(1,4),index=['1958'],
columns = my_cda.features_labels_)
Xtest
my_cda.transform(Xtest)
X = wine[my_cda.features_labels_]
predict_proba = my_cda.predict_proba(X)
predict_proba.head().round(4)
my_cda.decision_function(Xtest)
predict_proba = my_cda.predict_proba(X)
predict_proba.head()
my_cda.predict(Xtest)
my_cda.score(X,wine["Qualite"])
from scientisttools.extractfactor import get_eigenvalue,summaryCANDISC,get_candisc_coef
eig = get_eigenvalue(my_cda)
eig
coef = get_candisc_coef(my_cda,choice="absolute")
coef
coef = get_candisc_coef(my_cda,choice="score")
coef
summaryCANDISC(my_cda,to_markdown=True) | 0.376623 | 0.459076 |
# Data import, train-test split and DISQUAL
```
#change working directory
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#load the data
import pandas
df = pandas.read_excel("mushroom.xls")
df.info()
#description
df.describe(include='object')
#number of distinct values per variable
df.describe(include='object').loc['unique']
#split according to the SAMPLE_STATUS column
dfTrain = df.loc[df.SAMPLE_STATUS == 'train'].drop('SAMPLE_STATUS',axis='columns')
dfTest = df.loc[df.SAMPLE_STATUS == 'test'].drop('SAMPLE_STATUS',axis='columns')
#check
print(dfTrain.shape)
print(dfTest.shape)
#DISQUAL method - instantiation
from scientisttools.discriminant_analysis import DISQUAL
disqual = DISQUAL(n_components=2,
target=['classe'],
features_labels=list(dfTrain.columns[:-1]),
row_labels=dfTrain.index,
parallelize=True)
#training - the processing time exceeds one minute
#had to interrupt the computation
disqual.fit(dfTrain)
```
# Test with just the multiple correspondence analysis
```
#X variables only
XTrain = dfTrain.loc[:,dfTrain.columns != 'classe']
XTrain.columns
#instantiate and run the MCA
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=None,
row_labels=XTrain.index.values,
var_labels=XTrain.columns.values,
mod_labels=None,
matrix_type='completed',
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
parallelize=True)
#training -- here too the computation was stopped
#after more than a minute
my_mca.fit(XTrain)
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/disqual_mushroom.ipynb | disqual_mushroom.ipynb | #changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas
df = pandas.read_excel("mushroom.xls")
df.info()
#description
df.describe(include='object')
#valeurs par variable
df.describe(include='object').loc['unique']
#partition sur la base de la colonne SAMPLE_STATUS
dfTrain = df.loc[df.SAMPLE_STATUS == 'train'].drop('SAMPLE_STATUS',axis='columns')
dfTest = df.loc[df.SAMPLE_STATUS == 'test'].drop('SAMPLE_STATUS',axis='columns')
#vérif.
print(dfTrain.shape)
print(dfTest.shape)
#méthode DISQUAL instanciation
from scientisttools.discriminant_analysis import DISQUAL
disqual = DISQUAL(n_components=2,
target=['classe'],
features_labels=list(dfTrain.columns[:-1]),
row_labels=dfTrain.index,
parallelize=True)
#entraînement - temps de traitement dépasse la minute
#ai dû interrompre les calculs
disqual.fit(dfTrain)
#variables X seulement
XTrain = dfTrain.loc[:,dfTrain.columns != 'classe']
XTrain.columns
#instancier et lancer l'ACM
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=None,
row_labels=XTrain.index.values,
var_labels=XTrain.columns.values,
mod_labels=None,
matrix_type='completed',
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
parallelize=True)
#entraînement -- ici aussi arrêt des calculs
#après plus d'une minute
my_mca.fit(XTrain) | 0.207295 | 0.607547 |
# Linear Discriminant Analysis with both continuous and categorical variables (DISMIX)
```
# Load the libraries
import numpy as np
import pandas as pd
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
# Load the data
D = pd.read_excel("Data_Methodes_Factorielles.xlsx",sheet_name="AFDM_TENNIS",index_col=0)
display(D)
D.info()
```
We want to predict `RolandGarros`.
```
# Import the estimator class
from scientisttools.discriminant_analysis import DISMIX
# Instantiation
mixdisc = DISMIX(n_components=None,
target=["RolandGarros"],
quanti_features_labels=["Taille","Titres","Finales","TitresGC","BestClassDouble"],
quali_features_labels=["Lateralite","MainsRevers"],
row_labels=D.index,
priors=None,
parallelize=False)
# Training - fit
mixdisc.fit(D)
```
## Factor Analysis of Mixed Data
```
# FAMD
famd = mixdisc.famd_model_
# Eigenvalues
eig = famd.eig_.T
display(pd.DataFrame(eig))
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(famd,choice="eigenvalue",ax=axe)
plt.show()
# Coordinates of the individuals
pd.DataFrame(famd.row_coord_)
# Coefficients from the LDA
mixdisc.lda_coef_
# Intercept from the LDA
mixdisc.intercept_
# MIXDISC coefficients
mixdisc.coef_
# MIXDISC intercept
mixdisc.intercept_
X= D.drop(columns=["RolandGarros"])
y = D["RolandGarros"]
pred = mixdisc.predict(X)
pred
# Accuracy
mixdisc.score(X,y)
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/dismix_example.ipynb | dismix_example.ipynb | # Chargement des librairies
import numpy as np
import pandas as pd
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
# Chargement des données
D = pd.read_excel("Data_Methodes_Factorielles.xlsx",sheet_name="AFDM_TENNIS",index_col=0)
display(D)
D.info()
# Importation de la classe de Calcul
from scientisttools.discriminant_analysis import DISMIX
# Instanciation
mixdisc = DISMIX(n_components=None,
target=["RolandGarros"],
quanti_features_labels=["Taille","Titres","Finales","TitresGC","BestClassDouble"],
quali_features_labels=["Lateralite","MainsRevers"],
row_labels=D.index,
priors=None,
parallelize=False)
# Entrainement - Fit
mixdisc.fit(D)
# FAMD
famd = mixdisc.famd_model_
# Valeur propres
eig = famd.eig_.T
display(pd.DataFrame(eig))
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(famd,choice="eigenvalue",ax=axe)
plt.show()
# Coordonnées des individus
pd.DataFrame(famd.row_coord_)
# Coefficients issus de LDA
mixdisc.lda_coef_
# Constante issue de LDA
mixdisc.intercept_
# Coefficients du MIXDISC
mixdisc.coef_
# Constante du MIXDISC
mixdisc.intercept_
X= D.drop(columns=["RolandGarros"])
y = D["RolandGarros"]
pred = mixdisc.predict(X)
pred
# Accurate
mixdisc.score(X,y) | 0.314471 | 0.797241 |
# Discriminant Correspondence Analysis (DISCA)
```
# Load the dataset
import numpy as np
import pandas as pd
import plotnine as pn
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
D = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DIVAY",header=0)
D
from scientisttools.discriminant_analysis import DISCA
disca = DISCA(n_components=None,
target=["Region"],
features_labels=list(D.columns[1:]),
matrix_type="completed",
priors=None,
parallelize=False)
disca.fit(D)
disca.statistics_test_["cramer's V"]
# Supplementary individuals
Xsup = pd.DataFrame({
"Woody" : "A","Fruity" : "C", "Sweet" : "B", "Alcohol" : "B", "Hedonic" : "A"
},index=[13])
Xsup
yTrain = D["Region"]
XTrain = D.drop(columns=["Region"])
XTrain
disca.transform(Xsup)
disca.decision_function(Xsup)
disca.predict_proba(Xsup)
disca.predict(Xsup)
disca.score(XTrain,yTrain)
disca.score(XTrain,disca.predict(XTrain))
```
# Approach 2 - Dummy variables
```
dummies = pd.get_dummies(XTrain,prefix_sep="_")
D2 = pd.concat([yTrain,dummies],axis=1)
D2
disca2 = DISCA(n_components=None,
target=["Region"],
features_labels=None,
               mod_labels=list(dummies.columns),
matrix_type="dummies",
priors=None,
parallelize=False)
disca2.fit(D2)
disca2.coef_
disca2.row_coord_
disca2.statistics_test_["cramer's V"]
ca = disca.ca_model_
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(ca,choice="eigenvalue",ax=axe)
plt.show()
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/disca_example.ipynb | disca_example.ipynb | # Chargement de la base
import numpy as np
import pandas as pd
import plotnine as pn
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
D = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DIVAY",header=0)
D
from scientisttools.discriminant_analysis import DISCA
disca = DISCA(n_components=None,
target=["Region"],
features_labels=list(D.columns[1:]),
matrix_type="completed",
priors=None,
parallelize=False)
disca.fit(D)
disca.statistics_test_["cramer's V"]
# Individus supplémentaires
Xsup = pd.DataFrame({
"Woody" : "A","Fruity" : "C", "Sweet" : "B", "Alcohol" : "B", "Hedonic" : "A"
},index=[13])
Xsup
yTrain = D["Region"]
XTrain = D.drop(columns=["Region"])
XTrain
disca.transform(Xsup)
disca.decision_function(Xsup)
disca.predict_proba(Xsup)
disca.predict(Xsup)
disca.score(XTrain,yTrain)
disca.score(XTrain,disca.predict(XTrain))
dummies = pd.get_dummies(XTrain,prefix_sep="_")
D2 = pd.concat([yTrain,dummies],axis=1)
D2
disca2 = DISCA(n_components=None,
target=["Region"],
features_labels=None,
mod_labels=list(dummies.columns),
matrix_type="dummies",
priors=None,
parallelize=False)
disca2.fit(D2)
disca2.coef_
disca2.row_coord_
disca2.statistics_test_["cramer's V"]
ca = disca.ca_model_
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(ca,choice="eigenvalue",ax=axe)
plt.show() | 0.320502 | 0.659196 |
# Data import and inspection
```
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#chargement des données
import pandas
jobrate = pandas.read_excel("jobrate.xlsx")
jobrate.head()
#information sur les données
jobrate.info()
#variables actives
X = jobrate[jobrate.columns[:-1]]
print(X.columns)
#matrice des corrélations
R = X.corr()
print(R)
#représentation graphique - heatmap
import seaborn as sns
sns.heatmap(R,vmin=-1,vmax=+1,cmap='Blues')
#ou avec le carré des corrélations
#la valeur min devient 0 dans ce cas
sns.heatmap(R**2,vmin=0,vmax=+1,cmap='Blues')
```
# Hierarchical clustering (CAH) of the variables
```
import numpy
D = numpy.sqrt(1-R**2)
print(D.iloc[:3,:3])
#préparation pour la CAH de scipy
#vectoriser la matrice des distances
from scipy.spatial.distance import squareform
VD = squareform(D)
print(VD)
#CAH - Ward
from scipy.cluster.hierarchy import ward
cah = ward(VD)
print(cah)
#dendrogramme
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
plt.title("CAH")
dendrogram(cah,labels=X.columns,orientation='left',color_threshold=0)
plt.show()
#matérialisation de 4 classes
plt.title("CAH - 4 classes")
dendrogram(cah,labels=X.columns,orientation='left',color_threshold=0.95)
plt.show()
#découpage effectif en 4 classes
from scipy.cluster.hierarchy import fcluster
groupes = fcluster(cah,t=0.95,criterion='distance')
print(groupes)
#comptage
print(numpy.unique(groupes,return_counts=True))
#liste des variables pour le groupe 1
print(X.columns[groupes == 1])
#affichage des groupes
for g in numpy.unique(groupes):
print(g," : ",X.columns[groupes == g])
```
# Handling the supplementary variable (Overall_Rating)
```
#corrélation avec chaque variable
print(X.corrwith(jobrate.Overall_Rating))
#dataset pour le groupe 1
X[X.columns[groupes == 1]].head()
#corrélations des variables du groupe 1 avec "overall"
print(X[X.columns[groupes == 1]].corrwith(jobrate.Overall_Rating))
#moyenne des carrés des corrélations avec les groupes
for g in numpy.unique(groupes):
print(g," : ",numpy.mean(X[X.columns[groupes==g]].corrwith(jobrate.Overall_Rating)**2))
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/jobrate_var_clustering.ipynb | jobrate_var_clustering.ipynb | #changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#chargement des données
import pandas
jobrate = pandas.read_excel("jobrate.xlsx")
jobrate.head()
#information sur les données
jobrate.info()
#variables actives
X = jobrate[jobrate.columns[:-1]]
print(X.columns)
#matrice des corrélations
R = X.corr()
print(R)
#représentation graphique - heatmap
import seaborn as sns
sns.heatmap(R,vmin=-1,vmax=+1,cmap='Blues')
#ou avec le carré des corrélations
#la valeur min devient 0 dans ce cas
sns.heatmap(R**2,vmin=0,vmax=+1,cmap='Blues')
import numpy
D = numpy.sqrt(1-R**2)
print(D.iloc[:3,:3])
#préparation pour la CAH de scipy
#vectoriser la matrice des distances
from scipy.spatial.distance import squareform
VD = squareform(D)
print(VD)
#CAH - Ward
from scipy.cluster.hierarchy import ward
cah = ward(VD)
print(cah)
#dendrogramme
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
plt.title("CAH")
dendrogram(cah,labels=X.columns,orientation='left',color_threshold=0)
plt.show()
#matérialisation de 4 classes
plt.title("CAH - 4 classes")
dendrogram(cah,labels=X.columns,orientation='left',color_threshold=0.95)
plt.show()
#découpage effectif en 4 classes
from scipy.cluster.hierarchy import fcluster
groupes = fcluster(cah,t=0.95,criterion='distance')
print(groupes)
#comptage
print(numpy.unique(groupes,return_counts=True))
#liste des variables pour le groupe 1
print(X.columns[groupes == 1])
#affichage des groupes
for g in numpy.unique(groupes):
print(g," : ",X.columns[groupes == g])
#corrélation avec chaque variable
print(X.corrwith(jobrate.Overall_Rating))
#dataset pour le groupe 1
X[X.columns[groupes == 1]].head()
#corrélations des variables du groupe 1 avec "overall"
print(X[X.columns[groupes == 1]].corrwith(jobrate.Overall_Rating))
#moyenne des carrés des corrélations avec les groupes
for g in numpy.unique(groupes):
print(g," : ",numpy.mean(X[X.columns[groupes==g]].corrwith(jobrate.Overall_Rating)**2)) | 0.241132 | 0.759091 |
```
#dossier de travail
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des individus actifs
import pandas as pd
X = pd.read_excel("loisirs_subset.xlsx",sheet_name="data")
X.info()
```
# MCA with scientisttools
```
# ACM
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=2,
row_labels=X.index,
var_labels=X.columns,
mod_labels=None,
matrix_type="completed",
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
parallelize=False)
my_mca.fit(X)
from scientisttools.pyplot import plotMCA
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,10))
plotMCA(my_mca,choice="mod",repel=True,ax=axe)
plt.show()
```
## Hierarchical clustering of the categories
```
from scientisttools.clustering import VARHCPC
#
varhcpc = VARHCPC(n_clusters=3,
metric="euclidean",
method="ward",
parallelize=False)
# Instanciation
varhcpc.fit(my_mca)
# Dendrogram
from scientisttools.pyplot import plot_dendrogram
plot_dendrogram(varhcpc,orientation='top',leaf_rotation=90,color_threshold=0)
plt.show()
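# Coordonnées des centres des classes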
varhcpc.cluster_centers_
from scientisttools.pyplot import plotVARHCPC
fig,axe =plt.subplots(figsize=(10,10))
plotVARHCPC(varhcpc,ax=axe,xlim=(-1.2,1.2),ylim=(-1.2,1.2),repel=True,random_state=123,show_clust_cent=False)
plt.show()
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/varhcpc2.ipynb | varhcpc2.ipynb | #dossier de travail
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des individus actifs
import pandas as pd
X = pd.read_excel("loisirs_subset.xlsx",sheet_name="data")
X.info()
# ACM
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=2,
row_labels=X.index,
var_labels=X.columns,
mod_labels=None,
matrix_type="completed",
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
parallelize=False)
my_mca.fit(X)
from scientisttools.pyplot import plotMCA
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,10))
plotMCA(my_mca,choice="mod",repel=True,ax=axe)
plt.show()
from scientisttools.clustering import VARHCPC
#
varhcpc = VARHCPC(n_clusters=3,
metric="euclidean",
method="ward",
parallelize=False)
# Instanciation
varhcpc.fit(my_mca)
# Dendrogram
from scientisttools.pyplot import plot_dendrogram
plot_dendrogram(varhcpc,orientation='top',leaf_rotation=90,color_threshold=0)
plt.show()
varhcpc.cluster_centers_
from scientisttools.pyplot import plotVARHCPC
fig,axe =plt.subplots(figsize=(10,10))
plotVARHCPC(varhcpc,ax=axe,xlim=(-1.2,1.2),ylim=(-1.2,1.2),repel=True,random_state=123,show_clust_cent=False)
plt.show() | 0.384334 | 0.628977 |
# CANDISC SKLEARN
```
# Chargement des donnees
import pandas as pd
import numpy as np
from plydata import *
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
data = pd.read_excel("wine_quality.xls",index_col=1)
display(data >> head())
wine = data >> select("-Obs.")
display(wine >> head())
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
X = wine.drop(columns=["Qualite"])
y =wine["Qualite"]
clf = LinearDiscriminantAnalysis(solver="svd")
clf.fit(X, y)
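# Classes prédites, coordonnées discriminantes et scores de décision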
predict=clf.predict(X)
predict
transform = clf.transform(X)
transform[:5,]
decision = clf.decision_function(X)
decision[:5,:]
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/candisc_sklearn.ipynb | candisc_sklearn.ipynb | # Chargement des donnees
import pandas as pd
import numpy as np
from plydata import *
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
data = pd.read_excel("wine_quality.xls",index_col=1)
display(data >> head())
wine = data >> select("-Obs.")
display(wine >> head())
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
X = wine.drop(columns=["Qualite"])
y =wine["Qualite"]
clf = LinearDiscriminantAnalysis(solver="svd")
clf.fit(X, y)
predict=clf.predict(X)
predict
transform = clf.transform(X)
transform[:5,]
decision = clf.decision_function(X)
decision[:5,:] | 0.278649 | 0.463505 |
# Discriminant Analysis with categorical variables (DISQUAL)
```
# Chargement des librairies
import numpy as np
import pandas as pd
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
DTrain = pd.read_excel("CongressVotePipeline.xlsx",sheet_name="train",header=0)
display(DTrain.head())
DTrain.info()
from scientisttools.discriminant_analysis import DISQUAL
disqual = DISQUAL(n_components=None,
target=["group"],
features_labels=list(DTrain.columns[:-1]),
row_labels=DTrain.index,
parallelize=False)
disqual.fit(DTrain)
stats_test = disqual.statistics_test_
stats_test.keys()
stats_test["chi2"]
stats_test["log-likelihood-test"]
stats_test["cramer's V"]
stats_test["tschuprow's T"]
stats_test["pearson"]
```
## MCA results
```
mca = disqual.mca_model_
```
### Eigenvalues
```
eig = mca.eig_.T
display(pd.DataFrame(eig))
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(mca,ax=axe,n_components=32)
plt.show()
from scientisttools.extractfactor import get_mca_mod,get_mca_ind
mod =get_mca_mod(mca)
mod_infos = mod["infos"]
display(mod_infos)
mod_coord = mod["coord"]
display(mod_coord.iloc[:,:2])
# Fonction de projection
fproj = disqual.projection_function_
display(fproj.iloc[:,:2])
# Coordonnées des individus
row = get_mca_ind(mca)
row_coord = row["coord"]
display(row_coord.head(10).iloc[:,:2])
# Coefficients du LDA
lda_coef = disqual.lda_coef_
display(lda_coef)
lda_intercept = disqual.lda_intercept_
display(lda_intercept)
# Evaluation globale
se = disqual.statistical_evaluation_
display(se)
coef = disqual.coef_
display(coef)
intercept = disqual.intercept_
display(intercept)
DTest = pd.read_excel("CongressVotePipeline.xlsx",sheet_name="test",header=0)
DTest.head()
DTest.info()
XTest = DTest[DTest.columns[:-1]]
new_coord = disqual.transform(XTest)
new_coord.iloc[:,:2]
XTrain = DTrain[DTrain.columns[:-1]]
coord = disqual.transform(XTrain)
coord.iloc[:,:2]
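# Prédictions et matrices de confusion en apprentissage et en test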
pred_train = disqual.predict(XTrain)
pred_train
from sklearn.metrics import confusion_matrix
confusion_matrix(DTrain[DTrain.columns[-1]],pred_train)
pred_test = disqual.predict(XTest)
pred_test
confusion_matrix(DTest[DTest.columns[-1]],pred_test)
prob_train = disqual.predict_proba(XTrain)
prob_train
prob_test = disqual.predict_proba(XTest)
prob_test
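# Taux de bonnes prédictions en test (esquisse : accuracy_score de scikit-learn)
from sklearn.metrics import accuracy_score
accuracy_score(DTest[DTest.columns[-1]], pred_test)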
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/disqual_example.ipynb | disqual_example.ipynb | # Chargement des librairies
import numpy as np
import pandas as pd
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
DTrain = pd.read_excel("CongressVotePipeline.xlsx",sheet_name="train",header=0)
display(DTrain.head())
DTrain.info()
from scientisttools.discriminant_analysis import DISQUAL
disqual = DISQUAL(n_components=None,
target=["group"],
features_labels=list(DTrain.columns[:-1]),
row_labels=DTrain.index,
parallelize=False)
disqual.fit(DTrain)
stats_test = disqual.statistics_test_
stats_test.keys()
stats_test["chi2"]
stats_test["log-likelihood-test"]
stats_test["cramer's V"]
stats_test["tschuprow's T"]
stats_test["pearson"]
mca = disqual.mca_model_
eig = mca.eig_.T
display(pd.DataFrame(eig))
from scientisttools.pyplot import plot_eigenvalues
import matplotlib.pyplot as plt
fig, axe =plt.subplots(figsize=(10,8))
plot_eigenvalues(mca,ax=axe,n_components=32)
plt.show()
from scientisttools.extractfactor import get_mca_mod,get_mca_ind
mod =get_mca_mod(mca)
mod_infos = mod["infos"]
display(mod_infos)
mod_coord = mod["coord"]
display(mod_coord.iloc[:,:2])
# Fonction de projection
fproj = disqual.projection_function_
display(fproj.iloc[:,:2])
# Coordonnées des individus
row = get_mca_ind(mca)
row_coord = row["coord"]
display(row_coord.head(10).iloc[:,:2])
# Coefficients du LDA
lda_coef = disqual.lda_coef_
display(lda_coef)
lda_intercept = disqual.lda_intercept_
display(lda_intercept)
# Evaluation globale
se = disqual.statistical_evaluation_
display(se)
coef = disqual.coef_
display(coef)
intercept = disqual.intercept_
display(intercept)
DTest = pd.read_excel("CongressVotePipeline.xlsx",sheet_name="test",header=0)
DTest.head()
DTest.info()
XTest = DTest[DTest.columns[:-1]]
new_coord = disqual.transform(XTest)
new_coord.iloc[:,:2]
XTrain = DTrain[DTrain.columns[:-1]]
coord = disqual.transform(XTrain)
coord.iloc[:,:2]
pred_train = disqual.predict(XTrain)
pred_train
from sklearn.metrics import confusion_matrix
confusion_matrix(DTrain[DTrain.columns[-1]],pred_train)
pred_test = disqual.predict(XTest)
pred_test
confusion_matrix(DTest[DTest.columns[-1]],pred_test)
prob_train = disqual.predict_proba(XTrain)
prob_train
prob_test = disqual.predict_proba(XTest)
prob_test | 0.408277 | 0.755592 |
# Linear Discriminant Analysis (LDA)
```
# Chargement de la base
import numpy as np
import pandas as pd
import plotnine as pn
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
D = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DATA_1",header=0)
D
# Représentation graphique
p = pn.ggplot(D,pn.aes(x="X1",y="X2"))+pn.geom_point(pn.aes(color="Groupe"))+pn.ggtitle("Positions relatives des classes dans le plan")
print(p)
from scientisttools.discriminant_analysis import LDA
```
## Multinomial distribution
```
lda = LDA(features_labels=["X1","X2"],
target=["Groupe"],
distribution="multinomiale",
row_labels=D.index,
parallelize=False)
lda.fit(D)
# Moyennes conditionnelles
g_k = lda.gmean_
g_k
p = p+ pn.geom_point(g_k,pn.aes(x="X1",y="X2",color=g_k.index))+pn.geom_text(g_k,pn.aes(x="X1",y="X2",label=g_k.index,color=g_k.index))
print(p)
lda.gcov_
p2 = (pn.ggplot(D,pn.aes(x="X1",y="X2",color="Groupe"))+
pn.geom_point()+
pn.scale_color_manual(values=["skyblue","green"])+
pn.stat_ellipse(type="norm",level=0.7)+
pn.ggtitle('Ellipses de dispersion'))
print(p2)
# Nouvel individu
XTest = pd.DataFrame({"X1" : 6, "X2" : 7},index=["omega"])
XTest
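# Scores, probabilités a posteriori et classe prédite pour le nouvel individu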
lda.decision_function(XTest)
lda.predict_proba(XTest)
lda.predict(XTest)
```
## Homoscedasticity assumption
```
lda2 = LDA(features_labels=["X1","X2"],
target=["Groupe"],
distribution="homoscedastik",
row_labels=D.index,
parallelize=False)
lda2.fit(D)
lda2.intercept_
lda2.coef_
lda.priors_
X = D[["X1","X2"]]
lda2.decision_function(X)
lda2.decision_function(XTest)
lda2.predict_proba(X=X)
lda2.squared_mdist_
lda2.predict_proba(X=XTest)
lda2.predict(XTest)
# Evaluation globale
lda2.global_performance_
# Contribution des variables
lda2.statistical_evaluation_
lda2.wcov_
lda2.tcov_
lda2.generalized_distance(X)
print(lda2.manova_)
lda2.generalized_distance(XTest)
lda3 = LDA(features_labels=["X1","X2"],
target=["Groupe"],
distribution="homoscedastik",
row_labels = D.index,
priors=[1/2,1/2],
parallelize=False)
lda3.fit(D)
lda3.priors_
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/lda_example2.ipynb | lda_example2.ipynb | # Chargement de la base
import numpy as np
import pandas as pd
import plotnine as pn
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
D = pd.read_excel("Data_Illustration_Livre_ADL.xlsx",sheet_name="DATA_1",header=0)
D
# Représentation graphique
p = pn.ggplot(D,pn.aes(x="X1",y="X2"))+pn.geom_point(pn.aes(color="Groupe"))+pn.ggtitle("Positions relatives des classes dans le plan")
print(p)
from scientisttools.discriminant_analysis import LDA
lda = LDA(features_labels=["X1","X2"],
target=["Groupe"],
distribution="multinomiale",
row_labels=D.index,
parallelize=False)
lda.fit(D)
# Moyennes conditionnelles
g_k = lda.gmean_
g_k
p = p+ pn.geom_point(g_k,pn.aes(x="X1",y="X2",color=g_k.index))+pn.geom_text(g_k,pn.aes(x="X1",y="X2",label=g_k.index,color=g_k.index))
print(p)
lda.gcov_
p2 = (pn.ggplot(D,pn.aes(x="X1",y="X2",color="Groupe"))+
pn.geom_point()+
pn.scale_color_manual(values=["skyblue","green"])+
pn.stat_ellipse(type="norm",level=0.7)+
pn.ggtitle('Ellipses de dispersion'))
print(p2)
# Nouvel individu
XTest = pd.DataFrame({"X1" : 6, "X2" : 7},index=["omega"])
XTest
lda.decision_function(XTest)
lda.predict_proba(XTest)
lda.predict(XTest)
lda2 = LDA(features_labels=["X1","X2"],
target=["Groupe"],
distribution="homoscedastik",
row_labels=D.index,
parallelize=False)
lda2.fit(D)
lda2.intercept_
lda2.coef_
lda.priors_
X = D[["X1","X2"]]
lda2.decision_function(X)
lda2.decision_function(XTest)
lda2.predict_proba(X=X)
lda2.squared_mdist_
lda2.predict_proba(X=XTest)
lda2.predict(XTest)
# Evaluation globale
lda2.global_performance_
# Contribution des variables
lda2.statistical_evaluation_
lda2.wcov_
lda2.tcov_
lda2.generalized_distance(X)
print(lda2.manova_)
lda2.generalized_distance(XTest)
lda3 = LDA(features_labels=["X1","X2"],
target=["Groupe"],
distribution="homoscedastik",
row_labels = D.index,
priors=[1/2,1/2],
parallelize=False)
lda3.fit(D)
lda3.priors_ | 0.254787 | 0.805479 |
# Metric and Non-Metric MDS
```
# Chargement des librairies
import pandas as pd
import numpy as np
from plydata import *
import matplotlib.pyplot as plt
from scientisttools.extractfactor import get_mds
from scipy.spatial.distance import pdist,squareform
from scientisttools.pyplot import plot_shepard
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#chargement - index_col = 0 pour indiquer que la colonne n°0 est un label
D = pd.read_excel("Data_Methodes_Factorielles.xlsx",sheet_name="MDS_MADAGASCAR",index_col=0)
display(D)
from scientisttools.manifold import MDS
my_mds = MDS(n_components=2,
random_state=123,
proximity ="precomputed",
labels=D.index.values,
normalized_stress=True,
parallelize=False)
my_mds.fit(D)
from scientisttools.extractfactor import get_mds
coord = get_mds(my_mds)["coord"]
display(coord)
print(my_mds.stress_)
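# Diagramme de Shepard : dissimilarités observées vs distances restituées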
fig, axe =plt.subplots(figsize=(8,8))
plot_shepard(my_mds,ax=axe)
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/mds_example.ipynb | mds_example.ipynb | # Chargement des librairies
import pandas as pd
import numpy as np
from plydata import *
import matplotlib.pyplot as plt
from scientisttools.extractfactor import get_mds
from scipy.spatial.distance import pdist,squareform
from scientisttools.pyplot import plot_shepard
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#chargement - index_col = 0 pour indiquer que la colonne n°0 est un label
D = pd.read_excel("Data_Methodes_Factorielles.xlsx",sheet_name="MDS_MADAGASCAR",index_col=0)
display(D)
from scientisttools.manifold import MDS
my_mds = MDS(n_components=2,
random_state=123,
proximity ="precomputed",
labels=D.index.values,
normalized_stress=True,
parallelize=False)
my_mds.fit(D)
from scientisttools.extractfactor import get_mds
coord = get_mds(my_mds)["coord"]
display(coord)
print(my_mds.stress_)
fig, axe =plt.subplots(figsize=(8,8))
plot_shepard(my_mds,ax=axe) | 0.298083 | 0.62055 |
# MCA
```
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas
df = pandas.read_excel("mushroom_acm.xlsx")
#df = pandas.read_excel("canines.xlsx")
df.info()
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=None,
row_labels=df.index.values,
var_labels=df.columns.values,
mod_labels=None,
matrix_type='completed',
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
graph=False)
#entraînement
my_mca.fit(df)
#valeurs propres
print(my_mca.eig_[0])
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/mca_mushroom.ipynb | mca_mushroom.ipynb | #changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas
df = pandas.read_excel("mushroom_acm.xlsx")
#df = pandas.read_excel("canines.xlsx")
df.info()
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=None,
row_labels=df.index.values,
var_labels=df.columns.values,
mod_labels=None,
matrix_type='completed',
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
graph=False)
#entraînement
my_mca.fit(df)
#valeurs propres
print(my_mca.eig_[0]) | 0.173463 | 0.438545 |
# Hierarchical Clustering Analysis of continuous variables
```
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
import pandas as pd
df = pd.read_excel("jobrate.xlsx")
df.head()
df.info()
#variables actives
D = df[df.columns[:-1]]
from scientisttools.clustering import VARHCA
varhca = VARHCA(n_clusters=4,
var_labels=D.columns,
matrix_type="completed",
metric="euclidean",
method="ward",
parallelize=False)
# Instanciation
varhca.fit(D)
varhca.linkage_matrix_
varhca.distances_
```
# Supplementary variable (Overall_Rating)
```
Xsup = df[df.columns[-1]]
Xsup.head()
varhca.transform(Xsup)
```
# Approach 2: full data matrix
```
varhca2 = VARHCA(n_clusters=4,
var_labels=df.columns[:-1],
var_sup_labels=["Overall_Rating"],
matrix_type="completed",
metric="euclidean",
method="ward",
parallelize=False)
varhca2.fit(df)
varhca2.corr_mean_square_
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/varhca.ipynb | varhca.ipynb |
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
import pandas as pd
df = pd.read_excel("jobrate.xlsx")
df.head()
df.info()
#variables actives
D = df[df.columns[:-1]]
from scientisttools.clustering import VARHCA
varhca = VARHCA(n_clusters=4,
var_labels=D.columns,
matrix_type="completed",
metric="euclidean",
method="ward",
parallelize=False)
# Instanciation
varhca.fit(D)
varhca.linkage_matrix_
varhca.distances_
Xsup = df[df.columns[-1]]
Xsup.head()
varhca.transform(Xsup)
varhca2 = VARHCA(n_clusters=4,
var_labels=df.columns[:-1],
var_sup_labels=["Overall_Rating"],
matrix_type="completed",
metric="euclidean",
method="ward",
parallelize=False)
varhca2.fit(df)
varhca2.corr_mean_square_ | 0.325306 | 0.67868 |
# VARHCPC
```
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas as pd
vote = pd.read_excel("vote_clustering.xlsx")
vote.head()
#variables actives
X = vote.iloc[:,1:]
print(X.columns)
```
## MCA on the active data
```
# ACM
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=2,
row_labels=X.index,
var_labels=X.columns,
mod_labels=None,
matrix_type="completed",
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
parallelize=False)
my_mca.fit(X)
# Coordonnées des modalités
pd.DataFrame(my_mca.mod_coord_,index=my_mca.mod_labels_,columns=my_mca.dim_index_)
pd.DataFrame(my_mca.mod_infos_,index=my_mca.mod_labels_,columns=["dist","weight","Inertie"])
```
> Note
In this table, `weight` is the relative weight of each category, that is:
$$
w_{k} = \dfrac{n_{k}}{n\times p}
$$
where
* $n_{k}$ is the number of individuals carrying category $k$
* $n$ is the total number of individuals
* $p$ is the number of categorical variables
For the clustering step, the absolute weight (or proportion) is used instead:
$$
p_{k} = \dfrac{n_{k}}{n} = w_{k}\times p
$$
```
# Poids absolus des modalités
my_mca.mod_infos_[:,1]*my_mca.n_vars_
```
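As an illustration, the same absolute weights can be recovered directly from the category counts of the active table; a minimal sketch, assuming `X` and `my_mca` are the objects built above (attribute names as used in this notebook; only the ordering of the categories may differ):
```
import pandas as pd

n = X.shape[0]        # nombre d'individus (n)
n_vars = X.shape[1]   # nombre de variables catégorielles (p)
# n_k : effectif de chaque modalité, empilé sur les p variables
counts = pd.concat([X[col].value_counts() for col in X.columns])
p_abs = counts / n          # poids absolus p_k = n_k / n
w_rel = p_abs / n_vars      # poids relatifs w_k = n_k / (n * p)
# Aux permutations près, w_rel doit coïncider avec my_mca.mod_infos_[:, 1]
# et p_abs avec my_mca.mod_infos_[:, 1] * my_mca.n_vars_
```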
## Clustering
```
from scientisttools.clustering import VARHCPC
#
varhcpc = VARHCPC(n_clusters=3,
metric="euclidean",
method="average",
parallelize=False)
# Instanciation
varhcpc.fit(my_mca)
```
### Dendrogram
```
# Dendrogram
import matplotlib.pyplot as plt
from scientisttools.pyplot import plot_dendrogram
plot_dendrogram(varhcpc,orientation='top',leaf_rotation=90,color_threshold=1.6)
plt.show()
varhcpc.cluster_centers_
from scientisttools.pyplot import plotVARHCPC
fig,axe =plt.subplots(figsize=(10,10))
plotVARHCPC(varhcpc,ax=axe,xlim=(-1.5,2),ylim=(-5,1),repel=True,random_state=123,show_clust_cent=True)
plt.show()
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/varhcpc.ipynb | varhcpc.ipynb | #changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas as pd
vote = pd.read_excel("vote_clustering.xlsx")
vote.head()
#variables actives
X = vote.iloc[:,1:]
print(X.columns)
# ACM
from scientisttools.decomposition import MCA
my_mca = MCA(n_components=2,
row_labels=X.index,
var_labels=X.columns,
mod_labels=None,
matrix_type="completed",
benzecri=True,
greenacre=True,
row_sup_labels=None,
quali_sup_labels=None,
quanti_sup_labels=None,
parallelize=False)
my_mca.fit(X)
# Coordonnées des modalités
pd.DataFrame(my_mca.mod_coord_,index=my_mca.mod_labels_,columns=my_mca.dim_index_)
pd.DataFrame(my_mca.mod_infos_,index=my_mca.mod_labels_,columns=["dist","weight","Inertie"])
# Poids absolus des modalités
my_mca.mod_infos_[:,1]*my_mca.n_vars_
from scientisttools.clustering import VARHCPC
#
varhcpc = VARHCPC(n_clusters=3,
metric="euclidean",
method="average",
parallelize=False)
# Instanciation
varhcpc.fit(my_mca)
# Dendrogram
import matplotlib.pyplot as plt
from scientisttools.pyplot import plot_dendrogram
plot_dendrogram(varhcpc,orientation='top',leaf_rotation=90,color_threshold=1.6)
plt.show()
varhcpc.cluster_centers_
from scientisttools.pyplot import plotVARHCPC
fig,axe =plt.subplots(figsize=(10,10))
plotVARHCPC(varhcpc,ax=axe,xlim=(-1.5,2),ylim=(-5,1),repel=True,random_state=123,show_clust_cent=True)
plt.show() | 0.382833 | 0.623334 |
# Multiple Factor Analysis (MFA)
```
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas as pd
url = "http://factominer.free.fr/factomethods/datasets/wine.txt"
wine = pd.read_table(url,sep="\t")
wine.head()
wine.info()
from prince import MFA
wine2 = pd.DataFrame(data=wine.values,
columns = pd.MultiIndex.from_tuples(
[
("others","Label"),
("others","Soil"),
("before shaking","Odor.Intensity"),
("before shaking","Aroma.quality"),
("before shaking","Fruity"),
("before shaking","Flower"),
("before shaking","Spice"),
("vision","Visual.intensity"),
("vision","Nuance"),
("vision","Surface.feeling"),
("after shaking","Odor.intensity"),
("after shaking","Quality.of.odour"),
("after shaking","Fruity"),
("after shaking","Flower"),
("after shaking","Spice"),
("after shaking","Plante"),
("after shaking","Phenolic"),
("after shaking","Aroma.intensity"),
("after shaking","Aroma.persistency"),
("after shaking","Aroma.quality"),
("gustation","Attack.intensity"),
("gustation","Acidity"),
("gustation","Astringency"),
("gustation","Alcohol"),
("gustation","Balance"),
("gustation","Smooth"),
("gustation","Bitterness"),
("gustation","Intensity"),
("gustation","Harmony"),
("overall judgement","Overall.quality"),
("overall judgement","Typical")
]
))
groups = wine2.columns.levels[0].drop(["others","overall judgement"]).tolist()
groups
wine2 = wine2[groups].astype("float")
import prince
mfa = prince.MFA(
n_components=3,
n_iter=3,
copy=True,
check_input=True,
engine='sklearn',
random_state=42
)
mfa = mfa.fit(wine2, groups=groups)
mfa.eigenvalues_summary
mfa.row_coordinates(wine2)
mfa.plot(wine2)
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/mfa2_prince.ipynb | mfa2_prince.ipynb | #changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas as pd
url = "http://factominer.free.fr/factomethods/datasets/wine.txt"
wine = pd.read_table(url,sep="\t")
wine.head()
wine.info()
from prince import MFA
wine2 = pd.DataFrame(data=wine.values,
columns = pd.MultiIndex.from_tuples(
[
("others","Label"),
("others","Soil"),
("before shaking","Odor.Intensity"),
("before shaking","Aroma.quality"),
("before shaking","Fruity"),
("before shaking","Flower"),
("before shaking","Spice"),
("vision","Visual.intensity"),
("vision","Nuance"),
("vision","Surface.feeling"),
("after shaking","Odor.intensity"),
("after shaking","Quality.of.odour"),
("after shaking","Fruity"),
("after shaking","Flower"),
("after shaking","Spice"),
("after shaking","Plante"),
("after shaking","Phenolic"),
("after shaking","Aroma.intensity"),
("after shaking","Aroma.persistency"),
("after shaking","Aroma.quality"),
("gustation","Attack.intensity"),
("gustation","Acidity"),
("gustation","Astringency"),
("gustation","Alcohol"),
("gustation","Balance"),
("gustation","Smooth"),
("gustation","Bitterness"),
("gustation","Intensity"),
("gustation","Harmony"),
("overall judgement","Overall.quality"),
("overall judgement","Typical")
]
))
groups = wine2.columns.levels[0].drop(["others","overall judgement"]).tolist()
groups
wine2 = wine2[groups].astype("float")
import prince
mfa = prince.MFA(
n_components=3,
n_iter=3,
copy=True,
check_input=True,
engine='sklearn',
random_state=42
)
mfa = mfa.fit(wine2, groups=groups)
mfa.eigenvalues_summary
mfa.row_coordinates(wine2)
mfa.plot(wine2) | 0.206814 | 0.6703 |
```
#changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas
df = pandas.read_excel("Olive_Oil_Candisc.xlsx",sheet_name="dataset")
df.info()
display(df.head())
#classes - distribution
df.CLASSE.value_counts()
#analyse factorielle discriminante
from scientisttools.discriminant_analysis import CANDISC
#instanciation
candisc = CANDISC(n_components=2,
target=['CLASSE'],
features_labels=list(df.columns[1:]),
row_labels=df.index,
priors=None,
parallelize=True)
#entraînement -- l'erreur apparaît ici...
candisc.fit(df)
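# Tests statistiques, valeurs propres, coefficients et corrélations de l'analyse discriminante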
candisc.likelihood_test_
print(candisc.manova_)
display(pandas.DataFrame(candisc.eig_.T))
candisc.univariate_test_statistis_
pandas.DataFrame(candisc.coef_,index=candisc.features_labels_,columns=candisc.dim_index_)
candisc.gcenter_
pandas.DataFrame(candisc.score_coef_,index=candisc.features_labels_,columns=candisc.classes_)
candisc.gdisto_
candisc.tcorr_
candisc.bcorr_
candisc.wcorr_
candisc.global_performance_
candisc.anova_
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/candisc_olive_oil.ipynb | candisc_olive_oil.ipynb | #changement de dossier
import os
os.chdir("d:/Bureau/PythonProject/packages/scientisttools/data/")
#importation des données
import pandas
df = pandas.read_excel("Olive_Oil_Candisc.xlsx",sheet_name="dataset")
df.info()
display(df.head())
#classes - distribution
df.CLASSE.value_counts()
#analyse factorielle discriminante
from scientisttools.discriminant_analysis import CANDISC
#instanciation
candisc = CANDISC(n_components=2,
target=['CLASSE'],
features_labels=list(df.columns[1:]),
row_labels=df.index,
priors=None,
parallelize=True)
#entraînement -- l'erreur apparaît ici...
candisc.fit(df)
candisc.likelihood_test_
print(candisc.manova_)
display(pandas.DataFrame(candisc.eig_.T))
candisc.univariate_test_statistis_
pandas.DataFrame(candisc.coef_,index=candisc.features_labels_,columns=candisc.dim_index_)
candisc.gcenter_
pandas.DataFrame(candisc.score_coef_,index=candisc.features_labels_,columns=candisc.classes_)
candisc.gdisto_
candisc.tcorr_
candisc.bcorr_
candisc.wcorr_
candisc.global_performance_
candisc.anova_ | 0.213869 | 0.262771 |
# Additional functions
```
from scientisttools.utils import *
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist,squareform
# Match arg
lst = ["gaussian", "epanechnikov", "rectangular", "triangular"]
print(match_arg("gauss", lst))
print(match_arg("pauss", lst))
# is_euclidean
np.random.seed(123)
w = np.array(np.random.uniform(size=10000)).reshape(100,100)
w = squareform(pdist(w,metric="euclidean"))
is_euclidean(w)
is_euclidean(w,plot=True,printf=True)
w = np.array([[1,4],[2,5],[3,6]])
bicenter_wt(w, [0.2,0.6,0.2], [0.3,0.7])
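# eta2: correlation ratio (eta-squared) between a categorical factor and a numeric variable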
notes = np.array([13, 11, 10, 11, 12, 5, 8, 7, 2, 4, 16, 17, 13, 16, 15])
suivi = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3])
display(notes)
display(suivi)
pd.DataFrame(eta2(suivi,notes),index=['eta - test'])
```
| scientisttools | /scientisttools-0.0.8.tar.gz/scientisttools-0.0.8/notebooks/utils.ipynb | utils.ipynb | from scientisttools.utils import *
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist,squareform
# Match arg
lst = ["gaussian", "epanechnikov", "rectangular", "triangular"]
print(match_arg("gauss", lst))
print(match_arg("pauss", lst))
# is_euclidean
np.random.seed(123)
w = np.array(np.random.uniform(size=10000)).reshape(100,100)
w = squareform(pdist(w,metric="euclidean"))
is_euclidean(w)
is_euclidean(w,plot=True,printf=True)
w = np.array([[1,4],[2,5],[3,6]])
bicenter_wt(w, [0.2,0.6,0.2], [0.3,0.7])
notes = np.array([13, 11, 10, 11, 12, 5, 8, 7, 2, 4, 16, 17, 13, 16, 15])
suivi = np.array([1,1,1,1,1,2,2,2,2,2,3,3,3,3,3])
display(notes)
display(suivi)
pd.DataFrame(eta2(suivi,notes),index=['eta - test']) | 0.407687 | 0.816736 |