Each record below lists these fields in order; for string columns, min/max refer to value length.

| Column | Type | Min | Max |
|---|---|---|---|
| max_stars_repo_path | string | 4 | 197 |
| max_stars_repo_name | string | 6 | 120 |
| max_stars_count | int64 | 0 | 191k |
| id | string | 1 | 8 |
| content | string | 6 | 964k |
| score | float64 | -0.88 | 3.95 |
| int_score | int64 | 0 | 4 |
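The schema above matches the layout of Stack-style code datasets. As a quick orientation, here is a minimal sketch of loading and filtering such a dump with the Hugging Face `datasets` library; the `parquet` format and the `data.parquet` file name are placeholders, not something this preview specifies:

```python
from datasets import load_dataset

# Placeholder path: point data_files at wherever this dump actually lives.
ds = load_dataset("parquet", data_files="data.parquet", split="train")

# Keep only rows the quality scorer rated highly (int_score ranges 0-4 above).
high_quality = ds.filter(lambda row: row["int_score"] >= 3)
print(high_quality[0]["max_stars_repo_path"], high_quality[0]["score"])
```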
Scripts/VRM_DeleteLeafBones.py
TheHoodieGuy02/VRoid2UE4_BlenderScripts
3
12797355
```python
# Obliterate unused leaf bones in VRoid models!
import bpy

context = bpy.context
obj = context.object

# By default, VRM Importer includes leaf bones automatically.
# It's cool and stuff, but it's not necessary for Blender, and will spew out
# scary long warnings when imported to UE4.
# Use this script to obliterate those leaf bones in one click.

if obj.type == 'ARMATURE':
    armature = obj.data
    bpy.ops.object.mode_set(mode='EDIT')
    # Iterate over a copy of the collection: removing bones while iterating
    # the live collection invalidates the iterator.
    for bone in list(armature.edit_bones):
        if bone.name.endswith("_end"):
            armature.edit_bones.remove(bone)
    bpy.ops.object.mode_set(mode='OBJECT')
```
1.03125
1
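A note on the sample above: it assumes the armature is the active object when the script runs. It should also be runnable headlessly via Blender's command line, e.g. `blender model.blend --background --python Scripts/VRM_DeleteLeafBones.py`, where `model.blend` is a placeholder file name.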
proteinham/lattice/turn_circuit.py
couteiral/proteinham
0
12797483
```python
<filename>proteinham/lattice/turn_circuit.py
import math
import numpy as np
import sympy as sp
import symengine as se

from abc import *
from tqdm import tqdm, trange
from copy import deepcopy
from itertools import chain
from functools import reduce

from .qlogic import *
from proteinham.core.hamiltonian import Hamiltonian


class CommonTurnCircuitHamiltonian(Hamiltonian):

    is_TurnCircuit = True

    def __init__(self, pepstring, ss_fmat='babej'):
        """Encapsulates the expression and methods of a protein
        hamiltonian of the "turn circuit encoding" form, described
        by Babbush et al., 2012."""
        self._proc_input(pepstring)
        self.ss_fmat = ss_fmat
        self.n_bits = self.dim * (self.naas-1)
        self._sum_strings = dict()
        self._create_bitreg()

    @property
    def encoding(self):
        return 'turn_circuit'

    def build_exp(self):
        self.expr = (self.naas+1) * self.back_term()
        if self.dim == 3:
            self.expr += (self.naas+1)**2 * self.redun_term()
        self.expr += (self.naas+1) * self.steric_term()
        self.expr += self.interaction_term()
        #self.expr = se.expand(self.expr)
        self.n_terms = len(self.expr.args)

    def get(self, k):
        """Access the kth bit of the hamiltonian."""
        return self.bit_list[k]

    def half_adder(self, q_i, q_j):
        """Applies a half-adder."""
        return qand([q_i, q_j]), qxor(q_i, q_j)

    @property
    @abstractmethod
    def dim(self):
        pass


class TurnCircuitHamiltonian2D(CommonTurnCircuitHamiltonian):

    is_2D = True

    @property
    def dim(self):
        return 2

    def pointer(self, i):
        """Points to the start of the string describing the ith turn."""
        return 2*i

    def circuit_xp(self, q_i, q_j):
        """Implements a circuit that returns 1 if the chain
        moves in the direction x+."""
        return (1-q_i)*q_j

    def circuit_xn(self, q_i, q_j):
        """Implements a circuit that returns 1 if the chain
        moves in the direction x-."""
        return q_i*(1-q_j)

    def circuit_yp(self, q_i, q_j):
        """Implements a circuit that returns 1 if the chain
        moves in the direction y+."""
        return q_i*q_j

    def circuit_yn(self, q_i, q_j):
        """Implements a circuit that returns 1 if the chain
        moves in the direction y-."""
        return (1-q_i)*(1-q_j)

    def sum_string(self, i, j, k):
        """Computes the sum string."""
        if i > j:
            raise ValueError("i > j")

        if (i, j, k) in self._sum_strings.keys():
            return self._sum_strings[(i, j, k)]

        if k == 'x+':
            sum_string = [self.circuit_xp(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1))
                          for t in range(i, j)]
        elif k == 'x-':
            sum_string = [self.circuit_xn(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1))
                          for t in range(i, j)]
        elif k == 'y+':
            sum_string = [self.circuit_yp(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1))
                          for t in range(i, j)]
        elif k == 'y-':
            sum_string = [self.circuit_yn(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1))
                          for t in range(i, j)]
        else:
            raise ValueError('k was {:s}'.format(k))

        n_layers = j-i-1
        counter = np.zeros(n_layers)  # lazy way to keep track of half-adders
        sum_string = list(reversed(sum_string))
        for t in chain(range(n_layers), reversed(range(n_layers-1))):
            if t % 2 == 0:
                iterator = range(0, t+1, 2) if t > 0 else [0]
            else:
                iterator = range(1, t+1, 2) if t > 1 else [1]

            for h in iterator:
                if self.ss_fmat == 'babej':
                    if counter[h] > math.log2(j-i):
                        continue
                    else:
                        counter[h] += 1
                a, b = self.half_adder(sum_string[h], sum_string[h+1])
                sum_string[h] = a
                sum_string[h+1] = b

        maximum = int(math.ceil(math.log2(j-i)))
        sum_string = list(reversed(sum_string))
        self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x])
                                        for x in range(maximum)]
        return self._sum_strings[(i, j, k)]

    def back_term(self):
        """Ensures that the chain does not go back on itself."""
        return sum([
              self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1))
            * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))
            + self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1))
            * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))
            + self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1))
            * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))
            + self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1))
            * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1))
            for i in range(self.naas-2)])

    def overlap(self, i, j):
        """Computes the overlap term for residues i and j."""
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if (j-i) % 2 != 0 or maximum < 2:
            return sp.numbers.Integer(0)

        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-')
        }

        return qand(
            [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] +
            [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)])

    def steric_term(self):
        """Ensures that the chain does not overlap."""
        return sum([
            sum([
                self.overlap(i, j)
                for j in range(i+1, self.naas)])
            for i in range(self.naas)])

    def a_x(self, i, j):
        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-')
        }
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if maximum == 0:
            return 0

        prefactor = qand([qxnor(sumstring['y+'][r], sumstring['y-'][r])
                          for r in range(maximum)])
        return prefactor * (
              qxor(sumstring['x+'][0], sumstring['x-'][0])
            * qand([qxnor(sumstring['x+'][r], sumstring['x-'][r])
                    for r in range(1, maximum)])
            + sum([
                  qxor(sumstring['x+'][p-2], sumstring['x+'][p-1])
                * qand([qxnor(sumstring['x+'][r-1], sumstring['x+'][r])
                        for r in range(1, p-1)])
                * qand([qxor(sumstring['x+'][r-1], sumstring['x-'][r-1])
                        for r in range(1, p+1)])
                * qand([qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1])
                        for r in range(p+1, maximum+1)])
                for p in range(2, maximum+1)]))

    def a_y(self, i, j):
        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-')
        }
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if maximum == 0:
            return 0

        prefactor = qand([qxnor(sumstring['x+'][r], sumstring['x-'][r])
                          for r in range(maximum)])
        return prefactor * (
              qxor(sumstring['y+'][0], sumstring['y-'][0])
            * qand([qxnor(sumstring['y+'][r], sumstring['y-'][r])
                    for r in range(1, maximum)])
            + sum([
                  qxor(sumstring['y+'][p-2], sumstring['y+'][p-1])
                * qand([qxnor(sumstring['y+'][r-1], sumstring['y+'][r])
                        for r in range(1, p-1)])
                * qand([qxor(sumstring['y+'][r-1], sumstring['y-'][r-1])
                        for r in range(1, p+1)])
                * qand([qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1])
                        for r in range(p+1, maximum+1)])
                for p in range(2, maximum+1)]))

    def interaction_term_ij(self, i, j):
        return -1 * self.int_mat[i, j] * (self.a_x(i, j) + self.a_y(i, j))

    def interaction_term(self):
        """Computes contacts between residues."""
        expr = sp.numbers.Integer(0)
        for i in range(self.naas-3):
            for j in range(1, math.ceil((self.naas-i-1)/2)):
                if self.int_mat[i, 1+i+2*j] == 0:
                    continue
                expr += self.interaction_term_ij(i, 1+i+2*j)
        return expr


class TurnCircuitHamiltonian3D(CommonTurnCircuitHamiltonian):

    is_3D = True

    @property
    def dim(self):
        return 3

    def pointer(self, i):
        """Points to the start of the string describing the ith turn."""
        return 3*i

    def circuit_xp(self, q_i, q_j, q_k):
        """Implements a circuit that returns 1 if the chain
        moves in the direction x+."""
        return q_i * q_j * q_k

    def circuit_xn(self, q_i, q_j, q_k):
        """Implements a circuit that returns 1 if the chain
        moves in the direction x-."""
        return q_i * (1-q_j) * (1-q_k)

    def circuit_yp(self, q_i, q_j, q_k):
        """Implements a circuit that returns 1 if the chain
        moves in the direction y+."""
        return q_i * (1-q_j) * q_k

    def circuit_yn(self, q_i, q_j, q_k):
        """Implements a circuit that returns 1 if the chain
        moves in the direction y-."""
        return q_i * q_j * (1-q_k)

    def circuit_zp(self, q_i, q_j, q_k):
        """Implements a circuit that returns 1 if the chain
        moves in the direction z+."""
        return (1-q_i) * (1-q_j) * q_k

    def circuit_zn(self, q_i, q_j, q_k):
        """Implements a circuit that returns 1 if the chain
        moves in the direction z-."""
        return (1-q_i) * q_j * (1-q_k)

    def circuit_000(self, q_i, q_j, q_k):
        """Implements a circuit that checks the nonsensical string 000."""
        return (1-q_i) * (1-q_j) * (1-q_k)

    def circuit_011(self, q_i, q_j, q_k):
        """Implements a circuit that checks the nonsensical string 011."""
        return (1-q_i) * q_j * q_k

    def sum_string(self, i, j, k):
        """Computes the sum string."""
        if i > j:
            raise ValueError("i > j")

        if (i, j, k) in self._sum_strings.keys():
            return self._sum_strings[(i, j, k)]

        if k == 'x+':
            sum_string = [self.circuit_xp(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1),
                                          self.get(self.pointer(t)+2))
                          for t in range(i, j)]
        elif k == 'x-':
            sum_string = [self.circuit_xn(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1),
                                          self.get(self.pointer(t)+2))
                          for t in range(i, j)]
        elif k == 'y+':
            sum_string = [self.circuit_yp(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1),
                                          self.get(self.pointer(t)+2))
                          for t in range(i, j)]
        elif k == 'y-':
            sum_string = [self.circuit_yn(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1),
                                          self.get(self.pointer(t)+2))
                          for t in range(i, j)]
        elif k == 'z+':
            sum_string = [self.circuit_zp(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1),
                                          self.get(self.pointer(t)+2))
                          for t in range(i, j)]
        elif k == 'z-':
            sum_string = [self.circuit_zn(self.get(self.pointer(t)),
                                          self.get(self.pointer(t)+1),
                                          self.get(self.pointer(t)+2))
                          for t in range(i, j)]
        else:
            raise ValueError('k was {:s}'.format(k))

        n_layers = j-i-1
        counter = np.zeros(n_layers)  # lazy way to keep track of half-adders
        sum_string = list(reversed(sum_string))
        for t in chain(range(n_layers), reversed(range(n_layers-1))):
            if t % 2 == 0:
                iterator = range(0, t+1, 2) if t > 0 else [0]
            else:
                iterator = range(1, t+1, 2) if t > 1 else [1]

            for h in iterator:
                if self.ss_fmat == 'babej':
                    if counter[h] > math.log2(j-i):
                        continue
                    else:
                        counter[h] += 1
                a, b = self.half_adder(sum_string[h], sum_string[h+1])
                sum_string[h] = a
                sum_string[h+1] = b

        maximum = int(math.ceil(math.log2(j-i)))
        sum_string = list(reversed(sum_string))
        self._sum_strings[(i, j, k)] = [sp.expand(sum_string[x])
                                        for x in range(maximum)]
        return self._sum_strings[(i, j, k)]

    def redun_term(self):
        """Implements the term that penalises meaningless
        residue bitstrings 000 and 011."""
        return sum([
              self.circuit_000(self.get(self.pointer(k)),
                               self.get(self.pointer(k)+1),
                               self.get(self.pointer(k)+2))
            + self.circuit_011(self.get(self.pointer(k)),
                               self.get(self.pointer(k)+1),
                               self.get(self.pointer(k)+2))
            for k in range(self.naas-1)])

    def back_term(self):
        """Ensures that the chain does not go back on itself."""
        return sum([
              self.circuit_xp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))
            * self.circuit_xn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))
            + self.circuit_xn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))
            * self.circuit_xp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))
            + self.circuit_yp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))
            * self.circuit_yn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))
            + self.circuit_yn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))
            * self.circuit_yp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))
            + self.circuit_zp(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))
            * self.circuit_zn(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))
            + self.circuit_zn(self.get(self.pointer(i)), self.get(self.pointer(i)+1), self.get(self.pointer(i)+2))
            * self.circuit_zp(self.get(self.pointer(i+1)), self.get(self.pointer(i+1)+1), self.get(self.pointer(i+1)+2))
            for i in range(self.naas-2)])

    def overlap(self, i, j):
        """Computes the overlap term for residues i and j."""
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if (j-i) % 2 != 0 or maximum < 2:
            return sp.numbers.Integer(0)

        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-'),
            'z+': self.sum_string(i, j, 'z+'),
            'z-': self.sum_string(i, j, 'z-'),
        }

        return qand(
            [qxnor(sumstring['x+'][r], sumstring['x-'][r]) for r in range(maximum)] +
            [qxnor(sumstring['y+'][r], sumstring['y-'][r]) for r in range(maximum)] +
            [qxnor(sumstring['z+'][r], sumstring['z-'][r]) for r in range(maximum)])

    def steric_term(self):
        """Ensures that the chain does not overlap."""
        return sum([
            sum([
                self.overlap(i, j)
                for j in range(i+1, self.naas)])
            for i in range(self.naas)])

    def a_x(self, i, j):
        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-'),
            'z+': self.sum_string(i, j, 'z+'),
            'z-': self.sum_string(i, j, 'z-')
        }
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if maximum == 0:
            return 0

        prefactor = qand([
            qand([qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r])
                  for r in range(maximum)])
            for k in ['y', 'z']])
        return prefactor * (
              qxor(sumstring['x+'][0], sumstring['x-'][0])
            * qand([qxnor(sumstring['x+'][r], sumstring['x-'][r])
                    for r in range(1, maximum)])
            + sum([
                  qxor(sumstring['x+'][p-2], sumstring['x+'][p-1])
                * qand([qxnor(sumstring['x+'][r-1], sumstring['x+'][r])
                        for r in range(1, p-1)])
                * qand([qxor(sumstring['x+'][r-1], sumstring['x-'][r-1])
                        for r in range(1, p+1)])
                * qand([qxnor(sumstring['x+'][r-1], sumstring['x-'][r-1])
                        for r in range(p+1, maximum+1)])
                for p in range(2, maximum+1)]))

    def a_y(self, i, j):
        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-'),
            'z+': self.sum_string(i, j, 'z+'),
            'z-': self.sum_string(i, j, 'z-')
        }
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if maximum == 0:
            return 0

        prefactor = qand([
            qand([qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r])
                  for r in range(maximum)])
            for k in ['x', 'z']])
        return prefactor * (
              qxor(sumstring['y+'][0], sumstring['y-'][0])
            * qand([qxnor(sumstring['y+'][r], sumstring['y-'][r])
                    for r in range(1, maximum)])
            + sum([
                  qxor(sumstring['y+'][p-2], sumstring['y+'][p-1])
                * qand([qxnor(sumstring['y+'][r-1], sumstring['y+'][r])
                        for r in range(1, p-1)])
                * qand([qxor(sumstring['y+'][r-1], sumstring['y-'][r-1])
                        for r in range(1, p+1)])
                * qand([qxnor(sumstring['y+'][r-1], sumstring['y-'][r-1])
                        for r in range(p+1, maximum+1)])
                for p in range(2, maximum+1)]))

    def a_z(self, i, j):
        sumstring = {
            'x+': self.sum_string(i, j, 'x+'),
            'x-': self.sum_string(i, j, 'x-'),
            'y+': self.sum_string(i, j, 'y+'),
            'y-': self.sum_string(i, j, 'y-'),
            'z+': self.sum_string(i, j, 'z+'),
            'z-': self.sum_string(i, j, 'z-')
        }
        maximum = int(math.ceil(math.log2(abs(i-j)))) if i-j != 0 else 0
        if maximum == 0:
            return 0

        prefactor = qand([
            qand([qxnor(sumstring['%s+' % k][r], sumstring['%s-' % k][r])
                  for r in range(maximum)])
            for k in ['x', 'y']])
        return prefactor * (
              qxor(sumstring['z+'][0], sumstring['z-'][0])
            * qand([qxnor(sumstring['z+'][r], sumstring['z-'][r])
                    for r in range(1, maximum)])
            + sum([
                  qxor(sumstring['z+'][p-2], sumstring['z+'][p-1])
                * qand([qxnor(sumstring['z+'][r-1], sumstring['z+'][r])
                        for r in range(1, p-1)])
                * qand([qxor(sumstring['z+'][r-1], sumstring['z-'][r-1])
                        for r in range(1, p+1)])
                * qand([qxnor(sumstring['z+'][r-1], sumstring['z-'][r-1])
                        for r in range(p+1, maximum+1)])
                for p in range(2, maximum+1)]))

    def interaction_term_ij(self, i, j):
        return -1 * self.int_mat[i, j] * (self.a_x(i, j) +
                                          self.a_y(i, j) +
                                          self.a_z(i, j))

    def interaction_term(self):
        """Computes contacts between residues."""
        expr = sp.numbers.Integer(0)
        for i in range(self.naas-3):
            for j in range(1, math.ceil((self.naas-i-1)/2)):
                if self.int_mat[i, 1+i+2*j] == 0:
                    continue
                expr += self.interaction_term_ij(i, 1+i+2*j)
        return expr
```
2.34375
2
django/website/wagtail_vue/apps/pages/models.py
hyshka/wagtail-vue-talk
26
12797611
```python
<reponame>hyshka/wagtail-vue-talk
# -*- coding: utf-8 -*-
"""Page models."""
from django.db import models

from wagtail.admin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.core.models import Page
from wagtail.api import APIField
from wagtail.images.api.fields import ImageRenditionField
from wagtail.core.fields import StreamField

from .streamfields import ContentBlock, ImageGalleryBlock, CallToActionBlock


class HomePage(Page):
    """A home page class."""

    template = "cms/pages/home_page.html"
    subpage_types = ['pages.FlexPage']

    banner_subtitle = models.CharField(
        max_length=50,
        blank=True,
        null=True,
        help_text="An optional banner subtitle",
    )
    banner_image = models.ForeignKey(
        "wagtailimages.Image",
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        related_name="+",
        help_text="An optional banner image",
    )
    content = StreamField([
        ('ContentBlock', ContentBlock()),
        ('ImageGalleryBlock', ImageGalleryBlock()),
        ('CallToActionBlock', CallToActionBlock()),
    ], null=True, blank=True)

    content_panels = [
        FieldPanel("title", classname="full title"),
        ImageChooserPanel("banner_image"),
        FieldPanel("banner_subtitle"),
        StreamFieldPanel('content'),
    ]

    api_fields = [
        APIField("title"),
        APIField("banner_subtitle"),
        APIField("banner_image"),
        APIField("banner_image_thumbnail",
                 serializer=ImageRenditionField("fill-100x100", source="banner_image")),
        APIField("content"),
    ]

    class Meta:
        """Meta information."""

        verbose_name = "Home Page"
        verbose_name_plural = "Home Pages"


class FlexPage(Page):
    """A Flexible page class.

    Used for generic pages that don't have a true purpose."""

    template = "cms/pages/flex_page.html"
    subpage_types = []

    content = StreamField([
        ('ContentBlock', ContentBlock()),
        ('ImageGalleryBlock', ImageGalleryBlock()),
        ('CallToActionBlock', CallToActionBlock()),
    ], null=True, blank=True)

    content_panels = [
        FieldPanel("title", classname="full title"),
        StreamFieldPanel('content'),
    ]

    api_fields = [
        APIField("title"),
        APIField("content"),
    ]

    class Meta:
        """Meta information."""

        verbose_name = "Flex Page"
        verbose_name_plural = "Flex Pages"
```
1.296875
1
easyai/train_task.py
lpj0822/image_point_cloud_det
1
12797739
```python
<reponame>lpj0822/image_point_cloud_det<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:

from easyai.helper.arguments_parse import TaskArgumentsParse
from easyai.tasks.cls.classify_train import ClassifyTrain
from easyai.tasks.det2d.detect2d_train import Detection2dTrain
from easyai.tasks.seg.segment_train import SegmentionTrain
from easyai.tasks.pc_cls.pc_classify_train import PointCloudClassifyTrain
from easyai.tools.model_to_onnx import ModelConverter
from easyai.base_name.task_name import TaskName


class TrainTask():

    def __init__(self, train_path, val_path, pretrain_model_path, is_convert=False):
        self.train_path = train_path
        self.val_path = val_path
        self.pretrain_model_path = pretrain_model_path
        self.is_convert = is_convert

    def classify_train(self, cfg_path, gpu_id, config_path):
        cls_train_task = ClassifyTrain(cfg_path, gpu_id, config_path)
        cls_train_task.load_pretrain_model(self.pretrain_model_path)
        cls_train_task.train(self.train_path, self.val_path)
        self.image_model_convert(cls_train_task, cfg_path)

    def detect2d_train(self, cfg_path, gpu_id, config_path):
        det2d_train = Detection2dTrain(cfg_path, gpu_id, config_path)
        det2d_train.load_pretrain_model(self.pretrain_model_path)
        det2d_train.train(self.train_path, self.val_path)
        self.image_model_convert(det2d_train, cfg_path)

    def segment_train(self, cfg_path, gpu_id, config_path):
        seg_train = SegmentionTrain(cfg_path, gpu_id, config_path)
        seg_train.load_pretrain_model(self.pretrain_model_path)
        seg_train.train(self.train_path, self.val_path)
        self.image_model_convert(seg_train, cfg_path)

    def pc_classify_train(self, cfg_path, gpu_id, config_path):
        pc_cls_train_task = PointCloudClassifyTrain(cfg_path, gpu_id, config_path)
        pc_cls_train_task.load_pretrain_model(self.pretrain_model_path)
        pc_cls_train_task.train(self.train_path, self.val_path)

    def image_model_convert(self, train_task, cfg_path):
        if self.is_convert:
            converter = ModelConverter(train_task.train_task_config.image_size)
            converter.model_convert(cfg_path,
                                    train_task.train_task_config.best_weights_file,
                                    train_task.train_task_config.snapshot_path)


def main():
    print("process start...")
    options = TaskArgumentsParse.train_input_parse()
    train_task = TrainTask(options.trainPath, options.valPath, options.pretrainModel)
    if options.task_name == TaskName.Classify_Task:
        train_task.classify_train(options.model, 0, options.config_path)
    elif options.task_name == TaskName.Detect2d_Task:
        train_task.detect2d_train(options.model, 0, options.config_path)
    elif options.task_name == TaskName.Segment_Task:
        train_task.segment_train(options.model, 0, options.config_path)
    elif options.task_name == TaskName.PC_Classify_Task:
        train_task.pc_classify_train(options.model, 0, options.config_path)
    print("process end!")


if __name__ == '__main__':
    main()
```
1.765625
2
study/study8.py
tanyong-cq/pythonlearning
0
12797867
```python
<reponame>tanyong-cq/pythonlearning
#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
dict
'''

d1 = {'a': 1, 'b': 2, 'c': 3}
print(d1)
print(d1.keys())
print(d1.values())
print(str(d1))
print(len(d1))

print(d1['a'])
d1['a'] = 10
print(d1['a'])

del d1['a']
print(d1)

d1.clear()
print(d1)
print(d1.get('a'))
```
2.09375
2
Python/sum.py
AbdalrohmanGitHub/Logik
13
12797995
```python
<gh_stars>10-100
# This program reads a number n and computes the sum 1 + 2 + ... + n.
n = input('Type a natural number and press return: ')
n = int(n)
s = {i for i in range(1, n+1)}
s = sum(s)
print('The sum 1 + 2 + ... + ', n, ' is equal to ', s, '.', sep='')
```
2.71875
3
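A side note on the sum.py sample above: the set comprehension changes nothing here, because `range(1, n+1)` already yields distinct values, so `sum(range(1, n+1))` and the closed form n·(n+1)/2 give the same answer. A quick check with a made-up value of n (not from the source):

```python
n = 10  # arbitrary example value
assert sum({i for i in range(1, n + 1)}) == sum(range(1, n + 1)) == n * (n + 1) // 2 == 55
```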
examples/misc/hexapod_z.py
brutzl/pymbs
0
12798123
```python
# -*- coding: utf-8 -*-
'''
This file is part of PyMbs.

PyMbs is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.

PyMbs is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with PyMbs. If not, see <http://www.gnu.org/licenses/>.

Copyright 2011, 2012 <NAME>, <NAME>, <NAME>, <NAME>
'''

'''
Created on 13.05.2011

@author: <NAME>

Adjust the paths for the visualization !!!
'''

#################################
#     import PyMbs & Lib.       #
#################################
from PyMbs.Input import *
from PyMbs.Symbolics import Matrix, cos, sin

pi = 3.1415926535897932384626433832795

#################################
#     set up inertial frame     #
#################################
world = MbsSystem([0, 0, -1])

#################################
#          Parameters           #
#################################
# Length of the cylinder rods and housings
hoehe = 0.01
R_AP = 0.3
R_BP = 0.5
R_Zyl_stange = 0.02
R_Zyl_geh = 0.04
l_zyl = 0.6
m_z_geh = 0.1
m_z_st = 0.1

c = world.addParam('c', 10)
c1 = world.addParam('c1', 5)

m1 = world.addParam('m1', 1.0)
R1 = world.addParam('R1', R_BP)

m2 = world.addParam('m2', 50)
R2 = world.addParam('R2', R_AP)
H2 = world.addParam('H2', hoehe)
I2x = world.addParam('I2x', (m2*H2**2)/12)  # inertia of a solid cylinder about the x-axis
I2y = world.addParam('I2y', (m2*H2**2)/12)  # inertia of a solid cylinder about the y-axis
I2z = world.addParam('I2z', (m2*R2**2)/2)   # inertia of a solid cylinder about the z-axis

################################################
m_Zyl_Geh = world.addParam('m_Zyl_Geh', 18.6)
l_Zyl_Geh = world.addParam('l_Zyl_Geh', 0.74)
cg_Zyl_Geh_x = world.addParam('cg_Zyl_Geh_x', 0.353)
I_Zyl_Geh_x = world.addParam('I_Zyl_Geh_x', 0.027)
I_Zyl_Geh_y = world.addParam('I_Zyl_Geh_y', 1.061)
I_Zyl_Geh_z = world.addParam('I_Zyl_Geh_z', 1.061)

m_Zyl_Stange = world.addParam('m_Zyl_Stange', 8.4)
l_Zyl_Stange = world.addParam('l_Zyl_Stange', 0.66)
cg_Zyl_Stange_x = world.addParam('cg_Zyl_Stange_x', -0.347)
I_Zyl_Stange_x = world.addParam('I_Zyl_Stange_x', 0.003)
I_Zyl_Stange_y = world.addParam('I_Zyl_Stange_y', 0.433)
I_Zyl_Stange_z = world.addParam('I_Zyl_Stange_z', 0.432)

###############
# Arrangement #
###############
phi_BP_1 = pi/2 - pi/18
phi_BP_2 = phi_BP_1 + pi/9
phi_BP_3 = phi_BP_1 + 2*pi/3
phi_BP_4 = phi_BP_2 + 2*pi/3
phi_BP_5 = phi_BP_3 + 2*pi/3
phi_BP_6 = phi_BP_4 + 2*pi/3

phi_AP_1 = pi/6 + pi/18
phi_AP_2 = phi_AP_1 + 2*pi/3 - pi/9
phi_AP_3 = phi_AP_1 + 2*pi/3
phi_AP_4 = phi_AP_3 + 2*pi/3 - pi/9
phi_AP_5 = phi_AP_3 + 2*pi/3
phi_AP_6 = phi_AP_4 + 2*pi/3

################
#   Hexapod    #
################

#################################
#          Bodies & KS          #
#################################
Ground = world.addBody(name='Ground', mass=1)
Ground.addFrame(name='KS_1', p=[0, 0, 0])
BP = Ground.KS_1

BP.addFrame(name='BP_visual', p=[0, 0, 0], R=rotMat(pi/2, 'x'))
BP.addFrame(name='BP_Anlenkpunkt_1', p=[R1*cos(phi_BP_1), R1*sin(phi_BP_1), 0])
BP.addFrame(name='BP_Anlenkpunkt_2', p=[R1*cos(phi_BP_2), R1*sin(phi_BP_2), 0])
BP.addFrame(name='BP_Anlenkpunkt_3', p=[R1*cos(phi_BP_3), R1*sin(phi_BP_3), 0])
BP.addFrame(name='BP_Anlenkpunkt_4', p=[R1*cos(phi_BP_4), R1*sin(phi_BP_4), 0])
BP.addFrame(name='BP_Anlenkpunkt_5', p=[R1*cos(phi_BP_5), R1*sin(phi_BP_5), 0])
BP.addFrame(name='BP_Anlenkpunkt_6', p=[R1*cos(phi_BP_6), R1*sin(phi_BP_6), 0])
BP.addFrame(name='BP_Feder', p=[0, 0, 1.1])

################################################################################
AP = world.addBody(name='Arbeitsplattform', mass=m2, inertia=diag([I2x, I2y, I2z]))
AP.addFrame(name='AP_visual', p=[0, 0, 0], R=rotMat(pi/2, 'x'))
AP.addFrame(name='AP_Anlenkpunkt_1', p=[R2*cos(phi_AP_1), R2*sin(phi_AP_1), 0])
AP.addFrame(name='AP_Anlenkpunkt_2', p=[R2*cos(phi_AP_2), R2*sin(phi_AP_2), 0])
AP.addFrame(name='AP_Anlenkpunkt_3', p=[R2*cos(phi_AP_3), R2*sin(phi_AP_3), 0])
AP.addFrame(name='AP_Anlenkpunkt_4', p=[R2*cos(phi_AP_4), R2*sin(phi_AP_4), 0])
AP.addFrame(name='AP_Anlenkpunkt_5', p=[R2*cos(phi_AP_5), R2*sin(phi_AP_5), 0])
AP.addFrame(name='AP_Anlenkpunkt_6', p=[R2*cos(phi_AP_6), R2*sin(phi_AP_6), 0])

################################################################################
'''
# For visualization in Dymola
Zyl_geh_1 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_1')
Zyl_geh_1.addFrame('Zyl_geh_1_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_geh_1.addFrame('Zyl_geh_1_cs', p=[0, 0, 0])
Zyl_geh_1.addFrame('Zyl_geh_1_cs_2', p=[0, 0, 0])

Zyl_geh_2 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_2')
Zyl_geh_2.addFrame('Zyl_geh_2_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_geh_2.addFrame('Zyl_geh_2_cs', p=[0, 0, 0])
Zyl_geh_2.addFrame('Zyl_geh_2_cs_2', p=[0, 0, 0])

Zyl_geh_3 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_3')
Zyl_geh_3.addFrame('Zyl_geh_3_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_geh_3.addFrame('Zyl_geh_3_cs', p=[0, 0, 0])
Zyl_geh_3.addFrame('Zyl_geh_3_cs_2', p=[0, 0, 0])

Zyl_geh_4 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_4')
Zyl_geh_4.addFrame('Zyl_geh_4_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_geh_4.addFrame('Zyl_geh_4_cs', p=[0, 0, 0])
Zyl_geh_4.addFrame('Zyl_geh_4_cs_2', p=[0, 0, 0])

Zyl_geh_5 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_5')
Zyl_geh_5.addFrame('Zyl_geh_5_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_geh_5.addFrame('Zyl_geh_5_cs', p=[0, 0, 0])
Zyl_geh_5.addFrame('Zyl_geh_5_cs_2', p=[0, 0, 0])

Zyl_geh_6 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_6')
Zyl_geh_6.addFrame('Zyl_geh_6_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_geh_6.addFrame('Zyl_geh_6_cs', p=[0, 0, 0])
Zyl_geh_6.addFrame('Zyl_geh_6_cs_2', p=[0, 0, 0])

################################################################################
Zyl_stange_1 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_1')
Zyl_stange_1.addFrame('Zyl_stange_1_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_stange_1.addFrame('Zyl_stange_1_cs', p=[0, 0, 0])
Zyl_stange_1.addFrame('Zyl_stange_1_cs_2', p=[0, 0, 0])

Zyl_stange_2 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_2')
Zyl_stange_2.addFrame('Zyl_stange_2_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_stange_2.addFrame('Zyl_stange_2_cs', p=[0, 0, 0])
Zyl_stange_2.addFrame('Zyl_stange_2_cs_2', p=[0, 0, 0])

Zyl_stange_3 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_3')
Zyl_stange_3.addFrame('Zyl_stange_3_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_stange_3.addFrame('Zyl_stange_3_cs', p=[0, 0, 0])
Zyl_stange_3.addFrame('Zyl_stange_3_cs_2', p=[0, 0, 0])

Zyl_stange_4 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_4')
Zyl_stange_4.addFrame('Zyl_stange_4_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_stange_4.addFrame('Zyl_stange_4_cs', p=[0, 0, 0])
Zyl_stange_4.addFrame('Zyl_stange_4_cs_2', p=[0, 0, 0])

Zyl_stange_5 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_5')
Zyl_stange_5.addFrame('Zyl_stange_5_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_stange_5.addFrame('Zyl_stange_5_cs', p=[0, 0, 0])
Zyl_stange_5.addFrame('Zyl_stange_5_cs_2', p=[0, 0, 0])

Zyl_stange_6 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_6')
Zyl_stange_6.addFrame('Zyl_stange_6_visual', p=[0, 0, 0], R=rotMat(pi/2, 'y')*rotMat(pi/2, 'x'))
Zyl_stange_6.addFrame('Zyl_stange_6_cs', p=[0, 0, 0])
Zyl_stange_6.addFrame('Zyl_stange_6_cs_2', p=[0, 0, 0])
'''

# For visualization in PyMbs
Zyl_geh_1 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_1')
Zyl_geh_1.addFrame('Zyl_geh_1_visual', p=[0, 0, l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_geh_1.addFrame('Zyl_geh_1_cs', p=[0, 0, 0])
Zyl_geh_1.addFrame('Zyl_geh_1_cs_2', p=[0, 0, 0])

Zyl_geh_2 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_2')
Zyl_geh_2.addFrame('Zyl_geh_2_visual', p=[0, 0, l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_geh_2.addFrame('Zyl_geh_2_cs', p=[0, 0, 0])
Zyl_geh_2.addFrame('Zyl_geh_2_cs_2', p=[0, 0, 0])

Zyl_geh_3 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_3')
Zyl_geh_3.addFrame('Zyl_geh_3_visual', p=[0, 0, l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_geh_3.addFrame('Zyl_geh_3_cs', p=[0, 0, 0])
Zyl_geh_3.addFrame('Zyl_geh_3_cs_2', p=[0, 0, 0])

Zyl_geh_4 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_4')
Zyl_geh_4.addFrame('Zyl_geh_4_visual', p=[0, 0, l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_geh_4.addFrame('Zyl_geh_4_cs', p=[0, 0, 0])
Zyl_geh_4.addFrame('Zyl_geh_4_cs_2', p=[0, 0, 0])

Zyl_geh_5 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_5')
Zyl_geh_5.addFrame('Zyl_geh_5_visual', p=[0, 0, l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_geh_5.addFrame('Zyl_geh_5_cs', p=[0, 0, 0])
Zyl_geh_5.addFrame('Zyl_geh_5_cs_2', p=[0, 0, 0])

Zyl_geh_6 = world.addBody(mass=m_Zyl_Geh, cg=[cg_Zyl_Geh_x, 0, 0],
                          inertia=diag([I_Zyl_Geh_x, I_Zyl_Geh_y, I_Zyl_Geh_z]), name='Zyl_geh_6')
Zyl_geh_6.addFrame('Zyl_geh_6_visual', p=[0, 0, l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_geh_6.addFrame('Zyl_geh_6_cs', p=[0, 0, 0])
Zyl_geh_6.addFrame('Zyl_geh_6_cs_2', p=[0, 0, 0])

################################################################################
Zyl_stange_1 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_1')
Zyl_stange_1.addFrame('Zyl_stange_1_visual', p=[0, 0, -l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_stange_1.addFrame('Zyl_stange_1_cs', p=[0, 0, 0])
Zyl_stange_1.addFrame('Zyl_stange_1_cs_2', p=[0, 0, 0])

Zyl_stange_2 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_2')
Zyl_stange_2.addFrame('Zyl_stange_2_visual', p=[0, 0, -l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_stange_2.addFrame('Zyl_stange_2_cs', p=[0, 0, 0])
Zyl_stange_2.addFrame('Zyl_stange_2_cs_2', p=[0, 0, 0])

Zyl_stange_3 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_3')
Zyl_stange_3.addFrame('Zyl_stange_3_visual', p=[0, 0, -l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_stange_3.addFrame('Zyl_stange_3_cs', p=[0, 0, 0])
Zyl_stange_3.addFrame('Zyl_stange_3_cs_2', p=[0, 0, 0])

Zyl_stange_4 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_4')
Zyl_stange_4.addFrame('Zyl_stange_4_visual', p=[0, 0, -l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_stange_4.addFrame('Zyl_stange_4_cs', p=[0, 0, 0])
Zyl_stange_4.addFrame('Zyl_stange_4_cs_2', p=[0, 0, 0])

Zyl_stange_5 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_5')
Zyl_stange_5.addFrame('Zyl_stange_5_visual', p=[0, 0, -l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_stange_5.addFrame('Zyl_stange_5_cs', p=[0, 0, 0])
Zyl_stange_5.addFrame('Zyl_stange_5_cs_2', p=[0, 0, 0])

Zyl_stange_6 = world.addBody(mass=m_Zyl_Stange, cg=[cg_Zyl_Stange_x, 0, 0],
                             inertia=diag([I_Zyl_Stange_x, I_Zyl_Stange_y, I_Zyl_Stange_z]), name='Zyl_stange_6')
Zyl_stange_6.addFrame('Zyl_stange_6_visual', p=[0, 0, -l_zyl/2], R=rotMat(pi/2, 'x'))
Zyl_stange_6.addFrame('Zyl_stange_6_cs', p=[0, 0, 0])
Zyl_stange_6.addFrame('Zyl_stange_6_cs_2', p=[0, 0, 0])

#################################
#            Joints             #
#################################
#world.addJoint('fix_BP', world, BP)
world.addJoint(world, Ground, name='fix_BP')
jAP = world.addJoint(world, AP, ['Tx', 'Ty', 'Tz', 'Rx', 'Ry', 'Rz'],
                     [0, 0, 1, 0, 0, 0], name='free_AP')

world.addJoint(BP.BP_Anlenkpunkt_1, Zyl_geh_1.Zyl_geh_1_cs_2, ['Rz', 'Ry'], [0, 0], name='Zyl_geh_1_an_BP_1')
world.addJoint(BP.BP_Anlenkpunkt_2, Zyl_geh_2.Zyl_geh_2_cs_2, ['Rz', 'Ry'], [0, 0], name='Zyl_geh_1_an_BP_2')
world.addJoint(BP.BP_Anlenkpunkt_3, Zyl_geh_3.Zyl_geh_3_cs_2, ['Rz', 'Ry'], [0, 0], name='Zyl_geh_1_an_BP_3')
world.addJoint(BP.BP_Anlenkpunkt_4, Zyl_geh_4.Zyl_geh_4_cs_2, ['Rz', 'Ry'], [0, 0], name='Zyl_geh_1_an_BP_4')
world.addJoint(BP.BP_Anlenkpunkt_5, Zyl_geh_5.Zyl_geh_5_cs_2, ['Rz', 'Ry'], [0, 0], name='Zyl_geh_1_an_BP_5')
world.addJoint(BP.BP_Anlenkpunkt_6, Zyl_geh_6.Zyl_geh_6_cs_2, ['Rz', 'Ry'], [0, 0], name='Zyl_geh_1_an_BP_6')

world.addJoint(Zyl_geh_1.Zyl_geh_1_cs, Zyl_stange_1.Zyl_stange_1_cs_2, 'Tz', 0, name='Zyl_stange_1_an_Zyl_geh_1')
world.addJoint(Zyl_geh_2.Zyl_geh_2_cs, Zyl_stange_2.Zyl_stange_2_cs_2, 'Tz', 0, name='Zyl_stange_1_an_Zyl_geh_2')
world.addJoint(Zyl_geh_3.Zyl_geh_3_cs, Zyl_stange_3.Zyl_stange_3_cs_2, 'Tz', 0, name='Zyl_stange_1_an_Zyl_geh_3')
world.addJoint(Zyl_geh_4.Zyl_geh_4_cs, Zyl_stange_4.Zyl_stange_4_cs_2, 'Tz', 0, name='Zyl_stange_1_an_Zyl_geh_4')
world.addJoint(Zyl_geh_5.Zyl_geh_5_cs, Zyl_stange_5.Zyl_stange_5_cs_2, 'Tz', 0, name='Zyl_stange_1_an_Zyl_geh_5')
world.addJoint(Zyl_geh_6.Zyl_geh_6_cs, Zyl_stange_6.Zyl_stange_6_cs_2, 'Tz', 0, name='Zyl_stange_1_an_Zyl_geh_6')

########################
# Constraints or Loops #
########################
world.addLoop.Hexapod(AP.AP_Anlenkpunkt_1, Zyl_stange_1.Zyl_stange_1_cs, 'Verbindung_1')
world.addLoop.Hexapod(AP.AP_Anlenkpunkt_2, Zyl_stange_2.Zyl_stange_2_cs, 'Verbindung_2')
world.addLoop.Hexapod(AP.AP_Anlenkpunkt_3, Zyl_stange_3.Zyl_stange_3_cs, 'Verbindung_3')
world.addLoop.Hexapod(AP.AP_Anlenkpunkt_4, Zyl_stange_4.Zyl_stange_4_cs, 'Verbindung_4')
world.addLoop.Hexapod(AP.AP_Anlenkpunkt_5, Zyl_stange_5.Zyl_stange_5_cs, 'Verbindung_5')
world.addLoop.Hexapod(AP.AP_Anlenkpunkt_6, Zyl_stange_6.Zyl_stange_6_cs, 'Verbindung_6')

#####################
# add visualisation #
#####################
world.addVisualisation.Cylinder(BP.BP_visual, R_BP, hoehe)
world.addVisualisation.Cylinder(AP.AP_visual, R_AP, hoehe)

'''
# For visualization in Dymola
world.addVisualisation.File(Zyl_geh_1.Zyl_geh_1_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_geh_001.stl', 1, name='Zylinder_geh_1')
world.addVisualisation.File(Zyl_geh_2.Zyl_geh_2_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_geh_001.stl', 1, name='Zylinder_geh_2')
world.addVisualisation.File(Zyl_geh_3.Zyl_geh_3_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_geh_001.stl', 1, name='Zylinder_geh_3')
world.addVisualisation.File(Zyl_geh_4.Zyl_geh_4_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_geh_001.stl', 1, name='Zylinder_geh_4')
world.addVisualisation.File(Zyl_geh_5.Zyl_geh_5_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_geh_001.stl', 1, name='Zylinder_geh_5')
world.addVisualisation.File(Zyl_geh_6.Zyl_geh_6_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_geh_001.stl', 1, name='Zylinder_geh_6')

world.addVisualisation.File(Zyl_stange_1.Zyl_stange_1_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_stange_001.stl', 1, name='Zylinder_stange_1')
world.addVisualisation.File(Zyl_stange_2.Zyl_stange_2_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_stange_001.stl', 1, name='Zylinder_stange_2')
world.addVisualisation.File(Zyl_stange_3.Zyl_stange_3_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_stange_001.stl', 1, name='Zylinder_stange_3')
world.addVisualisation.File(Zyl_stange_4.Zyl_stange_4_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_stange_001.stl', 1, name='Zylinder_stange_4')
world.addVisualisation.File(Zyl_stange_5.Zyl_stange_5_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_stange_001.stl', 1, name='Zylinder_stange_5')
world.addVisualisation.File(Zyl_stange_6.Zyl_stange_6_visual, 'C:\\Users\JeSche\Desktop\Diplom_Arbeit\Hexapod/zyl_stange_001.stl', 1, name='Zylinder_stange_6')
'''

# For visualization in PyMbs
world.addVisualisation.Cylinder(Zyl_geh_1.Zyl_geh_1_visual, R_Zyl_geh, l_zyl)
world.addVisualisation.Cylinder(Zyl_geh_2.Zyl_geh_2_visual, R_Zyl_geh, l_zyl)
world.addVisualisation.Cylinder(Zyl_geh_3.Zyl_geh_3_visual, R_Zyl_geh, l_zyl)
world.addVisualisation.Cylinder(Zyl_geh_4.Zyl_geh_4_visual, R_Zyl_geh, l_zyl)
world.addVisualisation.Cylinder(Zyl_geh_5.Zyl_geh_5_visual, R_Zyl_geh, l_zyl)
world.addVisualisation.Cylinder(Zyl_geh_6.Zyl_geh_6_visual, R_Zyl_geh, l_zyl)

world.addVisualisation.Cylinder(Zyl_stange_1.Zyl_stange_1_visual, R_Zyl_stange, l_zyl)
world.addVisualisation.Cylinder(Zyl_stange_2.Zyl_stange_2_visual, R_Zyl_stange, l_zyl)
world.addVisualisation.Cylinder(Zyl_stange_3.Zyl_stange_3_visual, R_Zyl_stange, l_zyl)
world.addVisualisation.Cylinder(Zyl_stange_4.Zyl_stange_4_visual, R_Zyl_stange, l_zyl)
world.addVisualisation.Cylinder(Zyl_stange_5.Zyl_stange_5_visual, R_Zyl_stange, l_zyl)
world.addVisualisation.Cylinder(Zyl_stange_6.Zyl_stange_6_visual, R_Zyl_stange, l_zyl)

world.addVisualisation.Frame(AP, 0.4)
#world.addVisualisation.Frame(BP.BP_Feder, 1)
world.addVisualisation.Frame(Ground, 0.6)

print("System has been assembled")

#################################
#         add Sensors           #
#################################
#world.addSensor.Position(world, AP.AP_Anlenkpunkt_1, "P_AP_1")
#world.addSensor.Energy(AP, 'E_AP')

#####################
# add Input & Load  #
#####################
#l = world.addSensor.Distance(AP, BP.BP_Feder, 'l', 'DistanceSensor')
#lz = world.addSensor.Distance(BP, AP, 'lz', 'DistanceSensor_Cylinder')
#c = 50
#F_c = world.addExpression('SpringForce', 'F_c', -c*l[0])
#world.addLoad.PtPForce(AP, BP.BP_Feder, F_c, name='Spring')

#################################
# generate equations & sim Code #
#################################
world.genEquations.Recursive()
#world.genCode.Modelica('hexapod_z_kpl', '.\HP_Output', inputsAsInputs=True, debugMode=False)
world.show('hexapod_z_kpl')
```
1.304688
1
src/train/test.py
jiangqn/RNNLM
1
12798251
```python
<filename>src/train/test.py<gh_stars>1-10
import os

import torch
from torch import nn
from torch.utils.data.dataloader import DataLoader

from src.data_process.dataset import LMDataset
from src.train.eval import eval
from src.utils.constants import PAD_INDEX
from src.utils.logger import Logger


def test(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    base_path = os.path.join('./data', args.data)
    processed_base_path = os.path.join(base_path, 'processed')
    processed_test_path = os.path.join(processed_base_path, 'test.npz')
    save_path = os.path.join(processed_base_path, 'rnnlm.pkl')
    log_base_path = os.path.join(base_path, 'log')
    log_path = os.path.join(log_base_path, 'test_log.txt')
    logger = Logger(log_path)

    test_data = LMDataset(processed_test_path)
    test_loader = DataLoader(
        dataset=test_data,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True
    )

    model = torch.load(save_path)
    model = model.cuda()
    criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX)
    test_loss, test_ppl = eval(model, test_loader, criterion)
    logger.log('test_loss: %.4f\ttest_ppl: %.4f' % (test_loss, test_ppl))
```
1.46875
1
interface.py
Kapil-Shyam-M/riscv-isac
5
12798379
```python
<reponame>Kapil-Shyam-M/riscv-isac
import importlib

import pluggy

from riscv_isac.plugins.specification import *
import riscv_isac.plugins as plugins


def interface(trace, arch, mode):
    '''
    Arguments:
        trace - Log_file_path
        arch  - Architecture
        mode  - Execution trace format
    '''
    parser_pm = pluggy.PluginManager("parser")
    decoder_pm = pluggy.PluginManager("decoder")
    parser_pm.add_hookspecs(ParserSpec)
    decoder_pm.add_hookspecs(DecoderSpec)

    # Load the parser plugin matching the requested trace format.
    parserfile = importlib.import_module("riscv_isac.plugins." + mode)
    parserclass = getattr(parserfile, "mode_" + mode)
    parser_pm.register(parserclass())
    parser = parser_pm.hook
    parser.setup(trace=trace, arch=arch)

    instructionObjectfile = importlib.import_module("riscv_isac.plugins.internalDecoder")
    decoderclass = getattr(instructionObjectfile, "disassembler")
    decoder_pm.register(decoderclass())
    decoder = decoder_pm.hook
    decoder.setup(arch=arch)

    iterator = iter(parser.__iter__()[0])
    for instr, mnemonic, addr, commitvalue in iterator:
        if instr is not None:
            instrObj = decoder.decode(instr=instr, addr=addr)
```
1.289063
1
bin/tests/test_design.py
broadinstitute/adapt
12
12798507
```python
<filename>bin/tests/test_design.py
"""Tests for design.py
"""

import random
import os
import copy
import tempfile
import unittest
import logging
from collections import OrderedDict
from argparse import Namespace

from adapt import alignment
from adapt.prepare import align, ncbi_neighbors, prepare_alignment
from adapt.utils import seq_io

from bin import design

__author__ = '<NAME> <<EMAIL>>'

# Default args: window size 3, guide size 2, allow GU pairing
# GU pairing allows AA to match GG in 1st window
SEQS = OrderedDict()
SEQS["genome_1"] = "AACTA"
SEQS["genome_2"] = "AAACT"
SEQS["genome_3"] = "GGCTA"
SEQS["genome_4"] = "GGCTT"

# Specificity seq stops AA from being the best guide in the 1st window
SP_SEQS = OrderedDict()
SP_SEQS["genome_5"] = "AA---"


class TestDesign(object):
    """General class for testing design.py

    Defines helper functions for test cases and basic setUp and tearDown
    functions.
    """

    class TestDesignCase(unittest.TestCase):

        def setUp(self):
            # Disable logging
            logging.disable(logging.INFO)

            # Create a temporary input file
            self.input_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
            # Closes the file so that it can be reopened on Windows
            self.input_file.close()

            # Create a temporary output file
            self.output_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
            self.output_file.close()

            self.files_to_delete = [self.input_file.name, self.output_file.name]

        def check_results(self, file, expected, header='target-sequences'):
            """Check the results of the test output

            Given a TSV file of test output and expected output, fails the
            test if the test output guide target sequences do not equal the
            expected guide target sequences

            Args:
                file: string, path name of the file
                expected: list of lists of strings, all the expected guide
                    target sequences in each line of the output
                header: the header of the CSV that contains the guide target
                    sequences
            """
            col_loc = None
            with open(file) as f:
                for i, line in enumerate(f):
                    if i == 0:
                        headers = line.split('\t')
                        # Will raise an error if header is not in output
                        col_loc = headers.index(header)
                        continue
                    self.assertLess(i, len(expected) + 1)
                    guide_line = line.split('\t')[col_loc]
                    guides = guide_line.split(' ')
                    for guide in guides:
                        self.assertIn(guide, expected[i-1])
                    self.assertEqual(len(guides), len(expected[i-1]))
                self.assertEqual(i, len(expected))

        def baseArgv(self, search_type='sliding-window', input_type='fasta',
                     objective='minimize-guides', model=False, specific=None,
                     specificity_file=None, output_loc=None):
            """Get arguments for tests

            Produces the correct arguments for a test case given details of
            what the test case is testing. See design.py help for details
            on input

            Args:
                search_type: 'sliding-window' or 'complete-targets'
                input_type: 'fasta', 'auto-from-args', or 'auto-from-file'
                objective: 'minimize-guides' or 'maximize-activity'
                model: boolean, true to use Cas13a built in model, false
                    to use simple binary prediction
                specific: None, 'fasta', or 'taxa'; what sort of input to be
                    specific against
                output_loc: path to the output file/directory; set to
                    self.output_file.name if None

            Returns:
                List of strings that are the arguments of the test
            """
            input_file = self.input_file.name
            if output_loc is None:
                output_loc = self.output_file.name

            argv = ['design.py', search_type, input_type]

            if input_type == 'fasta':
                argv.extend([input_file, '-o', output_loc])
            elif input_type == 'auto-from-args':
                argv.extend(['64320', 'None', output_loc])
            elif input_type == 'auto-from-file':
                argv.extend([input_file, output_loc])

            if input_type in ['auto-from-args', 'auto-from-file']:
                argv.extend(['--sample-seqs', '1', '--mafft-path', 'fake_path'])

            if search_type == 'sliding-window':
                argv.extend(['-w', '3'])
            if search_type == 'complete-targets':
                argv.extend(['--best-n-targets', '2', '-pp', '.75', '-pl', '1',
                             '--max-primers-at-site', '2'])

            if objective == 'minimize-guides':
                argv.extend(['-gm', '0', '-gp', '.75'])
            elif objective == 'maximize-activity':
                argv.extend(['--maximization-algorithm', 'greedy'])

            # ID-M (mismatches to be considered identical) must be set to 0
            # since otherwise having 1 base in common with a 2 base guide
            # counts as a match
            if specific == 'fasta':
                argv.extend(['--specific-against-fastas', specificity_file,
                             '--id-m', '0'])
            elif specific == 'taxa':
                argv.extend(['--specific-against-taxa', specificity_file,
                             '--id-m', '0'])

            if model:
                argv.append('--predict-cas13a-activity-model')
            elif objective == 'maximize-activity':
                argv.extend(['--use-simple-binary-activity-prediction', '-gm', '0'])

            argv.extend(['--obj', objective, '--seed', '0', '-gl', '2'])

            return argv

        def tearDown(self):
            for file in self.files_to_delete:
                if os.path.isfile(file):
                    os.unlink(file)
            # Re-enable logging
            logging.disable(logging.NOTSET)


class TestDesignFasta(TestDesign.TestDesignCase):
    """Test design.py given an input FASTA
    """

    def setUp(self):
        super().setUp()

        self.real_output_file = self.output_file.name + '.tsv'
        self.files_to_delete.append(self.real_output_file)

        # Write to temporary input fasta
        seq_io.write_fasta(SEQS, self.input_file.name)

    def test_min_guides(self):
        argv = super().baseArgv()
        args = design.argv_to_args(argv)
        design.run(args)
        # Base args set the percentage of sequences to match at 75%
        expected = [["AA"], ["CT"], ["CT"]]
        self.check_results(self.real_output_file, expected)

    def test_max_activity(self):
        argv = super().baseArgv(objective='maximize-activity')
        args = design.argv_to_args(argv)
        design.run(args)
        # Doesn't use model, just greedy binary prediction with 0 mismatches
        # (so same outputs as min-guides)
        expected = [["AA"], ["CT"], ["CT"]]
        self.check_results(self.real_output_file, expected)

    def test_complete_targets(self):
        argv = super().baseArgv(search_type='complete-targets')
        args = design.argv_to_args(argv)
        design.run(args)
        # Since sequences are short and need 1 base for primer on each side,
        # only finds 1 target in middle
        expected = [["CT"]]
        self.check_results(self.real_output_file, expected,
                           header='guide-target-sequences')

    def test_specificity_fastas(self):
        # Create a temporary fasta file for specificity
        self.sp_fasta = tempfile.NamedTemporaryFile(mode='w', delete=False)
        # Closes the file so that it can be reopened on Windows
        self.sp_fasta.close()

        seq_io.write_fasta(SP_SEQS, self.sp_fasta.name)

        self.files_to_delete.append(self.sp_fasta.name)

        argv = super().baseArgv(specific='fasta',
                                specificity_file=self.sp_fasta.name)
        args = design.argv_to_args(argv)
        design.run(args)
        # AA isn't allowed in 1st window by specificity fasta,
        # so 1st window changes
        expected = [["AC", "GG"], ["CT"], ["CT"]]
        self.check_results(self.real_output_file, expected)


class TestDesignAutos(TestDesign.TestDesignCase):
    """Test design.py given arguments to automatically download FASTAs

    Does not run the entire design.py; prematurely stops by giving a fake
    path to MAFFT. All are expected to return a FileNotFoundError
    """

    def setUp(self):
        super().setUp()

        # Write to temporary input file
        with open(self.input_file.name, 'w') as f:
            f.write("Zika virus\t64320\tNone\tNC_035889\n")

        # Create a temporary output directory
        self.output_dir = tempfile.TemporaryDirectory()

    def test_auto_from_file(self):
        argv = super().baseArgv(input_type='auto-from-file',
                                output_loc=self.output_dir.name)
        args = design.argv_to_args(argv)
        try:
            design.run(args)
        except FileNotFoundError:
            pass

    def test_auto_from_args(self):
        argv = super().baseArgv(input_type='auto-from-args')
        args = design.argv_to_args(argv)
        try:
            design.run(args)
        except FileNotFoundError:
            pass

    def test_specificity_taxa(self):
        argv = super().baseArgv(input_type='auto-from-args', specific='taxa',
                                specificity_file='')
        args = design.argv_to_args(argv)
        try:
            design.run(args)
        except FileNotFoundError:
            pass

    def tearDown(self):
        super().tearDown()
        self.output_dir.cleanup()


class TestDesignFull(TestDesign.TestDesignCase):
    """Test design.py fully through
    """

    def setUp(self):
        super().setUp()

        # Write to temporary input file
        with open(self.input_file.name, 'w') as f:
            f.write("Zika virus\t64320\tNone\tNC_035889\n")

        # Create a temporary specificity file
        self.sp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
        self.sp_file.write("123\tNone\n")
        # Closes the file so that it can be reopened on Windows
        self.sp_file.close()

        # 'auto-from-args' gives different outputs for every cluster
        # Our test only produces 1 cluster, so store the name of that file
        self.real_output_file = self.output_file.name + '.0.tsv'

        self.files_to_delete.extend([self.sp_file.name, self.real_output_file])

        # We cannot access MAFFT, so override this function; store original so
        # it can be fixed for future tests
        self.set_mafft_exec = align.set_mafft_exec
        align.set_mafft_exec = lambda mafft_path: None

        # Curating requires MAFFT, so override this function; store original so
        # it can be fixed for future tests
        self.curate_against_ref = align.curate_against_ref

        def small_curate(seqs, ref_accs, asm=None, remove_ref_accs=[]):
            return {seq: seqs[seq] for seq in seqs \
                    if seq.split('.')[0] not in remove_ref_accs}

        align.curate_against_ref = small_curate

        # Aligning requires MAFFT, so override this function and output simple
        # test sequences; store original so it can be fixed for future tests
        self.align = align.align
        align.align = lambda seqs, am=None: SEQS

        # We don't want to fetch sequences for the specificity file since we're
        # doing a simple test case, so override this function; store original
        # so it can be fixed for future tests
        self.fetch_sequences_for_taxonomy = \
            prepare_alignment.fetch_sequences_for_taxonomy

        def small_fetch(taxid, segment):
            # 123 is the taxonomic ID used in our specificity file
            if taxid == 123:
                return SP_SEQS
            # If it's not the specificity taxonomic ID, test fetching the real
            # sequences, but don't return them as they won't be used
            else:
                self.fetch_sequences_for_taxonomy(taxid, segment)
                return SEQS

        prepare_alignment.fetch_sequences_for_taxonomy = small_fetch

        # Disable warning logging to avoid annotation warning
        logging.disable(logging.WARNING)

    def test_specificity_taxa(self):
        argv = super().baseArgv(input_type='auto-from-args', specific='taxa',
                                specificity_file=self.sp_file.name)
        args = design.argv_to_args(argv)
        design.run(args)
        # Same output as test_specificity_fasta, as sequences are the same
        expected = [["AC", "GG"], ["CT"], ["CT"]]
        self.check_results(self.real_output_file, expected)

    def tearDown(self):
        # Fix all overridden functions
        align.set_mafft_exec = self.set_mafft_exec
        align.curate_against_ref = self.curate_against_ref
        align.align = self.align
        prepare_alignment.fetch_sequences_for_taxonomy = \
            self.fetch_sequences_for_taxonomy
        super().tearDown()
```
1.882813
2
test/test_del_contact.py
vyacheslavmarkov/python_training
0
12798635
```python
<gh_stars>0
from model.contact import Contact
import random


def test_delete_first_contact(app, db, check_ui):
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(
            firstname="Tester", middlename="Something", lastname="Trump",
            photo="picture.jpg", nickname="super nickname", title="QA engineer",
            company="Google", address="Kremlin", homephone="1111111",
            mobilephone="2222222", workphone="3333333", fax="4444444",
            email="<EMAIL>", email2="<EMAIL>", email3="<EMAIL>",
            homepage="google.com", bday="29", bmonth="April", byear="1991",
            aday="22", amonth="August", ayear="2015", address_2="Moscow",
            secondaryphone="5555555", notes="Cool guy"))
    old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    app.contact.delete_contact_by_id(contact.id)
    new_contacts = db.get_contact_list()
    assert len(old_contacts) - 1 == len(new_contacts)
    old_contacts.remove(contact)
    assert sorted(old_contacts, key=Contact.id_or_max) == \
           sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        # tune db contact data to be appropriate for the homepage representation
        old_contacts = app.contact.make_contacts_like_on_homepage(old_contacts)
        assert sorted(old_contacts, key=Contact.id_or_max) == \
               sorted(app.contact.get_contacts_list(), key=Contact.id_or_max)
```
1.757813
2
py/fingerboard.py
Takayoshi-Aoyagi/Jazz-Chords
1
12798763
```python
# coding: UTF-8
# (Python 2 source, as published in the repository)

from tone import Tone


class Fingerboard:

    @classmethod
    def getPos(cls):
        _pos = {}
        openTones = ["E", "B", "G", "D", "A", "E"]
        tone = Tone()
        for stringIndex, openTone in enumerate(openTones):
            toneIndex = tone.getToneNumberByName(openTone)
            arr = []
            for i in range(13):
                toneString = tone.getToneName(openTone, i)
                arr.append(toneString)
            _pos[stringIndex + 1] = arr
        return _pos

    @classmethod
    def dump(cls, includes):
        _pos = cls.getPos()
        if len(includes) > 0:
            for key in _pos.keys():
                arr = _pos[key]
                for i, tone in enumerate(arr):
                    if tone not in includes.keys():
                        arr[i] = " "
                    else:
                        arr[i] = "%2s(%3s)" % (tone, includes[tone])
        flets = map(lambda x: " %7s " % x, range(13))
        print "    " + "  ".join(flets)
        for key in sorted(_pos.keys()):
            tones = _pos[key]
            tones = map(lambda x: " %7s " % x, tones)
            print '%s弦: |%s|' % (key, "|".join(tones))
```
2.171875
2
unifi/objects/device.py
BastiG/unifi-py
0
12798891
```python
from unifi.objects.base import UnifiBaseObject
from unifi.helper import find_by_attr, json_print


class UnifiDeviceObject(UnifiBaseObject):

    def get_port_profile(self, **filter_kwargs):
        port = find_by_attr(self.port_table, **filter_kwargs)
        port_override = find_by_attr(self.port_overrides, port_idx=port['port_idx'])
        # An override's profile, when present, takes precedence over the port's own.
        portconf_id = port_override['portconf_id'] \
            if port_override and 'portconf_id' in port_override \
            else port['portconf_id']
        portconf = find_by_attr(self.controller.portconf(), _id=portconf_id)
        return portconf

    def set_port_profile(self, portconf, **filter_kwargs):
        port = find_by_attr(self.port_table, **filter_kwargs)
        port_override = find_by_attr(self.port_overrides, port_idx=port['port_idx'])
        if port_override:
            port_override['portconf_id'] = portconf['_id']
        else:
            port_override = {
                'port_idx': port['port_idx'],
                'portconf_id': portconf['_id']
            }
            self.port_overrides.append(port_override)
```
1.570313
2
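A hedged usage sketch for UnifiDeviceObject above; the Controller bootstrap and its devices() helper are assumptions (only controller.portconf() is actually referenced by the class), so treat this as pseudocode for the intended call pattern:

from unifi.helper import find_by_attr

controller = Controller('https://unifi.local:8443', 'admin', 'secret')   # assumed bootstrap
switch = find_by_attr(controller.devices(), name='office-switch')        # assumed helper

profile = switch.get_port_profile(port_idx=4)                 # effective profile of port 4
target = find_by_attr(controller.portconf(), name='VLAN 20')
switch.set_port_profile(target, port_idx=4)                   # override just that port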
pipekit/__init__.py
DrDub/pipekit
3
12799019
#!/usr/bin/env python3

from .pipe import NullPipe  # noqa: W0611
0.149414
0
Bioinformatics VI/Week II/SuffixArray.py
egeulgen/Bioinformatics_Specialization
3
12799147
import sys


def SuffixArray(Text):
    '''
    Suffix Array
    Input: A string Text.
    Output: SuffixArray(Text).
    '''
    suffixes = []
    suffix_array = []
    for i in range(len(Text)):
        suffixes.append(Text[i:])
        suffix_array.append(i)
    suffix_array = [x for _, x in sorted(zip(suffixes, suffix_array), key=lambda pair: pair[0])]
    return suffix_array


if __name__ == "__main__":
    Text = sys.stdin.read().rstrip()
    suffix_array = SuffixArray(Text)
    print(', '.join(str(x) for x in suffix_array))
2.765625
3
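A quick worked check of SuffixArray above, using the classic "banana" example:

# The suffixes of "banana" in sorted order are
# a(5), ana(3), anana(1), banana(0), na(4), nana(2).
assert SuffixArray("banana") == [5, 3, 1, 0, 4, 2]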
jsymbols.py
agnosticlines/ghidra_kernelcache
238
12799275
# Symbolicate the kernelcache from jtool2
#@author simo
#@category iOS.kernel

from utils.methods import *

if __name__ == "__main__":
    default_file = "test"
    fname = askString("Kernelcache symbol file", "Symbol file: ", default_file)
    f = open(fname, "rb+")
    buf = f.read().split('\n')
    i = 0
    for line in buf:
        if len(line) == 0:
            continue
        addr, symbol, empty = line.split("|")
        if len(symbol) == 0:
            continue
        if "func_" in symbol:
            continue
        print addr, symbol
        symbol = symbol.strip()  # .replace(" ","_")
        symbolicate(addr, symbol)
        i += 1
1.28125
1
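For reference, the jtool2 symbol file parsed above is pipe-delimited; a minimal sketch of the expected line layout (the address and symbol name here are made up for illustration):

# Each line is "address|symbol|" - the trailing pipe is why the script
# unpacks three fields and discards the empty last one.
line = "0xfffffff007b7c000|_kalloc|"
addr, symbol, empty = line.split("|")
assert (addr, symbol, empty) == ("0xfffffff007b7c000", "_kalloc", "")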
04-FaceRecognition-II/thetensorclan-backend-heroku/models/__init__.py
amitkml/TSAI-DeepVision-EVA4.0-Phase-2
1
12799403
from .utils import get_classifier, MODEL_REGISTER
0.275391
0
conditions/toy_shop.py
MaggieIllustrations/softuni-github-programming
0
12799531
holiday_price = float(input())
puzzle_count = int(input())
dolls_count = int(input())
teddy_bears_count = int(input())
minions_count = int(input())
trucks_count = int(input())

total_price_dolls = dolls_count * 3
total_price_puzzles = puzzle_count * 2.6
total_price_teddy_bears = teddy_bears_count * 4.10
total_price_minions = minions_count * 8.20
total_price_trucks = trucks_count * 2

total_price = total_price_puzzles + total_price_dolls + \
              total_price_teddy_bears + total_price_minions + \
              total_price_trucks

total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count

if total_amount_toys >= 50:
    discount = total_price * 0.25
    total_price = total_price - discount

rent = total_price * 0.1
earning_after_rent = total_price - rent

if earning_after_rent >= holiday_price:
    earning_left = earning_after_rent - holiday_price
    print(f"Yes! {earning_left:.2f} lv left.")
else:
    needed_money = holiday_price - earning_after_rent
    print(f"Not enough money! {needed_money:.2f} lv needed.")
2.765625
3
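A sample run through the arithmetic above (inputs chosen for illustration): holiday price 40.8 lv, 20 puzzles, 25 dolls, 30 teddy bears, 50 minions, 10 trucks:

total = 20*2.6 + 25*3 + 30*4.10 + 50*8.20 + 10*2   # 52 + 75 + 123 + 410 + 20 = 680.0
total *= 0.75                                      # 135 toys >= 50, so 25% discount -> 510.0
left = total * 0.9 - 40.8                          # minus 10% rent, minus the holiday -> 418.2
print(f"Yes! {left:.2f} lv left.")                 # matches the script's output format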
queries/charQuery.py
Kadantte/AniPy-Bot
11
12799659
def searchChar():
    query = '''
    query ($search: String) {
        Character(search: $search) {
            siteUrl
            name {
                full
            }
            media(perPage: 1) {
                nodes {
                    title {
                        romaji
                        english
                    }
                    siteUrl
                }
            }
            image {
                large
            }
            description(asHtml: true)
        }
    }
    '''
    return query
0.945313
1
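A hedged usage sketch for the query above: it follows the AniList GraphQL schema, so it can be posted to AniList's public endpoint (the bot's own HTTP layer is not shown in this record, so requests is used here for illustration):

import requests

from queries.charQuery import searchChar

resp = requests.post(
    "https://graphql.anilist.co",
    json={"query": searchChar(), "variables": {"search": "Lelouch"}},
)
print(resp.json()["data"]["Character"]["name"]["full"])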
neuralizer/tests/test_data_process.py
BeckResearchLab/Neuralizer
1
12799787
from __future__ import absolute_import, division, print_function

import os

import pandas as pd
import numpy as np
import numpy.testing as npt

import data_process as dp


def test_read_file():
    X, Y = dp.read_file('test.tsv', ["A", "B"], "y")
    npt.assert_equal(X, np.array([[0, 1], [3, 2], [4, 3]]))
    npt.assert_equal(Y, np.array([0, 1, 5]))


def test_data_info():
    data = {"filename": "test.tsv", "X_var": ["A", "B"], "Y_var": "y"}
    X, Y, input_dim, output_dim = dp.data_info(data)
    assert input_dim == 2, "Dimension of input layer is not correct"
    assert output_dim == 1, "Dimension of output layer is not correct"
1.453125
1
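The tests above pin down the contract of the data_process module; a hypothetical implementation that would satisfy them (the TSV layout and return types are inferred from the assertions):

# Hypothetical data_process sketch: read_file pulls the X columns and the Y
# column out of a TSV; data_info additionally reports the layer dimensions.
import pandas as pd

def read_file(filename, X_var, Y_var):
    df = pd.read_csv(filename, sep='\t')
    return df[X_var].values, df[Y_var].values

def data_info(data):
    X, Y = read_file(data["filename"], data["X_var"], data["Y_var"])
    input_dim = X.shape[1]
    output_dim = Y.shape[1] if Y.ndim > 1 else 1
    return X, Y, input_dim, output_dim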
load_data.py
penguin2048/StockIt
32
12799915
""" handle preprocessing and loading of data. """ import html import os.path import pandas as pd import re from nltk import word_tokenize, pos_tag from nltk.corpus import stopwords, wordnet from nltk.stem.wordnet import WordNetLemmatizer class LoadData: @classmethod def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']): """ preprocess the data in file location and saves it as a csv file (appending '_preprocessed' before '.csv). The preprocessing us in following ways: 1) extract message and datetime columns. 2) sort according to datetime in descending order (newest first) 3) remove links, @ and $ references, extra whitespaces, extra '.', digits, slashes, hyphons 4) decode html entities 5) convert everything to lower case """ if 'datetime' in columns: dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True) dataFrame.sort_values(by='datetime', ascending=False) else: dataFrame = pd.read_csv(file_location, usecols=columns) dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\.|https?://).*?(\s|$)|@.*?(\s|$)|\$.*?(\s|$)|\d|\%|\\|/|-|_', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\.+', '. ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\,+', ', ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\?+', '? ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\s+', ' ', x)) dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower()) dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False) @classmethod def labelled_data_lexicon_analysis(cls): """ extract keywords from labelled stocktwits data for improved accuracy in scoring for each labelled message do 1) tokenize the message 2) perform POS tagging 3) if a sense is present in wordnet then, lemmatize the word and remove stop words else ignore the word remove intersections from the two lists before saving """ dataFrame = LoadData.get_labelled_data() bullish_keywords = set() bearish_keywords = set() lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) for index, row in dataFrame.iterrows(): tokens = word_tokenize(row['message']) pos = pos_tag(tokens) selected_tags = set() for i in range(len(pos)): if len(wordnet.synsets(pos[i][0])): if pos[i][1].startswith('J'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a')) elif pos[i][1].startswith('V'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v')) elif pos[i][1].startswith('N'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n')) elif pos[i][1].startswith('R'): selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r')) selected_tags -= stop_words if row['sentiment'] == 'Bullish': bullish_keywords = bullish_keywords.union(selected_tags) elif row['sentiment'] == 'Bearish': bearish_keywords = bearish_keywords.union(selected_tags) updated_bullish_keywords = bullish_keywords - bearish_keywords updated_bearish_keywords = bearish_keywords - bullish_keywords with open('data-extractor/lexicon_bullish_words.txt', 'a') as file: for word in updated_bullish_keywords: file.write(word+"\n") with open('data-extractor/lexicon_bearish_words.txt', 'a') as file: for word in updated_bearish_keywords: file.write(word+"\n") @classmethod def get_stocktwits_data(cls, symbol): """ get_data loads the preprocessed data of 'symbol' from data-extractor and returns a pandas dataframe with 
columns [message(object), datetime(datetime64[ns])]. """ file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv') dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_price_data(cls, symbol): """ loads the price data of 'symbol' from data-extractor and returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)]. """ file_location = 'data-extractor/stock_prices_'+symbol+'.csv' dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True) return dataFrame @classmethod def get_labelled_data(cls, type='complete'): """ get_labelled_data loads the preprocessed labelled data of stocktwits from data-extractor and returns a pandas dataframe with columns [sentiment(object), message(object)]. """ if type == 'complete': file_location = 'data-extractor/labelled_data_complete_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message']) elif type == 'training': file_location = 'data-extractor/labelled_data_training_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.get_training_data() elif type == 'test': file_location = 'data-extractor/labelled_data_test_preprocessed.csv' if os.path.isfile(file_location) is False: LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message']) dataFrame = pd.read_csv(file_location) return dataFrame @classmethod def get_custom_lexicon(cls): """ get custom lexicon of bearish and bullish words respectively """ file_location1 = 'data-extractor/lexicon_bearish_words.txt' file_location2 = 'data-extractor/lexicon_bullish_words.txt' if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False: LoadData.labelled_data_lexicon_analysis() dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word']) dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word']) return dataFrameBearish, dataFrameBullish @classmethod def get_training_data(cls): """ get labelled training data with equal bearish and bullish messages """ try: os.remove('data-extractor/labelled_data_training.csv') except OSError: pass dataFrame = LoadData.get_labelled_data(type='complete') dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish'] dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish'] dataFrameBearishTraining = dataFrameBearish dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)] dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True) dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False) @classmethod def combine_price_and_sentiment(cls, sentimentFrame, priceFrame): from datetime import timedelta """ receive sentimentFrame as (date, sentiment, message) indexed by date and sentiment and priceFrame as (Date, Opening Price, Closing Price, Volume) and return a combined frame as (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) """ dataFrame = pd.DataFrame() for date, df in sentimentFrame.groupby(level=0, sort=False): price_current = 
priceFrame[priceFrame['Date'] == date] if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index: continue tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)] days = 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] while price_plus1.empty: days += 1 price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)] days = 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] while price_minus1.empty: days += 1 price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)] new_row = {} new_row['date'] = date new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message'] new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message'] new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1 new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum() new_row['cash_volume'] = price_current['Volume'].iloc[0] new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1 print(new_row) dataFrame = dataFrame.append(new_row, ignore_index=True) return dataFrame @classmethod def aggregate_stock_price_data(cls): """ compile stocktwits data for stock prediction analysis in the following form (date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) we have choice to take previous n days sentiment_calculated and using label of next nth day returns dataframes for AAPL, AMZN, GOOGL respectively """ if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')): from sklearn.externals import joblib file_location = 'naive_bayes_classifier.pkl' priceAAPL = LoadData.get_price_data('AAPL') priceAMZN = LoadData.get_price_data('AMZN') priceGOOGL = LoadData.get_price_data('GOOGL') sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAAPL = LoadData.get_stocktwits_data('AAPL') dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date()) dataAAPL.rename(columns={'datetime':'date'}, inplace=True) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataAMZN = LoadData.get_stocktwits_data('AMZN') dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date()) dataAMZN.rename(columns={'datetime':'date'}, inplace=True) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False) sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv' if os.path.isfile(sentimented_file) is False: tweet_classifier = joblib.load(file_location) dataGOOGL = LoadData.get_stocktwits_data('GOOGL') dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0]) dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date()) dataGOOGL.rename(columns={'datetime':'date'}, 
inplace=True) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True) dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count() dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count() dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count() dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL) dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN) dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL) dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False) dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False) dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False) dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True) return dataAAPL, dataAMZN, dataGOOGL @classmethod def get_stock_prediction_data(cls, symbol='ALL', type='training'): """ get the training and test data for stock prediction in format (sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label) Standardize the data before using. """ file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv' if not os.path.isfile(file_location): import numpy as np dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data() combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True) combined_data.sort_values('date') combined_data.drop(columns='date', inplace=True) combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))]) combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False) combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False) dataAAPL.sort_values('date') dataAAPL.drop(columns='date', inplace=True) AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))]) AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False) AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False) dataAMZN.sort_values('date') dataAMZN.drop(columns='date', inplace=True) AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))]) AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False) AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False) dataGOOGL.sort_values('date') dataGOOGL.drop(columns='date', inplace=True) GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))]) GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False) GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False) data = pd.read_csv(file_location) return data
2.21875
2
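A short usage sketch for the LoadData class above; the docstring of get_stock_prediction_data says to standardize the data before use, so a minimal pipeline might look like this (StandardScaler is an illustrative choice, not mandated by the module):

from sklearn.preprocessing import StandardScaler

from load_data import LoadData

train = LoadData.get_stock_prediction_data(symbol='ALL', type='training')
X, y = train.drop(columns='label'), train['label']
X_scaled = StandardScaler().fit_transform(X)   # zero mean, unit variance per feature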