"""
ASGI config for CongoCart project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CongoCart.settings')
application = get_asgi_application()
"""
levels.py
Houses all possible levels for the game to choose from.
A level is selected by calling the corresponding level function, which returns a
tuple of (level_number, pads, trophies, (car_x, car_y), time_limit).
The returned sprites must still be rendered by the main game.
"""
import pygame
class PadSprite(pygame.sprite.Sprite):
    def __init__(self, image, position):
        super(PadSprite, self).__init__()
        self.normal = pygame.image.load(image)
        self.hit = pygame.image.load('images/collision.png')
        self.image = self.normal  # default image so the sprite can be drawn before the first update()
        self.rect = pygame.Rect(self.normal.get_rect())
        self.rect.center = position
def update(self, hit_list):
if self in hit_list:
self.image = self.hit
else:
self.image = self.normal
class Trophy(pygame.sprite.Sprite):
def __init__(self, position):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load('images/trophy.png')
self.rect = self.image.get_rect()
self.rect.x, self.rect.y = position
def draw(self, screen):
screen.blit(self.image, self.rect)
def level1():
pads = [
PadSprite('images/vertical_pads.png', (0, 100)),
PadSprite('images/vertical_pads.png', (0, 200)),
PadSprite('images/vertical_pads.png', (0, 400)),
PadSprite('images/vertical_pads.png', (1024, 100)),
PadSprite('images/vertical_pads.png', (1024, 550)),
PadSprite('images/vertical_pads.png', (824, 768)),
PadSprite('images/vertical_pads.png', (824, 368)),
PadSprite('images/vertical_pads.png', (210, 375)),
PadSprite('images/vertical_pads.png', (824, 368)),
PadSprite('images/race_pads.png', (900, 0)),
PadSprite('images/race_pads.png', (724, 0)),
PadSprite('images/race_pads.png', (524, 0)),
PadSprite('images/race_pads.png', (224, 0)),
PadSprite('images/race_pads.png', (1024, 768)),
PadSprite('images/race_pads.png', (624, 768)),
PadSprite('images/race_pads.png', (224, 768)),
PadSprite('images/race_pads.png', (450, 130)),
PadSprite('images/race_pads.png', (550, 130)),
PadSprite('images/small_horizontal.png', (600, 615)),
PadSprite('images/small_horizontal.png', (350, 615)),
PadSprite('images/small_horizontal.png', (470, 270)),
PadSprite('images/small_vertical.png', (600, 390)),
PadSprite('images/small_vertical.png', (350, 390)),
PadSprite('images/vertical_pads.png', (0,250)),
PadSprite('images/vertical_pads.png', (0, 525)),
PadSprite('images/vertical_pads.png', (1024, 250)),
PadSprite('images/vertical_pads.png', (1024, 525)),
PadSprite('images/race_pads.png', (250, 0)),
PadSprite('images/race_pads.png', (760, 0)),
PadSprite('images/race_pads.png', (500, 0)),
PadSprite('images/race_pads.png', (250, 768)),
PadSprite('images/race_pads.png', (760, 768)),
PadSprite('images/race_pads.png', (500, 768))
]
trophies = [Trophy((450, 320))]
return 1, pads, trophies, (970, 730), 60
def level2():
pads = [
PadSprite('images/vertical_pads.png', (0, 100)),
PadSprite('images/vertical_pads.png', (0, 200)),
PadSprite('images/vertical_pads.png', (0, 400)),
PadSprite('images/vertical_pads.png', (1024, 100)),
PadSprite('images/vertical_pads.png', (1024, 550)),
PadSprite('images/vertical_pads.png', (200, 768)),
PadSprite('images/vertical_pads.png', (200, 368)),
PadSprite('images/vertical_pads.png', (800, 375)),
PadSprite('images/vertical_pads.png', (200, 368)),
PadSprite('images/race_pads.png', (60, 0)),
PadSprite('images/race_pads.png', (300, 0)),
PadSprite('images/race_pads.png', (700, 0)),
PadSprite('images/race_pads.png', (900, 0)),
PadSprite('images/race_pads.png', (1024, 768)),
PadSprite('images/race_pads.png', (624, 768)),
PadSprite('images/race_pads.png', (224, 768)),
PadSprite('images/race_pads.png', (450, 130)),
PadSprite('images/race_pads.png', (550, 130)),
PadSprite('images/small_horizontal.png', (670, 615)),
PadSprite('images/small_horizontal.png', (470, 615)),
PadSprite('images/small_horizontal.png', (470, 270)),
PadSprite('images/small_vertical.png', (350, 490)),
PadSprite('images/small_vertical.png', (350, 390)),
PadSprite('images/small_vertical.png', (600, 390)),
PadSprite('images/vertical_pads.png', (0,250)),
PadSprite('images/vertical_pads.png', (0, 525)),
PadSprite('images/vertical_pads.png', (1024, 250)),
PadSprite('images/vertical_pads.png', (1024, 525)),
PadSprite('images/race_pads.png', (250, 0)),
PadSprite('images/race_pads.png', (760, 0)),
PadSprite('images/race_pads.png', (500, 0)),
PadSprite('images/race_pads.png', (250, 768)),
PadSprite('images/race_pads.png', (760, 768)),
PadSprite('images/race_pads.png', (500, 768))
]
trophies = [Trophy((450, 320))]
return 2, pads, trophies, (30, 730), 60
def level3():
pads = [
PadSprite('images/race_pads.png', (800, 150)),
PadSprite('images/race_pads.png', (800, 375)),
PadSprite('images/race_pads.png', (800, 625)),
PadSprite('images/race_pads.png', (800, 675)),
PadSprite('images/race_pads.png', (800, 575)),
PadSprite('images/race_pads.png', (200, 150)),
PadSprite('images/race_pads.png', (200, 675)),
PadSprite('images/race_pads.png', (200, 575)),
PadSprite('images/race_pads.png', (200, 375)),
PadSprite('images/race_pads.png', (200, 625)),
PadSprite('images/small_vertical.png', (450, 260)),
PadSprite('images/vertical_pads.png', (0, 250)),
PadSprite('images/vertical_pads.png', (0, 525)),
PadSprite('images/vertical_pads.png', (1024, 250)),
PadSprite('images/vertical_pads.png', (1024, 525)),
PadSprite('images/race_pads.png', (250, 0)),
PadSprite('images/race_pads.png', (760, 0)),
PadSprite('images/race_pads.png', (500, 0)),
PadSprite('images/race_pads.png', (250, 768)),
PadSprite('images/race_pads.png', (760, 768)),
PadSprite('images/race_pads.png', (500, 768))
]
trophies = [Trophy((490, 50))]
return 3, pads, trophies, (490, 700), 30
def level4():
pads = [
PadSprite('images/race_pads.png', (800, 150)),
PadSprite('images/race_pads.png', (800, 375)),
PadSprite('images/race_pads.png', (800, 625)),
PadSprite('images/race_pads.png', (800, 675)),
PadSprite('images/race_pads.png', (800, 575)),
PadSprite('images/race_pads.png', (200, 150)),
PadSprite('images/race_pads.png', (200, 675)),
PadSprite('images/race_pads.png', (200, 575)),
PadSprite('images/race_pads.png', (200, 375)),
PadSprite('images/race_pads.png', (200, 625)),
PadSprite('images/small_vertical.png', (555, 260)),
PadSprite('images/vertical_pads.png', (0, 250)),
PadSprite('images/vertical_pads.png', (0, 525)),
PadSprite('images/vertical_pads.png', (1024, 250)),
PadSprite('images/vertical_pads.png', (1024, 525)),
PadSprite('images/race_pads.png', (250, 0)),
PadSprite('images/race_pads.png', (760, 0)),
PadSprite('images/race_pads.png', (500, 0)),
PadSprite('images/race_pads.png', (250, 768)),
PadSprite('images/race_pads.png', (760, 768)),
PadSprite('images/race_pads.png', (500, 768))
]
trophies = [Trophy((490, 50))]
    return 4, pads, trophies, (490, 700), 30
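
# A minimal usage sketch, not part of the original levels.py: it assumes a 1024x768
# pygame window and that the image assets referenced above exist on disk. It shows
# how a main game might unpack the level tuple and render the returned sprites.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((1024, 768))
    level_number, pads, trophies, car_start, time_limit = level1()
    pad_group = pygame.sprite.RenderPlain(*pads)
    pad_group.update([])          # no collisions yet, so every pad keeps its normal image
    pad_group.draw(screen)
    for trophy in trophies:
        trophy.draw(screen)
    pygame.display.flip()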
''' 3. Create two 3x4 matrices, add their values, and store the result in a third
3x4 matrix.'''
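
# A minimal sketch of one possible solution to the exercise above. Using NumPy is an
# assumption on my part; the statement itself does not prescribe a library.
import numpy as np

matriz_a = np.arange(12).reshape(3, 4)   # first 3x4 matrix
matriz_b = np.full((3, 4), 2)            # second 3x4 matrix
matriz_c = matriz_a + matriz_b           # element-wise sum stored in a third 3x4 matrix
print(matriz_c)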
#
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ["O. Svensson"]
__license__ = "MIT"
__date__ = "21/04/2019"
import os
import json
import unittest
from edna2.utils import UtilsTest
from edna2.tasks.DozorTasks import ControlDozor
class ControlDozorPlotExecTest(unittest.TestCase):
def setUp(self):
self.dataPath = UtilsTest.prepareTestDataPath(__file__)
@unittest.skipIf(os.name == 'nt', "Don't run on Windows")
def test_makePlot(self):
workingDirectory = UtilsTest.getTestRunPath()
dataCollectionId = 123456
outDataPath = self.dataPath / "outDataControlDozor.json"
with open(str(outDataPath)) as f:
outData = json.load(f)
controlDozor = ControlDozor(inData={})
controlDozor.template = "mesh-test_1_%4d.cbf"
controlDozor.directory = UtilsTest.getTestImageDirPath().as_posix()
        controlDozor.makePlot(dataCollectionId, outData, workingDirectory)
# -*- coding: utf-8 -*-
import scrapy
import re
from rkpass.items import dzswMorningItem
# E-commerce Designer exam, morning-session questions
class DzswmorningspiderSpider(scrapy.Spider):
name = 'dzswMorningSpider'
allowed_domains = ['www.rkpass.cn']
start_urls = []
    paperId_list = ['612', '541', '477', '453', '281', '280', '279', '278', '277', '276']  # IDs of all exam papers
    field_list = ['20182', '20172', '20162', '20152', '20142', '20132', '20122', '20112', '20102', '20092']  # exam sessions matching the paper IDs above
for j in range(len(paperId_list)):
for i in range(1, 76):
start_urls.append(
'http://www.rkpass.cn/tk_timu/14_' + str(paperId_list[j]) + '_' + str(i) + '_xuanze.html?field=' +
field_list[j] + '&questionNum=' + str(i))
def parse(self, response):
        questionNum = str(response.url).strip().split("questionNum=")[-1]  # question number; kept explicitly because scrapy does not insert rows in request order
        field = (str(response.url).strip().split("field=")[-1]).split("&")[0]  # exam session, e.g. 20181 means the first half of 2018
        knowledgeTwo = response.xpath(".//span[@class='red']//text()").extract()  # knowledge point (second-level category)
        # the 2018 question bank has no categories, so fall back to an empty string
        knowledgeTwo = knowledgeTwo[0] if list(knowledgeTwo) else ""
        dataimg = response.xpath(".//span[@class='shisi_text']/img[last()]/@src").extract()  # images found in the question and its options
        product_id = re.findall(r'\((.*?)\)', response.xpath(".//script//text()").extract()[0])[0].split(',')[0].strip(
            "'")  # this question's id, used later to match the answer
        question = "".join(response.xpath(".//table/tr[2]/td/span[@class='shisi_text']//text()").extract())  # question text
A = "".join(
"".join(response.xpath(".//table/tr[5]/td/span[@class='shisi_text']//text()").extract()).split()) # A选项
B = "".join(
"".join(response.xpath(".//table/tr[7]/td/span[@class='shisi_text']//text()").extract()).split()) # B选项
C = "".join(
"".join(response.xpath(".//table/tr[9]/td/span[@class='shisi_text']//text()").extract()).split()) # C选项
D = "".join(
"".join(response.xpath(".//table/tr[11]/td/span[@class='shisi_text']//text()").extract()).split()) # D选项
        questionImg = ''  # initialized up front so a missing image cannot break the DB insert
        if len(dataimg) > 0:  # does the question or any option contain images?
            if len(dataimg) == 1:
                questionImg = dataimg[0]  # a single image belongs to the question
            elif len(dataimg) == 4:  # exactly four images: one for each option
A = A + dataimg[0]
B = B + dataimg[1]
C = C + dataimg[2]
D = D + dataimg[3]
            elif len(dataimg) == 5:  # exactly five images: the question image plus one for each of options A-D
                questionImg = dataimg[0]  # the first image belongs to the question
A = A + dataimg[1]
B = B + dataimg[2]
C = C + dataimg[3]
D = D + dataimg[4]
        # handle categories
        # special case: the question itself carries the first-level category
        # (the 2018 bank has no categories; the DB column is allowed to be empty)
        knowledgeOne = knowledgeTwo  # first-level knowledge category
        # collect the scraped data
item = dzswMorningItem()
item['question'] = question
item['questionImg'] = questionImg
item['optiona'] = A
item['optionb'] = B
item['optionc'] = C
item['optiond'] = D
url = 'http://www.rkpass.cn/tk_jiexi.jsp?product_id=' + product_id + '&tixing=xuanze&answer=&paper_id=&tihao=&cache='
yield scrapy.Request(url, callback=self.parse_detail, dont_filter=True, meta={'item': item, 'field': field, 'questionNum': questionNum, 'knowledgeOne': knowledgeOne, 'knowledgeTwo': knowledgeTwo})
def parse_detail(self, response):
        knowledgeOne = response.meta['knowledgeOne']  # first-level category passed along from parse()
        knowledgeTwo = response.meta['knowledgeTwo']  # second-level category passed along from parse()
        questionNum = response.meta['questionNum']  # current question number
        field = response.meta['field']  # current exam session
        item = response.meta['item']  # data already scraped by parse()
        answer = response.xpath(".//td/span[@class='shisi_text']//text()").extract()[2].strip()  # answer
        answerAnalysis = response.xpath(".//table/tr[3]/td//text()").extract()  # answer analysis
        answerAnalysis = "".join(answerAnalysis[3:len(answerAnalysis)])
        # merge in the data from the answer detail page
item['answer'] = answer
item['answeranalysis'] = answerAnalysis
item['field'] = field
item['questionNum'] = questionNum
item['knowledgeOne'] = knowledgeOne
item['knowledgeTwo'] = knowledgeTwo
        return item
import numpy as np
import networkx as nx
from numpy.linalg import inv  # 'inv' assumed to come from numpy.linalg; scipy.linalg.inv would also work

def decoupling_regularization_prepare(graph, sigma_square, lambda_input):
# get W matrix, Z(for row sum) and Z_prime(for col sum), and A_tilde
# get matrix W:
A = np.array(nx.adjacency_matrix(graph).todense())
d = np.sum(A, axis=1)
D = np.diag(d)
n = len(D)
# Alternative way(19): set Sigma_square = sigma_square./d, where sigma_square is fixed
Sigma_square = np.divide(sigma_square,d)
Sigma = np.diag(Sigma_square)
W = np.dot(A,inv(Sigma))
lambda_diag = lambda_input*np.ones(n)
lambda_diag_matrix = np.diag(lambda_diag)
W = W+lambda_diag_matrix
w_col_sum = np.sum(W, axis=0)
w_row_sum = np.sum(W, axis=1)
Z_prime = np.diag(w_col_sum)
Z = np.diag(w_row_sum)
A_tilde = np.dot(np.dot(W,inv(Z_prime)),np.transpose(W))
    # return the decoupled, regularized adjacency matrix
    return A_tilde
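
# A brief usage sketch (an illustrative assumption, not from the original source):
# build a small networkx graph and compute the decoupled, regularized matrix.
if __name__ == "__main__":
    G = nx.karate_club_graph()
    A_tilde = decoupling_regularization_prepare(G, sigma_square=1.0, lambda_input=0.1)
    print(A_tilde.shape)  # (n, n), with n the number of nodes in the graph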
# coding: utf8
from __future__ import unicode_literals
from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER, QUOTES, HYPHENS
from ..char_classes import LIST_ELLIPSES, LIST_ICONS
_hyphens_no_dash = HYPHENS.replace('-', '').strip('|').replace('||', '')
_infixes = (LIST_ELLIPSES + LIST_ICONS +
[r'(?<=[{}])\.(?=[{}])'.format(ALPHA_LOWER, ALPHA_UPPER),
r'(?<=[{a}])[,!?/\(\)]+(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}{q}])[:<>=](?=[{a}])'.format(a=ALPHA, q=QUOTES),
r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=QUOTES),
r'(?<=[{a}])[?";:=,.]*(?:{h})(?=[{a}])'.format(a=ALPHA,
h=_hyphens_no_dash),
r'(?<=[0-9])-(?=[0-9])'])
TOKENIZER_INFIXES = _infixes
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Felix Fontein <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
sops_binary:
description:
- Path to the sops binary.
- By default uses C(sops).
type: path
version_added: 1.0.0
aws_profile:
description:
- The AWS profile to use for requests to AWS.
- This corresponds to the sops C(--aws-profile) option.
type: str
version_added: 1.0.0
aws_access_key_id:
description:
- The AWS access key ID to use for requests to AWS.
- Sets the environment variable C(AWS_ACCESS_KEY_ID) for the sops call.
type: str
version_added: 1.0.0
aws_secret_access_key:
description:
- The AWS secret access key to use for requests to AWS.
- Sets the environment variable C(AWS_SECRET_ACCESS_KEY) for the sops call.
type: str
version_added: 1.0.0
aws_session_token:
description:
- The AWS session token to use for requests to AWS.
- Sets the environment variable C(AWS_SESSION_TOKEN) for the sops call.
type: str
version_added: 1.0.0
config_path:
description:
- Path to the sops configuration file.
- If not set, sops will recursively search for the config file starting at
the file that is encrypted or decrypted.
- This corresponds to the sops C(--config) option.
type: path
version_added: 1.0.0
enable_local_keyservice:
description:
- Tell sops to use local key service.
- This corresponds to the sops C(--enable-local-keyservice) option.
type: bool
default: false
version_added: 1.0.0
keyservice:
description:
- Specify key services to use next to the local one.
- A key service must be specified in the form C(protocol://address), for
example C(tcp://myserver.com:5000).
- This corresponds to the sops C(--keyservice) option.
type: list
elements: str
version_added: 1.0.0
'''
ANSIBLE_VARIABLES = r'''
options:
sops_binary:
vars:
- name: sops_binary
aws_profile:
vars:
- name: sops_aws_profile
aws_access_key_id:
vars:
- name: sops_aws_access_key_id
aws_secret_access_key:
vars:
- name: sops_aws_secret_access_key
aws_session_token:
vars:
- name: sops_session_token
config_path:
vars:
- name: sops_config_path
enable_local_keyservice:
vars:
- name: sops_enable_local_keyservice
keyservice:
vars:
- name: sops_keyservice
'''
ENCRYPT_SPECIFIC = r'''
options:
kms:
description:
- List of KMS ARNs to use.
- This corresponds to the sops C(--kms) option.
type: list
elements: str
version_added: 1.0.0
gcp_kms:
description:
- GCP KMS resource IDs to use.
- This corresponds to the sops C(--gcp-kms) option.
type: list
elements: str
version_added: 1.0.0
azure_kv:
description:
- Azure Key Vault URLs to use.
- This corresponds to the sops C(--azure-kv) option.
type: list
elements: str
version_added: 1.0.0
hc_vault_transit:
description:
- HashiCorp Vault key URIs to use.
- For example, C(https://vault.example.org:8200/v1/transit/keys/dev).
- This corresponds to the sops C(--hc-vault-transit) option.
type: list
elements: str
version_added: 1.0.0
pgp:
description:
- PGP fingerprints to use.
- This corresponds to the sops C(--pgp) option.
type: list
elements: str
version_added: 1.0.0
unencrypted_suffix:
description:
- Override the unencrypted key suffix.
- This corresponds to the sops C(--unencrypted-suffix) option.
type: str
version_added: 1.0.0
encrypted_suffix:
description:
- Override the encrypted key suffix.
- When set to an empty string, all keys will be encrypted that are not explicitly
marked by I(unencrypted_suffix).
- This corresponds to the sops C(--encrypted-suffix) option.
type: str
version_added: 1.0.0
unencrypted_regex:
description:
- Set the unencrypted key suffix.
- When specified, only keys matching the regular expression will be left unencrypted.
- This corresponds to the sops C(--unencrypted-regex) option.
type: str
version_added: 1.0.0
encrypted_regex:
description:
- Set the encrypted key suffix.
- When specified, only keys matching the regular expression will be encrypted.
- This corresponds to the sops C(--encrypted-regex) option.
type: str
version_added: 1.0.0
encryption_context:
description:
- List of KMS encryption context pairs of format C(key:value).
- This corresponds to the sops C(--encryption-context) option.
type: list
elements: str
version_added: 1.0.0
shamir_secret_sharing_threshold:
description:
- The number of distinct keys required to retrieve the data key with
L(Shamir's Secret Sharing, https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing).
- If not set here and in the sops config file, will default to C(0).
- This corresponds to the sops C(--shamir-secret-sharing-threshold) option.
type: int
version_added: 1.0.0
'''
#!/usr/bin/env python3
from pyxdc.exceptions import (
ProviderError, BalanceError, APIError, AddressError, InvalidURLError,
ClientError, NotFoundError, UnitError
)
import pytest
def test_exceptions():
with pytest.raises(ProviderError, match="error"):
raise ProviderError("error")
with pytest.raises(ProviderError, match="error, error"):
raise ProviderError("error", "error")
with pytest.raises(BalanceError, match="error"):
raise BalanceError("error")
with pytest.raises(BalanceError, match="error, error"):
raise BalanceError("error", "error")
with pytest.raises(APIError, match="error"):
raise APIError("error")
with pytest.raises(APIError):
raise APIError("error", "error")
with pytest.raises(AddressError, match="error"):
raise AddressError("error")
with pytest.raises(AddressError, match="error, error"):
raise AddressError("error", "error")
with pytest.raises(InvalidURLError, match="error"):
raise InvalidURLError("error")
with pytest.raises(ClientError, match="error"):
raise ClientError("error")
with pytest.raises(ClientError):
raise ClientError("error", "error")
with pytest.raises(NotFoundError, match="error"):
raise NotFoundError("error")
with pytest.raises(UnitError, match="error"):
raise UnitError("error")
with pytest.raises(UnitError, match="error, error"):
raise UnitError("error", "error")
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
from __future__ import absolute_import
import unittest
import os
from groupdocs_conversion_cloud import *
from test.test_context import TestContext
from test.test_file import TestFile
class TestConvertApi(TestContext):
"""ConvertApi unit tests"""
def test_convert_document(self):
"""
Test case for convert_document
"""
test_file = TestFile.one_page_docx()
settings = ConvertSettings()
settings.file_path = test_file.folder + test_file.file_name
settings.format = "jpg"
settings.output_path = self.OUT_FOLDER
request = ConvertDocumentRequest(settings)
data = self.convert_api.convert_document(request)
self.assertTrue(len(data) > 0)
self.assertTrue(data[0].size > 0)
def test_convert_document_download(self):
"""
Test case for convert_document with file result
"""
test_file = TestFile.one_page_docx()
settings = ConvertSettings()
settings.file_path = test_file.folder + test_file.file_name
settings.format = "pdf"
request = ConvertDocumentRequest(settings)
data = self.convert_api.convert_document_download(request)
self.assertGreater(os.path.getsize(data), 0)
def test_convert_document_direct(self):
"""
Test case for convert_document with file result without using cloud storage
"""
test_file = TestFile.four_pages_docx()
local_file_path = self.get_test_file_path(test_file)
format = "pdf"
request = ConvertDocumentDirectRequest(format, local_file_path)
data = self.convert_api.convert_document_direct(request)
self.assertGreater(os.path.getsize(data), 0)
if __name__ == '__main__':
unittest.main()
import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def load_data(path="../data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def sub_graph(adj, num):
'''
Monte carlo sample a number of neighbors for each node given the adjacent matrix
adj: normalized and processed graph adjacent matrix
num: the number of samples for each neighbor
'''
nodes = adj.shape[0]
    neighbor_number = torch.sum(adj > 0, dim=1).reshape(nodes, 1) / num  # per-node scaling: true neighbour count divided by the sample size
sub_graph = torch.randint(0,nodes, (nodes,num))
sub_graph = sub_graph.reshape(-1).cpu().tolist()
sub_graph = list(set(sub_graph))
mask = torch.zeros(nodes,nodes)
mask[sub_graph,sub_graph] = 1
return adj*mask*neighbor_number
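
# A short demonstration (illustrative assumption, not part of the original module):
# sample a random sub-graph mask from a small dense adjacency matrix.
if __name__ == "__main__":
    adj_demo = (torch.rand(8, 8) > 0.5).float() + torch.eye(8)  # toy adjacency matrix
    sampled = sub_graph(adj_demo, num=2)
    print(sampled.shape)  # torch.Size([8, 8]); entries outside the sampled mask are zeroed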
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'RemediationFiltersArgs',
]
@pulumi.input_type
class RemediationFiltersArgs:
def __init__(__self__, *,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The filters that will be applied to determine which resources to remediate.
:param pulumi.Input[Sequence[pulumi.Input[str]]] locations: The resource locations that will be remediated.
"""
if locations is not None:
pulumi.set(__self__, "locations", locations)
@property
@pulumi.getter
def locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The resource locations that will be remediated.
"""
return pulumi.get(self, "locations")
@locations.setter
def locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "locations", value)
import ast
with open('./test.txt', "r") as f:  # open the file object
    content = f.read()  # any further operations on the file could go here
print(content)
frame_list = ast.literal_eval(content)
for frame in frame_list:
    print(frame)
"""
Package-level constants
"""
from strenum import StrEnum
class SummaryLevel(StrEnum):
"""
Values for the SUMLEV column in PL94 data
"""
STATE = "040"
STATE_COUNTY = "050"
STATE_COUNTY_TRACT = "140"
STATE_COUNTY_TRACT_BLOCKGROUP = "150"
STATE_COUNTY_TRACT_BLOCKGROUP_BLOCK = "750"
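
# A short illustrative check (not part of the original module): StrEnum members
# compare equal to their raw string values, so they can be matched directly
# against the SUMLEV column of a PL94 file.
if __name__ == "__main__":
    assert SummaryLevel.STATE == "040"
    assert SummaryLevel.STATE_COUNTY_TRACT_BLOCKGROUP_BLOCK == "750"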
import _initpath
import pyradox
#result = pyradox.txt.parse_file('D:/Steam/steamapps/common/Europa Universalis IV/common/prices/00_prices.txt')
#print(result)
result = pyradox.parse("""
regular_group = { 1 2 3 }
empty_tree = {}
mixed_group = {
10
{}
{ a = 1 b = 2 }
20
}
player_countries={
ITA={
user="Evil4Zerggin"
country_leader=yes
pinned_theatres={
{
id=16
type=67
}
}
}
}
""")
print(result)
"""
This is a plugin created by ShiN0
Copyright (c) 2020 ShiN0
<https://www.github.com/mgaertne/minqlx-plugin-tests>
You are free to modify this plugin to your own one.
"""
import minqlx
from minqlx import Plugin
from minqlx.database import Redis
import os
import math
import time
import random
import itertools
import threading
from abc import abstractmethod
from operator import itemgetter
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
PLAYER_BASE = "minqlx:players:{0}"
IPS_BASE = "minqlx:ips"
SUPPORTED_GAMETYPES = ("ad", "ca", "ctf", "dom", "ft", "tdm")
def requests_retry_session(
retries=3,
backoff_factor=0.1,
status_forcelist=(500, 502, 504),
session=None,
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class balancetwo(minqlx.Plugin):
"""
Checks qlstats for the elos of a player given as well as checking the elos of potentially aliases of the player
by looking for connection from the same IP as the player has connected to locally.
Uses:
* qlx_balancetwo_ratingSystem (default: "mapbased-truskills") Either "mapbased-truskills", "truskills", "a-elo",
"b-elo".
In the future there might be a "custom" option for other rating providers.
* qlx_balancetwo_ratingLimit_min (default: "15") minimum rating for players trying to connect
* qlx_balancetwo_ratingLimit_max (default: "35") maximum rating for players trying to connect
* qlx_balancetwo_ratingLimit_minGames (default: "10") minimum amount of rated games for player trying to connect
* qlx_balancetwo_ratingStrategy (default: "") unused at the moment. For future use
* qlx_balancetwo_autoSwitch (default: "0") automatically execute suggested switches rather than waiting for !agree
from players.
* qlx_balancetwo_uniquePlayerSwitches (default: "0") During a game, avoid switches that already happened during the
same game
* qlx_balancetwo_autoRebalance (default: "1") When new players join, the new players are automatically put on teams
    that result in the lower difference between the teams.
* qlx_balancetwo_elocheckPermission (default: "0") The permission for issuing the elocheck
* qlx_balancetwo_elocheckReplyChannel (default: "public") The reply channel where the elocheck output is put to.
Possible values: "public" or "private". Any other value leads to public announcements
* qlx_balancetwo_elocheckShowSteamids (default: "0") Also lists the steam ids of the players checked
"""
database = Redis
def __init__(self):
super().__init__()
self.set_cvar_once("qlx_balancetwo_ratingSystem", "mapbased-truskills")
self.set_cvar_once("qlx_balancetwo_ratingLimit_kick", "1")
self.set_cvar_once("qlx_balancetwo_ratingLimit_min", "15")
self.set_cvar_once("qlx_balancetwo_ratingLimit_max", "35")
self.set_cvar_once("qlx_balancetwo_ratingLimit_minGames", "10")
self.set_cvar_once("qlx_balancetwo_elocheckPermission", "0")
self.set_cvar_once("qlx_balancetwo_elocheckReplyChannel", "public")
self.set_cvar_once("qlx_balancetwo_elocheckShowSteamids", "0")
# indicates whether switch suggestions need to be opted-in (!a) or vetoed (!v) by the suggested players
self.set_cvar_once("qlx_balancetwo_autoSwitch", "0")
# if set to true, this avoids suggesting the same players for switching in the same game twice, might lead to
# fewer possible suggestions
self.set_cvar_once("qlx_balancetwo_uniquePlayerSwitches", "0")
self.set_cvar_once("qlx_balancetwo_minimumSuggestionDiff", "25")
self.set_cvar_once("qlx_balancetwo_minimumStddevDiff", "50")
self.set_cvar_once("qlx_balancetwo_autoRebalance", "1")
self.ratingLimit_kick = self.get_cvar("qlx_balancetwo_ratingLimit_kick", bool)
self.ratingLimit_min = self.get_cvar("qlx_balancetwo_ratingLimit_min", int)
self.ratingLimit_max = self.get_cvar("qlx_balancetwo_ratingLimit_max", int)
self.ratingLimit_minGames = self.get_cvar("qlx_balancetwo_ratingLimit_minGames", int)
self.reply_channel = self.get_cvar("qlx_balancetwo_elocheckReplyChannel")
if self.reply_channel != "private":
self.reply_channel = "public"
self.show_steam_ids = self.get_cvar("qlx_balancetwo_elocheckShowSteamids", bool)
self.auto_switch = self.get_cvar("qlx_balancetwo_autoSwitch", bool)
self.unique_player_switches = self.get_cvar("qlx_balancetwo_uniquePlayerSwitches", bool)
self.minimum_suggestion_diff = self.get_cvar("qlx_balancetwo_minimumSuggestionDiff", float)
self.minimum_suggestion_stddev_diff = self.get_cvar("qlx_balancetwo_minimumStddevDiff", int)
self.auto_rebalance = self.get_cvar("qlx_balancetwo_autoRebalance", bool)
self.add_command(("elocheck", "getrating", "getelo", "elo"), self.cmd_elocheck,
permission=self.get_cvar("qlx_balancetwo_elocheckPermission", int),
usage="<player or steam_id>")
self.add_command("aliases", self.cmd_aliases,
permission=self.get_cvar("qlx_balancetwo_elocheckPermission", int),
usage="[player or steam_id]")
self.add_command(("ratings", "elos", "selo"), self.cmd_ratings)
self.add_command("eloupdates", self.cmd_switch_elo_changes_notifications, usage="<0/1>")
self.add_command("balance", self.cmd_balance, 1)
self.add_command(("teams", "teens"), self.cmd_teams)
self.add_command("do", self.cmd_do, 1)
self.add_command("dont", self.cmd_dont, 1)
self.add_command(("agree", "a"), self.cmd_agree, client_cmd_perm=0)
self.add_command(("veto", "v"), self.cmd_veto, client_cmd_perm=0)
self.add_command(("nokick", "dontkick"), self.cmd_nokick, 2, usage="[<name>]")
self.add_hook("map", self.handle_map_change)
self.add_hook("player_connect", self.handle_player_connect, priority=minqlx.PRI_LOWEST)
self.add_hook("player_disconnect", self.handle_player_disconnect)
self.add_hook("team_switch_attempt", self.handle_team_switch_attempt)
self.add_hook("team_switch", self.handle_team_switch)
self.add_hook("game_countdown", self.handle_game_countdown)
self.add_hook("round_countdown", self.handle_round_countdown)
self.add_hook("round_start", self.handle_round_start)
self.add_hook("game_end", self.handle_game_end)
self.rating_system = self.get_cvar("qlx_balancetwo_ratingSystem")
self.balance_api = self.get_cvar("qlx_balanceApi")
self.kickthreads = {}
self.jointimes = {}
self.last_new_player_id = None
self.previous_teams = None
self.previous_map = None
self.previous_gametype = None
self.previous_ratings = {}
self.ratings = {}
self.rating_diffs = {}
self.fetch_elos_from_all_players()
self.informed_players = []
self.switched_players = []
self.switch_suggestion = None
self.in_countdown = False
self.twovstwo_steam_ids = []
self.twovstwo_combinations = []
self.twovstwo_iter = None
self.prevent = False
self.last_action = "spec"
@minqlx.thread
def fetch_elos_from_all_players(self):
self.fetch_ratings([player.steam_id for player in self.players()])
def fetch_ratings(self, steam_ids, mapname=None):
self.fetch_mapbased_ratings(steam_ids, mapname)
for rating_provider in [TRUSKILLS, A_ELO, B_ELO]:
rating_results = rating_provider.fetch_elos(steam_ids)
self.append_ratings(rating_provider.name, rating_results)
def fetch_mapbased_ratings(self, steam_ids, mapname=None):
if mapname is None and (self.game is None or self.game.map is None):
return
if mapname is None:
mapname = self.game.map.lower()
rating_results = TRUSKILLS.fetch_elos(steam_ids, headers={"X-QuakeLive-Map": mapname})
rating_provider_name = "{} {}".format(mapname, TRUSKILLS.name)
self.append_ratings(rating_provider_name, rating_results)
def append_ratings(self, rating_provider_name, json_result):
if json_result is None:
return
if rating_provider_name in self.ratings:
self.ratings[rating_provider_name].append_ratings(json_result)
return
self.ratings[rating_provider_name] = RatingProvider.from_json(json_result)
def cmd_elocheck(self, player: minqlx.Player, msg: str, channel: minqlx.AbstractChannel):
if len(msg) > 2:
return minqlx.RET_USAGE
if len(msg) == 1:
target = player.steam_id
else:
target = msg[1]
self.do_elocheck(player, target, channel)
@minqlx.thread
def do_elocheck(self, player: minqlx.Player, target: str, channel: minqlx.AbstractChannel):
target_players = self.find_target_player(target)
target_steam_id = None
if target_players is None or len(target_players) == 0:
try:
target_steam_id = int(target)
if not self.db.exists(PLAYER_BASE.format(target_steam_id)):
player.tell("Sorry, player with steam id {} never played here.".format(target_steam_id))
return
except ValueError:
player.tell("Sorry, but no players matched your tokens: {}.".format(target))
return
if len(target_players) > 1:
player.tell("A total of ^6{}^7 players matched for {}:".format(len(target_players), target))
out = ""
for p in target_players:
out += " " * 2
out += "{}^6:^7 {}\n".format(p.id, p.name)
player.tell(out[:-1])
return
if len(target_players) == 1:
target_steam_id = target_players.pop().steam_id
reply_func = self.reply_func(player, channel)
used_steam_ids = self.used_steam_ids_for(target_steam_id)
aliases = self.fetch_aliases(used_steam_ids)
truskill = RatingProvider.from_json(TRUSKILLS.fetch_elos(used_steam_ids))
a_elo = RatingProvider.from_json(A_ELO.fetch_elos(used_steam_ids))
b_elo = RatingProvider.from_json(B_ELO.fetch_elos(used_steam_ids))
map_based_truskill = None
if self.game is not None and self.game.map is not None:
map_based_truskill = RatingProvider.from_json(
TRUSKILLS.fetch_elos(used_steam_ids, headers={"X-QuakeLive-Map": self.game.map.lower()}))
if target_steam_id in aliases:
target_player_elos = self.format_player_elos(a_elo, b_elo, truskill, map_based_truskill,
target_steam_id, aliases=aliases[target_steam_id])
else:
target_player_elos = self.format_player_elos(a_elo, b_elo, truskill, map_based_truskill,
target_steam_id)
reply_func("{0}^7".format(target_player_elos))
alternative_steam_ids = used_steam_ids[:]
alternative_steam_ids.remove(target_steam_id)
if len(alternative_steam_ids) == 0:
return
reply_func("Players from the same IPs:\n")
for steam_id in alternative_steam_ids:
if steam_id in aliases:
player_elos = self.format_player_elos(a_elo, b_elo, truskill, map_based_truskill, steam_id,
aliases=aliases[steam_id])
else:
player_elos = self.format_player_elos(a_elo, b_elo, truskill, map_based_truskill, steam_id)
reply_func("{0}^7".format(player_elos))
def find_target_player(self, target: str):
try:
steam_id = int(target)
target_player = self.player(steam_id)
if target_player:
return [target_player]
except ValueError:
pass
except minqlx.NonexistentPlayerError:
pass
return self.find_player(target)
def reply_func(self, player, channel):
if self.reply_channel == "private":
return player.tell
return self.identify_reply_channel(channel).reply
def identify_reply_channel(self, channel):
if channel in [minqlx.RED_TEAM_CHAT_CHANNEL, minqlx.BLUE_TEAM_CHAT_CHANNEL,
minqlx.SPECTATOR_CHAT_CHANNEL, minqlx.FREE_CHAT_CHANNEL]:
return minqlx.CHAT_CHANNEL
return channel
def used_steam_ids_for(self, steam_id):
if not self.db.exists(PLAYER_BASE.format(steam_id) + ":ips"):
return [steam_id]
ips = self.db.smembers(PLAYER_BASE.format(steam_id) + ":ips")
used_steam_ids = set()
for ip in ips:
if not self.db.exists(IPS_BASE + ":{0}".format(ip)):
continue
used_steam_ids = used_steam_ids | self.db.smembers(IPS_BASE + ":{0}".format(ip))
return [int(_steam_id) for _steam_id in used_steam_ids]
def fetch_aliases(self, steam_ids):
url_template = "{}aliases/".format(A_ELO.url_base) + "{}.json"
try:
result = requests_retry_session().get(
url_template.format("+".join([str(steam_id) for steam_id in steam_ids])), timeout=A_ELO.timeout)
except requests.RequestException as exception:
self.logger.debug("request exception: {}".format(exception))
return {}
if result.status_code != requests.codes.ok:
return {}
js = result.json()
aliases = {}
for steam_id in steam_ids:
if str(steam_id) not in js:
continue
player_entry = js[str(steam_id)]
aliases[steam_id] = []
cleaned_aliases = []
for entry in player_entry:
if self.clean_text(entry) not in cleaned_aliases:
aliases[steam_id].append(entry)
cleaned_aliases.append(self.clean_text(entry))
return aliases
def format_player_elos(self, a_elo, b_elo, truskill, map_based_truskill, steam_id, indent=0, aliases=None):
display_name = self.resolve_player_name(steam_id)
result = " " * indent + "{0}^7\n".format(self.format_player_name(steam_id))
if aliases is not None:
displayed_aliases = aliases[:]
displayed_aliases.remove(display_name)
if len(displayed_aliases) != 0:
if len(displayed_aliases) <= 5:
result += " " * indent + "Aliases used: {}^7\n".format("^7, ".join(displayed_aliases[:5]))
else:
result += " " * indent + "Aliases used: {}^7, ... (^4!aliases <player>^7 to list all)\n" \
.format("^7, ".join(displayed_aliases[:5]))
if map_based_truskill is not None:
formatted_map_based_truskills = map_based_truskill.format_elos(steam_id)
if formatted_map_based_truskills is not None and len(formatted_map_based_truskills) > 0:
result += " " * indent + " " + "{1} Truskills: {0}\n" \
.format(formatted_map_based_truskills, self.game.map.lower())
formatted_truskills = truskill.format_elos(steam_id)
if truskill is not None and len(formatted_truskills) > 0:
result += " " * indent + " " + "Truskills: {0}\n".format(formatted_truskills)
formatted_a_elos = a_elo.format_elos(steam_id)
if a_elo is not None and len(formatted_a_elos) > 0:
result += " " * indent + " " + "Elos: {0}\n".format(formatted_a_elos)
formatted_b_elos = b_elo.format_elos(steam_id)
if b_elo is not None and len(formatted_b_elos) > 0:
result += " " * indent + " " + "B-Elos: {0}\n".format(formatted_b_elos)
return result
def format_player_name(self, steam_id):
result = ""
player_name = self.resolve_player_name(steam_id)
result += "{0}^7".format(player_name)
if self.show_steam_ids:
result += " ({0})".format(steam_id)
return result
def resolve_player_name(self, steam_id):
player = self.player(steam_id)
if player is not None:
return self.remove_trailing_color_code(player.name)
if self.db.exists(PLAYER_BASE.format(steam_id) + ":last_used_name"):
return self.remove_trailing_color_code(self.db[PLAYER_BASE.format(steam_id) + ":last_used_name"])
return "unknown"
def remove_trailing_color_code(self, text):
if not text.endswith("^7"):
return text
return text[:-2]
def cmd_aliases(self, player: minqlx.Player, msg: str, channel: minqlx.AbstractChannel):
if len(msg) != 2:
return minqlx.RET_USAGE
self.do_aliases(player, msg[1], channel)
@minqlx.thread
def do_aliases(self, player: minqlx.Player, target: str, channel: minqlx.AbstractChannel):
target_players = self.find_target_player(target)
target_steam_id = None
if target_players is None or len(target_players) == 0:
try:
target_steam_id = int(target)
if not self.db.exists(PLAYER_BASE.format(target_steam_id)):
player.tell("Sorry, player with steam id {} never played here.".format(target_steam_id))
return
except ValueError:
player.tell("Sorry, but no players matched your tokens: {}.".format(target))
return
if len(target_players) > 1:
player.tell("A total of ^6{}^7 players matched for {}:".format(len(target_players), target))
out = ""
for p in target_players:
out += " " * 2
out += "{}^6:^7 {}\n".format(p.id, p.name)
player.tell(out[:-1])
return
if len(target_players) == 1:
target_steam_id = target_players.pop().steam_id
reply_func = self.reply_func(player, channel)
aliases = self.fetch_aliases([target_steam_id])
if target_steam_id not in aliases:
reply_func("Sorry, no aliases returned for {}".format(target_steam_id))
return
reply_func("{0}^7".format(self.format_player_aliases(target_steam_id, aliases[target_steam_id])))
def format_player_aliases(self, steam_id, aliases):
result = "{0}^7\n".format(self.format_player_name(steam_id))
result += "Aliases used: {}".format("^7, ".join(aliases))
return result
def cmd_ratings(self, player, msg, channel):
teams = self.teams()
gametype = self.game.type_short
mapname = self.game.map.lower()
map_based_rating_provider_name = "{} {}".format(mapname, TRUSKILLS.name)
if TRUSKILLS.name in self.ratings and map_based_rating_provider_name in self.ratings:
truskills_rating_provider = self.ratings[TRUSKILLS.name]
mapbased_truskills_rating_provider = self.ratings[map_based_rating_provider_name]
channel.reply("^3{}^7 ratings (^3general^7/^3map-based^7) (^3{}^7)"
.format(TRUSKILLS.name, TRUSKILLS.url_base.split(':')[1].strip('/')))
self.report_ratings_for_team(channel, teams["free"], gametype,
truskills_rating_provider, mapbased_truskills_rating_provider,
primary_rating_prefix="^6", secondary_rating_prefix="^6")
self.report_ratings_for_team(channel, teams["red"], gametype,
truskills_rating_provider, mapbased_truskills_rating_provider,
primary_rating_prefix="^1", secondary_rating_prefix="^1")
self.report_ratings_for_team(channel, teams["blue"], gametype,
truskills_rating_provider, mapbased_truskills_rating_provider,
primary_rating_prefix="^4", secondary_rating_prefix="^4")
self.report_ratings_for_team(channel, teams["spectator"], gametype,
truskills_rating_provider, mapbased_truskills_rating_provider)
if A_ELO.name in self.ratings and B_ELO.name in self.ratings:
primary_rating_provider = self.ratings[A_ELO.name]
secondary_rating_provider = self.ratings[B_ELO.name]
channel.reply("^5=================================^7")
channel.reply("^3Elo^7 ratings (^3A elo^7/^3B elo^7) (^3{}^7)"
.format(A_ELO.url_base.split(':')[1].strip('/')))
self.report_ratings_for_team(channel, teams["free"], gametype,
primary_rating_provider, secondary_rating_provider,
primary_rating_prefix="A:^6", secondary_rating_prefix="B:^6")
self.report_ratings_for_team(channel, teams["red"], gametype,
primary_rating_provider, secondary_rating_provider,
primary_rating_prefix="A:^1", secondary_rating_prefix="B:^1")
self.report_ratings_for_team(channel, teams["blue"], gametype,
primary_rating_provider, secondary_rating_provider,
primary_rating_prefix="A:^4", secondary_rating_prefix="B:^4")
self.report_ratings_for_team(channel, teams["spectator"], gametype,
primary_rating_provider, secondary_rating_provider,
primary_rating_prefix="A:", secondary_rating_prefix="B:")
def report_ratings_for_team(self, channel, team, gametype, primary_rating_provider, secondary_rating_provider,
primary_rating_prefix="", secondary_rating_prefix=""):
if team is None or len(team) <= 0:
return
primary_filtered = [player for player in team if player.steam_id in primary_rating_provider.rated_steam_ids()]
primary_filtered = [player for player in primary_filtered
if gametype in primary_rating_provider.rated_gametypes_for(player.steam_id)]
primary_filtered = [player for player in primary_filtered
if primary_rating_provider.games_for(player.steam_id, gametype) > 0]
rated_player_texts = []
if len(primary_filtered) > 0:
primary_sorted = sorted(primary_filtered,
key=lambda x: primary_rating_provider[x.steam_id][gametype]["elo"], reverse=True)
for player in primary_sorted:
if player.steam_id in secondary_rating_provider.rated_steam_ids() and \
gametype in secondary_rating_provider.rated_gametypes_for(player.steam_id) and \
secondary_rating_provider.games_for(player.steam_id, gametype) > 0:
rated_player_texts.append("{}^7: {}{}^7/{}{}^7"
.format(player.name,
primary_rating_prefix,
primary_rating_provider[player.steam_id][gametype]["elo"],
secondary_rating_prefix,
secondary_rating_provider[player.steam_id][gametype]["elo"]))
else:
rated_player_texts.append("{}^7: {}{}^7/{}^5{}^7"
.format(player.name,
primary_rating_prefix,
primary_rating_provider[player.steam_id][gametype]["elo"],
secondary_rating_prefix,
secondary_rating_provider[player.steam_id][gametype]["elo"]))
primary_unranked = [player for player in team if player not in primary_filtered]
if len(primary_unranked) > 0:
secondary_filtered = [player for player in primary_unranked
if player.steam_id in secondary_rating_provider.rated_steam_ids()]
secondary_filtered = [player for player in secondary_filtered
if gametype in secondary_rating_provider.rated_gametypes_for(player.steam_id)]
secondary_filtered = [player for player in secondary_filtered
if secondary_rating_provider.games_for(player.steam_id, gametype) > 0]
if len(secondary_filtered) > 0:
secondary_sorted = sorted(secondary_filtered,
key=lambda x: primary_rating_provider[x.steam_id][gametype]["elo"],
reverse=True)
for player in secondary_sorted:
rated_player_texts.append("{}^7: {}^5{}/{}{}^7"
.format(player.name,
primary_rating_prefix,
primary_rating_provider[player.steam_id][gametype]["elo"],
secondary_rating_prefix,
secondary_rating_provider[player.steam_id][gametype]["elo"]))
secondary_unranked = [player for player in primary_unranked if player not in secondary_filtered]
for player in secondary_unranked:
rated_player_texts.append("{}^7: {}^5{}^7/{}^5{}^7"
.format(player.name,
primary_rating_prefix,
primary_rating_provider[player.steam_id][gametype]["elo"],
secondary_rating_prefix,
secondary_rating_provider[player.steam_id][gametype]["elo"]))
channel.reply(", ".join(rated_player_texts))
def cmd_switch_elo_changes_notifications(self, player, msg, channel):
flag = self.wants_to_be_informed(player.steam_id)
self.db.set_flag(player, "balancetwo:rating_changes", not flag)
if flag:
player.tell(
"Notifications for elo and truskill changes have been disabled. "
"Use ^6{}eloupdates^7 to enable them again.".format(self.get_cvar("qlx_commandPrefix")))
else:
player.tell(
"Notifications for elo and truskill changes have been enabled. "
"Use ^6{}eloupdates^7 to disable them again.".format(self.get_cvar("qlx_commandPrefix")))
return minqlx.RET_STOP_ALL
def wants_to_be_informed(self, steam_id):
return self.db.get_flag(steam_id, "balancetwo:rating_changes", default=False)
def cmd_balance(self, player, msg, channel):
gt = self.game.type_short
if gt not in SUPPORTED_GAMETYPES:
player.tell("This game mode is not supported by the balance plugin.")
return minqlx.RET_STOP_ALL
teams = self.teams()
if len(teams["red"] + teams["blue"]) % 2 != 0:
player.tell("The total number of players should be an even number.")
return minqlx.RET_STOP_ALL
players = dict([(p.steam_id, gt) for p in teams["red"] + teams["blue"]])
self.callback_balance(players, minqlx.CHAT_CHANNEL)
def callback_balance(self, players, channel):
if not self.game:
return
if self.game.state == "in_progress":
return
teams = self.teams()
current = teams["red"] + teams["blue"]
if len(current) % 2 == 1:
player_to_spec = self.find_player_to_spec(current)
self.logger.debug("putting {} to spec".format(player_to_spec.clean_name))
player_to_spec.put("spectator")
balanced_teams = self.find_balanced_teams()
if balanced_teams is None:
return
red_steam_ids, blue_steam_ids = balanced_teams
changed = False
for steam_id in red_steam_ids:
player = self.player(steam_id)
if player.team != "red":
changed = True
self.logger.debug("putting {} to red".format(player.clean_name))
player.put("red")
for steam_id in blue_steam_ids:
player = self.player(steam_id)
if player.team != "blue":
changed = True
self.logger.debug("putting {} to blue".format(player.clean_name))
player.put("blue")
if not changed:
channel.reply("Teams are good! Nothing to balance.")
return True
self.report_teams(red_steam_ids, blue_steam_ids, channel)
return True
def find_player_to_spec(self, players):
return min([player for player in players], key=lambda _player: self.find_games_here(_player))
def find_games_here(self, player):
completed_key = "minqlx:players:{}:games_completed"
if not self.db.exists(completed_key.format(player.steam_id)):
return 0
return int(self.db[completed_key.format(player.steam_id)])
def find_time(self, player):
if not (player.steam_id in self.jointimes):
self.jointimes[player.steam_id] = time.time()
return self.jointimes[player.steam_id]
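    # Note on the two balancing strategies below (clarifying comment, not part of
    # the original logic): lobbies with fewer than 8 players are balanced by
    # enumerating team combinations while skipping the previously played line-ups;
    # larger lobbies fall back to the greedy pairing in find_large_balanced_teams().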
def find_balanced_teams(self):
teams = self.teams()
# if 3 < len(teams["red"] + teams["blue"]) < 6:
# return self.find_next_2vs2_teams()
if len(teams["red"] + teams["blue"]) < 8:
return self.find_non_recent_small_balanced_teams()
return self.find_large_balanced_teams()
def find_next_2vs2_teams(self):
teams = self.teams()
steam_ids = [player.steam_id for player in teams["red"] + teams["blue"]]
if self.twovstwo_iter is None or not self.check_all_steam_ids(steam_ids):
self.twovstwo_steam_ids = steam_ids
self.twovstwo_combinations = self.filter_combinations(steam_ids)
self.twovstwo_iter = random_iterator(self.twovstwo_combinations)
red_steam_ids = list(next(self.twovstwo_iter))
blue_steam_ids = [steam_id for steam_id in steam_ids if steam_id not in red_steam_ids]
return red_steam_ids, blue_steam_ids
def check_all_steam_ids(self, steam_ids):
return sorted(steam_ids) == sorted(self.twovstwo_steam_ids)
def filter_combinations(self, steam_ids):
gametype = self.game.type_short
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name not in self.ratings:
self.logger.debug("Balancing aborted. No ratings found for {}.".format(configured_rating_provider_name))
return []
configured_rating_provider = self.ratings[configured_rating_provider_name]
combinations = []
if len(steam_ids) != 4:
return []
combinations_list = [(steam_ids[0], steam_ids[1]), (steam_ids[0], steam_ids[2]), (steam_ids[0], steam_ids[3])]
for red_steam_ids in combinations_list:
blue_steam_ids = [steam_id for steam_id in steam_ids if steam_id not in red_steam_ids]
red_avg = self.team_average(red_steam_ids, gametype, rating_provider=configured_rating_provider)
blue_avg = self.team_average(blue_steam_ids, gametype, rating_provider=configured_rating_provider)
diff = abs(red_avg - blue_avg)
if diff < self.minimum_suggestion_diff:
combinations.append((red_steam_ids, diff))
return combinations_list
def find_non_recent_small_balanced_teams(self):
teams = self.teams()
gt = self.game.type_short
steam_ids = [player.steam_id for player in teams["red"] + teams["blue"]]
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name not in self.ratings:
self.logger.debug("Balancing aborted. No ratings found for {}.".format(configured_rating_provider_name))
return
configured_rating_provider = self.ratings[configured_rating_provider_name]
team_combinations = []
for combination in itertools.combinations(steam_ids, int(len(steam_ids) / 2)):
red_steam_ids = list(combination)
blue_steam_ids = [steam_id for steam_id in steam_ids if steam_id not in red_steam_ids]
if self.previous_teams is not None and (
sorted(red_steam_ids) == sorted(self.previous_teams[0]) or
sorted(red_steam_ids) == sorted(self.previous_teams[1])):
continue
if self.previous_teams is not None and (
sorted(blue_steam_ids) == sorted(self.previous_teams[0]) or
sorted(blue_steam_ids) == sorted(self.previous_teams[1])):
continue
red_avg = self.team_average(red_steam_ids, gt, rating_provider=configured_rating_provider)
blue_avg = self.team_average(blue_steam_ids, gt, rating_provider=configured_rating_provider)
diff = abs(red_avg - blue_avg)
team_combinations.append((red_steam_ids, blue_steam_ids, diff))
filtered_combinations = [(red_steam_ids, blue_steam_ids, diff) for (red_steam_ids, blue_steam_ids, diff) in
team_combinations if diff < self.minimum_suggestion_diff]
self.logger.debug("team_combinations: {}".format(team_combinations))
self.logger.debug("filtered_combinations: {}".format(filtered_combinations))
if len(filtered_combinations) > 0:
red_team, blue_team, diff = random.choice(filtered_combinations)
elif len(team_combinations) > 0:
red_team, blue_team, diff = min(team_combinations, key=itemgetter(2))
else:
red_team = [player.steam_id for player in teams["red"]]
blue_team = [player.steam_id for player in teams["blue"]]
return red_team, blue_team
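    # find_large_balanced_teams() below pairs players greedily: rated steam ids are
    # sorted by elo, the two strongest remaining players are popped, and the pair is
    # split so the running team averages stay as close as possible. Illustrative
    # made-up ratings: with elos [1200, 1300, 1500, 1600] the pass ends with
    # {1500, 1300} against {1600, 1200}, i.e. both teams averaging 1400.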
def find_large_balanced_teams(self):
teams = self.teams()
gametype = self.game.type_short
steam_ids = [player.steam_id for player in teams["red"] + teams["blue"]]
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name not in self.ratings:
self.logger.debug("Balancing aborted. No ratings found for {}.".format(configured_rating_provider_name))
return [], []
configured_rating_provider = self.ratings[configured_rating_provider_name]
rated_steam_ids = [steam_id for steam_id in steam_ids
if steam_id in configured_rating_provider.rated_steam_ids()]
rated_steam_ids = [steam_id for steam_id in rated_steam_ids if
gametype in configured_rating_provider.rated_gametypes_for(steam_id)]
rated_steam_ids = [steam_id for steam_id in rated_steam_ids if
configured_rating_provider[steam_id][gametype]["games"] > 0]
rated_steam_ids.sort(key=lambda steam_id: configured_rating_provider[steam_id][gametype]["elo"])
if len(rated_steam_ids) % 2 == 1:
rated_steam_ids.remove(rated_steam_ids[0])
red_steam_ids = []
blue_steam_ids = []
while len(rated_steam_ids) > 0:
player1 = rated_steam_ids.pop()
player2 = rated_steam_ids.pop()
option1_red_average = self.team_average(red_steam_ids + [player1], gametype,
rating_provider=configured_rating_provider)
option1_blue_average = self.team_average(blue_steam_ids + [player2], gametype,
rating_provider=configured_rating_provider)
option1_diff = abs(option1_red_average - option1_blue_average)
option2_red_average = self.team_average(red_steam_ids + [player2], gametype,
rating_provider=configured_rating_provider)
option2_blue_average = self.team_average(blue_steam_ids + [player1], gametype,
rating_provider=configured_rating_provider)
option2_diff = abs(option2_red_average - option2_blue_average)
if option1_diff < option2_diff:
red_steam_ids.append(player1)
blue_steam_ids.append(player2)
else:
red_steam_ids.append(player2)
blue_steam_ids.append(player1)
return red_steam_ids, blue_steam_ids
def report_teams(self, red_team, blue_team, channel):
gt = self.game.type_short
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name not in self.ratings:
self.logger.debug("No ratings for configured rating provider {} found. Abandoning."
.format(configured_rating_provider_name))
return
configured_rating_provider = self.ratings[configured_rating_provider_name]
avg_red = self.team_average(red_team, gt, rating_provider=configured_rating_provider)
avg_blue = self.team_average(blue_team, gt, rating_provider=configured_rating_provider)
avg_diff = avg_red - avg_blue
stddev_red = self.team_stddev(red_team, gt, mu=avg_red, rating_provider=configured_rating_provider)
stddev_blue = self.team_stddev(blue_team, gt, mu=avg_blue, rating_provider=configured_rating_provider)
if configured_rating_provider_name.endswith(TRUSKILLS.name):
if avg_diff >= 0.005:
channel.reply(
"{} ratings: ^1{:.02f} (deviation: {:.02f}) "
"^7vs ^4{:.02f} (deviation: {:.02f})^7 - DIFFERENCE: ^1{:.02f}"
.format(configured_rating_provider_name, avg_red, stddev_red, avg_blue, stddev_blue, abs(avg_diff)))
return
if avg_diff <= -0.005:
channel.reply(
"{} ratings: ^1{:.02f} (deviation: {:.02f}) "
"^7vs ^4{:.02f} (deviation: {:.02f})^7 - DIFFERENCE: ^4{:.02f}"
.format(configured_rating_provider_name, avg_red, stddev_red, avg_blue, stddev_blue, abs(avg_diff)))
return
channel.reply(
"{} ratings: ^1{:.02f} (deviation: {:.02f}) ^7vs ^4{:.02f} (deviation: {:.02f})^7 - Holy shit!"
.format(configured_rating_provider_name, avg_red, stddev_red, avg_blue, stddev_blue))
return
if int(avg_diff) > 0:
channel.reply("{} ratings: ^1{:.0f} (deviation: {:.0f}) "
"^7vs ^4{:.0f} (deviation: {:.0f})^7 - DIFFERENCE: ^1{:.0f}"
.format(configured_rating_provider_name, avg_red, stddev_red,
avg_blue, stddev_blue, abs(avg_diff)))
return
if int(avg_diff) < 0:
channel.reply("{} ratings: ^1{:.0f} (deviation: {:.0f}) "
"^7vs ^4{:.0f} (deviation: {:.0f})^7 - DIFFERENCE: ^4{:.0f}"
.format(configured_rating_provider_name, avg_red, stddev_red,
avg_blue, stddev_blue, abs(avg_diff)))
return
channel.reply(
"{} ratings: ^1{:.0f} (deviation: {:.0f}) ^7vs ^4{:.0f} (deviation: {:.0f})^7 - Holy shit!"
.format(configured_rating_provider_name, avg_red, stddev_red, avg_blue, stddev_blue))
def configured_rating_provider_name(self):
if self.game is not None and self.game.map is not None:
if self.rating_system == "mapbased-truskills":
rating_provider_name = "{} {}".format(self.game.map.lower(), TRUSKILLS.name)
return rating_provider_name
if self.rating_system.endswith("truskills"):
return TRUSKILLS.name
if self.rating_system == "a-elo":
return A_ELO.name
if self.rating_system == "b-elo":
return B_ELO.name
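    # team_average() below is the arithmetic mean of the players' "elo" values for
    # the given gametype and returns 0 as soon as any steam id has no rating from
    # the provider. Made-up example: ratings of 1400 and 1700 average to 1550.0.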
def team_average(self, steam_ids, gametype, rating_provider=None):
if not steam_ids or len(steam_ids) == 0:
return 0
configured_rating_provider = rating_provider
if configured_rating_provider is None:
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name not in self.ratings:
return 0
configured_rating_provider = self.ratings[configured_rating_provider_name]
for steam_id in steam_ids:
if steam_id not in configured_rating_provider.rated_steam_ids():
return 0
return sum([configured_rating_provider[steam_id][gametype]["elo"] for steam_id in steam_ids]) / len(
steam_ids)
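    # team_stddev() below is the population standard deviation around the supplied
    # mean mu: sqrt(sum((elo_i - mu)^2) / n). With the made-up ratings 1400 and
    # 1700 and mu=1550 that is sqrt((150^2 + 150^2) / 2) = 150.0.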
def team_stddev(self, steam_ids, gametype, mu=None, rating_provider=None):
if not steam_ids or len(steam_ids) == 0:
return 0
configured_rating_provider = rating_provider
if configured_rating_provider is None:
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name not in self.ratings:
return 0
configured_rating_provider = self.ratings[configured_rating_provider_name]
for steam_id in steam_ids:
if steam_id not in configured_rating_provider.rated_steam_ids():
return 0
team_elos = [pow(configured_rating_provider[steam_id][gametype]["elo"] - mu, 2) for steam_id in steam_ids]
return math.sqrt(sum(team_elos) / len(steam_ids))
def cmd_teams(self, player, msg, channel):
gametype = self.game.type_short
if gametype not in SUPPORTED_GAMETYPES:
player.tell("This game mode is not supported by the balance plugin.")
return minqlx.RET_STOP_ALL
teams = self.teams()
if len(teams["red"]) != len(teams["blue"]):
player.tell("Both teams should have the same number of players.")
return minqlx.RET_STOP_ALL
self.report_teams([player.steam_id for player in teams["red"]],
[player.steam_id for player in teams["blue"]],
channel)
if len(teams["red"] + teams["blue"]) == 0:
channel.reply("No players active currently")
return minqlx.RET_STOP_ALL
if len(teams["red"] + teams["blue"]) == 4:
i = random.randint(0, 99)
if not i:
channel.reply("Teens look ^6good!")
else:
channel.reply("Teams look good!")
self.switch_suggestion = None
return minqlx.RET_STOP_ALL
self.collect_suggestions(teams, gametype, channel)
@minqlx.thread
def collect_suggestions(self, teams, gametype, channel):
possible_switches = self.filtered_suggestions(teams, gametype)
if self.unique_player_switches and len(self.switched_players) > 0:
possible_switches = list(filter(lambda suggestion:
suggestion.red_player.steam_id not in self.switched_players
and suggestion.blue_player.steam_id not in self.switched_players,
possible_switches))
self.handle_suggestions_collected(possible_switches, channel)
def filtered_suggestions(self, teams, gametype):
player_steam_ids = [player.steam_id for player in teams["red"] + teams["blue"]]
configured_rating_provider_name = self.configured_rating_provider_name()
configured_rating_provider = self.ratings[configured_rating_provider_name]
minimum_suggestion_diff, minimum_suggestion_stddev_diff = \
self.minimum_suggestion_parameters(gametype, player_steam_ids)
avg_red = self.team_average([player.steam_id for player in teams["red"]], gametype,
rating_provider=configured_rating_provider)
avg_blue = self.team_average([player.steam_id for player in teams["blue"]], gametype,
rating_provider=configured_rating_provider)
avg_diff = abs(avg_red - avg_blue)
possible_switches = self.possible_switches(teams, gametype)
if avg_diff <= minimum_suggestion_diff:
stddev_red = self.team_stddev([player.steam_id for player in teams["red"]], gametype, mu=avg_red,
rating_provider=configured_rating_provider)
stddev_blue = self.team_stddev([player.steam_id for player in teams["blue"]], gametype, mu=avg_blue,
rating_provider=configured_rating_provider)
stddev_diff = abs(stddev_red - stddev_blue)
return list(filter(lambda suggestion:
stddev_diff - abs(suggestion.stddev_diff) >= minimum_suggestion_stddev_diff and
abs(suggestion.stddev_diff) <= minimum_suggestion_stddev_diff and
abs(suggestion.avg_diff) <= minimum_suggestion_diff,
possible_switches))
return list(filter(
lambda suggestion: avg_diff > abs(suggestion.avg_diff) and
avg_diff - abs(suggestion.avg_diff) >= minimum_suggestion_diff,
possible_switches))
def minimum_suggestion_parameters(self, gametype, steam_ids):
return self.minimum_suggestion_diff, self.minimum_suggestion_stddev_diff
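    # possible_switches() below tries every red/blue player pair: for each pair it
    # recomputes both team averages as if the two players swapped sides and records
    # a Suggestion whenever the post-swap difference (red minus blue) does not
    # exceed minimum_suggestion_diff. That costs two team_average() calls per pair,
    # plus stddev checks for the pairs that qualify.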
def possible_switches(self, teams, gametype):
player_steam_ids = [player.steam_id for player in teams["red"] + teams["blue"]]
configured_rating_provider_name = self.configured_rating_provider_name()
configured_rating_provider = self.ratings[configured_rating_provider_name]
minimum_suggestion_diff, minimum_suggestion_stddev_diff = \
self.minimum_suggestion_parameters(gametype, player_steam_ids)
switches = []
for red_p in teams["red"]:
for blue_p in teams["blue"]:
r = [player.steam_id for player in teams["red"]
if player.steam_id != red_p.steam_id] + [blue_p.steam_id]
b = [player.steam_id for player in teams["blue"]
if player.steam_id != blue_p.steam_id] + [red_p.steam_id]
avg_red = self.team_average(r, gametype, rating_provider=configured_rating_provider)
avg_blue = self.team_average(b, gametype, rating_provider=configured_rating_provider)
diff = avg_red - avg_blue
if diff <= minimum_suggestion_diff:
stddev_red = self.team_stddev(r, gametype, mu=avg_red, rating_provider=configured_rating_provider)
stddev_blue = self.team_stddev(b, gametype, mu=avg_blue, rating_provider=configured_rating_provider)
stddev_diff = stddev_red - stddev_blue
suggestion = Suggestion(red_p, blue_p, diff, stddev_diff)
switches.append(suggestion)
return switches
def handle_suggestions_collected(self, possible_switches, channel):
rating_strategy = self.rating_strategy(self.get_cvar("qlx_balancetwo_ratingStrategy", str))
switch_suggestion_queue = SuggestionQueue(possible_switches, rating_strategy)
if switch_suggestion_queue and len(switch_suggestion_queue) > 0:
switch = switch_suggestion_queue.best_suggestion()
channel.reply(switch.announcement())
if not self.switch_suggestion or switch != self.switch_suggestion:
self.switch_suggestion = switch
else:
i = random.randint(0, 99)
if not i:
channel.reply("Teens look ^6good!")
else:
channel.reply("Teams look good!")
self.switch_suggestion = None
return True
def rating_strategy(self, strategy):
return DiffSuggestionRatingStrategy()
def cmd_do(self, player, msg, channel):
if self.auto_switch:
return
if not self.switch_suggestion:
return
self.switch_suggestion.execute()
def cmd_dont(self, player, msg, channel):
if not self.auto_switch:
return
if not self.switch_suggestion:
return
self.msg("An admin prevented the switch! The switch will be terminated.")
self.switch_suggestion = None
def cmd_agree(self, player, msg, channel):
if self.auto_switch:
return
if not self.switch_suggestion:
return
if self.switch_suggestion.all_agreed():
return
self.switch_suggestion.agree(player)
if not self.switch_suggestion.all_agreed():
return
# If the game's in progress and we're not in the round countdown, wait for next round.
if self.game.state == "in_progress" and not self.in_countdown:
self.msg("The switch will be executed at the start of next round.")
return
# Otherwise, switch right away.
self.execute_suggestion()
def execute_suggestion(self):
try:
self.switch_suggestion.execute()
except minqlx.NonexistentPlayerError:
self.switch_suggestion = None
return
except PlayerMovedToSpecError:
self.switch_suggestion = None
return
self.switched_players += self.switch_suggestion.affected_steam_ids()
self.switch_suggestion = None
def cmd_veto(self, player, msg, channel):
if not self.auto_switch:
return
if not self.switch_suggestion:
return
self.switch_suggestion.agree(player)
if not self.switch_suggestion.all_agreed():
return
self.msg("Both players vetoed! The switch will be terminated.")
self.switch_suggestion = None
def cmd_nokick(self, player, msg, channel):
def dontkick(_steam_id):
if _steam_id not in self.kickthreads:
return
kickthread = self.kickthreads[_steam_id]
_resolved_player = self.player(_steam_id)
if _resolved_player is None:
return
kickthread.stop()
del self.kickthreads[_steam_id]
_resolved_player.unmute()
channel.reply("^7An admin has prevented {}^7 from being kicked.".format(_resolved_player.name))
if self.kickthreads is None or len(self.kickthreads.keys()) == 0:
player.tell("^6Psst^7: There are no people being kicked right now.")
return minqlx.RET_STOP_ALL
if len(self.kickthreads.keys()) == 1:
dontkick(list(self.kickthreads.keys())[0])
return
_scheduled_players = []
for steam_id in self.kickthreads.keys():
if not self.kickthreads[steam_id].is_alive():
continue
_player = self.player(steam_id)
if _player is None:
continue
_scheduled_players.append(_player)
_names = [p.name for p in _scheduled_players]
if len(msg) < 2:
player.tell("^6Psst^7: did you mean ^6{}^7?".format("^7 or ^6".join(_names)))
return minqlx.RET_STOP_ALL
matched_players = [_player for _player in _scheduled_players if msg[1] in _player.name]
if len(matched_players) == 0:
player.tell("^6Psst^7: no players matched '^6{}^7'?".format(msg[1]))
return minqlx.RET_STOP_ALL
if len(matched_players) > 1:
_matched_names = [_player.name for _player in matched_players]
player.tell("^6Psst^7: did you mean ^6{}^7?".format("^7 or ^6".join(_matched_names)))
return minqlx.RET_STOP_ALL
dontkick(matched_players[0].steam_id)
def handle_map_change(self, mapname, factory):
@minqlx.delay(3)
def fetch_ratings_from_newmap(_mapname):
steam_ids = [player.steam_id for player in self.players()]
self.fetch_mapbased_ratings(steam_ids, mapname=_mapname)
self.switched_players = []
self.informed_players = []
self.previous_ratings = self.ratings
self.ratings = {}
self.fetch_and_diff_ratings()
fetch_ratings_from_newmap(mapname.lower())
self.clean_up_kickthreads()
@minqlx.thread
def clean_up_kickthreads(self):
dead_threads = []
for steam_id in self.kickthreads.keys():
thread = self.kickthreads[steam_id]
if not thread.is_alive():
dead_threads.append(steam_id)
for dead_thread in dead_threads:
del self.kickthreads[dead_thread]
@minqlx.thread
def fetch_and_diff_ratings(self):
for rating_provider in [TRUSKILLS, A_ELO, B_ELO]:
if rating_provider.name in self.previous_ratings:
rating_results = \
rating_provider.fetch_elos(self.previous_ratings[rating_provider.name].rated_steam_ids())
if rating_results is None:
continue
self.append_ratings(rating_provider.name, rating_results)
self.rating_diffs[rating_provider.name] = \
RatingProvider.from_json(rating_results) - self.previous_ratings[rating_provider.name]
if self.previous_map is None:
return
rating_provider_name = "{} {}".format(self.previous_map, TRUSKILLS.name)
if rating_provider_name not in self.previous_ratings:
return
rating_results = TRUSKILLS.fetch_elos(self.previous_ratings[rating_provider_name].rated_steam_ids(),
headers={"X-QuakeLive-Map": self.previous_map})
if rating_results is None:
return
self.append_ratings(rating_provider_name, rating_results)
self.rating_diffs[rating_provider_name] = \
RatingProvider.from_json(rating_results) - self.previous_ratings[rating_provider_name]
def handle_player_connect(self, player):
@minqlx.thread
def fetch_player_elos(_player):
self.fetch_ratings([_player.steam_id])
self.schedule_kick_for_players_outside_rating_limits([_player.steam_id])
self.record_join_times(player)
fetch_player_elos(player)
def record_join_times(self, player):
if player.steam_id in self.jointimes:
if (time.time() - self.jointimes[player.steam_id]) < 5:
return
self.jointimes[player.steam_id] = time.time()
def schedule_kick_for_players_outside_rating_limits(self, steam_ids):
if not self.ratingLimit_kick:
return
for steam_id in steam_ids:
if not self.is_player_within_configured_rating_limit(steam_id):
if steam_id not in self.kickthreads or not self.kickthreads[steam_id].is_alive():
configured_rating_provider_name = self.configured_rating_provider_name()
configured_rating_provider = self.ratings[configured_rating_provider_name]
if steam_id not in configured_rating_provider:
continue
gametype = self.game.type_short
player_ratings = configured_rating_provider.rating_for(steam_id, gametype)
if self.ratingLimit_min <= player_ratings:
highlow = "high"
else:
highlow = "low"
t = KickThread(steam_id, player_ratings, highlow)
t.start()
self.kickthreads[steam_id] = t
def handle_player_disconnect(self, player, reason):
if player.steam_id in self.jointimes:
del self.jointimes[player.steam_id]
def handle_team_switch_attempt(self, player, old, new):
self.logger.debug("{} switched from {} to {}".format(player.clean_name, old, new))
if not self.game:
return minqlx.RET_NONE
gametype = self.game.type_short
if gametype not in SUPPORTED_GAMETYPES:
return minqlx.RET_NONE
if new in ["red", "blue", "any", "free"]:
rating_check = self.check_rating_limit(player)
if rating_check is not None:
return rating_check
if self.game.state != "in_progress":
return minqlx.RET_NONE
return self.try_auto_rebalance(player, old, new)
def check_rating_limit(self, player):
if self.is_player_within_configured_rating_limit(player.steam_id):
return
if self.ratingLimit_kick:
kickmsg = "so you'll be kicked shortly..."
else:
kickmsg = "but you are free to keep watching."
player.tell("^6You do not meet the skill rating requirements to play on this server, {}".format(kickmsg))
player.center_print(
"^6You do not meet the skill rating requirements to play on this server, {}".format(kickmsg))
return minqlx.RET_STOP_ALL
def is_player_within_configured_rating_limit(self, steam_id):
configured_rating_provider_name = self.configured_rating_provider_name()
if configured_rating_provider_name.endswith("truskills"):
configured_rating_provider_name = TRUSKILLS.name
if configured_rating_provider_name not in self.ratings:
self.logger.debug("Ratings not found. Allowing player to join: {}.".format(configured_rating_provider_name))
return True
configured_rating_provider = self.ratings[configured_rating_provider_name]
if steam_id not in configured_rating_provider:
return False
gametype = self.game.type_short
player_ratings = configured_rating_provider.rating_for(steam_id, gametype)
if self.ratingLimit_min <= player_ratings <= self.ratingLimit_max:
return True
player_games = configured_rating_provider.games_for(steam_id, gametype)
return player_games < self.ratingLimit_minGames
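    # try_auto_rebalance() below roughly works like this: while teams are even it
    # only remembers the joining player (last_new_player_id); once a second player
    # joins an uneven game it compares "newcomer fills the smaller team" against
    # "previous newcomer and current newcomer swap sides" via
    # calculate_player_average_difference() and applies whichever arrangement keeps
    # the team averages closer.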
def try_auto_rebalance(self, player, old, new):
if not self.auto_rebalance:
return minqlx.RET_NONE
if old not in ["spectator", "free"] or new not in ['red', 'blue', 'any']:
return minqlx.RET_NONE
teams = self.teams()
if len(teams["red"]) == len(teams["blue"]):
self.last_new_player_id = player.steam_id
return minqlx.RET_NONE
if not self.last_new_player_id:
return minqlx.RET_NONE
last_new_player = self.player(self.last_new_player_id)
if not last_new_player:
self.last_new_player_id = None
return minqlx.RET_NONE
gametype = self.game.type_short
other_than_last_players_team = self.other_team(last_new_player.team)
new_player_team = teams[other_than_last_players_team].copy() + [player]
proposed_diff = self.calculate_player_average_difference(gametype,
teams[last_new_player.team].copy(),
new_player_team)
alternative_team_a = [player for player in teams[last_new_player.team] if player != last_new_player] + \
[player]
alternative_team_b = teams[other_than_last_players_team].copy() + [last_new_player]
alternative_diff = self.calculate_player_average_difference(gametype,
alternative_team_a,
alternative_team_b)
self.last_new_player_id = None
if proposed_diff > alternative_diff:
last_new_player.tell("{}, you have been moved to {} to maintain team balance."
.format(last_new_player.clean_name, self.format_team(other_than_last_players_team)))
last_new_player.put(other_than_last_players_team)
if new in [last_new_player.team]:
return minqlx.RET_NONE
if new not in ["any"]:
player.tell("{}, you have been moved to {} to maintain team balance."
.format(player.clean_name, self.format_team(last_new_player.team)))
player.put(last_new_player.team)
return minqlx.RET_STOP_ALL
if new not in ["any", other_than_last_players_team]:
player.tell("{}, you have been moved to {} to maintain team balance."
.format(player.clean_name, self.format_team(other_than_last_players_team)))
player.put(other_than_last_players_team)
return minqlx.RET_STOP_ALL
return minqlx.RET_NONE
def other_team(self, team):
if team == "red":
return "blue"
return "red"
def calculate_player_average_difference(self, gametype, team1, team2):
team1_steam_ids = [player.steam_id for player in team1]
team2_steam_ids = [player.steam_id for player in team2]
configured_rating_provider_name = self.configured_rating_provider_name()
configured_rating_provider = self.ratings[configured_rating_provider_name]
        team1_avg = self.team_average(team1_steam_ids, gametype, rating_provider=configured_rating_provider)
        team2_avg = self.team_average(team2_steam_ids, gametype, rating_provider=configured_rating_provider)
return abs(team1_avg - team2_avg)
def format_team(self, team):
if team == "red":
return "^1red^7"
if team == "blue":
return "^4blue^7"
return "^3{}^7".format(team)
def handle_team_switch(self, player, old, new):
if self.last_new_player_id == player.steam_id and new in ["free", "spectator"]:
self.last_new_player_id = None
if new not in ["red", "blue", "any"]:
return
self.inform_about_rating_changes(player)
def inform_about_rating_changes(self, player):
if player.steam_id in self.informed_players:
return
self.informed_players.append(player.steam_id)
if not self.wants_to_be_informed(player.steam_id):
return
changed_ratings = []
previous_truskills = "{} {}".format(self.previous_map, TRUSKILLS.name)
for rating_provider_name in [previous_truskills, TRUSKILLS.name, A_ELO.name, B_ELO.name]:
formatted_diffs = self.format_rating_diffs_for_rating_provider_name_and_player(
rating_provider_name, player.steam_id)
if formatted_diffs is not None:
changed_ratings.append(formatted_diffs)
if len(changed_ratings) == 0:
return
player.tell("Your ratings changed since the last map: {}".format(", ".join(changed_ratings)))
def format_rating_diffs_for_rating_provider_name_and_player(self, rating_provider_name, steam_id):
if rating_provider_name not in self.rating_diffs or steam_id not in self.rating_diffs[rating_provider_name] or \
self.previous_gametype not in self.rating_diffs[rating_provider_name][steam_id] or \
rating_provider_name not in self.ratings or steam_id not in self.ratings[rating_provider_name]:
return None
current_rating = self.ratings[rating_provider_name][steam_id][self.previous_gametype]["elo"]
rating_diff = self.rating_diffs[rating_provider_name][steam_id][self.previous_gametype]
if rating_provider_name.endswith(TRUSKILLS.name):
if rating_diff < 0.0:
return "^3{}^7: ^4{:.02f}^7 (^1{:+.02f}^7)".format(rating_provider_name, current_rating, rating_diff)
elif rating_diff > 0.0:
return "^3{}^7: ^4{:.02f}^7 (^2{:+.02f}^7)".format(rating_provider_name, current_rating, rating_diff)
return None
if rating_diff < 0:
return "^3{}^7: ^4{:d}^7 (^1{:+d}^7)".format(rating_provider_name, current_rating, rating_diff)
elif rating_diff > 0:
return "^3{}^7: ^4{:d}^7 (^2{:+d}^7)".format(rating_provider_name, current_rating, rating_diff)
return None
@minqlx.delay(5)
def handle_game_countdown(self):
self.msg("^7Balancing on skill ratings...")
self.callback_balance(None, minqlx.CHAT_CHANNEL)
def handle_round_countdown(self, round_number):
@minqlx.next_frame
def execute_switch_suggestion():
self.execute_suggestion()
if (not self.auto_switch and self.switch_suggestion is not None and self.switch_suggestion.all_agreed()) or \
(self.auto_switch and self.switch_suggestion is not None and
not self.switch_suggestion.all_agreed()):
execute_switch_suggestion()
self.in_countdown = True
self.even_up_teams()
self.balance_before_start(round_number)
def even_up_teams(self):
teams = self.teams()
player_count = len(teams["red"] + teams["blue"])
if player_count == 1:
return
team_diff = len(teams["red"]) - len(teams["blue"])
if abs(team_diff) == 0:
return
even_to, even_from = ["blue", "red"] if team_diff > 0 else ["red", "blue"]
n = int(abs(team_diff) / 2)
last = self.identify_player_to_move()
if team_diff % 2 == 0:
amount_players_moved = last.name if n == 1 else "{} players".format(n)
self.msg(
"^6Uneven teams detected!^7 At round start i'll move {} to {}".format(amount_players_moved, even_to))
return
amount_players_moved = "lowest player" if n == 1 else "{} lowest players".format(n)
message = " and move {} to {}".format(amount_players_moved, even_to) if n else ''
self.msg("^6Uneven teams detected!^7 Server will auto spec {}{}.".format(last.name, message))
def identify_player_to_move(self):
teams = self.teams()
# See which team is bigger than the other
if len(teams["blue"]) > len(teams["red"]):
bigger_team = teams["blue"].copy()
elif len(teams["red"]) > len(teams["blue"]):
bigger_team = teams["red"].copy()
else:
self.msg("Cannot pick last player since there are none.")
return
if (self.game.red_score + self.game.blue_score) >= 1:
self.msg("Picking someone to {} based on score".format(self.last_action))
lowest_score = bigger_team[0].score
lowest_players = [bigger_team[0]]
for p in bigger_team:
if lowest_score == 0 and p.score <= lowest_score:
lowest_players.append(p)
elif p.score < lowest_players[0].score:
lowest_score = max(p.score, 0)
lowest_players = [p]
elif p.score == lowest_players[0].score:
lowest_players.append(p)
if len(lowest_players) == 1:
lowest_player = lowest_players[0]
else:
lowest_players2 = [lowest_players[0]]
for player in lowest_players:
if player.stats.damage_dealt < lowest_players2[0].stats.damage_dealt:
lowest_players2 = [player]
elif player.stats.damage_dealt == lowest_players2[0].stats.damage_dealt:
lowest_players2.append(player)
if len(lowest_players2) == 1:
lowest_player = lowest_players2[0]
else:
lowest_player = max(lowest_players2, key=lambda e1: self.find_time(e1))
else:
self.msg("Picking someone to {} based on join times.".format(self.last_action))
lowest_player = max(bigger_team, key=lambda e1: self.find_time(e1))
self.msg("Picked {} from the {} team.".format(lowest_player.name, lowest_player.team))
return lowest_player
def handle_round_start(self, round_number):
self.last_new_player_id = None
self.in_countdown = False
self.balance_before_start(round_number, True)
@minqlx.thread
def balance_before_start(self, roundnumber, direct=False):
@minqlx.next_frame
def game_logic(func):
func()
@minqlx.next_frame
def slay_player(p):
p.health = 0
def exclude_player(p):
t = self.teams().copy()
if p in t['red']:
t['red'].remove(p)
if p in t['blue']:
t['blue'].remove(p)
return t
countdown = int(self.get_cvar('g_roundWarmupDelay'))
if self.game.type_short == "ft":
countdown = int(self.get_cvar('g_freezeRoundDelay'))
if not direct:
time.sleep(max(countdown / 1000 - 0.8, 0))
teams = self.teams()
player_count = len(teams["red"] + teams["blue"])
if player_count == 1 or self.game.state not in ["in_progress"]:
return
if self.game.type_short == "ca":
if self.game.roundlimit in [self.game.blue_score, self.game.red_score]:
return
if self.game.type_short == "tdm":
if self.game.fraglimit in [self.game.blue_score, self.game.red_score]:
return
if self.game.type_short == "ctf":
if self.game.capturelimit in [self.game.blue_score, self.game.red_score]:
return
team_diff = len(teams["red"]) - len(teams["blue"])
while abs(team_diff) >= 1:
last = self.identify_player_to_move()
if not last:
self.msg(
"Error: Trying to balance before round {} start. Red({}) - Blue({}) players"
.format(roundnumber, len(teams['red']), len(teams['blue'])))
return
if team_diff % 2 == 0:
even_to, even_from = ["blue", "red"] if team_diff > 0 else ["red", "blue"]
game_logic(lambda: last.put(even_to))
self.msg("^6Uneven teams action^7: Moved {} from {} to {}".format(last.name, even_from, even_to))
else:
if self.prevent or self.last_action == "ignore":
excluded_teams = exclude_player(last)
self.msg("^6Uneven teams^7: {} will not be moved to spec".format(last.name))
elif self.last_action == "slay":
if "anti_rape" in minqlx.Plugin._loaded_plugins:
game_logic(lambda: last.put("spectator"))
self.msg("^6Uneven teams action^7: {} was moved to spec to even teams!".format(last.name))
self.msg("Not slayed because anti_rape plugin is loaded.")
else:
slay_player(last)
self.msg("{} ^7has been ^1slain ^7to even the teams!")
else:
self.msg("^6Uneven teams action^7: {} was moved to spec to even teams!".format(last.name))
game_logic(lambda: last.put("spectator"))
time.sleep(0.2)
def handle_game_end(self, data):
if not self.game or bool(data["ABORTED"]):
return
teams = self.teams()
self.previous_teams = [player.steam_id for player in teams["red"]], \
[player.steam_id for player in teams["blue"]]
self.previous_map = data["MAP"].lower()
self.previous_gametype = data["GAME_TYPE"].lower()
# self.record_team_stats(self.previous_gametype)
if len(teams["red"] + teams["blue"]) == 4 and self.twovstwo_iter is None:
steam_ids = [player.steam_id for player in teams["red"] + teams["blue"]]
self.twovstwo_steam_ids = steam_ids
self.twovstwo_combinations = [(steam_ids[0], steam_ids[1]),
(steam_ids[0], steam_ids[2]),
(steam_ids[0], steam_ids[3])]
self.twovstwo_iter = random_iterator(self.twovstwo_combinations)
next_twovstwo = sorted(list(next(self.twovstwo_iter)))
other_twovstwo = sorted([steam_id for steam_id in steam_ids if steam_id not in next_twovstwo])
red_steam_ids = sorted([player.steam_id for player in teams["red"]])
blue_steam_ids = sorted([player.steam_id for player in teams["blue"]])
while not (next_twovstwo == red_steam_ids or
next_twovstwo == blue_steam_ids or
other_twovstwo == red_steam_ids or
other_twovstwo == blue_steam_ids):
next_twovstwo = sorted(list(next(self.twovstwo_iter)))
other_twovstwo = sorted([steam_id for steam_id in steam_ids if steam_id not in next_twovstwo])
@minqlx.thread
def record_team_stats(self, gametype):
teams = self.teams()
if len(teams["red"] + teams["blue"]) == 2:
return
stats = [
self.game.map,
self.game.red_score,
self.game.blue_score,
self.team_stats(teams["red"], gametype),
self.team_stats(teams["blue"], gametype)
]
elostats_filename = os.path.join(self.get_cvar("fs_homepath"), "elostats.txt")
with open(elostats_filename, "a") as elostats_file:
elostats_file.write("{}\n".format(stats))
def team_stats(self, team, gametype):
returned = {}
for player in team:
a_elo = 0
if A_ELO.name in self.ratings and player.steam_id in self.ratings[A_ELO.name]:
a_elo = self.ratings[A_ELO.name][player.steam_id][gametype]["elo"]
b_elo = 0
if B_ELO.name in self.ratings and player.steam_id in self.ratings[B_ELO.name]:
b_elo = self.ratings[B_ELO.name][player.steam_id][gametype]["elo"]
truskill = 0
if TRUSKILLS.name in self.ratings and player.steam_id in self.ratings[TRUSKILLS.name]:
truskill = self.ratings[TRUSKILLS.name][player.steam_id][gametype]["elo"]
returned[player.steam_id] = [a_elo, b_elo, truskill]
return returned
FILTERED_OUT_GAMETYPE_RESPONSES = ["steamid"]
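# SkillRatingProvider below wraps a single HTTP elo endpoint: fetch_elos() joins
# the requested steam ids with "+" onto url_base + balance_api (for A_ELO that
# yields something like "http://qlstats.net/elo/<id1>+<id2>", ids elided here) and
# returns the parsed JSON, or None on a request error or non-200 response.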
class SkillRatingProvider:
def __init__(self, name, url_base, balance_api, timeout=7):
self.name = name
self.url_base = url_base
self.balance_api = balance_api
self.timeout = timeout
def fetch_elos(self, steam_ids, headers=None):
if len(steam_ids) == 0:
return None
request_url = self.url_base + "{}/{}".format(self.balance_api,
"+".join([str(steam_id) for steam_id in steam_ids]))
try:
result = requests_retry_session().get(request_url, headers=headers, timeout=self.timeout)
except requests.RequestException as exception:
minqlx.get_logger("balancetwo").debug("request exception: {}".format(exception))
return None
if result.status_code != requests.codes.ok:
return None
return result.json()
TRUSKILLS = SkillRatingProvider("Truskill", "http://stats.houseofquake.com/", "elo/map_based")
A_ELO = SkillRatingProvider("Elo", "http://qlstats.net/", "elo", timeout=15)
B_ELO = SkillRatingProvider("B-Elo", "http://qlstats.net/", "elo_b", timeout=15)
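# RatingProvider below layers one or more JSON responses whose shape, as implied
# by the accessors further down, looks roughly like
#   {"playerinfo": {"<steam_id>": {"ratings": {"ca": {"elo": ..., "games": ...}},
#                                  "privacy": "..."}}}
# Later responses in self.jsons take precedence, and subtracting two providers
# yields per-steam-id, per-gametype elo differences (rounded to two decimals).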
class RatingProvider:
def __init__(self, json):
self.jsons = [json]
def __iter__(self):
return iter(self.rated_steam_ids())
def __contains__(self, item):
if not isinstance(item, int) and not isinstance(item, str):
return False
steam_id = item
if isinstance(item, str):
try:
steam_id = int(item)
except ValueError:
return False
for json_rating in self.jsons:
if "playerinfo" not in json_rating:
continue
if str(steam_id) in json_rating["playerinfo"]:
return True
return False
def __getitem__(self, item):
if item not in self:
raise TypeError
steam_id = item
if isinstance(item, str):
try:
steam_id = int(item)
except ValueError:
raise TypeError
for json_rating in reversed(self.jsons):
if "playerinfo" not in json_rating:
continue
if str(steam_id) not in json_rating["playerinfo"]:
continue
return PlayerRating(json_rating["playerinfo"][str(steam_id)])
return None
def __sub__(self, other):
returned = {}
if not isinstance(other, RatingProvider):
raise TypeError("Can't subtract '{}' from a RatingProvider".format(type(other).__name__))
for steam_id in self:
if steam_id not in other:
returned[steam_id] = {gametype: self.gametype_data_for(steam_id, gametype)
for gametype in self.rated_gametypes_for(steam_id)}
continue
returned[steam_id] = {}
for gametype in self.rated_gametypes_for(steam_id):
if gametype not in other.rated_gametypes_for(steam_id):
returned[steam_id][gametype] = self.gametype_data_for(steam_id, gametype)
continue
gametype_diff = self.gametype_data_for(steam_id, gametype)["elo"] - \
other.gametype_data_for(steam_id, gametype)["elo"]
if gametype_diff == 0:
continue
returned[steam_id][gametype] = round(gametype_diff, 2)
return returned
@staticmethod
def from_json(json_response):
return RatingProvider(json_response)
def append_ratings(self, json_response):
self.jsons.append(json_response)
def player_data_for(self, steam_id):
return self[steam_id]
def gametype_data_for(self, steam_id, gametype):
if gametype not in self[steam_id]:
return None
return self[steam_id][gametype]
def rating_for(self, steam_id, gametype):
if gametype not in self[steam_id]:
return None
if "elo" not in self[steam_id][gametype]:
return None
return self[steam_id][gametype]["elo"]
def games_for(self, steam_id, gametype):
if gametype not in self[steam_id]:
return None
if "games" not in self[steam_id][gametype]:
return None
return self[steam_id][gametype]["games"]
def rated_gametypes_for(self, steam_id):
player_data = self[steam_id]
if player_data is None:
return []
return [gametype for gametype in player_data if gametype not in FILTERED_OUT_GAMETYPE_RESPONSES]
def privacy_for(self, steam_id):
player_data = self[steam_id]
if player_data is None:
return None
if "privacy" not in player_data:
return "private"
return player_data["privacy"]
def rated_steam_ids(self):
returned = []
for json_rating in self.jsons:
if "playerinfo" not in json_rating:
continue
returned = returned + [int(steam_id) for steam_id in json_rating["playerinfo"]]
return [steam_id for steam_id in set(returned)]
def format_elos(self, steam_id):
result = ""
for gametype in self.rated_gametypes_for(steam_id):
if self.games_for(steam_id, gametype) != 0:
result += "^2{0}^7: ^4{1}^7 ({2} games) ".format(gametype.upper(),
self[steam_id][gametype]["elo"],
self[steam_id][gametype]["games"])
return result
def has_ratings_for_all(self, gametype, steam_ids):
for steam_id in steam_ids:
if steam_id not in self:
return False
if gametype not in self[steam_id]:
return False
if self[steam_id][gametype]["games"] == 0:
return False
return True
class PlayerRating:
def __init__(self, ratings, _time=-1, local=False):
self.ratings = ratings
self.time = _time
self.local = local
def __iter__(self):
return iter(self.ratings["ratings"])
def __contains__(self, item):
if not isinstance(item, str):
return False
return item in self.ratings["ratings"]
def __getitem__(self, item):
if item not in self:
raise KeyError
if not isinstance(item, str):
raise KeyError
returned = self.ratings["ratings"][item].copy()
returned["time"] = self.time
returned["local"] = self.local
return returned
def __getattr__(self, attr):
if attr not in ["privacy"]:
            raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__.__name__, attr))
return self.ratings["privacy"]
class SuggestionRatingStrategy:
@abstractmethod
def best_suggestion(self, suggestions):
pass
class DiffSuggestionRatingStrategy(SuggestionRatingStrategy):
def best_suggestion(self, suggestions):
return min(suggestions, key=lambda suggestion: abs(suggestion.avg_diff))
class SuggestionQueue:
def __init__(self, items=None, strategy=DiffSuggestionRatingStrategy()):
self.suggestions = items if items is not None else []
self.strategy = strategy
def __str__(self):
return "[{}]".format(", ".join([str(suggestion) for suggestion in self.suggestions]))
def __len__(self):
return len(self.suggestions)
def best_suggestion(self):
if len(self.suggestions) == 0:
return None
if len(self.suggestions) == 1:
return self.suggestions[0]
return self.strategy.best_suggestion(self.suggestions)
class Suggestion:
def __init__(self, red_player, blue_player, avg_diff, stddev_diff=0):
self.red_player = red_player
self.blue_player = blue_player
self.avg_diff = avg_diff
self.stddev_diff = stddev_diff
self._agreed = dict()
self.auto_switch = Plugin.get_cvar("qlx_balancetwo_autoSwitch", bool)
def __eq__(self, other):
if not isinstance(other, Suggestion):
return False
return self.red_player == other.red_player and self.blue_player == other.blue_player and \
self.avg_diff == other.avg_diff and self.stddev_diff == other.stddev_diff
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
red_player = "({}, score: {}, dmg: {}, time: {})".format(self.red_player.clean_name,
self.red_player.score,
self.red_player.stats.damage_dealt,
self.red_player.stats.time)
blue_player = "({}, score: {}, dmg: {}, time: {})".format(self.blue_player.clean_name,
self.blue_player.score,
self.blue_player.stats.damage_dealt,
self.blue_player.stats.time)
return "Switch {} with {}, resulting diff: {}" \
.format(red_player, blue_player, self.avg_diff, self.stddev_diff)
def announcement(self):
if not self.auto_switch:
return "SUGGESTION: switch ^6{}^7 with ^6{}^7. Mentioned players can type ^6!a^7 to agree." \
.format(self.red_player.clean_name, self.blue_player.clean_name)
return "NOTICE: Server will switch ^6{}^7 with ^6{}^7 at start of next round. " \
"Both mentioned players need to type ^6!v^7 to veto the switch." \
.format(self.red_player.clean_name, self.blue_player.clean_name)
def agree(self, player):
self._agreed[player.steam_id] = True
def agreed(self, player):
return self._agreed.get(player.steam_id, False)
def all_agreed(self):
return self.agreed(self.red_player) and self.agreed(self.blue_player)
def affected_steam_ids(self):
return [self.red_player.steam_id, self.blue_player.steam_id]
def validate_players(self):
self.red_player.update()
self.blue_player.update()
def execute(self):
self.red_player.update()
self.blue_player.update()
if self.red_player.team == "spectator":
raise PlayerMovedToSpecError(self.red_player)
if self.blue_player.team == "spectator":
raise PlayerMovedToSpecError(self.blue_player)
Plugin.switch(self.red_player, self.blue_player)
@property
def max_score(self):
return max(self.red_player.score, self.blue_player.score)
@property
def score_sum(self):
return self.red_player.score + self.blue_player.score
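# KickThread below escalates in the background once started: after roughly 5
# seconds it mutes the player, after another 5 seconds it posts the warning
# message, and about 30 seconds later it kicks, unless stop() has set self.go to
# False in the meantime (as the !nokick handler above does).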
class KickThread(threading.Thread):
def __init__(self, steam_id, rating, highlow):
threading.Thread.__init__(self)
self.steam_id = steam_id
self.rating = rating
self.highlow = highlow
self.go = True
def try_msg(self):
time.sleep(5)
player = Plugin.player(self.steam_id)
if not player:
return
if not self.go:
return
kickmsg = "so you'll be ^6kicked ^7shortly..."
Plugin.msg("^7Sorry, {} your rating ({}) is too {}, {}".format(player.name, self.rating, self.highlow, kickmsg))
def try_mute(self):
@minqlx.next_frame
def execute():
try:
player.mute()
except ValueError:
pass
time.sleep(5)
player = Plugin.player(self.steam_id)
if not player:
return
if not self.go:
return
execute()
def try_kick(self):
@minqlx.next_frame
def execute():
try:
player.kick("^1GOT KICKED!^7 Rating ({}) was too {} for this server.".format(self.rating, self.highlow))
except ValueError:
pass
time.sleep(30)
player = Plugin.player(self.steam_id)
if not player:
return
if not self.go:
return
execute()
def run(self):
self.try_mute()
self.try_msg()
self.try_kick()
def stop(self):
self.go = False
class PlayerMovedToSpecError(Exception):
def __init__(self, player):
self.player = player
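# random_iterator below loops over a sequence forever in random order and
# reshuffles whenever a pass is exhausted. Hedged usage sketch:
#   pairs = random_iterator([(1, 2), (3, 4)])
#   next(pairs); next(pairs); next(pairs)  # the third call triggers a reshuffle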
class random_iterator:
def __init__(self, seq):
self.seq = seq
self.random_seq = random.sample(self.seq, len(self.seq))
self.iterator = iter(self.random_seq)
def __iter__(self):
return self
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
self.random_seq = random.sample(self.seq, len(self.seq))
self.iterator = iter(self.random_seq)
return next(self.iterator)
| [
[
[
182,
188
],
[
1160,
1166
],
[
7687,
7693
],
[
9256,
9262
],
[
17403,
17409
],
[
43938,
43944
],
[
53694,
53700
],
[
54045,
54051
],
[
65611,
65617
],
[
69610,
69616
],
[
74348,
74354
],
[
6396,
6402
],
[
8973,
8979
],
[
9007,
9013
],
[
9076,
9082
],
[
9304,
9310
],
[
9341,
9347
],
[
12491,
12497
],
[
12828,
12834
],
[
12858,
12864
],
[
12912,
12918
],
[
12943,
12949
],
[
12989,
12995
],
[
17226,
17232
],
[
17260,
17266
],
[
17330,
17336
],
[
17450,
17456
],
[
17487,
17493
],
[
27557,
27563
],
[
27937,
27943
],
[
28142,
28148
],
[
28283,
28289
],
[
43011,
43017
],
[
43210,
43216
],
[
43539,
43545
],
[
43852,
43858
],
[
50512,
50518
],
[
51934,
51940
],
[
52580,
52586
],
[
52836,
52842
],
[
53086,
53092
],
[
53214,
53220
],
[
55428,
55434
],
[
57376,
57382
],
[
57500,
57506
],
[
57768,
57774
],
[
58408,
58414
],
[
59548,
59554
],
[
59666,
59672
],
[
59837,
59843
],
[
59913,
59919
],
[
60087,
60093
],
[
61483,
61489
],
[
61780,
61786
],
[
62116,
62122
],
[
62152,
62158
],
[
65752,
65758
],
[
65835,
65841
],
[
69696,
69702
],
[
69773,
69779
],
[
71959,
71965
],
[
76462,
76468
],
[
87421,
87427
],
[
87769,
87775
]
],
[
[
208,
214
],
[
83919,
83925
],
[
86529,
86535
],
[
87095,
87101
],
[
87274,
87280
],
[
87601,
87607
],
[
88041,
88047
]
],
[
[
244,
249
],
[
2896,
2901
]
],
[
[
258,
260
],
[
74763,
74765
]
],
[
[
268,
272
],
[
42731,
42735
]
],
[
[
280,
284
],
[
30233,
30237
],
[
55796,
55800
],
[
55914,
55918
],
[
70268,
70272
],
[
72608,
72612
],
[
87064,
87068
],
[
87570,
87574
],
[
88009,
88013
]
],
[
[
292,
298
],
[
34685,
34691
],
[
43627,
43633
],
[
48934,
48940
],
[
88518,
88524
],
[
88785,
88791
]
],
[
[
306,
315
],
[
33222,
33231
]
],
[
[
323,
332
],
[
86805,
86814
],
[
86884,
86893
]
],
[
[
350,
364
],
[
82745,
82759
]
],
[
[
387,
397
],
[
34830,
34840
]
],
[
[
406,
414
],
[
804,
812
],
[
13858,
13866
],
[
14027,
14035
],
[
76410,
76418
],
[
76601,
76609
]
],
[
[
445,
456
],
[
1014,
1025
]
],
[
[
506,
511
],
[
835,
840
]
],
[
[
513,
524
],
[
9621,
9632
],
[
13108,
13119
],
[
13210,
13221
],
[
16869,
16880
],
[
16979,
16990
],
[
17767,
17778
]
],
[
[
548,
556
],
[
13338,
13346
],
[
13458,
13466
]
],
[
[
573,
592
],
[
27815,
27834
],
[
42889,
42908
],
[
57460,
57479
]
],
[
[
641,
663
],
[
13700,
13722
],
[
76314,
76336
]
],
[
[
1149,
1159
]
],
[
[
75727,
75758
],
[
80384,
80415
]
],
[
[
75781,
75800
],
[
76687,
76706
],
[
76778,
76797
],
[
76855,
76874
]
],
[
[
76675,
76684
],
[
7963,
7972
],
[
8385,
8394
],
[
8510,
8519
],
[
10602,
10611
],
[
10963,
10972
],
[
19334,
19343
],
[
19362,
19371
],
[
19498,
19507
],
[
19723,
19732
],
[
19739,
19748
],
[
38668,
38677
],
[
40872,
40881
],
[
41004,
41013
],
[
54129,
54138
],
[
54800,
54809
],
[
54923,
54932
],
[
58688,
58697
],
[
63738,
63747
],
[
63812,
63821
],
[
64962,
64971
],
[
75461,
75470
],
[
75528,
75537
],
[
75585,
75594
]
],
[
[
76770,
76775
],
[
7974,
7979
],
[
10681,
10686
],
[
13637,
13642
],
[
13828,
13833
],
[
20815,
20820
],
[
20925,
20930
],
[
21179,
21184
],
[
41081,
41086
],
[
54140,
54145
],
[
63828,
63833
],
[
75066,
75071
],
[
75129,
75134
],
[
75179,
75184
]
],
[
[
76847,
76852
],
[
7981,
7986
],
[
10756,
10761
],
[
20846,
20851
],
[
20990,
20995
],
[
41154,
41159
],
[
54147,
54152
],
[
63840,
63845
],
[
75262,
75267
],
[
75325,
75330
],
[
75375,
75380
]
],
[
[
76936,
76950
],
[
8899,
8913
],
[
10577,
10591
],
[
10656,
10670
],
[
10731,
10745
],
[
10921,
10935
],
[
54589,
54603
],
[
55286,
55300
],
[
78265,
78279
],
[
79345,
79359
]
],
[
[
81805,
81817
],
[
78102,
78114
]
],
[
[
82714,
82738
],
[
82854,
82878
]
],
[
[
82825,
82853
],
[
83075,
83103
],
[
49218,
49246
]
],
[
[
83014,
83029
],
[
48538,
48553
]
],
[
[
83628,
83638
],
[
48212,
48222
],
[
84033,
84043
]
],
[
[
86794,
86804
],
[
56901,
56911
]
],
[
[
88319,
88341
],
[
50619,
50641
],
[
86371,
86393
],
[
86479,
86501
]
],
[
[
88423,
88438
],
[
31029,
31044
],
[
73546,
73561
]
]
] |
"""
Module imports for templates.python.business_logic.my_project.my_app.migrations
This file is automatically generated by ./scripts/empty_pyinit.sh
DO NOT EDIT IT MANUALLY
"""
| [] |
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
# The simplest Django settings possible
# support for Django < 1.5
DATABASE_ENGINE = 'django.db.backends.sqlite3'
DATABASE_NAME = ':memory:'
# support for Django >= 1.5
SECRET_KEY = 'unittest'
DATABASES = {
'default': {
'ENGINE': DATABASE_ENGINE,
'NAME': DATABASE_NAME,
}
}
INSTALLED_APPS = ('django_app.adapters',)
MIDDLEWARE_CLASSES = ()
| [
[
[
135,
150
],
[
312,
327
]
],
[
[
182,
195
],
[
345,
358
]
],
[
[
238,
248
]
],
[
[
263,
272
]
],
[
[
370,
384
]
],
[
[
412,
430
]
]
] |
# Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import copy
import math
from typing import Iterable, List, Optional, Tuple
from ezdxf import colors
from ezdxf.entities import MText
from ezdxf.lldxf import const
from ezdxf.math import Matrix44, Vec3
from ezdxf.render.abstract_mtext_renderer import AbstractMTextRenderer
from ezdxf.tools import text_layout as tl, fonts
from ezdxf.tools.text import MTextContext
from .backend import BackendInterface
from .properties import Properties, RenderContext, rgb_to_hex
from .type_hints import Color
__all__ = ["complex_mtext_renderer"]
def corner_vertices(
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> Iterable[Vec3]:
    corners = [  # closed polygon: first vertex == last vertex
(left, top),
(right, top),
(right, bottom),
(left, bottom),
(left, top),
]
if m is None:
return Vec3.generate(corners)
else:
return m.transform_vertices(corners)
class FrameRenderer(tl.ContentRenderer):
def __init__(self, properties: Properties, backend: BackendInterface):
self.properties = properties
self.backend = backend
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> None:
self._render_outline(list(corner_vertices(left, bottom, right, top, m)))
def _render_outline(self, vertices: List[Vec3]) -> None:
backend = self.backend
properties = self.properties
prev = vertices.pop(0)
for vertex in vertices:
backend.draw_line(prev, vertex, properties)
prev = vertex
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
points = [(x1, y1), (x2, y2)]
if m is not None:
p1, p2 = m.transform_vertices(points)
else:
p1, p2 = Vec3.generate(points)
self.backend.draw_line(p1, p2, self.properties)
class ColumnBackgroundRenderer(FrameRenderer):
def __init__(
self,
properties: Properties,
backend: BackendInterface,
bg_properties: Properties = None,
offset: float = 0,
text_frame: bool = False,
):
super().__init__(properties, backend)
self.bg_properties = bg_properties
self.offset = offset # background border offset
self.has_text_frame = text_frame
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> None:
# Important: this is not a clipping box, it is possible to
# render anything outside of the given borders!
offset = self.offset
vertices = list(
corner_vertices(
left - offset, bottom - offset, right + offset, top + offset, m
)
)
if self.bg_properties is not None:
self.backend.draw_filled_polygon(vertices, self.bg_properties)
if self.has_text_frame:
self._render_outline(vertices)
class TextRenderer(FrameRenderer):
"""Text content renderer."""
def __init__(
self,
text: str,
cap_height: float,
width_factor: float,
oblique: float, # angle in degrees
properties: Properties,
backend: BackendInterface,
):
super().__init__(properties, backend)
self.text = text
self.cap_height = cap_height
self.width_factor = width_factor
self.oblique = oblique # angle in degrees
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
):
"""Create/render the text content"""
sx = 1.0
tx = 0.0
if not math.isclose(self.width_factor, 1.0, rel_tol=1e-6):
sx = self.width_factor
if abs(self.oblique) > 1e-3: # degrees
tx = math.tan(math.radians(self.oblique))
# fmt: off
t = Matrix44((
sx, 0.0, 0.0, 0.0,
tx, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
left, bottom, 0.0, 1.0
))
# fmt: on
if m is not None:
t *= m
self.backend.draw_text(self.text, t, self.properties, self.cap_height)
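# complex_mtext_renderer() below glues an MTEXT entity to the text_layout engine:
# it builds a ComplexMTextRenderer, derives a layout for the entity, places it
# according to the MTEXT attachment point and renders it transformed into the
# entity's UCS.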
def complex_mtext_renderer(
ctx: RenderContext, backend: BackendInterface, mtext: MText, properties: Properties
) -> None:
cmr = ComplexMTextRenderer(ctx, backend, properties)
align = tl.LayoutAlignment(mtext.dxf.attachment_point)
layout_engine = cmr.layout_engine(mtext)
layout_engine.place(align=align)
layout_engine.render(mtext.ucs().matrix)
class ComplexMTextRenderer(AbstractMTextRenderer):
def __init__(
self,
ctx: RenderContext,
backend: BackendInterface,
properties: Properties,
):
super().__init__()
self._render_ctx = ctx
self._backend = backend
self._properties = properties
# Implementation of required AbstractMTextRenderer methods:
def word(self, text: str, ctx: MTextContext) -> tl.ContentCell:
return tl.Text(
width=self.get_font(ctx).text_width(text),
height=ctx.cap_height,
valign=tl.CellAlignment(ctx.align),
stroke=self.get_stroke(ctx),
renderer=TextRenderer(
text,
ctx.cap_height,
ctx.width_factor,
ctx.oblique,
self.new_text_properties(self._properties, ctx),
self._backend,
))
def fraction(
self, data: Tuple[str, str, str], ctx: MTextContext
) -> tl.ContentCell:
upr, lwr, type_ = data
if type_:
return tl.Fraction(
top=self.word(upr, ctx),
bottom=self.word(lwr, ctx),
stacking=self.get_stacking(type_),
# renders just the divider line:
renderer=FrameRenderer(self._properties, self._backend),
)
else:
return self.word(upr, ctx)
def get_font_face(self, mtext: MText) -> fonts.FontFace:
return self._properties.font # type: ignore
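    # make_bg_renderer() below decodes the MTEXT bg_fill flags: const.MTEXT_BG_COLOR
    # selects an ACI or true-color fill (unless bits 0 and 1 request the canvas
    # color, which cannot be resolved from the DXF document and is therefore
    # skipped), const.MTEXT_TEXT_FRAME adds an outline, and the border offset is
    # char_height * (box_fill_scale - 1).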
def make_bg_renderer(self, mtext: MText) -> tl.ContentRenderer:
dxf = mtext.dxf
bg_fill = dxf.get("bg_fill", 0)
bg_aci = None
bg_true_color = None
bg_properties: Optional[Properties] = None
has_text_frame = False
offset = 0
if bg_fill:
# The fill scale is a multiple of the initial char height and
# a scale of 1, fits exact the outer border
# of the column -> offset = 0
offset = dxf.char_height * (dxf.get("box_fill_scale", 1.5) - 1)
if bg_fill & const.MTEXT_BG_COLOR:
if dxf.hasattr("bg_fill_color"):
bg_aci = dxf.bg_fill_color
if dxf.hasattr("bg_fill_true_color"):
bg_aci = None
bg_true_color = dxf.bg_fill_true_color
if (bg_fill & 3) == 3: # canvas color = bit 0 and 1 set
# can not detect canvas color from DXF document!
# do not draw any background:
bg_aci = None
bg_true_color = None
if bg_fill & const.MTEXT_TEXT_FRAME:
has_text_frame = True
bg_properties = self.new_bg_properties(bg_aci, bg_true_color)
return ColumnBackgroundRenderer(
self._properties,
self._backend,
bg_properties,
offset=offset,
text_frame=has_text_frame,
)
# Implementation details of ComplexMTextRenderer:
@property
def backend(self) -> BackendInterface:
return self._backend
def resolve_aci_color(self, aci: int) -> Color:
return self._render_ctx.resolve_aci_color(aci, self._properties.layer)
def new_text_properties(
self, properties: Properties, ctx: MTextContext
) -> Properties:
new_properties = copy.copy(properties)
if ctx.rgb is None:
new_properties.color = self.resolve_aci_color(ctx.aci)
else:
new_properties.color = rgb_to_hex(ctx.rgb)
new_properties.font = ctx.font_face
return new_properties
def new_bg_properties(
self, aci: Optional[int], true_color: Optional[int]
) -> Properties:
new_properties = copy.copy(self._properties)
new_properties.color = ( # canvas background color
self._render_ctx.current_layout_properties.background_color
)
if true_color is None:
if aci is not None:
new_properties.color = self.resolve_aci_color(aci)
# else canvas background color
else:
new_properties.color = rgb_to_hex(colors.int2rgb(true_color))
return new_properties
| [
[
[
69,
73
],
[
8219,
8223
],
[
8613,
8617
]
],
[
[
81,
85
],
[
3911,
3915
],
[
4063,
4067
],
[
4072,
4076
]
],
[
[
105,
113
],
[
716,
724
]
],
[
[
115,
119
],
[
1493,
1497
]
],
[
[
121,
129
],
[
6544,
6552
],
[
8526,
8534
],
[
8553,
8561
]
],
[
[
131,
136
],
[
5750,
5755
]
],
[
[
156,
162
],
[
9016,
9022
]
],
[
[
190,
195
],
[
4512,
4517
],
[
6257,
6262
],
[
6375,
6380
]
],
[
[
220,
225
],
[
6915,
6920
],
[
7475,
7480
]
],
[
[
249,
257
],
[
694,
702
],
[
1339,
1347
],
[
1803,
1811
],
[
2634,
2642
],
[
3793,
3801
],
[
4131,
4139
]
],
[
[
259,
263
],
[
725,
729
],
[
947,
951
],
[
1498,
1502
],
[
1983,
1987
]
],
[
[
313,
334
],
[
4825,
4846
]
],
[
[
359,
376
],
[
1047,
1049
],
[
4622,
4624
],
[
5229,
5231
],
[
5260,
5262
],
[
5378,
5380
],
[
5799,
5801
],
[
5883,
5885
],
[
6385,
6387
]
],
[
[
378,
383
],
[
6267,
6272
]
],
[
[
413,
425
],
[
5212,
5224
],
[
5777,
5789
],
[
8160,
8172
]
],
[
[
447,
463
],
[
1124,
1140
],
[
2191,
2207
],
[
3440,
3456
],
[
4487,
4503
],
[
4926,
4942
],
[
7908,
7924
]
],
[
[
488,
498
],
[
1103,
1113
],
[
2162,
2172
],
[
2232,
2242
],
[
3411,
3421
],
[
4531,
4541
],
[
4964,
4974
],
[
6553,
6563
],
[
8182,
8192
],
[
8143,
8153
],
[
8576,
8586
]
],
[
[
500,
513
],
[
4463,
4476
],
[
4894,
4907
]
],
[
[
515,
525
],
[
8385,
8395
],
[
9005,
9015
]
],
[
[
550,
555
],
[
8001,
8006
]
],
[
[
557,
564
]
],
[
[
600,
615
],
[
1405,
1420
],
[
2855,
2870
]
],
[
[
1033,
1046
],
[
2094,
2107
],
[
3190,
3203
],
[
6106,
6119
]
],
[
[
2069,
2093
],
[
7627,
7651
]
],
[
[
3177,
3189
],
[
5469,
5481
]
],
[
[
4430,
4452
]
],
[
[
4804,
4824
],
[
4563,
4583
]
]
] |
# -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""database.py
Testing utils for creating database records needed for associations.
"""
from tests.utils import default_board_id, default_repo_id, default_list_id, \
default_issue_id, default_card_id, default_pull_request_id
from app import db
from app.models import Board, Issue, List, PullRequest, Repo, Subscription, \
SubscribedList
def create_board():
"""Create the board needed for the foreign key constraint."""
db.session.add(
Board(
name='board_name',
url=f"https://trello.com/b/{default_board_id}",
trello_board_id=default_board_id
)
)
def create_repo():
"""Create the repo needed for the foreign key constraint."""
db.session.add(
Repo(
name='repo_name',
url='https://github.com/user/repo',
github_repo_id=default_repo_id
)
)
def create_list():
"""Create the list needed for the foreign key constraint."""
db.session.add(
List(
name='list_name',
trello_list_id=default_list_id,
board_id=default_board_id
)
)
def create_subscription(issue_autocard=True, pull_request_autocard=True):
"""Create a subscription."""
db.session.add(
Subscription(
board_id=default_board_id,
repo_id=default_repo_id,
issue_autocard=issue_autocard,
pull_request_autocard=pull_request_autocard
)
)
def create_subscribed_list():
"""Create a subscribed list to create cards for."""
db.session.add(
SubscribedList(
subscription_board_id=default_board_id,
subscription_repo_id=default_repo_id,
list_id=default_list_id
)
)
def create_issue():
"""Create a GitHub issue representation."""
db.session.add(
Issue(
name='Test adding a new issue',
url='https://github.com/a-organization/a-repo/issues/56',
github_issue_id=default_issue_id,
repo_id=default_repo_id,
trello_board_id=default_board_id,
trello_card_id=default_card_id
)
)
def create_pull_request():
"""Create a GitHub pull request representation."""
db.session.add(
PullRequest(
name='Update README.md',
url='https://github.com/a-organization/a-repo/pulls/57',
github_pull_request_id=default_pull_request_id,
repo_id=default_repo_id,
trello_board_id=default_board_id,
trello_card_id=default_card_id
)
)
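# Usage sketch (assumed test flow, not part of the original helpers): the functions
# above only stage rows on the SQLAlchemy session, so a test that needs these
# associations would typically call them and then commit, e.g.
#   create_board(); create_repo(); create_subscription(issue_autocard=False)
#   db.session.commit()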
| [
[
[
374,
390
],
[
801,
817
],
[
849,
865
],
[
1354,
1370
],
[
1559,
1575
],
[
1895,
1911
],
[
2345,
2361
],
[
2778,
2794
]
],
[
[
392,
407
],
[
1107,
1122
],
[
1597,
1612
],
[
1946,
1961
],
[
2300,
2315
],
[
2733,
2748
]
],
[
[
409,
424
],
[
1316,
1331
],
[
1983,
1998
]
],
[
[
432,
448
],
[
2262,
2278
]
],
[
[
450,
465
],
[
2390,
2405
],
[
2823,
2838
]
],
[
[
467,
490
],
[
2688,
2711
]
],
[
[
507,
509
],
[
699,
701
],
[
972,
974
],
[
1229,
1231
],
[
1500,
1502
],
[
1821,
1823
],
[
2089,
2091
],
[
2510,
2512
]
],
[
[
533,
538
],
[
723,
728
]
],
[
[
540,
545
],
[
2113,
2118
]
],
[
[
547,
551
],
[
1253,
1257
]
],
[
[
553,
564
],
[
2534,
2545
]
],
[
[
566,
570
],
[
996,
1000
]
],
[
[
572,
584
],
[
1524,
1536
]
],
[
[
592,
606
],
[
1845,
1859
]
],
[
[
613,
625
]
],
[
[
888,
899
]
],
[
[
1145,
1156
]
],
[
[
1393,
1412
]
],
[
[
1735,
1757
]
],
[
[
2021,
2033
]
],
[
[
2428,
2447
]
]
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('postcode_api', '0003_populate_postcode_area'),
]
operations = [
migrations.CreateModel(
name='LocalAuthority',
fields=[
('gss_code', models.CharField(
max_length=9, serialize=False,
primary_key=True, db_index=True)),
('name', models.CharField(max_length=128,
db_index=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PostcodeGssCode',
fields=[
('postcode_index', models.CharField(
max_length=7, db_index=True)),
('local_authority_gss_code', models.CharField(
max_length=9, serialize=False,
primary_key=True, db_index=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterField(
model_name='address',
name='postcode_area',
field=models.CharField(
default=b'', max_length=4, db_index=True, blank=True),
preserve_default=True,
),
]
| [
[
[
47,
63
]
],
[
[
87,
93
],
[
369,
375
],
[
518,
524
],
[
681,
687
],
[
832,
838
],
[
946,
952
],
[
1141,
1147
],
[
1285,
1291
]
],
[
[
95,
105
],
[
124,
134
],
[
260,
270
],
[
716,
726
],
[
1176,
1186
]
],
[
[
114,
123
]
]
] |
# coding: utf-8
from collections import namedtuple
from supervisely_lib.api.module_api import ApiField, ModuleApi
from supervisely_lib._utils import camel_to_snake
class PluginApi(ModuleApi):
_info_sequence = [ApiField.ID,
ApiField.NAME,
ApiField.DESCRIPTION,
ApiField.TYPE,
ApiField.DEFAULT_VERSION,
ApiField.DOCKER_IMAGE,
ApiField.README,
ApiField.CONFIGS,
ApiField.VERSIONS,
ApiField.CREATED_AT,
ApiField.UPDATED_AT]
Info = namedtuple('PluginInfo', [camel_to_snake(name) for name in _info_sequence])
def get_list(self, team_id, filters=None):
return self.get_list_all_pages('plugins.list', {ApiField.TEAM_ID: team_id, ApiField.FILTER: filters or []})
def get_info_by_id(self, team_id, plugin_id):
filters = [{"field": ApiField.ID, "operator": "=", "value": plugin_id}]
return self._get_info_by_filters(team_id, filters)
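# Usage sketch (assumes an authenticated supervisely Api client that exposes this
# module as `api.plugin`; that wiring lives outside this file):
#   plugins = api.plugin.get_list(team_id)                # list of PluginInfo namedtuples
#   info = api.plugin.get_info_by_id(team_id, plugin_id)  # info for a single plugin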
| [
[
[
41,
51
],
[
658,
668
]
],
[
[
95,
103
],
[
217,
225
],
[
252,
260
],
[
289,
297
],
[
333,
341
],
[
370,
378
],
[
418,
426
],
[
463,
471
],
[
502,
510
],
[
542,
550
],
[
583,
591
],
[
626,
634
],
[
839,
847
],
[
866,
874
],
[
979,
987
]
],
[
[
105,
114
],
[
183,
192
]
],
[
[
150,
164
],
[
684,
698
]
],
[
[
173,
182
]
]
] |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
# import torch.utils.checkpoint as cp
from fairseq.modules import (
MaskedConvolution, MultiheadMaskedConvolution
)
class ExpandingResNet(nn.Module):
""" A network of residual convolutional layers"""
def __init__(self, num_init_features, args):
super().__init__()
num_layers = args.num_layers
num_features = num_init_features
self.reduce_channels = Linear(num_features, num_features // args.divide_channels) if args.divide_channels > 1 else None
num_features = num_features // args.divide_channels
self.output_channels = num_features
self.add_up_scale = 1 / (num_layers + 1)
self.residual_blocks = nn.ModuleList([])
for i in range(num_layers):
kernel_size = 2 * (i + 1) + 1
print('Layer ', i, kernel_size)
self.residual_blocks.append(_ResLayer(num_features, kernel_size, args))
def forward(self, x,
encoder_mask=None,
decoder_mask=None,
incremental_state=None):
"""
Input : N, Tt, Ts, C
Output : N, Tt, Ts, C
"""
if self.reduce_channels is not None:
x = self.reduce_channels(x)
add_up = self.add_up_scale * x
for layer in self.residual_blocks:
x = layer(x,
encoder_mask=encoder_mask,
decoder_mask=decoder_mask,
incremental_state=incremental_state)
add_up += self.add_up_scale * x
return add_up
class _ResLayer(nn.Module):
""" Single residual layer
num_input_features - number of input channels to the layer
kernel_size - size of masked convolution, k x (k // 2)
drop_rate - dropout rate
"""
def __init__(self, num_features, kernel_size, args):
super().__init__()
self.drop_rate = args.convolution_dropout
ffn_dim = args.ffn_dim
mid_features = args.reduce_dim
stride = args.conv_stride # source dimension stride
dilsrc = args.source_dilation
diltrg = args.target_dilation
resolution = args.maintain_resolution
if resolution:
if not stride == 1:
raise ValueError('Could not maintain the resolution with stride=%d' % stride)
# choose the padding accordingly:
padding_trg = diltrg * (kernel_size - 1) // 2
padding_src = dilsrc * (kernel_size - 1) // 2
padding = (padding_trg, padding_src)
else:
# must maintain the target resolution:
padding = (diltrg * (kernel_size - 1) // 2, 0)
        # Reduce dim should be divisible by groups
self.conv1 = nn.Conv2d(num_features,
mid_features,
kernel_size=1,
stride=1,
bias=False)
self.mconv2 = MaskedConvolution(
mid_features, num_features,
kernel_size, args,
padding=padding,
)
self.fc1 = Linear(num_features, ffn_dim)
self.fc2 = Linear(ffn_dim, num_features)
self.scale = 0.5 ** .5
def forward(self, x,
encoder_mask=None,
decoder_mask=None,
incremental_state=None):
residual = x
x = x.permute(0, 3, 1, 2)
x = self.conv1(x)
# x = F.relu(x)
x = self.mconv2(x, incremental_state)
if self.training:
if encoder_mask is not None:
x = x.masked_fill(encoder_mask.unsqueeze(1).unsqueeze(1), 0)
if decoder_mask is not None:
x = x.masked_fill(decoder_mask.unsqueeze(1).unsqueeze(-1), 0)
if self.drop_rate:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = x.permute(0, 2, 3, 1)
x = self.scale * (x + residual) # N, C, Tt, Ts
# FFN:
residual = x
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
if self.drop_rate:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.scale * (x + residual)
return x
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
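# Shape sketch (hypothetical `args` object; MaskedConvolution is provided by fairseq.modules):
# given x of shape (N, Tt, Ts, C), ExpandingResNet(C, args)(x) returns a tensor of shape
# (N, Tt, Ts, C // args.divide_channels) -- reduce_channels shrinks the channel dimension
# once and every residual block preserves it.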
| [
[
[
293,
298
]
],
[
[
306,
320
],
[
499,
501
],
[
1919,
1921
],
[
1033,
1035
],
[
3067,
3069
],
[
4609,
4611
],
[
4656,
4658
],
[
4711,
4713
]
],
[
[
328,
352
],
[
4143,
4144
],
[
4360,
4361
],
[
4437,
4438
]
],
[
[
427,
444
],
[
3289,
3306
]
],
[
[
446,
472
]
],
[
[
483,
498
]
],
[
[
1909,
1918
],
[
1213,
1222
]
],
[
[
4555,
4561
],
[
751,
757
],
[
3437,
3443
],
[
3486,
3492
]
]
] |
#
from resolve import resolve
####################################
####################################
# Paste the contents of the plugin below
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
print('------------')
print(out)
print('------------')
self.assertEqual(out, output)
def test_入力例_1(self):
input = """3 3
...
...
..."""
output = """4"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """3 5
...#.
.###.
.#..."""
output = """4"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """20 20
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
....................
...................."""
output = """38"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
[
[
22,
29
],
[
370,
377
]
],
[
[
136,
139
],
[
280,
283
],
[
292,
295
],
[
388,
391
],
[
421,
424
]
],
[
[
155,
163
],
[
334,
342
],
[
346,
354
]
],
[
[
171,
179
],
[
197,
205
],
[
1435,
1443
]
],
[
[
187,
196
]
]
] |
from catalyst.contrib.datasets.misc_cv import ImageClassificationDataset
class Imagewang(ImageClassificationDataset):
"""
`Imagewang <https://github.com/fastai/imagenette#image%E7%BD%91>`_ Dataset.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewang"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewang.tgz",
"46f9749616a29837e7cd67b103396f6e",
)
]
class Imagewang160(ImageClassificationDataset):
"""
`Imagewang <https://github.com/fastai/imagenette#image%E7%BD%91>`_ Dataset
with images resized so that the shortest size is 160 px.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewang-160"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewang-160.tgz",
"1dc388d37d1dc52836c06749e14e37bc",
)
]
class Imagewang320(ImageClassificationDataset):
"""
`Imagewang <https://github.com/fastai/imagenette#image%E7%BD%91>`_ Dataset
with images resized so that the shortest size is 320 px.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewang-320"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewang-320.tgz",
"ff01d7c126230afce776bdf72bda87e6",
)
]
__all__ = ["Imagewang", "Imagewang160", "Imagewang320"]
| [
[
[
46,
72
],
[
91,
117
],
[
488,
514
],
[
953,
979
]
],
[
[
81,
90
]
],
[
[
475,
487
]
],
[
[
940,
952
]
],
[
[
1399,
1406
]
]
] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encounter incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| [
[
[
808,
822
]
],
[
[
830,
833
],
[
854,
857
]
],
[
[
841,
849
],
[
19150,
19158
]
],
[
[
878,
888
]
],
[
[
891,
898
]
],
[
[
926,
931
],
[
3960,
3965
],
[
4420,
4425
],
[
4729,
4734
],
[
4927,
4932
],
[
5658,
5663
],
[
5858,
5863
],
[
6782,
6787
],
[
8664,
8669
],
[
13083,
13088
],
[
13461,
13466
],
[
13746,
13751
],
[
14540,
14545
],
[
15015,
15020
],
[
15433,
15438
],
[
16311,
16316
],
[
16967,
16972
],
[
17118,
17123
],
[
17289,
17294
],
[
17449,
17454
],
[
17713,
17718
],
[
18151,
18156
]
],
[
[
956,
977
],
[
2154,
2175
],
[
4900,
4921
],
[
6755,
6776
],
[
8680,
8701
],
[
14513,
14534
],
[
15406,
15427
]
],
[
[
1010,
1027
],
[
3535,
3552
]
],
[
[
1029,
1041
],
[
3338,
3350
],
[
4282,
4294
],
[
19406,
19418
],
[
19502,
19514
]
],
[
[
1076,
1085
],
[
16184,
16193
],
[
16258,
16267
]
],
[
[
1121,
1136
],
[
17671,
17686
]
],
[
[
1171,
1187
],
[
18108,
18124
]
],
[
[
1218,
1221
]
],
[
[
1223,
1233
],
[
6844,
6854
],
[
20843,
20853
]
],
[
[
1264,
1289
],
[
3580,
3605
]
],
[
[
1291,
1298
]
],
[
[
1358,
1368
],
[
18533,
18543
],
[
20951,
20961
],
[
3619,
3629
],
[
3672,
3682
],
[
19566,
19576
]
],
[
[
18521,
18532
]
],
[
[
20642,
20657
],
[
5830,
5845
]
],
[
[
20993,
20998
],
[
22264,
22269
]
]
] |
import unittest
from src.google_foobar.P008_carrotland.solution_01 import answer
class TestSolution(unittest.TestCase):
def testcase_001(self):
vertices = [[2, 3], [6, 9], [10, 160]]
expected = 289
self.assertEqual(answer(vertices), expected)
def testcase_002(self):
vertices = [[91207, 89566], [-88690, -83026], [67100, 47194]]
expected = 1730960165
self.assertEqual(answer(vertices), expected)
def testcase_003(self):
vertices = [[0, 0], [0, 1], [1, 0]]
expected = 0
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_01.png
def testcase_004(self):
vertices = [[-1, -1], [1, 0], [0, 1]]
expected = 1
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_02.png
def testcase_005(self):
vertices = [[0, 0], [0, 10], [10, 0]]
expected = 36
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_03.png
def testcase_006(self):
vertices = [[1, 1], [4, 10], [10, 6]]
expected = 31
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_04.png
def testcase_007(self):
vertices = [[-5, 4], [4, 6], [3, -3]]
expected = 39
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_05.png
def testcase_008(self):
vertices = [[-5, -3], [5, -3], [0, 6]]
expected = 40
self.assertEqual(answer(vertices), expected)
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
15
],
[
103,
111
],
[
1656,
1664
]
],
[
[
75,
81
],
[
246,
252
],
[
428,
434
],
[
575,
581
],
[
778,
784
],
[
982,
988
],
[
1186,
1192
],
[
1390,
1396
],
[
1595,
1601
]
],
[
[
90,
102
]
]
] |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the testing base code."""
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
import six
from jacket import rpc
from jacket.compute import test
from jacket.tests.compute import fixtures
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_local', 'compute.conductor.api', group='conductor')
class IsolationTestCase(test.TestCase):
"""Ensure that things are cleaned up after failed tests.
These tests don't really do much here, but if isolation fails a bunch
of other tests should fail.
"""
def test_service_isolation(self):
self.flags(use_local=True, group='conductor')
self.useFixture(fixtures.ServiceFixture('compute'))
def test_rpc_consumer_isolation(self):
class NeverCalled(object):
def __getattribute__(*args):
assert False, "I should never get called."
server = rpc.get_server(messaging.Target(topic='compute',
server=CONF.host),
endpoints=[NeverCalled()])
server.start()
class JsonTestCase(test.NoDBTestCase):
def test_json_equal(self):
expected = {
"employees": [
{"firstName": "Anna", "lastName": "Smith"},
{"firstName": "John", "lastName": "Doe"},
{"firstName": "Peter", "lastName": "Jones"}
],
"locations": set(['Boston', 'Mumbai', 'Beijing', 'Perth'])
}
observed = """{
"employees": [
{
"lastName": "Doe",
"firstName": "John"
},
{
"lastName": "Smith",
"firstName": "Anna"
},
{
"lastName": "Jones",
"firstName": "Peter"
}
],
"locations": [
"Perth",
"Boston",
"Mumbai",
"Beijing"
]
}"""
self.assertJsonEqual(expected, observed)
def test_json_equal_fail_on_length(self):
expected = {
'top': {
'l1': {
'l2': ['a', 'b', 'c']
}
}
}
observed = {
'top': {
'l1': {
'l2': ['c', 'a', 'b', 'd']
}
}
}
try:
self.assertJsonEqual(expected, observed)
except Exception as e:
# error reported is going to be a cryptic length failure
# on the level2 structure.
self.assertEqual(e.mismatch.describe(), "3 != 4")
self.assertIn(
"Matchee: {'top': {'l1': {'l2': ['c', 'a', 'b', 'd']}}}",
six.text_type(e))
self.assertIn(
"Matcher: {'top': {'l1': {'l2': ['a', 'b', 'c']}}}",
six.text_type(e))
else:
self.fail("This should have raised a mismatch exception")
def test_json_equal_fail_on_inner(self):
expected = {
'top': {
'l1': {
'l2': ['a', 'b', 'c']
}
}
}
observed = {
'top': {
'l1': {
'l2': ['c', 'a', 'd']
}
}
}
try:
self.assertJsonEqual(expected, observed)
except Exception as e:
# error reported is going to be a cryptic length failure
# on the level2 structure.
self.assertEqual(e.mismatch.describe(), "'b' != 'c'")
self.assertIn(
"Matchee: {'top': {'l1': {'l2': ['c', 'a', 'd']}}}",
six.text_type(e))
self.assertIn(
"Matcher: {'top': {'l1': {'l2': ['a', 'b', 'c']}}}",
six.text_type(e))
else:
self.fail("This should have raised a mismatch exception")
class BadLogTestCase(test.NoDBTestCase):
"""Make sure a mis-formatted debug log will get caught."""
def test_bad_debug_log(self):
self.assertRaises(KeyError,
LOG.debug, "this is a misformated %(log)s", {'nothing': 'nothing'})
class MatchTypeTestCase(test.NoDBTestCase):
def test_match_type_simple(self):
matcher = test.MatchType(dict)
self.assertEqual(matcher, {})
self.assertEqual(matcher, {"hello": "world"})
self.assertEqual(matcher, {"hello": ["world"]})
self.assertNotEqual(matcher, [])
self.assertNotEqual(matcher, [{"hello": "world"}])
self.assertNotEqual(matcher, 123)
self.assertNotEqual(matcher, "foo")
def test_match_type_object(self):
class Hello(object):
pass
class World(object):
pass
matcher = test.MatchType(Hello)
self.assertEqual(matcher, Hello())
self.assertNotEqual(matcher, World())
self.assertNotEqual(matcher, 123)
self.assertNotEqual(matcher, "foo")
| [
[
[
796,
799
],
[
1023,
1026
]
],
[
[
821,
835
],
[
987,
994
]
],
[
[
843,
870
],
[
1689,
1698
]
],
[
[
878,
881
],
[
3459,
3462
],
[
3589,
3592
],
[
4420,
4423
],
[
4550,
4553
]
],
[
[
902,
905
],
[
1674,
1677
]
],
[
[
933,
937
],
[
1131,
1135
],
[
1894,
1898
],
[
4675,
4679
],
[
4935,
4939
],
[
5012,
5016
],
[
5519,
5523
]
],
[
[
971,
979
],
[
1440,
1448
]
],
[
[
981,
984
],
[
4841,
4844
]
],
[
[
1016,
1020
],
[
1032,
1036
],
[
1779,
1783
]
],
[
[
1113,
1130
]
],
[
[
1881,
1893
]
],
[
[
4660,
4674
]
],
[
[
4917,
4934
]
]
] |
from keras.layers import Input
from keras.layers.merge import Concatenate
from keras.models import Model
from keras.optimizers import Adam
from .keras_base import KerasBaseExp
from .keras_base import exp_bag_of_strokes
from .blocks import fc_branch, final_type1
class mlp_type1(KerasBaseExp):
def initialize_model(self, in_dims, out_dims):
input_layer = [Input(shape=(d, )) for d in in_dims]
if len(input_layer) > 1:
layer = Concatenate()(input_layer)
else:
layer = input_layer[0]
layer = fc_branch(layer, self.decay)
self.model = Model(inputs=input_layer, outputs=final_type1(layer, out_dims))
opt = Adam(lr=self.learning_rate)
self.model.compile(optimizer=opt, metrics=['accuracy'], loss='categorical_crossentropy')
class EXP1(mlp_type1, exp_bag_of_strokes):
    """Experiment combining mlp_type1 with exp_bag_of_strokes."""
    pass
| [
[
[
25,
30
],
[
371,
376
]
],
[
[
62,
73
],
[
462,
473
]
],
[
[
99,
104
],
[
606,
611
]
],
[
[
134,
138
],
[
684,
688
]
],
[
[
164,
176
],
[
282,
294
]
],
[
[
201,
219
],
[
833,
851
]
],
[
[
241,
250
],
[
555,
564
]
],
[
[
252,
263
],
[
640,
651
]
],
[
[
272,
281
],
[
822,
831
]
],
[
[
817,
821
]
]
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Infoblox, Inc.
# Authors: Amit Mishra (@amishra2-infoblox), Vedant Sethia (@vedantsethia)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
try:
import requests
import json
import ipaddress
except:
raise ImportError("Requests module not found")
__metaclass__ = type
class Request(object):
'''API Request class for Infoblox BloxOne's CRUD API operations
'''
def __init__(self,baseUrl, token):
'''Initialize the API class with baseUrl and API token
'''
self.baseUrl = baseUrl
self.token = token
def get(self,endpoint,data={}):
'''GET API request object
'''
try:
headers = {'Authorization': 'Token {}'.format(self.token)}
url = '{}{}'.format(self.baseUrl, endpoint)
result = requests.get(url, json.dumps(data), headers=headers)
except:
raise Exception("API request failed")
if result.status_code in [200,201,204]:
return (False, False, result.json())
elif result.status_code == 401:
return (True, False, result.content)
else:
meta = {'status': result.status_code, 'response': result.json()}
return (True, False, meta)
def create(self,endpoint,data={},body=True):
'''POST API request object
'''
try:
headers = {'Authorization': 'Token {}'.format(self.token)}
url = '{}{}'.format(self.baseUrl, endpoint)
if(body==True):
result = requests.post(url, json.dumps(data), headers=headers)
else:
result = requests.post(url, headers=headers)
except:
raise Exception("API request failed")
if result.status_code in [200,201,204]:
return (False, False, result.json())
elif result.status_code == 401:
return (True, False, result.content)
else:
meta = {'status': result.status_code, 'response': result.json()}
return (True, False, meta)
def update(self,endpoint,data={}):
'''PATCH API request object
'''
try:
headers = {'Authorization': 'Token {}'.format(self.token)}
url = '{}{}'.format(self.baseUrl, endpoint)
result = requests.patch(url, json.dumps(data), headers=headers)
except:
raise Exception("API request failed")
if result.status_code in [200,201,204]:
return (False, False, result.json())
elif result.status_code == 401:
return (True, False, result.content)
else:
meta = {'status': result.status_code, 'response': result.json()}
return (True, False, meta)
def put(self,endpoint,data={}):
'''PUT API request object
'''
try:
headers = {'Authorization': 'Token {}'.format(self.token)}
url = '{}{}'.format(self.baseUrl, endpoint)
result = requests.put(url, json.dumps(data), headers=headers)
except:
raise Exception("API request failed")
if result.status_code in [200,201,204]:
return (False, False, result.json())
elif result.status_code == 401:
return (True, False, result.content)
else:
meta = {'status': result.status_code, 'response': result.json()}
return (True, False, meta)
def delete(self,endpoint,data={}, body=False):
'''DELETE API request object
'''
try:
headers = {'Authorization': 'Token {}'.format(self.token)}
url = '{}{}'.format(self.baseUrl, endpoint)
if(body==True):
result = requests.delete(url, json.dumps(data), headers=headers)
else:
result = requests.delete(url, headers=headers)
except:
raise Exception("API request failed")
if result.status_code in [200,201,204]:
return (False, False, result.json())
elif result.status_code == 401:
return (True, False, result.content)
else:
meta = {'status': result.status_code, 'response': result.json()}
return (True, False, meta)
class Utilities(object):
'''Helper Functions for BloxOne DDI object operations
'''
def __init__(self):
'''Initializes the object
'''
pass
def normalize_ip(self, address, cidr=-1):
        '''Validate the IP address and return it as an [address, cidr] pair
'''
address = address.split('/')
try:
ipaddress.ip_address(address[0])
except:
return ['','']
if cidr != -1 and int(cidr) < 32:
return [address[0],cidr]
elif len(address) == 2:
return [address[0],address[1]]
else:
return [address[0],'']
def flatten_dict_object(self,key,data):
        '''Merge the list of single-key dicts under data[key] into one dict
'''
payload = {}
for i in data[key]:
for k,v in i.items():
payload[k]=v
return payload
def dhcp_options(self, key, data, dhcp_option_codes):
"""Create a list of DHCP option dicts"""
payload = []
for i in data[key]:
for k, v in i.items():
                dhcp_option = {}
                dhcp_option_code = None  # reset per option so a stale id from a previous iteration is never reused
for item in dhcp_option_codes:
if item["name"] == k:
dhcp_option_code = item["id"]
break
if dhcp_option_code:
dhcp_option["option_code"] = dhcp_option_code
dhcp_option["option_value"] = v
dhcp_option["type"] = "option"
payload.append(dhcp_option)
return payload
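if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): exercise the
    # pure helper methods of Utilities with hypothetical values.
    util = Utilities()
    print(util.normalize_ip("10.0.0.0/24"))   # -> ['10.0.0.0', '24']
    print(util.normalize_ip("10.0.0.5"))      # -> ['10.0.0.5', '']
    print(util.normalize_ip("not-an-ip"))     # -> ['', '']
    print(util.flatten_dict_object("tags", {"tags": [{"env": "dev"}, {"team": "dns"}]}))  # -> {'env': 'dev', 'team': 'dns'}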
| [
[
[
271,
286
]
],
[
[
288,
296
]
],
[
[
298,
312
]
],
[
[
331,
339
],
[
974,
982
],
[
1708,
1716
],
[
1805,
1813
],
[
2481,
2489
],
[
3179,
3187
],
[
3929,
3937
],
[
4028,
4036
]
],
[
[
351,
355
],
[
992,
996
],
[
1727,
1731
],
[
2501,
2505
],
[
3197,
3201
],
[
3950,
3954
]
],
[
[
367,
376
],
[
4797,
4806
]
],
[
[
437,
450
]
],
[
[
465,
472
]
],
[
[
4472,
4481
]
]
] |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API endpoints for managing a Product resource."""
import json
from flask_restx import Namespace, Resource, cors
from auth_api import status as http_status
from auth_api.exceptions import BusinessException
from auth_api.services import Product as ProductService
from auth_api.tracer import Tracer
from auth_api.utils.util import cors_preflight
API = Namespace('products', description='Endpoints for products management')
TRACER = Tracer.get_instance()
@cors_preflight('GET,OPTIONS')
@API.route('', methods=['GET', 'OPTIONS'])
class Products(Resource):
"""Resource for managing products."""
@staticmethod
@TRACER.trace()
@cors.crossdomain(origin='*')
def get():
"""Get a list of all products."""
try:
response, status = json.dumps(ProductService.get_products()), http_status.HTTP_200_OK
except BusinessException as exception:
response, status = {'code': exception.code, 'message': exception.message}, exception.status_code
return response, status
| [
[
[
655,
659
],
[
1371,
1375
]
],
[
[
685,
694
],
[
951,
960
]
],
[
[
696,
704
],
[
1144,
1152
]
],
[
[
706,
710
],
[
1241,
1245
]
],
[
[
733,
754
],
[
1414,
1425
]
],
[
[
787,
804
],
[
1453,
1470
]
],
[
[
835,
860
],
[
1382,
1396
]
],
[
[
889,
895
],
[
1031,
1037
]
],
[
[
928,
942
],
[
1056,
1070
]
],
[
[
945,
948
],
[
1087,
1090
]
],
[
[
1022,
1028
],
[
1221,
1227
]
],
[
[
1135,
1143
]
]
] |
"""Autocorrelation plot of data."""
from ..data import convert_to_dataset
from ..labels import BaseLabeller
from ..sel_utils import xarray_var_iter
from ..rcparams import rcParams
from ..utils import _var_names
from .plot_utils import default_grid, filter_plotters_list, get_plotting_function
def plot_autocorr(
data,
var_names=None,
filter_vars=None,
max_lag=None,
combined=False,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Bar plot of the autocorrelation function for a sequence of data.
Useful in particular for posteriors from MCMC samples which may display correlation.
Parameters
----------
data: obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
var_names: list of variable names, optional
Variables to be plotted, if None all variable are plotted. Prefix the
variables by `~` when you want to exclude them from the plot. Vector-value
stochastics are handled automatically.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
max_lag: int, optional
Maximum lag to calculate autocorrelation. Defaults to 100 or num draws, whichever is smaller
combined: bool
Flag for combining multiple chains into a single chain. If False (default), chains will be
plotted separately.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
Note this is not used if ax is supplied.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.
backend_kwargs: dict, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot default autocorrelation
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_autocorr(data)
Plot subset variables by specifying variable name exactly
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'] )
Combine chains by variable and select variables by excluding some with partial naming
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['~thet'], filter_vars="like", combined=True)
Specify maximum lag (x axis bound)
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'], max_lag=200, combined=True)
"""
data = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, data, filter_vars)
# Default max lag to 100 or max length of chain
if max_lag is None:
max_lag = min(100, data["draw"].shape[0])
if labeller is None:
labeller = BaseLabeller()
plotters = filter_plotters_list(
list(xarray_var_iter(data, var_names, combined)), "plot_autocorr"
)
rows, cols = default_grid(len(plotters), grid=grid)
autocorr_plot_args = dict(
axes=ax,
plotters=plotters,
max_lag=max_lag,
figsize=figsize,
rows=rows,
cols=cols,
combined=combined,
textsize=textsize,
labeller=labeller,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
autocorr_plot_args.update(backend_config=backend_config)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_autocorr", "autocorrplot", backend)
axes = plot(**autocorr_plot_args)
return axes
| [
[
[
55,
73
],
[
3990,
4008
]
],
[
[
95,
107
],
[
4263,
4275
]
],
[
[
132,
147
],
[
4329,
4344
]
],
[
[
171,
179
],
[
4804,
4812
]
],
[
[
200,
210
],
[
4050,
4060
]
],
[
[
235,
247
],
[
4413,
4425
]
],
[
[
249,
269
],
[
4294,
4314
]
],
[
[
271,
292
],
[
4995,
5016
]
],
[
[
299,
312
]
]
] |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib import admin
# Register your models here.
from .models import RemOrganization, RemRole, RemUser, Nursery, NurseryPlantsHistory, MotherTree, Plantation, BeninYield, AlteiaData, DeptSatellite, CommuneSatellite, SpecialTuple
admin.site.register(RemOrganization)
admin.site.register(RemRole)
admin.site.register(RemUser)
admin.site.register(Nursery)
admin.site.register(NurseryPlantsHistory)
admin.site.register(MotherTree)
admin.site.register(Plantation)
admin.site.register(BeninYield)
admin.site.register(AlteiaData)
admin.site.register(DeptSatellite)
admin.site.register(CommuneSatellite)
admin.site.register(SpecialTuple)
| [
[
[
102,
107
]
],
[
[
165,
170
],
[
353,
358
],
[
390,
395
],
[
419,
424
],
[
448,
453
],
[
477,
482
],
[
519,
524
],
[
551,
556
],
[
583,
588
],
[
615,
620
],
[
647,
652
],
[
682,
687
],
[
720,
725
]
],
[
[
192,
207
],
[
373,
388
]
],
[
[
209,
216
],
[
410,
417
]
],
[
[
218,
225
],
[
439,
446
]
],
[
[
227,
234
],
[
468,
475
]
],
[
[
236,
256
],
[
497,
517
]
],
[
[
258,
268
],
[
539,
549
]
],
[
[
270,
280
],
[
571,
581
]
],
[
[
282,
292
],
[
603,
613
]
],
[
[
294,
304
],
[
635,
645
]
],
[
[
306,
319
],
[
667,
680
]
],
[
[
321,
337
],
[
702,
718
]
],
[
[
339,
351
],
[
740,
752
]
]
] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
from __future__ import print_function
import contextlib
import glob
import json
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import time
import unittest
import zipfile
from datetime import datetime, timedelta
from threading import currentThread
_ORIGINAL_POPEN = subprocess.Popen
from mock import PropertyMock
from azurelinuxagent.common import conf
from azurelinuxagent.common.event import EVENTS_DIRECTORY, WALAEventOperation
from azurelinuxagent.common.exception import ProtocolError, UpdateError, ResourceGoneError, HttpError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.persist_firewall_rules import PersistFirewallRulesHandler
from azurelinuxagent.common.protocol.hostplugin import URI_FORMAT_GET_API_VERSIONS, HOST_PLUGIN_PORT, \
URI_FORMAT_GET_EXTENSION_ARTIFACT, HostPluginProtocol
from azurelinuxagent.common.protocol.restapi import VMAgentManifest, \
ExtHandlerPackage, ExtHandlerPackageList, ExtHandler, VMStatus, ExtHandlerStatus, ExtensionStatus
from azurelinuxagent.common.protocol.util import ProtocolUtil
from azurelinuxagent.common.protocol.wire import WireProtocol
from azurelinuxagent.common.utils import fileutil, restutil, textutil
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.utils.networkutil import FirewallCmdDirectCommands
from azurelinuxagent.common.version import AGENT_PKG_GLOB, AGENT_DIR_GLOB, AGENT_NAME, AGENT_DIR_PATTERN, \
AGENT_VERSION, CURRENT_AGENT, CURRENT_VERSION
from azurelinuxagent.ga.exthandlers import ExtHandlersHandler, ExtHandlerInstance, HandlerEnvironment, ExtensionStatusValue
from azurelinuxagent.ga.update import GuestAgent, GuestAgentError, MAX_FAILURE, AGENT_MANIFEST_FILE, \
get_update_handler, ORPHAN_POLL_INTERVAL, AGENT_PARTITION_FILE, AGENT_ERROR_FILE, ORPHAN_WAIT_INTERVAL, \
CHILD_LAUNCH_RESTART_MAX, CHILD_HEALTH_INTERVAL, GOAL_STATE_PERIOD_EXTENSIONS_DISABLED, UpdateHandler, \
READONLY_FILE_GLOBS, ExtensionsSummary, AgentUpgradeType
from tests.protocol.mocks import mock_wire_protocol
from tests.protocol.mockwiredata import DATA_FILE, DATA_FILE_MULTIPLE_EXT
from tests.tools import AgentTestCase, data_dir, DEFAULT, patch, load_bin_data, Mock, MagicMock, \
clear_singleton_instances, mock_sleep, skip_if_predicate_true
from tests.protocol import mockwiredata
from tests.protocol.HttpRequestPredicates import HttpRequestPredicates
NO_ERROR = {
"last_failure": 0.0,
"failure_count": 0,
"was_fatal": False
}
FATAL_ERROR = {
"last_failure": 42.42,
"failure_count": 2,
"was_fatal": True
}
WITH_ERROR = {
"last_failure": 42.42,
"failure_count": 2,
"was_fatal": False
}
EMPTY_MANIFEST = {
"name": "WALinuxAgent",
"version": 1.0,
"handlerManifest": {
"installCommand": "",
"uninstallCommand": "",
"updateCommand": "",
"enableCommand": "",
"disableCommand": "",
"rebootAfterInstall": False,
"reportHeartbeat": False
}
}
def faux_logger():
print("STDOUT message")
print("STDERR message", file=sys.stderr)
return DEFAULT
@contextlib.contextmanager
def _get_update_handler(iterations=1, test_data=None):
"""
    This function returns a mocked version of the UpdateHandler object to be used for testing. It will only run the
    main loop [iterations] number of times.
    To reuse the same object, be sure to reset the iterations by using the update_handler.set_iterations() function.
    :param iterations: Number of times the UpdateHandler.run() method should run.
    :return: Mocked UpdateHandler object and the MockWireProtocol object.
"""
def _set_iterations(iterations_):
# This will reset the current iteration and the max iterations to run for this test object.
update_handler._cur_iteration = 0
update_handler._iterations = iterations_
def check_running(*val, **__):
# This method will determine if the current UpdateHandler object is supposed to run or not.
# There can be scenarios where the UpdateHandler.is_running.setter is called, in that case, return the first
# value of the tuple and not increment the cur_iteration
if len(val) > 0:
return val[0]
if update_handler._cur_iteration < update_handler._iterations:
update_handler._cur_iteration += 1
return True
return False
test_data = DATA_FILE if test_data is None else test_data
with mock_wire_protocol(test_data) as protocol:
protocol_util = MagicMock()
protocol_util.get_protocol = Mock(return_value=protocol)
with patch("azurelinuxagent.ga.update.get_protocol_util", return_value=protocol_util):
with patch("azurelinuxagent.common.conf.get_autoupdate_enabled", return_value=False):
with patch.object(HostPluginProtocol, "is_default_channel", False):
update_handler = get_update_handler()
# Setup internal state for the object required for testing
update_handler._cur_iteration = 0
update_handler._iterations = 0
update_handler.set_iterations = _set_iterations
update_handler.get_iterations = lambda: update_handler._cur_iteration
type(update_handler).is_running = PropertyMock(side_effect=check_running)
with patch("time.sleep", side_effect=lambda _: mock_sleep(0.001)):
with patch('sys.exit') as exit_mock:
# Setup the initial number of iterations
update_handler.set_iterations(iterations)
update_handler.exit_mock = exit_mock
try:
yield update_handler, protocol
finally:
# Since PropertyMock requires us to mock the type(ClassName).property of the object,
# reverting it back to keep the state of the test clean
type(update_handler).is_running = True
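# Illustrative use of the helper above (a sketch mirroring how the tests below call it;
# the iteration count is just an example value):
#
#     with _get_update_handler(iterations=2) as (update_handler, protocol):
#         update_handler.run(debug=True)      # executes the mocked main loop twice
#         update_handler.set_iterations(1)    # reset before re-running the same object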
class UpdateTestCase(AgentTestCase):
_test_suite_tmp_dir = None
_agent_zip_dir = None
@classmethod
def setUpClass(cls):
AgentTestCase.setUpClass()
# copy data_dir/ga/WALinuxAgent-0.0.0.0.zip to _test_suite_tmp_dir/waagent-zip/WALinuxAgent-<AGENT_VERSION>.zip
sample_agent_zip = "WALinuxAgent-0.0.0.0.zip"
test_agent_zip = sample_agent_zip.replace("0.0.0.0", AGENT_VERSION)
UpdateTestCase._test_suite_tmp_dir = tempfile.mkdtemp()
UpdateTestCase._agent_zip_dir = os.path.join(UpdateTestCase._test_suite_tmp_dir, "waagent-zip")
os.mkdir(UpdateTestCase._agent_zip_dir)
source = os.path.join(data_dir, "ga", sample_agent_zip)
target = os.path.join(UpdateTestCase._agent_zip_dir, test_agent_zip)
shutil.copyfile(source, target)
@classmethod
def tearDownClass(cls):
AgentTestCase.tearDownClass()
shutil.rmtree(UpdateTestCase._test_suite_tmp_dir)
@staticmethod
def _get_agent_pkgs(in_dir=None):
if in_dir is None:
in_dir = UpdateTestCase._agent_zip_dir
path = os.path.join(in_dir, AGENT_PKG_GLOB)
return glob.glob(path)
@staticmethod
def _get_agents(in_dir=None):
if in_dir is None:
in_dir = UpdateTestCase._agent_zip_dir
path = os.path.join(in_dir, AGENT_DIR_GLOB)
return [a for a in glob.glob(path) if os.path.isdir(a)]
@staticmethod
def _get_agent_file_path():
return UpdateTestCase._get_agent_pkgs()[0]
@staticmethod
def _get_agent_file_name():
return os.path.basename(UpdateTestCase._get_agent_file_path())
@staticmethod
def _get_agent_path():
return fileutil.trim_ext(UpdateTestCase._get_agent_file_path(), "zip")
@staticmethod
def _get_agent_name():
return os.path.basename(UpdateTestCase._get_agent_path())
@staticmethod
def _get_agent_version():
return FlexibleVersion(UpdateTestCase._get_agent_name().split("-")[1])
@staticmethod
def _add_write_permission_to_goal_state_files():
# UpdateHandler.run() marks some of the files from the goal state as read-only. Those files are overwritten when
# a new goal state is fetched. This is not a problem for the agent, since it runs as root, but tests need
        # to make those files writable before fetching a new goal state. Note that UpdateHandler.run() fetches a new
# goal state, so tests that make multiple calls to that method need to call this function in-between calls.
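        # In other words, restore owner read/write (S_IRUSR | S_IWUSR) on every file matching
        # the read-only globs under the agent's lib dir.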
for gb in READONLY_FILE_GLOBS:
for path in glob.iglob(os.path.join(conf.get_lib_dir(), gb)):
fileutil.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
def agent_bin(self, version, suffix):
return "bin/{0}-{1}{2}.egg".format(AGENT_NAME, version, suffix)
def rename_agent_bin(self, path, dst_v):
src_bin = glob.glob(os.path.join(path, self.agent_bin("*.*.*.*", '*')))[0]
dst_bin = os.path.join(path, self.agent_bin(dst_v, ''))
shutil.move(src_bin, dst_bin)
def agents(self):
return [GuestAgent(path=path) for path in self.agent_dirs()]
def agent_count(self):
return len(self.agent_dirs())
def agent_dirs(self):
return self._get_agents(in_dir=self.tmp_dir)
def agent_dir(self, version):
return os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, version))
def agent_paths(self):
paths = glob.glob(os.path.join(self.tmp_dir, "*"))
paths.sort()
return paths
def agent_pkgs(self):
return self._get_agent_pkgs(in_dir=self.tmp_dir)
def agent_versions(self):
v = [FlexibleVersion(AGENT_DIR_PATTERN.match(a).group(1)) for a in self.agent_dirs()]
v.sort(reverse=True)
return v
@contextlib.contextmanager
def get_error_file(self, error_data=None):
if error_data is None:
error_data = NO_ERROR
with tempfile.NamedTemporaryFile(mode="w") as fp:
json.dump(error_data if error_data is not None else NO_ERROR, fp)
fp.seek(0)
yield fp
def create_error(self, error_data=None):
if error_data is None:
error_data = NO_ERROR
with self.get_error_file(error_data) as path:
err = GuestAgentError(path.name)
err.load()
return err
def copy_agents(self, *agents):
if len(agents) <= 0:
agents = self._get_agent_pkgs()
for agent in agents:
shutil.copy(agent, self.tmp_dir)
return
def expand_agents(self):
for agent in self.agent_pkgs():
path = os.path.join(self.tmp_dir, fileutil.trim_ext(agent, "zip"))
zipfile.ZipFile(agent).extractall(path)
def prepare_agent(self, version):
"""
Create a download for the current agent version, copied from test data
"""
self.copy_agents(self._get_agent_pkgs()[0])
self.expand_agents()
versions = self.agent_versions()
src_v = FlexibleVersion(str(versions[0]))
from_path = self.agent_dir(src_v)
dst_v = FlexibleVersion(str(version))
to_path = self.agent_dir(dst_v)
if from_path != to_path:
shutil.move(from_path + ".zip", to_path + ".zip")
shutil.move(from_path, to_path)
self.rename_agent_bin(to_path, dst_v)
return
def prepare_agents(self,
count=20,
is_available=True):
# Ensure the test data is copied over
agent_count = self.agent_count()
if agent_count <= 0:
self.copy_agents(self._get_agent_pkgs()[0])
self.expand_agents()
count -= 1
# Determine the most recent agent version
versions = self.agent_versions()
src_v = FlexibleVersion(str(versions[0]))
# Create agent packages and directories
return self.replicate_agents(
src_v=src_v,
count=count - agent_count,
is_available=is_available)
def remove_agents(self):
for agent in self.agent_paths():
try:
if os.path.isfile(agent):
os.remove(agent)
else:
shutil.rmtree(agent)
except: # pylint: disable=bare-except
pass
return
def replicate_agents(self,
count=5,
src_v=AGENT_VERSION,
is_available=True,
increment=1):
from_path = self.agent_dir(src_v)
dst_v = FlexibleVersion(str(src_v))
for i in range(0, count): # pylint: disable=unused-variable
dst_v += increment
to_path = self.agent_dir(dst_v)
shutil.copyfile(from_path + ".zip", to_path + ".zip")
shutil.copytree(from_path, to_path)
self.rename_agent_bin(to_path, dst_v)
if not is_available:
GuestAgent(to_path).mark_failure(is_fatal=True)
return dst_v
class TestGuestAgentError(UpdateTestCase):
def test_creation(self):
self.assertRaises(TypeError, GuestAgentError)
self.assertRaises(UpdateError, GuestAgentError, None)
with self.get_error_file(error_data=WITH_ERROR) as path:
err = GuestAgentError(path.name)
err.load()
self.assertEqual(path.name, err.path)
self.assertNotEqual(None, err)
self.assertEqual(WITH_ERROR["last_failure"], err.last_failure)
self.assertEqual(WITH_ERROR["failure_count"], err.failure_count)
self.assertEqual(WITH_ERROR["was_fatal"], err.was_fatal)
return
def test_clear(self):
with self.get_error_file(error_data=WITH_ERROR) as path:
err = GuestAgentError(path.name)
err.load()
self.assertEqual(path.name, err.path)
self.assertNotEqual(None, err)
err.clear()
self.assertEqual(NO_ERROR["last_failure"], err.last_failure)
self.assertEqual(NO_ERROR["failure_count"], err.failure_count)
self.assertEqual(NO_ERROR["was_fatal"], err.was_fatal)
return
def test_save(self):
err1 = self.create_error()
err1.mark_failure()
err1.mark_failure(is_fatal=True)
err2 = self.create_error(err1.to_json())
self.assertEqual(err1.last_failure, err2.last_failure)
self.assertEqual(err1.failure_count, err2.failure_count)
self.assertEqual(err1.was_fatal, err2.was_fatal)
def test_mark_failure(self):
err = self.create_error()
self.assertFalse(err.is_blacklisted)
for i in range(0, MAX_FAILURE): # pylint: disable=unused-variable
err.mark_failure()
        # Agent failed >= MAX_FAILURE times, so it should be blacklisted
self.assertTrue(err.is_blacklisted)
self.assertEqual(MAX_FAILURE, err.failure_count)
return
def test_mark_failure_permanent(self):
err = self.create_error()
self.assertFalse(err.is_blacklisted)
# Fatal errors immediately blacklist
err.mark_failure(is_fatal=True)
self.assertTrue(err.is_blacklisted)
self.assertTrue(err.failure_count < MAX_FAILURE)
return
def test_str(self):
err = self.create_error(error_data=NO_ERROR)
s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format(
NO_ERROR["last_failure"],
NO_ERROR["failure_count"],
NO_ERROR["was_fatal"])
self.assertEqual(s, str(err))
err = self.create_error(error_data=WITH_ERROR)
s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format(
WITH_ERROR["last_failure"],
WITH_ERROR["failure_count"],
WITH_ERROR["was_fatal"])
self.assertEqual(s, str(err))
return
class TestGuestAgent(UpdateTestCase):
def setUp(self):
UpdateTestCase.setUp(self)
self.copy_agents(self._get_agent_file_path())
self.agent_path = os.path.join(self.tmp_dir, self._get_agent_name())
def test_creation(self):
self.assertRaises(UpdateError, GuestAgent, "A very bad file name")
n = "{0}-a.bad.version".format(AGENT_NAME)
self.assertRaises(UpdateError, GuestAgent, n)
self.expand_agents()
agent = GuestAgent(path=self.agent_path)
self.assertNotEqual(None, agent)
self.assertEqual(self._get_agent_name(), agent.name)
self.assertEqual(self._get_agent_version(), agent.version)
self.assertEqual(self.agent_path, agent.get_agent_dir())
path = os.path.join(self.agent_path, AGENT_MANIFEST_FILE)
self.assertEqual(path, agent.get_agent_manifest_path())
self.assertEqual(
os.path.join(self.agent_path, AGENT_ERROR_FILE),
agent.get_agent_error_file())
path = ".".join((os.path.join(conf.get_lib_dir(), self._get_agent_name()), "zip"))
self.assertEqual(path, agent.get_agent_pkg_path())
self.assertTrue(agent.is_downloaded)
self.assertFalse(agent.is_blacklisted)
self.assertTrue(agent.is_available)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
def test_clear_error(self, mock_downloaded): # pylint: disable=unused-argument
self.expand_agents()
agent = GuestAgent(path=self.agent_path)
agent.mark_failure(is_fatal=True)
self.assertTrue(agent.error.last_failure > 0.0)
self.assertEqual(1, agent.error.failure_count)
self.assertTrue(agent.is_blacklisted)
self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted)
agent.clear_error()
self.assertEqual(0.0, agent.error.last_failure)
self.assertEqual(0, agent.error.failure_count)
self.assertFalse(agent.is_blacklisted)
self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_is_available(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(agent.is_available)
agent._unpack()
self.assertTrue(agent.is_available)
agent.mark_failure(is_fatal=True)
self.assertFalse(agent.is_available)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_is_blacklisted(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(agent.is_blacklisted)
agent._unpack()
self.assertFalse(agent.is_blacklisted)
self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted)
agent.mark_failure(is_fatal=True)
self.assertTrue(agent.is_blacklisted)
self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_resource_gone_error_not_blacklisted(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
try:
mock_downloaded.side_effect = ResourceGoneError()
agent = GuestAgent(path=self.agent_path)
self.assertFalse(agent.is_blacklisted)
except ResourceGoneError:
pass
except: # pylint: disable=bare-except
self.fail("Exception was not expected!")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_ioerror_not_blacklisted(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
try:
mock_downloaded.side_effect = IOError()
agent = GuestAgent(path=self.agent_path)
self.assertFalse(agent.is_blacklisted)
except IOError:
pass
except: # pylint: disable=bare-except
self.fail("Exception was not expected!")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_is_downloaded(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(agent.is_downloaded)
agent._unpack()
self.assertTrue(agent.is_downloaded)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_mark_failure(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
agent.mark_failure()
self.assertEqual(1, agent.error.failure_count)
agent.mark_failure(is_fatal=True)
self.assertEqual(2, agent.error.failure_count)
self.assertTrue(agent.is_blacklisted)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_unpack(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(os.path.isdir(agent.get_agent_dir()))
agent._unpack()
self.assertTrue(os.path.isdir(agent.get_agent_dir()))
self.assertTrue(os.path.isfile(agent.get_agent_manifest_path()))
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_unpack_fail(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(os.path.isdir(agent.get_agent_dir()))
os.remove(agent.get_agent_pkg_path())
self.assertRaises(UpdateError, agent._unpack)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_load_manifest(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
agent._unpack()
agent._load_manifest()
self.assertEqual(agent.manifest.get_enable_command(),
agent.get_agent_cmd())
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_load_manifest_missing(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(os.path.isdir(agent.get_agent_dir()))
agent._unpack()
os.remove(agent.get_agent_manifest_path())
self.assertRaises(UpdateError, agent._load_manifest)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_load_manifest_is_empty(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(os.path.isdir(agent.get_agent_dir()))
agent._unpack()
self.assertTrue(os.path.isfile(agent.get_agent_manifest_path()))
with open(agent.get_agent_manifest_path(), "w") as file: # pylint: disable=redefined-builtin
json.dump(EMPTY_MANIFEST, file)
self.assertRaises(UpdateError, agent._load_manifest)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
def test_load_manifest_is_malformed(self, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertFalse(os.path.isdir(agent.get_agent_dir()))
agent._unpack()
self.assertTrue(os.path.isfile(agent.get_agent_manifest_path()))
with open(agent.get_agent_manifest_path(), "w") as file: # pylint: disable=redefined-builtin
file.write("This is not JSON data")
self.assertRaises(UpdateError, agent._load_manifest)
def test_load_error(self):
agent = GuestAgent(path=self.agent_path)
agent.error = None
agent._load_error()
self.assertTrue(agent.error is not None)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
@patch("azurelinuxagent.ga.update.restutil.http_get")
def test_download(self, mock_http_get, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
self.remove_agents()
self.assertFalse(os.path.isdir(self.agent_path))
agent_pkg = load_bin_data(self._get_agent_file_name(), self._agent_zip_dir)
mock_http_get.return_value = ResponseMock(response=agent_pkg)
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
agent._download()
self.assertTrue(os.path.isfile(agent.get_agent_pkg_path()))
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
@patch("azurelinuxagent.ga.update.restutil.http_get")
def test_download_fail(self, mock_http_get, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
self.remove_agents()
self.assertFalse(os.path.isdir(self.agent_path))
mock_http_get.return_value = ResponseMock(status=restutil.httpclient.SERVICE_UNAVAILABLE)
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
self.assertRaises(UpdateError, agent._download)
self.assertFalse(os.path.isfile(agent.get_agent_pkg_path()))
self.assertFalse(agent.is_downloaded)
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded")
@patch("azurelinuxagent.ga.update.GuestAgent._ensure_loaded")
@patch("azurelinuxagent.ga.update.restutil.http_get")
@patch("azurelinuxagent.ga.update.restutil.http_post")
def test_download_fallback(self, mock_http_post, mock_http_get, mock_loaded, mock_downloaded): # pylint: disable=unused-argument
self.remove_agents()
self.assertFalse(os.path.isdir(self.agent_path))
mock_http_get.return_value = ResponseMock(
status=restutil.httpclient.SERVICE_UNAVAILABLE,
response="")
ext_uri = 'ext_uri'
host_uri = 'host_uri'
api_uri = URI_FORMAT_GET_API_VERSIONS.format(host_uri, HOST_PLUGIN_PORT)
art_uri = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(host_uri, HOST_PLUGIN_PORT)
mock_host = HostPluginProtocol(host_uri,
'container_id',
'role_config')
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(ext_uri)
agent = GuestAgent(pkg=pkg)
agent.host = mock_host
# ensure fallback fails gracefully, no http
self.assertRaises(UpdateError, agent._download)
self.assertEqual(mock_http_get.call_count, 2)
self.assertEqual(mock_http_get.call_args_list[0][0][0], ext_uri)
self.assertEqual(mock_http_get.call_args_list[1][0][0], api_uri)
# ensure fallback fails gracefully, artifact api failure
with patch.object(HostPluginProtocol,
"ensure_initialized",
return_value=True):
self.assertRaises(UpdateError, agent._download)
self.assertEqual(mock_http_get.call_count, 4)
self.assertEqual(mock_http_get.call_args_list[2][0][0], ext_uri)
self.assertEqual(mock_http_get.call_args_list[3][0][0], art_uri)
a, k = mock_http_get.call_args_list[3] # pylint: disable=unused-variable
self.assertEqual(False, k['use_proxy'])
# ensure fallback works as expected
with patch.object(HostPluginProtocol,
"get_artifact_request",
return_value=[art_uri, {}]):
self.assertRaises(UpdateError, agent._download)
self.assertEqual(mock_http_get.call_count, 6)
a, k = mock_http_get.call_args_list[3]
self.assertEqual(False, k['use_proxy'])
self.assertEqual(mock_http_get.call_args_list[4][0][0], ext_uri)
a, k = mock_http_get.call_args_list[4]
self.assertEqual(mock_http_get.call_args_list[5][0][0], art_uri)
a, k = mock_http_get.call_args_list[5]
self.assertEqual(False, k['use_proxy'])
@patch("azurelinuxagent.ga.update.restutil.http_get")
def test_ensure_downloaded(self, mock_http_get):
self.remove_agents()
self.assertFalse(os.path.isdir(self.agent_path))
agent_pkg = load_bin_data(self._get_agent_file_name(), self._agent_zip_dir)
mock_http_get.return_value = ResponseMock(response=agent_pkg)
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
self.assertTrue(os.path.isfile(agent.get_agent_manifest_path()))
self.assertTrue(agent.is_downloaded)
@patch("azurelinuxagent.ga.update.GuestAgent._download", side_effect=UpdateError)
def test_ensure_downloaded_download_fails(self, mock_download): # pylint: disable=unused-argument
self.remove_agents()
self.assertFalse(os.path.isdir(self.agent_path))
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
self.assertEqual(1, agent.error.failure_count)
self.assertFalse(agent.error.was_fatal)
self.assertFalse(agent.is_blacklisted)
@patch("azurelinuxagent.ga.update.GuestAgent._download")
@patch("azurelinuxagent.ga.update.GuestAgent._unpack", side_effect=UpdateError)
def test_ensure_downloaded_unpack_fails(self, mock_unpack, mock_download): # pylint: disable=unused-argument
self.assertFalse(os.path.isdir(self.agent_path))
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
self.assertEqual(1, agent.error.failure_count)
self.assertTrue(agent.error.was_fatal)
self.assertTrue(agent.is_blacklisted)
@patch("azurelinuxagent.ga.update.GuestAgent._download")
@patch("azurelinuxagent.ga.update.GuestAgent._unpack")
@patch("azurelinuxagent.ga.update.GuestAgent._load_manifest", side_effect=UpdateError)
def test_ensure_downloaded_load_manifest_fails(self, mock_manifest, mock_unpack, mock_download): # pylint: disable=unused-argument
self.assertFalse(os.path.isdir(self.agent_path))
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
self.assertEqual(1, agent.error.failure_count)
self.assertTrue(agent.error.was_fatal)
self.assertTrue(agent.is_blacklisted)
@patch("azurelinuxagent.ga.update.GuestAgent._download")
@patch("azurelinuxagent.ga.update.GuestAgent._unpack")
@patch("azurelinuxagent.ga.update.GuestAgent._load_manifest")
def test_ensure_download_skips_blacklisted(self, mock_manifest, mock_unpack, mock_download): # pylint: disable=unused-argument
agent = GuestAgent(path=self.agent_path)
self.assertEqual(0, mock_download.call_count)
agent.clear_error()
agent.mark_failure(is_fatal=True)
self.assertTrue(agent.is_blacklisted)
pkg = ExtHandlerPackage(version=str(self._get_agent_version()))
pkg.uris.append(None)
agent = GuestAgent(pkg=pkg)
self.assertEqual(1, agent.error.failure_count)
self.assertTrue(agent.error.was_fatal)
self.assertTrue(agent.is_blacklisted)
self.assertEqual(0, mock_download.call_count)
self.assertEqual(0, mock_unpack.call_count)
class TestUpdate(UpdateTestCase):
def setUp(self):
UpdateTestCase.setUp(self)
self.event_patch = patch('azurelinuxagent.common.event.add_event')
self.update_handler = get_update_handler()
protocol = Mock()
protocol.get_ext_handlers = Mock(return_value=(Mock(), Mock()))
self.update_handler.protocol_util = Mock()
self.update_handler.protocol_util.get_protocol = Mock(return_value=protocol)
# Since ProtocolUtil is a singleton per thread, we need to clear it to ensure that the test cases do not reuse
# a previous state
clear_singleton_instances(ProtocolUtil)
def test_creation(self):
self.assertEqual(None, self.update_handler.last_attempt_time)
self.assertEqual(0, len(self.update_handler.agents))
self.assertEqual(None, self.update_handler.child_agent)
self.assertEqual(None, self.update_handler.child_launch_time)
self.assertEqual(0, self.update_handler.child_launch_attempts)
self.assertEqual(None, self.update_handler.child_process)
self.assertEqual(None, self.update_handler.signal_handler)
def test_emit_restart_event_emits_event_if_not_clean_start(self):
try:
mock_event = self.event_patch.start()
self.update_handler._set_sentinel()
self.update_handler._emit_restart_event()
self.assertEqual(1, mock_event.call_count)
except Exception as e: # pylint: disable=unused-variable
pass
self.event_patch.stop()
def _create_protocol(self, count=20, versions=None):
latest_version = self.prepare_agents(count=count)
if versions is None or len(versions) <= 0:
versions = [latest_version]
return ProtocolMock(versions=versions)
def _test_ensure_no_orphans(self, invocations=3, interval=ORPHAN_WAIT_INTERVAL, pid_count=0):
with patch.object(self.update_handler, 'osutil') as mock_util:
# Note:
# - Python only allows mutations of objects to which a function has
# a reference. Incrementing an integer directly changes the
# reference. Incrementing an item of a list changes an item to
# which the code has a reference.
# See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope
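            # (Hence the single-element list below: the nested iterator() mutates iterations[0]
            # instead of rebinding a plain integer.)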
iterations = [0]
def iterator(*args, **kwargs): # pylint: disable=unused-argument
iterations[0] += 1
return iterations[0] < invocations
mock_util.check_pid_alive = Mock(side_effect=iterator)
pid_files = self.update_handler._get_pid_files()
self.assertEqual(pid_count, len(pid_files))
with patch('os.getpid', return_value=42):
with patch('time.sleep', return_value=None) as mock_sleep: # pylint: disable=redefined-outer-name
self.update_handler._ensure_no_orphans(orphan_wait_interval=interval)
for pid_file in pid_files:
self.assertFalse(os.path.exists(pid_file))
return mock_util.check_pid_alive.call_count, mock_sleep.call_count
def test_ensure_no_orphans(self):
fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41))
calls, sleeps = self._test_ensure_no_orphans(invocations=3, pid_count=1)
self.assertEqual(3, calls)
self.assertEqual(2, sleeps)
def test_ensure_no_orphans_skips_if_no_orphans(self):
calls, sleeps = self._test_ensure_no_orphans(invocations=3)
self.assertEqual(0, calls)
self.assertEqual(0, sleeps)
def test_ensure_no_orphans_ignores_exceptions(self):
with patch('azurelinuxagent.common.utils.fileutil.read_file', side_effect=Exception):
calls, sleeps = self._test_ensure_no_orphans(invocations=3)
self.assertEqual(0, calls)
self.assertEqual(0, sleeps)
def test_ensure_no_orphans_kills_after_interval(self):
fileutil.write_file(os.path.join(self.tmp_dir, "0_waagent.pid"), ustr(41))
with patch('os.kill') as mock_kill:
calls, sleeps = self._test_ensure_no_orphans(
invocations=4,
interval=3 * ORPHAN_POLL_INTERVAL,
pid_count=1)
self.assertEqual(3, calls)
self.assertEqual(2, sleeps)
self.assertEqual(1, mock_kill.call_count)
@patch('azurelinuxagent.ga.update.datetime')
def test_ensure_partition_assigned(self, mock_time):
path = os.path.join(conf.get_lib_dir(), AGENT_PARTITION_FILE)
mock_time.utcnow = Mock()
self.assertFalse(os.path.exists(path))
for n in range(0, 99):
mock_time.utcnow.return_value = Mock(microsecond=n * 10000)
self.update_handler._ensure_partition_assigned()
self.assertTrue(os.path.exists(path))
s = fileutil.read_file(path)
self.assertEqual(n, int(s))
os.remove(path)
def test_ensure_readonly_sets_readonly(self):
test_files = [
os.path.join(conf.get_lib_dir(), "faux_certificate.crt"),
os.path.join(conf.get_lib_dir(), "faux_certificate.p7m"),
os.path.join(conf.get_lib_dir(), "faux_certificate.pem"),
os.path.join(conf.get_lib_dir(), "faux_certificate.prv"),
os.path.join(conf.get_lib_dir(), "ovf-env.xml")
]
for path in test_files:
fileutil.write_file(path, "Faux content")
os.chmod(path,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
self.update_handler._ensure_readonly_files()
for path in test_files:
mode = os.stat(path).st_mode
mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
self.assertEqual(0, mode ^ stat.S_IRUSR)
def test_ensure_readonly_leaves_unmodified(self):
test_files = [
os.path.join(conf.get_lib_dir(), "faux.xml"),
os.path.join(conf.get_lib_dir(), "faux.json"),
os.path.join(conf.get_lib_dir(), "faux.txt"),
os.path.join(conf.get_lib_dir(), "faux")
]
for path in test_files:
fileutil.write_file(path, "Faux content")
os.chmod(path,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
self.update_handler._ensure_readonly_files()
for path in test_files:
mode = os.stat(path).st_mode
mode &= (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
self.assertEqual(
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH,
mode)
def _test_evaluate_agent_health(self, child_agent_index=0):
self.prepare_agents()
latest_agent = self.update_handler.get_latest_agent()
self.assertTrue(latest_agent.is_available)
self.assertFalse(latest_agent.is_blacklisted)
self.assertTrue(len(self.update_handler.agents) > 1)
child_agent = self.update_handler.agents[child_agent_index]
self.assertTrue(child_agent.is_available)
self.assertFalse(child_agent.is_blacklisted)
self.update_handler.child_agent = child_agent
self.update_handler._evaluate_agent_health(latest_agent)
def test_evaluate_agent_health_ignores_installed_agent(self):
self.update_handler._evaluate_agent_health(None)
def test_evaluate_agent_health_raises_exception_for_restarting_agent(self):
self.update_handler.child_launch_time = time.time() - (4 * 60)
self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1
self.assertRaises(Exception, self._test_evaluate_agent_health)
def test_evaluate_agent_health_will_not_raise_exception_for_long_restarts(self):
self.update_handler.child_launch_time = time.time() - 24 * 60
self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX
self._test_evaluate_agent_health()
def test_evaluate_agent_health_will_not_raise_exception_too_few_restarts(self):
self.update_handler.child_launch_time = time.time()
self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 2
self._test_evaluate_agent_health()
def test_evaluate_agent_health_resets_with_new_agent(self):
self.update_handler.child_launch_time = time.time() - (4 * 60)
self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1
self._test_evaluate_agent_health(child_agent_index=1)
self.assertEqual(1, self.update_handler.child_launch_attempts)
def test_filter_blacklisted_agents(self):
self.prepare_agents()
self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])
self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents))
kept_agents = self.update_handler.agents[::2]
blacklisted_agents = self.update_handler.agents[1::2]
for agent in blacklisted_agents:
agent.mark_failure(is_fatal=True)
self.update_handler._filter_blacklisted_agents()
self.assertEqual(kept_agents, self.update_handler.agents)
def test_find_agents(self):
self.prepare_agents()
self.assertTrue(0 <= len(self.update_handler.agents))
self.update_handler._find_agents()
self.assertEqual(len(self._get_agents(self.tmp_dir)), len(self.update_handler.agents))
def test_find_agents_does_reload(self):
self.prepare_agents()
self.update_handler._find_agents()
agents = self.update_handler.agents
self.update_handler._find_agents()
self.assertNotEqual(agents, self.update_handler.agents)
def test_find_agents_sorts(self):
self.prepare_agents()
self.update_handler._find_agents()
v = FlexibleVersion("100000")
for a in self.update_handler.agents:
self.assertTrue(v > a.version)
v = a.version
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_host_plugin')
def test_get_host_plugin_returns_host_for_wireserver(self, mock_get_host):
protocol = WireProtocol('12.34.56.78')
mock_get_host.return_value = "faux host"
host = self.update_handler._get_host_plugin(protocol=protocol)
print("mock_get_host call cound={0}".format(mock_get_host.call_count))
self.assertEqual(1, mock_get_host.call_count)
self.assertEqual("faux host", host)
def test_get_latest_agent(self):
latest_version = self.prepare_agents()
latest_agent = self.update_handler.get_latest_agent()
self.assertEqual(len(self._get_agents(self.tmp_dir)), len(self.update_handler.agents))
self.assertEqual(latest_version, latest_agent.version)
def test_get_latest_agent_excluded(self):
self.prepare_agent(AGENT_VERSION)
self.assertFalse(self._test_upgrade_available(
versions=self.agent_versions(),
count=1))
self.assertEqual(None, self.update_handler.get_latest_agent())
def test_get_latest_agent_no_updates(self):
self.assertEqual(None, self.update_handler.get_latest_agent())
def test_get_latest_agent_skip_updates(self):
conf.get_autoupdate_enabled = Mock(return_value=False)
self.assertEqual(None, self.update_handler.get_latest_agent())
def test_get_latest_agent_skips_unavailable(self):
self.prepare_agents()
prior_agent = self.update_handler.get_latest_agent()
latest_version = self.prepare_agents(count=self.agent_count() + 1, is_available=False)
latest_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, latest_version))
self.assertFalse(GuestAgent(latest_path).is_available)
latest_agent = self.update_handler.get_latest_agent()
self.assertTrue(latest_agent.version < latest_version)
self.assertEqual(latest_agent.version, prior_agent.version)
def test_get_pid_files(self):
pid_files = self.update_handler._get_pid_files()
self.assertEqual(0, len(pid_files))
def test_get_pid_files_returns_previous(self):
for n in range(1250):
fileutil.write_file(os.path.join(self.tmp_dir, str(n) + "_waagent.pid"), ustr(n + 1))
pid_files = self.update_handler._get_pid_files()
self.assertEqual(1250, len(pid_files))
pid_dir, pid_name, pid_re = self.update_handler._get_pid_parts() # pylint: disable=unused-variable
for p in pid_files:
self.assertTrue(pid_re.match(os.path.basename(p)))
def test_is_clean_start_returns_true_when_no_sentinel(self):
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
self.assertTrue(self.update_handler._is_clean_start)
def test_is_clean_start_returns_false_when_sentinel_exists(self):
self.update_handler._set_sentinel(agent=CURRENT_AGENT)
self.assertFalse(self.update_handler._is_clean_start)
def test_is_clean_start_returns_false_for_exceptions(self):
self.update_handler._set_sentinel()
with patch("azurelinuxagent.common.utils.fileutil.read_file", side_effect=Exception):
self.assertFalse(self.update_handler._is_clean_start)
def test_is_orphaned_returns_false_if_parent_exists(self):
fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42))
with patch('os.getppid', return_value=42):
self.assertFalse(self.update_handler._is_orphaned)
def test_is_orphaned_returns_true_if_parent_is_init(self):
with patch('os.getppid', return_value=1):
self.assertTrue(self.update_handler._is_orphaned)
def test_is_orphaned_returns_true_if_parent_does_not_exist(self):
fileutil.write_file(conf.get_agent_pid_file_path(), ustr(24))
with patch('os.getppid', return_value=42):
self.assertTrue(self.update_handler._is_orphaned)
def test_is_version_available(self):
self.prepare_agents(is_available=True)
self.update_handler.agents = self.agents()
for agent in self.agents():
self.assertTrue(self.update_handler._is_version_eligible(agent.version))
@patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False)
def test_is_version_available_rejects(self, mock_current): # pylint: disable=unused-argument
self.prepare_agents(is_available=True)
self.update_handler.agents = self.agents()
self.update_handler.agents[0].mark_failure(is_fatal=True)
self.assertFalse(self.update_handler._is_version_eligible(self.agents()[0].version))
@patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=True)
def test_is_version_available_accepts_current(self, mock_current): # pylint: disable=unused-argument
self.update_handler.agents = []
self.assertTrue(self.update_handler._is_version_eligible(CURRENT_VERSION))
@patch("azurelinuxagent.ga.update.is_current_agent_installed", return_value=False)
def test_is_version_available_rejects_by_default(self, mock_current): # pylint: disable=unused-argument
self.prepare_agents()
self.update_handler.agents = []
v = self.agents()[0].version
self.assertFalse(self.update_handler._is_version_eligible(v))
def test_purge_agents(self):
self.prepare_agents()
self.update_handler._find_agents()
# Ensure at least three agents initially exist
self.assertTrue(2 < len(self.update_handler.agents))
# Purge every other agent. Don't add the current version to agents_to_keep explicitly;
# the current version is never purged
agents_to_keep = []
kept_agents = []
purged_agents = []
for i in range(0, len(self.update_handler.agents)):
if self.update_handler.agents[i].version == CURRENT_VERSION:
kept_agents.append(self.update_handler.agents[i])
else:
if i % 2 == 0:
agents_to_keep.append(self.update_handler.agents[i])
kept_agents.append(self.update_handler.agents[i])
else:
purged_agents.append(self.update_handler.agents[i])
# Reload and assert only the kept agents remain on disk
self.update_handler.agents = agents_to_keep
self.update_handler._purge_agents()
self.update_handler._find_agents()
self.assertEqual(
[agent.version for agent in kept_agents],
[agent.version for agent in self.update_handler.agents])
# Ensure both directories and packages are removed
for agent in purged_agents:
agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version))
self.assertFalse(os.path.exists(agent_path))
self.assertFalse(os.path.exists(agent_path + ".zip"))
# Ensure kept agent directories and packages remain
for agent in kept_agents:
agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version))
self.assertTrue(os.path.exists(agent_path))
self.assertTrue(os.path.exists(agent_path + ".zip"))
def _test_run_latest(self, mock_child=None, mock_time=None, child_args=None):
if mock_child is None:
mock_child = ChildMock()
if mock_time is None:
mock_time = TimeMock()
with patch('azurelinuxagent.ga.update.subprocess.Popen', return_value=mock_child) as mock_popen:
with patch('time.time', side_effect=mock_time.time):
with patch('time.sleep', side_effect=mock_time.sleep):
self.update_handler.run_latest(child_args=child_args)
agent_calls = [args[0] for (args, _) in mock_popen.call_args_list if
"run-exthandlers" in ''.join(args[0])]
self.assertEqual(1, len(agent_calls),
"Expected a single call to the latest agent; got: {0}. All mocked calls: {1}".format(
agent_calls, mock_popen.call_args_list))
return mock_popen.call_args
def test_run_latest(self):
self.prepare_agents()
agent = self.update_handler.get_latest_agent()
args, kwargs = self._test_run_latest()
args = args[0]
cmds = textutil.safe_shlex_split(agent.get_agent_cmd())
if cmds[0].lower() == "python":
cmds[0] = sys.executable
self.assertEqual(args, cmds)
self.assertTrue(len(args) > 1)
self.assertRegex(args[0], r"^(/.*/python[\d.]*)$", "The command doesn't contain full python path")
self.assertEqual("-run-exthandlers", args[len(args) - 1])
self.assertEqual(True, 'cwd' in kwargs)
self.assertEqual(agent.get_agent_dir(), kwargs['cwd'])
self.assertEqual(False, '\x00' in cmds[0])
def test_run_latest_passes_child_args(self):
self.prepare_agents()
agent = self.update_handler.get_latest_agent() # pylint: disable=unused-variable
args, kwargs = self._test_run_latest(child_args="AnArgument") # pylint: disable=unused-variable
args = args[0]
self.assertTrue(len(args) > 1)
self.assertRegex(args[0], r"^(/.*/python[\d.]*)$", "The command doesn't contain full python path")
self.assertEqual("AnArgument", args[len(args) - 1])
def test_run_latest_polls_and_waits_for_success(self):
mock_child = ChildMock(return_value=None)
mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL / 3)
self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
self.assertEqual(2, mock_child.poll.call_count)
self.assertEqual(1, mock_child.wait.call_count)
def test_run_latest_polling_stops_at_success(self):
mock_child = ChildMock(return_value=0)
mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL / 3)
self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
self.assertEqual(1, mock_child.poll.call_count)
self.assertEqual(0, mock_child.wait.call_count)
def test_run_latest_polling_stops_at_failure(self):
mock_child = ChildMock(return_value=42)
mock_time = TimeMock()
self._test_run_latest(mock_child=mock_child, mock_time=mock_time)
self.assertEqual(1, mock_child.poll.call_count)
self.assertEqual(0, mock_child.wait.call_count)
def test_run_latest_polls_frequently_if_installed_is_latest(self):
mock_child = ChildMock(return_value=0) # pylint: disable=unused-variable
mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL / 2)
self._test_run_latest(mock_time=mock_time)
self.assertEqual(1, mock_time.sleep_interval)
def test_run_latest_polls_every_second_if_installed_not_latest(self):
self.prepare_agents()
mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL / 2)
self._test_run_latest(mock_time=mock_time)
self.assertEqual(1, mock_time.sleep_interval)
def test_run_latest_defaults_to_current(self):
self.assertEqual(None, self.update_handler.get_latest_agent())
args, kwargs = self._test_run_latest()
self.assertEqual(args[0], [sys.executable, "-u", sys.argv[0], "-run-exthandlers"])
self.assertEqual(True, 'cwd' in kwargs)
self.assertEqual(os.getcwd(), kwargs['cwd'])
def test_run_latest_forwards_output(self):
try:
tempdir = tempfile.mkdtemp()
stdout_path = os.path.join(tempdir, "stdout")
stderr_path = os.path.join(tempdir, "stderr")
with open(stdout_path, "w") as stdout:
with open(stderr_path, "w") as stderr:
saved_stdout, sys.stdout = sys.stdout, stdout
saved_stderr, sys.stderr = sys.stderr, stderr
try:
self._test_run_latest(mock_child=ChildMock(side_effect=faux_logger))
finally:
sys.stdout = saved_stdout
sys.stderr = saved_stderr
with open(stdout_path, "r") as stdout:
self.assertEqual(1, len(stdout.readlines()))
with open(stderr_path, "r") as stderr:
self.assertEqual(1, len(stderr.readlines()))
finally:
shutil.rmtree(tempdir, True)
def test_run_latest_nonzero_code_marks_failures(self):
# logger.add_logger_appender(logger.AppenderType.STDOUT)
self.prepare_agents()
latest_agent = self.update_handler.get_latest_agent()
self.assertTrue(latest_agent.is_available)
self.assertEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(0, latest_agent.error.failure_count)
with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent):
self._test_run_latest(mock_child=ChildMock(return_value=1))
self.assertTrue(latest_agent.is_blacklisted)
self.assertFalse(latest_agent.is_available)
self.assertNotEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(1, latest_agent.error.failure_count)
def test_run_latest_exception_blacklists(self):
self.prepare_agents()
latest_agent = self.update_handler.get_latest_agent()
self.assertTrue(latest_agent.is_available)
self.assertEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(0, latest_agent.error.failure_count)
with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent):
self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Force blacklisting")))
self.assertFalse(latest_agent.is_available)
self.assertTrue(latest_agent.error.is_blacklisted)
self.assertNotEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(1, latest_agent.error.failure_count)
def test_run_latest_exception_does_not_blacklist_if_terminating(self):
self.prepare_agents()
latest_agent = self.update_handler.get_latest_agent()
self.assertTrue(latest_agent.is_available)
self.assertEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(0, latest_agent.error.failure_count)
with patch('azurelinuxagent.ga.update.UpdateHandler.get_latest_agent', return_value=latest_agent):
self.update_handler.is_running = False
self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Attempt blacklisting")))
self.assertTrue(latest_agent.is_available)
self.assertFalse(latest_agent.error.is_blacklisted)
self.assertEqual(0.0, latest_agent.error.last_failure)
self.assertEqual(0, latest_agent.error.failure_count)
@patch('signal.signal')
def test_run_latest_captures_signals(self, mock_signal):
self._test_run_latest()
self.assertEqual(1, mock_signal.call_count)
@patch('signal.signal')
def test_run_latest_creates_only_one_signal_handler(self, mock_signal):
self.update_handler.signal_handler = "Not None"
self._test_run_latest()
self.assertEqual(0, mock_signal.call_count)
def _test_run(self, invocations=1, calls=1, enable_updates=False, sleep_interval=(6,)):
conf.get_autoupdate_enabled = Mock(return_value=enable_updates)
def iterator(*_, **__):
iterator.count += 1
if iterator.count <= invocations:
return True
return False
iterator.count = 0
fileutil.write_file(conf.get_agent_pid_file_path(), ustr(42))
with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler:
mock_handler.run_ext_handlers = Mock()
with patch('azurelinuxagent.ga.update.get_monitor_handler') as mock_monitor:
with patch.object(UpdateHandler, 'is_running') as mock_is_running:
mock_is_running.__get__ = Mock(side_effect=iterator)
with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler') as mock_ra_handler:
with patch('azurelinuxagent.ga.update.get_env_handler') as mock_env:
with patch('azurelinuxagent.ga.update.get_collect_logs_handler') as mock_collect_logs:
with patch('azurelinuxagent.ga.update.get_send_telemetry_events_handler') as mock_telemetry_send_events:
with patch('azurelinuxagent.ga.update.get_collect_telemetry_events_handler') as mock_event_collector:
with patch('azurelinuxagent.ga.update.initialize_event_logger_vminfo_common_parameters'):
with patch('azurelinuxagent.ga.update.is_log_collection_allowed', return_value=True):
with patch('time.sleep') as sleep_mock:
with patch('sys.exit') as mock_exit:
if isinstance(os.getppid, MagicMock):
self.update_handler.run()
else:
with patch('os.getppid', return_value=42):
self.update_handler.run()
self.assertEqual(1, mock_handler.call_count)
self.assertEqual(calls, len([c for c in [call[0] for call in mock_handler.return_value.method_calls] if c == 'run']))
self.assertEqual(1, mock_ra_handler.call_count)
self.assertEqual(calls, len(mock_ra_handler.return_value.method_calls))
if calls > 0:
self.assertEqual(sleep_interval, sleep_mock.call_args[0])
self.assertEqual(1, mock_monitor.call_count)
self.assertEqual(1, mock_env.call_count)
self.assertEqual(1, mock_collect_logs.call_count)
self.assertEqual(1, mock_telemetry_send_events.call_count)
self.assertEqual(1, mock_event_collector.call_count)
self.assertEqual(1, mock_exit.call_count)
def test_run(self):
self._test_run()
def test_run_stops_if_update_available(self):
self.update_handler._check_and_download_agent_if_upgrade_available = Mock(return_value=True)
self._test_run(invocations=0, calls=0, enable_updates=True)
def test_run_stops_if_orphaned(self):
with patch('os.getppid', return_value=1):
self._test_run(invocations=0, calls=0, enable_updates=True)
def test_run_clears_sentinel_on_successful_exit(self):
self._test_run()
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
def test_run_leaves_sentinel_on_unsuccessful_exit(self):
self.update_handler._check_and_download_agent_if_upgrade_available = Mock(side_effect=Exception)
self._test_run(invocations=1, calls=0, enable_updates=True)
self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path()))
def test_run_emits_restart_event(self):
self.update_handler._emit_restart_event = Mock()
self._test_run()
self.assertEqual(1, self.update_handler._emit_restart_event.call_count)
def test_set_agents_sets_agents(self):
self.prepare_agents()
self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])
self.assertTrue(len(self.update_handler.agents) > 0)
self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents))
def test_set_agents_sorts_agents(self):
self.prepare_agents()
self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()])
v = FlexibleVersion("100000")
for a in self.update_handler.agents:
self.assertTrue(v > a.version)
v = a.version
def test_set_sentinel(self):
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
self.update_handler._set_sentinel()
self.assertTrue(os.path.isfile(self.update_handler._sentinel_file_path()))
def test_set_sentinel_writes_current_agent(self):
self.update_handler._set_sentinel()
self.assertTrue(
fileutil.read_file(self.update_handler._sentinel_file_path()),
CURRENT_AGENT)
def test_shutdown(self):
self.update_handler._set_sentinel()
self.update_handler._shutdown()
self.assertFalse(self.update_handler.is_running)
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
def test_shutdown_ignores_missing_sentinel_file(self):
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
self.update_handler._shutdown()
self.assertFalse(self.update_handler.is_running)
self.assertFalse(os.path.isfile(self.update_handler._sentinel_file_path()))
def test_shutdown_ignores_exceptions(self):
self.update_handler._set_sentinel()
try:
with patch("os.remove", side_effect=Exception):
self.update_handler._shutdown()
except Exception as e: # pylint: disable=unused-variable
self.assertTrue(False, "Unexpected exception") # pylint: disable=redundant-unittest-assert
def _test_upgrade_available(
self,
base_version=FlexibleVersion(AGENT_VERSION),
protocol=None,
versions=None,
count=20):
if protocol is None:
protocol = self._create_protocol(count=count, versions=versions)
self.update_handler.protocol_util = protocol
conf.get_autoupdate_gafamily = Mock(return_value=protocol.family)
return self.update_handler._check_and_download_agent_if_upgrade_available(protocol, base_version=base_version)
def test_upgrade_available_returns_true_on_first_use(self):
self.assertTrue(self._test_upgrade_available())
def test_upgrade_available_handles_missing_family(self):
data_file = mockwiredata.DATA_FILE.copy()
data_file["ext_conf"] = "wire/ext_conf_missing_family.xml"
with mock_wire_protocol(data_file) as protocol:
self.update_handler.protocol_util = protocol
with patch('azurelinuxagent.common.logger.warn') as mock_logger:
with patch('tests.ga.test_update.ProtocolMock.get_vmagent_pkgs', side_effect=ProtocolError):
self.assertFalse(self.update_handler._check_and_download_agent_if_upgrade_available(protocol, base_version=CURRENT_VERSION))
self.assertEqual(0, mock_logger.call_count)
def test_upgrade_available_includes_old_agents(self):
self.prepare_agents()
old_version = self.agent_versions()[-1]
old_count = old_version.version[-1]
self.replicate_agents(src_v=old_version, count=old_count, increment=-1)
all_count = len(self.agent_versions())
self.assertTrue(self._test_upgrade_available(versions=self.agent_versions()))
self.assertEqual(all_count, len(self.update_handler.agents))
def test_upgrade_available_purges_old_agents(self):
self.prepare_agents()
agent_count = self.agent_count()
self.assertEqual(20, agent_count)
agent_versions = self.agent_versions()[:3]
self.assertTrue(self._test_upgrade_available(versions=agent_versions))
self.assertEqual(len(agent_versions), len(self.update_handler.agents))
# Purging always keeps the running agent
if CURRENT_VERSION not in agent_versions:
agent_versions.append(CURRENT_VERSION)
self.assertEqual(agent_versions, self.agent_versions())
def test_update_available_returns_true_if_current_gets_blacklisted(self):
self.update_handler._is_version_eligible = Mock(return_value=False)
self.assertTrue(self._test_upgrade_available())
def test_upgrade_available_skips_if_too_frequent(self):
conf.get_autoupdate_frequency = Mock(return_value=10000)
self.update_handler.last_attempt_time = time.time()
self.assertFalse(self._test_upgrade_available())
def test_upgrade_available_skips_if_when_no_new_versions(self):
self.prepare_agents()
base_version = self.agent_versions()[0] + 1
self.update_handler._is_version_eligible = lambda x: x == base_version
self.assertFalse(self._test_upgrade_available(base_version=base_version))
def test_upgrade_available_skips_when_no_versions(self):
self.assertFalse(self._test_upgrade_available(protocol=ProtocolMock()))
def test_upgrade_available_skips_when_updates_are_disabled(self):
conf.get_autoupdate_enabled = Mock(return_value=False)
self.assertFalse(self._test_upgrade_available())
def test_upgrade_available_sorts(self):
self.prepare_agents()
self._test_upgrade_available()
v = FlexibleVersion("100000")
for a in self.update_handler.agents:
self.assertTrue(v > a.version)
v = a.version
def test_write_pid_file(self):
for n in range(1112):
fileutil.write_file(os.path.join(self.tmp_dir, str(n) + "_waagent.pid"), ustr(n + 1))
with patch('os.getpid', return_value=1112):
pid_files, pid_file = self.update_handler._write_pid_file()
self.assertEqual(1112, len(pid_files))
self.assertEqual("1111_waagent.pid", os.path.basename(pid_files[-1]))
self.assertEqual("1112_waagent.pid", os.path.basename(pid_file))
self.assertEqual(fileutil.read_file(pid_file), ustr(1112))
def test_write_pid_file_ignores_exceptions(self):
with patch('azurelinuxagent.common.utils.fileutil.write_file', side_effect=Exception):
with patch('os.getpid', return_value=42):
pid_files, pid_file = self.update_handler._write_pid_file()
self.assertEqual(0, len(pid_files))
self.assertEqual(None, pid_file)
@patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False)
def test_update_happens_when_extensions_disabled(self, _):
"""
        Although the extensions-enabled config is not checked before an update is found,
        this test attempts to ensure that behavior never changes.
"""
self.update_handler._check_and_download_agent_if_upgrade_available = Mock(return_value=True)
self._test_run(invocations=0, calls=0, enable_updates=True, sleep_interval=(300,))
@patch("azurelinuxagent.common.logger.info")
@patch("azurelinuxagent.ga.update.add_event")
def test_telemetry_heartbeat_creates_event(self, patch_add_event, patch_info, *_):
update_handler = get_update_handler()
mock_protocol = WireProtocol("foo.bar")
update_handler.last_telemetry_heartbeat = datetime.utcnow() - timedelta(hours=1)
update_handler._send_heartbeat_telemetry(mock_protocol)
self.assertEqual(1, patch_add_event.call_count)
self.assertTrue(any(call_args[0] == "[HEARTBEAT] Agent {0} is running as the goal state agent {1}"
for call_args in patch_info.call_args), "The heartbeat was not written to the agent's log")
@staticmethod
def _get_test_ext_handler_instance(protocol, name="OSTCExtensions.ExampleHandlerLinux", version="1.0.0"):
eh = ExtHandler(name=name)
eh.properties.version = version
return ExtHandlerInstance(eh, protocol)
def test_it_should_recreate_handler_env_on_service_startup(self):
iterations = 5
with _get_update_handler(iterations) as (update_handler, protocol):
update_handler.run(debug=True)
expected_handler = self._get_test_ext_handler_instance(protocol)
handler_env_file = expected_handler.get_env_file()
self.assertTrue(os.path.exists(expected_handler.get_base_dir()), "Extension not found")
# First iteration should install the extension handler and
# subsequent iterations should not recreate the HandlerEnvironment file
last_modification_time = os.path.getmtime(handler_env_file)
self.assertEqual(os.path.getctime(handler_env_file), last_modification_time,
"The creation time and last modified time of the HandlerEnvironment file dont match")
# Simulate a service restart by getting a new instance of the update handler and protocol and
        # re-running the update handler. Then, ensure that the HandlerEnvironment file is recreated with the eventsFolder
        # flag in the HandlerEnvironment.json file.
self._add_write_permission_to_goal_state_files()
with _get_update_handler(iterations) as (update_handler, protocol):
with patch("azurelinuxagent.common.agent_supported_feature._ETPFeature.is_supported", True):
update_handler.set_iterations(1)
update_handler.run(debug=True)
self.assertGreater(os.path.getmtime(handler_env_file), last_modification_time,
"HandlerEnvironment file didn't get overwritten")
with open(handler_env_file, 'r') as handler_env_content_file:
content = json.load(handler_env_content_file)
self.assertIn(HandlerEnvironment.eventsFolder, content[0][HandlerEnvironment.handlerEnvironment],
"{0} not found in HandlerEnv file".format(HandlerEnvironment.eventsFolder))
def test_it_should_not_setup_persistent_firewall_rules_if_EnableFirewall_is_disabled(self):
executed_firewall_commands = []
def _mock_popen(cmd, *args, **kwargs):
if 'firewall-cmd' in cmd:
executed_firewall_commands.append(cmd)
cmd = ["echo", "running"]
return _ORIGINAL_POPEN(cmd, *args, **kwargs)
with patch("azurelinuxagent.common.logger.info") as patch_info:
with _get_update_handler(iterations=1) as (update_handler, _):
with patch("azurelinuxagent.common.utils.shellutil.subprocess.Popen", side_effect=_mock_popen):
with patch('azurelinuxagent.common.conf.enable_firewall', return_value=False):
update_handler.run(debug=True)
self.assertEqual(0, len(executed_firewall_commands), "firewall-cmd should not be called at all")
self.assertTrue(any(
"Not setting up persistent firewall rules as OS.EnableFirewall=False" == args[0] for (args, _) in
patch_info.call_args_list), "Info not logged properly, got: {0}".format(patch_info.call_args_list))
def test_it_should_setup_persistent_firewall_rules_on_startup(self):
iterations = 1
executed_commands = []
def _mock_popen(cmd, *args, **kwargs):
if 'firewall-cmd' in cmd:
executed_commands.append(cmd)
cmd = ["echo", "running"]
return _ORIGINAL_POPEN(cmd, *args, **kwargs)
with _get_update_handler(iterations) as (update_handler, _):
with patch("azurelinuxagent.common.utils.shellutil.subprocess.Popen", side_effect=_mock_popen) as mock_popen:
with patch('azurelinuxagent.common.conf.enable_firewall', return_value=True):
with patch('azurelinuxagent.common.osutil.systemd.is_systemd', return_value=True):
update_handler.run(debug=True)
# Firewall-cmd should only be called 3 times - 1st to check if running, 2nd & 3rd for the QueryPassThrough cmd
self.assertEqual(3, len(executed_commands),
"The number of times firewall-cmd should be called is only 3; Executed firewall commands: {0}; All popen calls: {1}".format(
executed_commands, mock_popen.call_args_list))
self.assertEqual(PersistFirewallRulesHandler._FIREWALLD_RUNNING_CMD, executed_commands.pop(0),
"First command should be to check if firewalld is running")
self.assertTrue([FirewallCmdDirectCommands.QueryPassThrough in cmd for cmd in executed_commands],
"The remaining commands should only be for querying the firewall commands")
@contextlib.contextmanager
def _setup_test_for_ext_event_dirs_retention(self):
try:
with _get_update_handler(test_data=DATA_FILE_MULTIPLE_EXT) as (update_handler, protocol):
with patch("azurelinuxagent.common.agent_supported_feature._ETPFeature.is_supported", True):
update_handler.run(debug=True)
expected_events_dirs = glob.glob(os.path.join(conf.get_ext_log_dir(), "*", EVENTS_DIRECTORY))
no_of_extensions = protocol.mock_wire_data.get_no_of_plugins_in_extension_config()
# Ensure extensions installed and events directory created
                    self.assertEqual(len(expected_events_dirs), no_of_extensions, "Extension events directories don't match")
for ext_dir in expected_events_dirs:
self.assertTrue(os.path.exists(ext_dir), "Extension directory {0} not created!".format(ext_dir))
yield update_handler, expected_events_dirs
finally:
            # The TestUpdate.setUp() initializes self.tmp_dir to be used as a placeholder
            # for everything (event logger, status logger, conf.get_lib_dir() and more).
            # Since this test adds more data to that dir, make sure it is completely clean before exiting the test.
shutil.rmtree(self.tmp_dir, ignore_errors=True)
self.tmp_dir = None
def test_it_should_delete_extension_events_directory_if_extension_telemetry_pipeline_disabled(self):
# Disable extension telemetry pipeline and ensure events directory got deleted
with self._setup_test_for_ext_event_dirs_retention() as (update_handler, expected_events_dirs):
with patch("azurelinuxagent.common.agent_supported_feature._ETPFeature.is_supported", False):
self._add_write_permission_to_goal_state_files()
update_handler.run(debug=True)
for ext_dir in expected_events_dirs:
self.assertFalse(os.path.exists(ext_dir), "Extension directory {0} still exists!".format(ext_dir))
def test_it_should_retain_extension_events_directories_if_extension_telemetry_pipeline_enabled(self):
        # Rerun the update handler with the extension telemetry pipeline enabled to ensure we don't delete events directories
with self._setup_test_for_ext_event_dirs_retention() as (update_handler, expected_events_dirs):
self._add_write_permission_to_goal_state_files()
update_handler.run(debug=True)
for ext_dir in expected_events_dirs:
self.assertTrue(os.path.exists(ext_dir), "Extension directory {0} should exist!".format(ext_dir))
def test_it_should_recreate_extension_event_directories_for_existing_extensions_if_extension_telemetry_pipeline_enabled(self):
with self._setup_test_for_ext_event_dirs_retention() as (update_handler, expected_events_dirs):
# Delete existing events directory
for ext_dir in expected_events_dirs:
shutil.rmtree(ext_dir, ignore_errors=True)
self.assertFalse(os.path.exists(ext_dir), "Extension directory not deleted")
with patch("azurelinuxagent.common.agent_supported_feature._ETPFeature.is_supported", True):
self._add_write_permission_to_goal_state_files()
update_handler.run(debug=True)
for ext_dir in expected_events_dirs:
self.assertTrue(os.path.exists(ext_dir), "Extension directory {0} should exist!".format(ext_dir))
class TestAgentUpgrade(UpdateTestCase):
@contextlib.contextmanager
def create_conf_mocks(self, hotfix_frequency, normal_frequency):
# Disabling extension processing to speed up tests as this class deals with testing agent upgrades
with patch("azurelinuxagent.common.conf.get_extensions_enabled", return_value=False):
with patch("azurelinuxagent.common.conf.get_autoupdate_enabled", return_value=True):
with patch("azurelinuxagent.common.conf.get_autoupdate_frequency", return_value=0.001):
with patch("azurelinuxagent.common.conf.get_hotfix_upgrade_frequency",
return_value=hotfix_frequency):
with patch("azurelinuxagent.common.conf.get_normal_upgrade_frequency",
return_value=normal_frequency):
yield
@contextlib.contextmanager
def __get_update_handler(self, iterations=1, test_data=None, hotfix_frequency=1.0, normal_frequency=2.0,
reload_conf=None):
test_data = DATA_FILE if test_data is None else test_data
with _get_update_handler(iterations, test_data) as (update_handler, protocol):
def get_handler(url, **kwargs):
if reload_conf is not None:
reload_conf(url, protocol.mock_wire_data)
if HttpRequestPredicates.is_agent_package_request(url):
agent_pkg = load_bin_data(self._get_agent_file_name(), self._agent_zip_dir)
return ResponseMock(response=agent_pkg)
return protocol.mock_wire_data.mock_http_get(url, **kwargs)
protocol.set_http_handlers(http_get_handler=get_handler)
with self.create_conf_mocks(hotfix_frequency, normal_frequency):
with patch("azurelinuxagent.ga.update.add_event") as mock_telemetry:
yield update_handler, mock_telemetry
def __assert_exit_code_successful(self, exit_mock):
self.assertTrue(exit_mock.called, "The process should have exited")
exit_args, _ = exit_mock.call_args
self.assertEqual(exit_args[0], 0, "Exit code should be 0")
def __assert_upgrade_telemetry_emitted(self, mock_telemetry, upgrade_type=AgentUpgradeType.Normal):
upgrade_event_msgs = [kwarg['message'] for _, kwarg in mock_telemetry.call_args_list if
'{0} Agent upgrade discovered, updating to WALinuxAgent-99999.0.0.0 -- exiting'.format(
upgrade_type) in kwarg['message'] and kwarg[
'op'] == WALAEventOperation.AgentUpgrade]
self.assertEqual(1, len(upgrade_event_msgs), "Agent not upgraded properly")
def __assert_agent_directories_available(self, versions):
for version in versions:
self.assertTrue(os.path.exists(self.agent_dir(version)), "Agent directory {0} not found".format(version))
def __assert_no_agent_upgrade_telemetry(self, mock_telemetry):
self.assertEqual(0, len([kwarg['message'] for _, kwarg in mock_telemetry.call_args_list if
"Agent upgrade discovered, updating to" in kwarg['message'] and kwarg[
'op'] == WALAEventOperation.AgentUpgrade]), "Unwanted upgrade")
def test_it_should_upgrade_agent_on_process_start_if_auto_upgrade_enabled(self):
with self.__get_update_handler(iterations=10) as (update_handler, mock_telemetry):
update_handler.run(debug=True)
self.__assert_exit_code_successful(update_handler.exit_mock)
self.assertEqual(1, update_handler.get_iterations(), "Update handler should've exited after the first run")
self.__assert_agent_directories_available(versions=["99999.0.0.0"])
self.__assert_upgrade_telemetry_emitted(mock_telemetry)
def test_it_should_download_new_agents_and_not_auto_upgrade_if_not_permitted(self):
no_of_iterations = 10
data_file = DATA_FILE.copy()
data_file['ga_manifest'] = "wire/ga_manifest_no_upgrade.xml"
def reload_conf(url, mock_wire_data):
# This function reloads the conf mid-run to mimic an actual customer scenario
if HttpRequestPredicates.is_ga_manifest_request(url) and mock_wire_data.call_counts["manifest_of_ga.xml"] >= no_of_iterations/2:
reload_conf.call_count += 1
                # Ensure the first set of versions was downloaded as part of the first manifest
self.__assert_agent_directories_available(versions=["1.0.0", "1.1.0", "1.2.0"])
# As per our current agent upgrade model, we don't rely on an incarnation update to upgrade the agent. Mocking the same
mock_wire_data.data_files["ga_manifest"] = "wire/ga_manifest.xml"
mock_wire_data.reload()
reload_conf.call_count = 0
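        # reload_conf.call_count is kept as a function attribute so the test body below
        # can assert that the mid-run manifest swap in reload_conf() actually happened.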
with self.__get_update_handler(iterations=no_of_iterations, test_data=data_file, hotfix_frequency=10,
normal_frequency=10, reload_conf=reload_conf) as (update_handler, mock_telemetry):
update_handler.run(debug=True)
self.assertGreater(reload_conf.call_count, 0, "Ensure the conf reload was called")
self.__assert_exit_code_successful(update_handler.exit_mock)
self.assertEqual(no_of_iterations, update_handler.get_iterations(), "Update handler should've run its course")
# Ensure the new agent versions were also downloaded once the manifest was updated
self.__assert_agent_directories_available(versions=["2.0.0", "2.1.0", "99999.0.0.0"])
self.__assert_no_agent_upgrade_telemetry(mock_telemetry)
def test_it_should_upgrade_agent_in_given_time_window_if_permitted(self):
data_file = DATA_FILE.copy()
data_file['ga_manifest'] = "wire/ga_manifest_no_upgrade.xml"
def reload_conf(url, mock_wire_data):
# This function reloads the conf mid-run to mimic an actual customer scenario
if HttpRequestPredicates.is_ga_manifest_request(url) and mock_wire_data.call_counts["manifest_of_ga.xml"] >= 2:
reload_conf.call_count += 1
# Ensure no new agent available so far
self.assertFalse(os.path.exists(self.agent_dir("99999.0.0.0")), "New agent directory should not be found")
# As per our current agent upgrade model, we don't rely on an incarnation update to upgrade the agent. Mocking the same
mock_wire_data.data_files["ga_manifest"] = "wire/ga_manifest.xml"
mock_wire_data.reload()
reload_conf.call_count = 0
test_normal_frequency = 0.1
with self.__get_update_handler(iterations=50, test_data=data_file, reload_conf=reload_conf,
normal_frequency=test_normal_frequency) as (update_handler, mock_telemetry):
start_time = time.time()
update_handler.run(debug=True)
diff = time.time() - start_time
self.assertGreater(reload_conf.call_count, 0, "Ensure the conf reload was called")
self.__assert_exit_code_successful(update_handler.exit_mock)
self.assertGreaterEqual(update_handler.get_iterations(), 3,
"Update handler should've run at least until the new GA was available")
# A bare-bone check to ensure that the agent waited for the new agent at least for the preset frequency time
self.assertGreater(diff, test_normal_frequency, "The test run should be at least greater than the set frequency")
self.__assert_agent_directories_available(versions=["99999.0.0.0"])
self.__assert_upgrade_telemetry_emitted(mock_telemetry)
def test_it_should_not_auto_upgrade_if_auto_update_disabled(self):
with self.__get_update_handler(iterations=10) as (update_handler, mock_telemetry):
with patch("azurelinuxagent.common.conf.get_autoupdate_enabled", return_value=False):
update_handler.run(debug=True)
self.__assert_exit_code_successful(update_handler.exit_mock)
self.assertGreaterEqual(update_handler.get_iterations(), 10, "Update handler should've run 10 times")
self.__assert_no_agent_upgrade_telemetry(mock_telemetry)
self.assertFalse(os.path.exists(self.agent_dir("99999.0.0.0")),
"New agent directory should not be found")
def test_it_should_not_auto_upgrade_if_corresponding_time_not_elapsed(self):
# On Normal upgrade, should not upgrade if Hotfix time elapsed
no_of_iterations = 10
data_file = DATA_FILE.copy()
data_file['ga_manifest'] = "wire/ga_manifest_no_upgrade.xml"
def reload_conf(url, mock_wire_data):
# This function reloads the conf mid-run to mimic an actual customer scenario
if HttpRequestPredicates.is_ga_manifest_request(url) and mock_wire_data.call_counts["manifest_of_ga.xml"] >= no_of_iterations / 2:
reload_conf.call_count += 1
# As per our current agent upgrade model, we don't rely on an incarnation update to upgrade the agent. Mocking the same
mock_wire_data.data_files["ga_manifest"] = "wire/ga_manifest.xml"
mock_wire_data.reload()
reload_conf.call_count = 0
with self.__get_update_handler(iterations=no_of_iterations, test_data=data_file, hotfix_frequency=0.01,
normal_frequency=10, reload_conf=reload_conf) as (update_handler, mock_telemetry):
update_handler.run(debug=True)
self.assertGreater(reload_conf.call_count, 0, "Ensure the conf reload was called")
self.__assert_exit_code_successful(update_handler.exit_mock)
self.assertEqual(no_of_iterations, update_handler.get_iterations(), "Update handler didn't run completely")
self.__assert_no_agent_upgrade_telemetry(mock_telemetry)
upgrade_event_msgs = [kwarg['message'] for _, kwarg in mock_telemetry.call_args_list if
kwarg['op'] == WALAEventOperation.AgentUpgrade]
self.assertGreater(len([msg for msg in upgrade_event_msgs if
'Discovered new {0} upgrade WALinuxAgent-99999.0.0.0; Will upgrade on or after'.format(
AgentUpgradeType.Normal) in msg]), 0, "Error message not propagated properly")
@patch('azurelinuxagent.ga.update.get_collect_telemetry_events_handler')
@patch('azurelinuxagent.ga.update.get_send_telemetry_events_handler')
@patch('azurelinuxagent.ga.update.get_collect_logs_handler')
@patch('azurelinuxagent.ga.update.get_monitor_handler')
@patch('azurelinuxagent.ga.update.get_env_handler')
class MonitorThreadTest(AgentTestCase):
def setUp(self):
AgentTestCase.setUp(self)
self.event_patch = patch('azurelinuxagent.common.event.add_event')
currentThread().setName("ExtHandler")
protocol = Mock()
protocol.get_ext_handlers = Mock(return_value=(Mock(), Mock()))
self.update_handler = get_update_handler()
self.update_handler.protocol_util = Mock()
self.update_handler.protocol_util.get_protocol = Mock(return_value=protocol)
clear_singleton_instances(ProtocolUtil)
def _test_run(self, invocations=1):
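        # is_running is mocked with `iterator` below, which reports True for `invocations`
        # calls and then False, so UpdateHandler.run() executes exactly `invocations`
        # iterations of its main loop before returning.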
def iterator(*_, **__):
iterator.count += 1
if iterator.count <= invocations:
return True
return False
iterator.count = 0
with patch('os.getpid', return_value=42):
with patch.object(UpdateHandler, '_is_orphaned') as mock_is_orphaned:
mock_is_orphaned.__get__ = Mock(return_value=False)
with patch.object(UpdateHandler, 'is_running') as mock_is_running:
mock_is_running.__get__ = Mock(side_effect=iterator)
with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler'):
with patch('azurelinuxagent.ga.remoteaccess.get_remote_access_handler'):
with patch('azurelinuxagent.ga.update.initialize_event_logger_vminfo_common_parameters'):
with patch('azurelinuxagent.common.cgroupapi.CGroupsApi.cgroups_supported', return_value=False): # skip all cgroup stuff
with patch('azurelinuxagent.ga.update.is_log_collection_allowed', return_value=True):
with patch('time.sleep'):
with patch('sys.exit'):
self.update_handler.run()
def _setup_mock_thread_and_start_test_run(self, mock_thread, is_alive=True, invocations=0):
thread = MagicMock()
thread.run = MagicMock()
thread.is_alive = MagicMock(return_value=is_alive)
thread.start = MagicMock()
mock_thread.return_value = thread
self._test_run(invocations=invocations)
return thread
def test_start_threads(self, mock_env, mock_monitor, mock_collect_logs, mock_telemetry_send_events, mock_telemetry_collector):
def _get_mock_thread():
thread = MagicMock()
thread.run = MagicMock()
return thread
all_threads = [mock_telemetry_send_events, mock_telemetry_collector, mock_env, mock_monitor, mock_collect_logs]
for thread in all_threads:
thread.return_value = _get_mock_thread()
self._test_run(invocations=0)
for thread in all_threads:
self.assertEqual(1, thread.call_count)
self.assertEqual(1, thread().run.call_count)
def test_check_if_monitor_thread_is_alive(self, _, mock_monitor, *args): # pylint: disable=unused-argument
mock_monitor_thread = self._setup_mock_thread_and_start_test_run(mock_monitor, is_alive=True, invocations=1)
self.assertEqual(1, mock_monitor.call_count)
self.assertEqual(1, mock_monitor_thread.run.call_count)
self.assertEqual(1, mock_monitor_thread.is_alive.call_count)
self.assertEqual(0, mock_monitor_thread.start.call_count)
def test_check_if_env_thread_is_alive(self, mock_env, *args): # pylint: disable=unused-argument
mock_env_thread = self._setup_mock_thread_and_start_test_run(mock_env, is_alive=True, invocations=1)
self.assertEqual(1, mock_env.call_count)
self.assertEqual(1, mock_env_thread.run.call_count)
self.assertEqual(1, mock_env_thread.is_alive.call_count)
self.assertEqual(0, mock_env_thread.start.call_count)
def test_restart_monitor_thread_if_not_alive(self, _, mock_monitor, *args): # pylint: disable=unused-argument
mock_monitor_thread = self._setup_mock_thread_and_start_test_run(mock_monitor, is_alive=False, invocations=1)
self.assertEqual(1, mock_monitor.call_count)
self.assertEqual(1, mock_monitor_thread.run.call_count)
self.assertEqual(1, mock_monitor_thread.is_alive.call_count)
self.assertEqual(1, mock_monitor_thread.start.call_count)
def test_restart_env_thread_if_not_alive(self, mock_env, *args): # pylint: disable=unused-argument
mock_env_thread = self._setup_mock_thread_and_start_test_run(mock_env, is_alive=False, invocations=1)
self.assertEqual(1, mock_env.call_count)
self.assertEqual(1, mock_env_thread.run.call_count)
self.assertEqual(1, mock_env_thread.is_alive.call_count)
self.assertEqual(1, mock_env_thread.start.call_count)
def test_restart_monitor_thread(self, _, mock_monitor, *args): # pylint: disable=unused-argument
mock_monitor_thread = self._setup_mock_thread_and_start_test_run(mock_monitor, is_alive=False, invocations=1)
self.assertEqual(True, mock_monitor.called)
self.assertEqual(True, mock_monitor_thread.run.called)
self.assertEqual(True, mock_monitor_thread.is_alive.called)
self.assertEqual(True, mock_monitor_thread.start.called)
def test_restart_env_thread(self, mock_env, *args): # pylint: disable=unused-argument
mock_env_thread = self._setup_mock_thread_and_start_test_run(mock_env, is_alive=False, invocations=1)
self.assertEqual(True, mock_env.called)
self.assertEqual(True, mock_env_thread.run.called)
self.assertEqual(True, mock_env_thread.is_alive.called)
self.assertEqual(True, mock_env_thread.start.called)
class ChildMock(Mock):
def __init__(self, return_value=0, side_effect=None):
Mock.__init__(self, return_value=return_value, side_effect=side_effect)
self.poll = Mock(return_value=return_value, side_effect=side_effect)
self.wait = Mock(return_value=return_value, side_effect=side_effect)
class ProtocolMock(object):
def __init__(self, family="TestAgent", etag=42, versions=None, client=None):
self.family = family
self.client = client
self.call_counts = {
"get_vmagent_manifests": 0,
"get_vmagent_pkgs": 0,
"update_goal_state": 0
}
self.goal_state_is_stale = False
self.etag = etag
self.versions = versions if versions is not None else []
self.create_manifests()
self.create_packages()
def emulate_stale_goal_state(self):
self.goal_state_is_stale = True
def create_manifests(self):
self.agent_manifests = []
if len(self.versions) <= 0:
return
if self.family is not None:
manifest = VMAgentManifest(family=self.family)
for i in range(0, 10):
manifest.uris.append("https://nowhere.msft/agent/{0}".format(i))
self.agent_manifests.append(manifest)
def create_packages(self):
self.agent_packages = ExtHandlerPackageList()
if len(self.versions) <= 0:
return
for version in self.versions:
package = ExtHandlerPackage(str(version))
for i in range(0, 5):
package_uri = "https://nowhere.msft/agent_pkg/{0}".format(i)
package.uris.append(package_uri)
self.agent_packages.versions.append(package)
def get_protocol(self):
return self
def get_vmagent_manifests(self):
self.call_counts["get_vmagent_manifests"] += 1
if self.goal_state_is_stale:
self.goal_state_is_stale = False
raise ResourceGoneError()
return self.agent_manifests, self.etag
def get_vmagent_pkgs(self, manifest): # pylint: disable=unused-argument
self.call_counts["get_vmagent_pkgs"] += 1
if self.goal_state_is_stale:
self.goal_state_is_stale = False
raise ResourceGoneError()
return self.agent_packages
def update_goal_state(self):
self.call_counts["update_goal_state"] += 1
class ResponseMock(Mock):
def __init__(self, status=restutil.httpclient.OK, response=None, reason=None):
Mock.__init__(self)
self.status = status
self.reason = reason
self.response = response
def read(self):
return self.response
class TimeMock(Mock):
def __init__(self, time_increment=1):
Mock.__init__(self)
self.next_time = time.time()
self.time_call_count = 0
self.time_increment = time_increment
self.sleep_interval = None
def sleep(self, n):
self.sleep_interval = n
def time(self):
self.time_call_count += 1
current_time = self.next_time
self.next_time += self.time_increment
return current_time
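# TimeMock stands in for the time module in these tests: every call to time() advances
# an internal clock by time_increment, and sleep() only records the requested interval.
# A rough sketch of the observable behavior:
#
#   clock = TimeMock(time_increment=5)
#   t0 = clock.time()
#   t1 = clock.time()      # t1 == t0 + 5
#   clock.sleep(60)        # returns immediately; clock.sleep_interval == 60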
class TryUpdateGoalStateTestCase(HttpRequestPredicates, AgentTestCase):
"""
Tests for UpdateHandler._try_update_goal_state()
"""
def test_it_should_return_true_on_success(self):
update_handler = get_update_handler()
with mock_wire_protocol(mockwiredata.DATA_FILE) as protocol:
self.assertTrue(update_handler._try_update_goal_state(protocol), "try_update_goal_state should have succeeded")
def test_it_should_return_false_on_failure(self):
with mock_wire_protocol(mockwiredata.DATA_FILE) as protocol:
def http_get_handler(url, *_, **__):
if self.is_goal_state_request(url):
return HttpError('Exception to fake an error retrieving the goal state')
return None
protocol.set_http_handlers(http_get_handler=http_get_handler)
update_handler = get_update_handler()
self.assertFalse(update_handler._try_update_goal_state(protocol), "try_update_goal_state should have failed")
def test_it_should_update_the_goal_state(self):
update_handler = get_update_handler()
with mock_wire_protocol(mockwiredata.DATA_FILE) as protocol:
protocol.mock_wire_data.set_incarnation(12345)
# the first goal state should produce an update
update_handler._try_update_goal_state(protocol)
self.assertEqual(protocol.get_incarnation(), '12345', "The goal state was not updated (received unexpected incarnation)")
# no changes in the goal state should not produce an update
update_handler._try_update_goal_state(protocol)
self.assertEqual(protocol.get_incarnation(), '12345', "The goal state should not be updated (received unexpected incarnation)")
# a new goal state should produce an update
protocol.mock_wire_data.set_incarnation(6789)
update_handler._try_update_goal_state(protocol)
self.assertEqual(protocol.get_incarnation(), '6789', "The goal state was not updated (received unexpected incarnation)")
def test_it_should_log_errors_only_when_the_error_state_changes(self):
with mock_wire_protocol(mockwiredata.DATA_FILE) as protocol:
def http_get_handler(url, *_, **__):
if self.is_goal_state_request(url):
if fail_goal_state_request:
return HttpError('Exception to fake an error retrieving the goal state')
return None
protocol.set_http_handlers(http_get_handler=http_get_handler)
@contextlib.contextmanager
def create_log_and_telemetry_mocks():
with patch("azurelinuxagent.ga.update.logger", autospec=True) as logger_patcher:
with patch("azurelinuxagent.ga.update.add_event") as add_event_patcher:
yield logger_patcher, add_event_patcher
calls_to_strings = lambda calls: (str(c) for c in calls)
filter_calls = lambda calls, regex=None: (c for c in calls_to_strings(calls) if regex is None or re.match(regex, c))
logger_calls = lambda regex=None: [m for m in filter_calls(logger.method_calls, regex)] # pylint: disable=used-before-assignment,unnecessary-comprehension
warnings = lambda: logger_calls(r'call.warn\(.*An error occurred while retrieving the goal state.*')
periodic_warnings = lambda: logger_calls(r'call.periodic_warn\(.*Attempts to retrieve the goal state are failing.*')
success_messages = lambda: logger_calls(r'call.info\(.*Retrieving the goal state recovered from previous errors.*')
telemetry_calls = lambda regex=None: [m for m in filter_calls(add_event.mock_calls, regex)] # pylint: disable=used-before-assignment,unnecessary-comprehension
goal_state_events = lambda: telemetry_calls(r".*op='FetchGoalState'.*")
#
# Initially calls to retrieve the goal state are successful...
#
update_handler = get_update_handler()
fail_goal_state_request = False
with create_log_and_telemetry_mocks() as (logger, add_event):
update_handler._try_update_goal_state(protocol)
lc = logger_calls()
self.assertTrue(len(lc) == 0, "A successful call should not produce any log messages: [{0}]".format(lc))
tc = telemetry_calls()
self.assertTrue(len(tc) == 0, "A successful call should not produce any telemetry events: [{0}]".format(tc))
#
# ... then an error happens...
#
fail_goal_state_request = True
with create_log_and_telemetry_mocks() as (logger, add_event):
update_handler._try_update_goal_state(protocol)
w = warnings()
pw = periodic_warnings()
self.assertEqual(1, len(w), "A failure should have produced a warning: [{0}]".format(w))
self.assertEqual(1, len(pw), "A failure should have produced a periodic warning: [{0}]".format(pw))
gs = goal_state_events()
self.assertTrue(len(gs) == 1 and 'is_success=False' in gs[0], "A failure should produce a telemetry event (success=false): [{0}]".format(gs))
#
# ... and errors continue happening...
#
with create_log_and_telemetry_mocks() as (logger, add_event):
update_handler._try_update_goal_state(protocol)
update_handler._try_update_goal_state(protocol)
update_handler._try_update_goal_state(protocol)
w = warnings()
pw = periodic_warnings()
self.assertTrue(len(w) == 0, "Subsequent failures should not produce warnings: [{0}]".format(w))
self.assertEqual(len(pw), 3, "Subsequent failures should produce periodic warnings: [{0}]".format(pw))
tc = telemetry_calls()
self.assertTrue(len(tc) == 0, "Subsequent failures should not produce any telemetry events: [{0}]".format(tc))
#
# ... until we finally succeed
#
fail_goal_state_request = False
with create_log_and_telemetry_mocks() as (logger, add_event):
update_handler._try_update_goal_state(protocol)
s = success_messages()
w = warnings()
pw = periodic_warnings()
self.assertEqual(len(s), 1, "Recovering after failures should have produced an info message: [{0}]".format(s))
self.assertTrue(len(w) == 0 and len(pw) == 0, "Recovering after failures should have not produced any warnings: [{0}] [{1}]".format(w, pw))
gs = goal_state_events()
self.assertTrue(len(gs) == 1 and 'is_success=True' in gs[0], "Recovering after failures should produce a telemetry event (success=true): [{0}]".format(gs))
def _create_update_handler():
"""
Creates an UpdateHandler in which agent updates are mocked as a no-op.
"""
update_handler = get_update_handler()
update_handler._check_and_download_agent_if_upgrade_available = Mock(return_value=False)
return update_handler
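# _create_update_handler() returns a real UpdateHandler whose agent-update check is a
# no-op, so tests can drive _process_goal_state()/_report_status() without triggering
# an agent download. Minimal sketch:
#
#   update_handler = _create_update_handler()
#   assert update_handler._check_and_download_agent_if_upgrade_available() is False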
@contextlib.contextmanager
def _mock_exthandlers_handler(extension_statuses=None):
"""
Creates an ExtHandlersHandler that doesn't actually handle any extensions, but that returns status for 1 extension.
The returned ExtHandlersHandler uses a mock WireProtocol, and both the run() and report_ext_handlers_status() are
mocked. The mock run() is a no-op. If a list of extension_statuses is given, successive calls to the mock
report_ext_handlers_status() returns a single extension with each of the statuses in the list. If extension_statuses
    is omitted, all calls to report_ext_handlers_status() return a single extension with a success status.
"""
def create_vm_status(extension_status):
vm_status = VMStatus(status="Ready", message="Ready")
vm_status.vmAgent.extensionHandlers = [ExtHandlerStatus()]
vm_status.vmAgent.extensionHandlers[0].extension_status = ExtensionStatus(name="TestExtension")
vm_status.vmAgent.extensionHandlers[0].extension_status.status = extension_status
return vm_status
with mock_wire_protocol(DATA_FILE) as protocol:
exthandlers_handler = ExtHandlersHandler(protocol)
exthandlers_handler.run = Mock()
if extension_statuses is None:
exthandlers_handler.report_ext_handlers_status = Mock(return_value=create_vm_status(ExtensionStatusValue.success))
else:
exthandlers_handler.report_ext_handlers_status = Mock(side_effect=[create_vm_status(s) for s in extension_statuses])
yield exthandlers_handler
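# Usage sketch for _mock_exthandlers_handler (the statuses below are illustrative):
# successive calls to report_ext_handlers_status() return one VMStatus per entry in
# the list, which lets a test walk a goal state from "transitioning" to "success"
# without installing real extensions.
#
#   with _mock_exthandlers_handler([ExtensionStatusValue.transitioning,
#                                   ExtensionStatusValue.success]) as handler:
#       first = handler.report_ext_handlers_status()   # extension still transitioning
#       second = handler.report_ext_handlers_status()  # extension succeeded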
class ProcessGoalStateTestCase(AgentTestCase):
"""
Tests for UpdateHandler._process_goal_state()
"""
def test_it_should_process_goal_state_only_on_new_goal_state(self):
with _mock_exthandlers_handler() as exthandlers_handler:
update_handler = _create_update_handler()
remote_access_handler = Mock()
remote_access_handler.run = Mock()
# process a goal state
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(1, exthandlers_handler.run.call_count, "exthandlers_handler.run() should have been called on the first goal state")
self.assertEqual(1, exthandlers_handler.report_ext_handlers_status.call_count, "exthandlers_handler.report_ext_handlers_status() should have been called on the first goal state")
self.assertEqual(1, remote_access_handler.run.call_count, "remote_access_handler.run() should have been called on the first goal state")
# process the same goal state
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(1, exthandlers_handler.run.call_count, "exthandlers_handler.run() should have not been called on the same goal state")
self.assertEqual(2, exthandlers_handler.report_ext_handlers_status.call_count, "exthandlers_handler.report_ext_handlers_status() should have been called on the same goal state")
self.assertEqual(1, remote_access_handler.run.call_count, "remote_access_handler.run() should not have been called on the same goal state")
# process a new goal state
exthandlers_handler.protocol.mock_wire_data.set_incarnation(999)
exthandlers_handler.protocol.client.update_goal_state()
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(2, exthandlers_handler.run.call_count, "exthandlers_handler.run() should have been called on a new goal state")
self.assertEqual(3, exthandlers_handler.report_ext_handlers_status.call_count, "exthandlers_handler.report_ext_handlers_status() should have been called on a new goal state")
self.assertEqual(2, remote_access_handler.run.call_count, "remote_access_handler.run() should have been called on a new goal state")
class ReportStatusTestCase(AgentTestCase):
"""
Tests for UpdateHandler._report_status()
"""
def setUp(self):
self.patches = [
patch("time.sleep", side_effect=lambda _: mock_sleep(0.001)),
patch("sys.exit")
]
for p in self.patches:
p.start()
return AgentTestCase.setUp(self)
def tearDown(self):
for p in self.patches:
p.stop()
return AgentTestCase.tearDown(self)
@staticmethod
@contextlib.contextmanager
def _mock_update_handler(iterations=1, **kwargs):
"""
Creates an UpdateHandler instance that will run n iterations.
Can be supplied keyword args for:
        * mock_wire_data_file: passed through to mock_wire_protocol.
            Defaults to mockwiredata.DATA_FILE_STATUS_BLOB.
        * http_<action>_handler, where action is get, put, or post: passed through
            to mock_wire_protocol as the corresponding HTTP handler.
        The returned UpdateHandler instance has its protocol mocked via mock_wire_protocol.
        (A usage sketch follows this method.)
"""
# Build the side_effect list for the UpdateHandler.is_running PropertyMock.
# Return True for the first n iterations followed by a single False to stop
# and then another True because the current UpdateHandler implementation
# does a __set__ during shutdown.
is_running_return_values = [True] * iterations + [False, True]
is_running_patch = patch.object(UpdateHandler, "is_running", PropertyMock(side_effect=is_running_return_values))
mock_http_get = kwargs.get("http_get_handler")
mock_http_put = kwargs.get("http_put_handler")
mock_http_post = kwargs.get("http_post_handler")
mock_wire_data_file = kwargs.get("mock_wire_data_file", mockwiredata.DATA_FILE_STATUS_BLOB)
try:
with mock_wire_protocol(mock_wire_data_file, mock_http_get, mock_http_post, mock_http_put) as protocol:
update_handler = get_update_handler()
update_handler.protocol_util.get_protocol = Mock(return_value=protocol)
is_running_patch.start()
yield update_handler
finally:
is_running_patch.stop()
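    # Usage sketch for _mock_update_handler (my_http_get below is hypothetical): the
    # mocked UpdateHandler runs `iterations` passes of its main loop against
    # mock_wire_protocol, and any http_get/put/post handler supplied via kwargs is
    # forwarded to the protocol mock.
    #
    #   def my_http_get(url, **kwargs):
    #       return None  # fall through to the mock wire data
    #
    #   with ReportStatusTestCase._mock_update_handler(iterations=2, http_get_handler=my_http_get) as update_handler:
    #       update_handler.run(debug=True)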
@staticmethod
def _fail_goal_state_fetch(url, **_):
"""
        For each goal state requested, returns values in order before failing with an
        HttpError. This is useful for getting the agent into a specific state before
        causing a failure.
        Relies on the caller populating this function's return_vals attribute with a
        list of values to be returned in order. Any `None` in the list causes the mock
        wire data to be queried and returned, and thus acts as a default.
        (A usage sketch follows this method.)
"""
if not HttpRequestPredicates.is_goal_state_request(url):
# url does not represent a request for a goal state; return None so
# that the mock_wire_protocol will return whatever data is in the mock
# wire data object (as per the mock_wire_protocol's docstring).
return None
try:
return ReportStatusTestCase._fail_goal_state_fetch.return_vals.pop()
except IndexError:
raise HttpError()
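    # Usage sketch for _fail_goal_state_fetch: the caller seeds its return_vals
    # attribute, each goal state request pops one entry (a None falls back to the
    # mock wire data), and an exhausted list raises HttpError:
    #
    #   ReportStatusTestCase._fail_goal_state_fetch.return_vals = [None, None, None]
    #   # ... run the update handler; the next goal state request after those will fail ...
    #   del ReportStatusTestCase._fail_goal_state_fetch.return_vals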
def test_update_handler_should_report_status_even_on_failed_goal_state_fetch(self):
try:
# Returning None forces the mock wire data to return the contents in the static
# files, as documented in mock_wire_protocol's docstring. We return thrice:
# once for protocol initialization, once for HostGAPlugin initialization,
# and once for the initial call in run().
# TODO: This test has too much knowledge of the protocol with the wireserver; rewrite it
            # at the level of UpdateHandler._process_goal_state, which is where the tested
# logic resides.
#
# TODO: For the same reason, the test below (commented out) needs to be rewritten
ReportStatusTestCase._fail_goal_state_fetch.return_vals = [None, None, None]
with ReportStatusTestCase._mock_update_handler(http_get_handler=ReportStatusTestCase._fail_goal_state_fetch) as update_handler:
update_handler.run(debug=True)
mock_protocol = update_handler.protocol_util.get_protocol()
self.assertEqual(mock_protocol.mock_wire_data.call_counts['/StatusBlob'], 1,
"Expected a single status blob to be uploaded")
finally:
# clean up the static variable
del ReportStatusTestCase._fail_goal_state_fetch.return_vals
@skip_if_predicate_true(lambda: True, "See TODO comment in test_update_handler_should_report_status_even_on_failed_goal_state_fetch")
def test_update_handler_should_report_status_for_cached_goal_state_on_failed_fetch(self):
try:
            # Adds one return to the test above (test_update_handler_should_report_status_even_on_failed_goal_state_fetch).
            # The fourth (and last) return is to allow for the extensions to be processed once so that
# we will have extension status to test for.
ReportStatusTestCase._fail_goal_state_fetch.return_vals = [ None, None, None, None ]
with ReportStatusTestCase._mock_update_handler(iterations=2,
http_get_handler=ReportStatusTestCase._fail_goal_state_fetch) as update_handler:
update_handler.run(debug=True)
wire_data = update_handler.protocol_util.get_protocol().mock_wire_data
self.assertEqual(wire_data.call_counts['/StatusBlob'], 2,
"Expected two status blobs to be uploaded, one for each iteration of the run loop.")
latest_status_blob_str = wire_data.status_blobs[-1]
latest_status_blob = json.loads(latest_status_blob_str)
ext_handler_statuses = latest_status_blob.get('aggregateStatus', {}).get("handlerAggregateStatus")
self.assertEqual(1, len(ext_handler_statuses), "Expected status for a single extension")
expectedHandlerInfo = {
"handlerName": "OSTCExtensions.ExampleHandlerLinux",
"handlerVersion": "1.0.0"
}
for key, expected_val in expectedHandlerInfo.items():
actual_val = ext_handler_statuses[0].get(key)
msg = "Extension information '{0}' did not match the provided extension.".format(key)
self.assertEqual(actual_val, expected_val, msg)
finally:
# clean up the static variable
del ReportStatusTestCase._fail_goal_state_fetch.return_vals
def test_report_status_should_log_errors_only_once_per_goal_state(self):
update_handler = _create_update_handler()
with _mock_exthandlers_handler() as exthandlers_handler:
with patch("azurelinuxagent.ga.update.logger.warn") as logger_warn:
update_handler._report_status(exthandlers_handler, False)
self.assertEqual(0, logger_warn.call_count, "UpdateHandler._report_status() should not report WARNINGS when there are no errors")
with patch("azurelinuxagent.ga.update.ExtensionsSummary.__init__", return_value=Exception("TEST EXCEPTION")): # simulate an error during _report_status()
update_handler._report_status(exthandlers_handler, False)
update_handler._report_status(exthandlers_handler, False)
update_handler._report_status(exthandlers_handler, False)
self.assertEqual(1, logger_warn.call_count, "UpdateHandler._report_status() should report only 1 WARNING when there are multiple errors within the same goal state")
exthandlers_handler.protocol.mock_wire_data.set_incarnation(999)
update_handler._try_update_goal_state(exthandlers_handler.protocol)
update_handler._report_status(exthandlers_handler, True)
self.assertEqual(2, logger_warn.call_count, "UpdateHandler._report_status() should continue reporting errors after a new goal state")
class GoalStateIntervalTestCase(AgentTestCase):
def test_initial_goal_state_period_should_default_to_goal_state_period(self):
configuration_provider = conf.ConfigurationProvider()
test_file = os.path.join(self.tmp_dir, "waagent.conf")
with open(test_file, "w") as file_:
file_.write("Extensions.GoalStatePeriod=987654321\n")
conf.load_conf_from_file(test_file, configuration_provider)
self.assertEqual(987654321, conf.get_initial_goal_state_period(conf=configuration_provider))
def test_update_handler_should_use_the_default_goal_state_period(self):
update_handler = get_update_handler()
default = conf.get_int_default_value("Extensions.GoalStatePeriod")
        self.assertEqual(default, update_handler._goal_state_period, "The UpdateHandler is not using the default goal state period")
def test_update_handler_should_not_use_the_default_goal_state_period_when_extensions_are_disabled(self):
with patch('azurelinuxagent.common.conf.get_extensions_enabled', return_value=False):
update_handler = get_update_handler()
self.assertEqual(GOAL_STATE_PERIOD_EXTENSIONS_DISABLED, update_handler._goal_state_period, "Incorrect goal state period when extensions are disabled")
def test_the_default_goal_state_period_and_initial_goal_state_period_should_be_the_same(self):
update_handler = get_update_handler()
default = conf.get_int_default_value("Extensions.GoalStatePeriod")
        self.assertEqual(default, update_handler._goal_state_period, "The UpdateHandler is not using the default goal state period")
def test_update_handler_should_use_the_initial_goal_state_period_when_it_is_different_to_the_goal_state_period(self):
with patch('azurelinuxagent.common.conf.get_initial_goal_state_period', return_value=99999):
update_handler = get_update_handler()
self.assertEqual(99999, update_handler._goal_state_period, "Expected the initial goal state period")
def test_update_handler_should_use_the_initial_goal_state_period_until_the_goal_state_converges(self):
initial_goal_state_period, goal_state_period = 11111, 22222
with patch('azurelinuxagent.common.conf.get_initial_goal_state_period', return_value=initial_goal_state_period):
with patch('azurelinuxagent.common.conf.get_goal_state_period', return_value=goal_state_period):
with _mock_exthandlers_handler([ExtensionStatusValue.transitioning, ExtensionStatusValue.success]) as exthandlers_handler:
remote_access_handler = Mock()
update_handler = _create_update_handler()
self.assertEqual(initial_goal_state_period, update_handler._goal_state_period, "Expected the initial goal state period")
                    # the extension is transitioning, so we should still be using the initial goal state period
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(initial_goal_state_period, update_handler._goal_state_period, "Expected the initial goal state period when the extension is transitioning")
# the goal state converged (the extension succeeded), so we should switch to the regular goal state period
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(goal_state_period, update_handler._goal_state_period, "Expected the regular goal state period after the goal state converged")
def test_update_handler_should_switch_to_the_regular_goal_state_period_when_the_goal_state_does_not_converges(self):
initial_goal_state_period, goal_state_period = 11111, 22222
with patch('azurelinuxagent.common.conf.get_initial_goal_state_period', return_value=initial_goal_state_period):
with patch('azurelinuxagent.common.conf.get_goal_state_period', return_value=goal_state_period):
with _mock_exthandlers_handler([ExtensionStatusValue.transitioning, ExtensionStatusValue.transitioning]) as exthandlers_handler:
remote_access_handler = Mock()
update_handler = _create_update_handler()
self.assertEqual(initial_goal_state_period, update_handler._goal_state_period, "Expected the initial goal state period")
                    # the extension is transitioning, so we should still be using the initial goal state period
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(initial_goal_state_period, update_handler._goal_state_period, "Expected the initial goal state period when the extension is transitioning")
# a new goal state arrives before the current goal state converged (the extension is transitioning), so we should switch to the regular goal state period
exthandlers_handler.protocol.mock_wire_data.set_incarnation(100)
update_handler._process_goal_state(exthandlers_handler, remote_access_handler)
self.assertEqual(goal_state_period, update_handler._goal_state_period, "Expected the regular goal state period when the goal state does not converge")
class ExtensionsSummaryTestCase(AgentTestCase):
@staticmethod
def _create_extensions_summary(extension_statuses):
"""
Creates an ExtensionsSummary from an array of (extension name, extension status) tuples
"""
vm_status = VMStatus(status="Ready", message="Ready")
vm_status.vmAgent.extensionHandlers = [ExtHandlerStatus()] * len(extension_statuses)
for i in range(len(extension_statuses)):
vm_status.vmAgent.extensionHandlers[i].extension_status = ExtensionStatus(name=extension_statuses[i][0])
vm_status.vmAgent.extensionHandlers[0].extension_status.status = extension_statuses[i][1]
return ExtensionsSummary(vm_status)
def test_equality_operator_should_return_true_on_items_with_the_same_value(self):
summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.transitioning)])
summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.transitioning)])
self.assertTrue(summary1 == summary2, "{0} == {1} should be True".format(summary1, summary2))
def test_equality_operator_should_return_false_on_items_with_different_values(self):
summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.transitioning)])
summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.success)])
        self.assertFalse(summary1 == summary2, "{0} == {1} should be False".format(summary1, summary2))
summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success)])
summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.success)])
        self.assertFalse(summary1 == summary2, "{0} == {1} should be False".format(summary1, summary2))
def test_inequality_operator_should_return_true_on_items_with_different_values(self):
summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.transitioning)])
summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.success)])
self.assertTrue(summary1 != summary2, "{0} != {1} should be True".format(summary1, summary2))
summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success)])
summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.success)])
        self.assertTrue(summary1 != summary2, "{0} != {1} should be True".format(summary1, summary2))
def test_inequality_operator_should_return_false_on_items_with_same_value(self):
summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.transitioning)])
summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ExtensionStatusValue.success), ("Extension 2", ExtensionStatusValue.transitioning)])
self.assertFalse(summary1 != summary2, "{0} != {1} should be False".format(summary1, summary2))
if __name__ == '__main__':
unittest.main()
| [
[
[
121,
135
]
],
[
[
144,
154
],
[
3269,
3279
],
[
10148,
10158
],
[
76037,
76047
],
[
79676,
79686
],
[
80529,
80539
],
[
106778,
106788
],
[
111238,
111248
],
[
102082,
102092
]
],
[
[
162,
166
],
[
7485,
7489
],
[
7711,
7715
],
[
8942,
8946
],
[
9237,
9241
],
[
9802,
9806
],
[
76437,
76441
]
],
[
[
174,
178
],
[
10356,
10360
],
[
23997,
24001
],
[
73040,
73044
],
[
116653,
116657
]
],
[
[
186,
188
],
[
6848,
6850
],
[
6920,
6922
],
[
6977,
6979
],
[
7041,
7043
],
[
7433,
7435
],
[
7647,
7649
],
[
7730,
7732
],
[
7916,
7918
],
[
8158,
8160
],
[
8953,
8955
],
[
9247,
9249
],
[
9320,
9322
],
[
9692,
9694
],
[
9812,
9814
],
[
11010,
11012
],
[
12545,
12547
],
[
12588,
12590
],
[
16457,
16459
],
[
17049,
17051
],
[
17203,
17205
],
[
17320,
17322
],
[
21843,
21845
],
[
21929,
21931
],
[
21991,
21993
],
[
22348,
22350
],
[
22394,
22396
],
[
23254,
23256
],
[
23324,
23326
],
[
23747,
23749
],
[
23833,
23835
],
[
24413,
24415
],
[
24499,
24501
],
[
25304,
25306
],
[
25681,
25683
],
[
26088,
26090
],
[
26440,
26442
],
[
26972,
26974
],
[
29572,
29574
],
[
29923,
29925
],
[
30261,
30263
],
[
30868,
30870
],
[
31561,
31563
],
[
35927,
35929
],
[
36107,
36109
],
[
36903,
36905
],
[
37426,
37428
],
[
37541,
37543
],
[
37758,
37760
],
[
37873,
37875
],
[
37975,
37977
],
[
38045,
38047
],
[
38115,
38117
],
[
38185,
38187
],
[
38255,
38257
],
[
38411,
38413
],
[
38612,
38614
],
[
38842,
38844
],
[
38900,
38902
],
[
38959,
38961
],
[
39017,
39019
],
[
39166,
39168
],
[
39367,
39369
],
[
44561,
44563
],
[
45141,
45143
],
[
45489,
45491
],
[
45602,
45604
],
[
49667,
49669
],
[
49768,
49770
],
[
49825,
49827
],
[
49982,
49984
],
[
50082,
50084
],
[
50138,
50140
],
[
54427,
54429
],
[
54583,
54585
],
[
54641,
54643
],
[
60204,
60206
],
[
62388,
62390
],
[
62706,
62708
],
[
63668,
63670
],
[
63795,
63797
],
[
64276,
64278
],
[
64420,
64422
],
[
64601,
64603
],
[
68920,
68922
],
[
69210,
69212
],
[
69292,
69294
],
[
71659,
71661
],
[
71923,
71925
],
[
71987,
71989
],
[
72797,
72799
],
[
76447,
76449
],
[
76912,
76914
],
[
78072,
78074
],
[
78673,
78675
],
[
79179,
79181
],
[
79546,
79548
],
[
82537,
82539
],
[
88118,
88120
],
[
119235,
119237
],
[
86004,
86006
]
],
[
[
196,
198
],
[
102590,
102592
]
],
[
[
206,
212
],
[
7109,
7115
],
[
7233,
7239
],
[
9374,
9380
],
[
10873,
10879
],
[
11612,
11618
],
[
11674,
11680
],
[
12647,
12653
],
[
13192,
13198
],
[
13258,
13264
],
[
55413,
55419
],
[
77387,
77393
],
[
79103,
79109
]
],
[
[
220,
224
],
[
9029,
9033
],
[
9044,
9048
],
[
38447,
38451
],
[
38462,
38466
],
[
38477,
38481
],
[
38492,
38496
],
[
38655,
38659
],
[
38670,
38674
],
[
38685,
38689
],
[
38738,
38742
],
[
39202,
39206
],
[
39217,
39221
],
[
39232,
39236
],
[
39247,
39251
],
[
39410,
39414
],
[
39425,
39429
],
[
39440,
39444
],
[
39500,
39504
],
[
39515,
39519
],
[
39530,
39534
],
[
39545,
39549
]
],
[
[
232,
242
],
[
409,
419
]
],
[
[
250,
253
],
[
3235,
3238
],
[
51496,
51499
],
[
54298,
54301
],
[
54320,
54323
],
[
54827,
54830
],
[
54893,
54896
],
[
55083,
55086
],
[
55133,
55136
]
],
[
[
261,
269
],
[
6789,
6797
],
[
10299,
10307
],
[
54538,
54546
]
],
[
[
277,
281
],
[
40450,
40454
],
[
40759,
40763
],
[
41034,
41038
],
[
41283,
41287
],
[
67841,
67845
],
[
86665,
86669
],
[
86739,
86743
],
[
99132,
99136
]
],
[
[
289,
297
],
[
127967,
127975
]
],
[
[
305,
312
],
[
11082,
11089
]
],
[
[
334,
342
],
[
70637,
70645
]
],
[
[
344,
353
],
[
70657,
70666
]
],
[
[
376,
389
],
[
90774,
90787
]
],
[
[
391,
406
],
[
73627,
73642
],
[
74757,
74772
]
],
[
[
444,
456
],
[
5519,
5531
],
[
112283,
112295
]
],
[
[
493,
497
],
[
8966,
8970
],
[
17333,
17337
],
[
37439,
37443
],
[
37988,
37992
],
[
38058,
38062
],
[
38128,
38132
],
[
38198,
38202
],
[
38268,
38272
],
[
38855,
38859
],
[
38913,
38917
],
[
38972,
38976
],
[
39030,
39034
],
[
44170,
44174
],
[
46279,
46283
],
[
46710,
46714
],
[
58385,
58389
],
[
58669,
58673
],
[
65400,
65404
],
[
67736,
67740
],
[
68443,
68447
],
[
76460,
76464
],
[
119186,
119190
],
[
119396,
119400
],
[
119493,
119497
],
[
119699,
119703
],
[
120470,
120474
]
],
[
[
539,
555
],
[
76489,
76505
]
],
[
[
557,
575
],
[
82296,
82314
],
[
82944,
82962
],
[
89933,
89951
]
],
[
[
621,
634
],
[
66170,
66183
]
],
[
[
636,
647
],
[
30091,
30102
],
[
30716,
30727
],
[
31387,
31398
],
[
13616,
13627
],
[
16564,
16575
],
[
16690,
16701
],
[
22458,
22469
],
[
23393,
23404
],
[
24055,
24066
],
[
24725,
24736
],
[
26385,
26396
],
[
27778,
27789
],
[
28244,
28255
],
[
28872,
28883
]
],
[
[
649,
666
],
[
19796,
19813
],
[
19935,
19952
],
[
98298,
98315
],
[
98593,
98610
]
],
[
[
668,
677
],
[
114031,
114040
],
[
100169,
100178
],
[
101899,
101908
]
],
[
[
720,
724
],
[
36152,
36156
],
[
36948,
36952
],
[
45194,
45198
],
[
46311,
46315
],
[
46742,
46746
],
[
58701,
58705
],
[
68973,
68977
],
[
69379,
69383
]
],
[
[
783,
810
],
[
75662,
75689
]
],
[
[
866,
893
],
[
27218,
27245
]
],
[
[
895,
911
],
[
27263,
27279
],
[
27350,
27366
]
],
[
[
919,
952
],
[
27299,
27332
]
],
[
[
954,
972
],
[
5015,
5033
],
[
27388,
27406
],
[
28100,
28118
],
[
28705,
28723
]
],
[
[
1025,
1040
],
[
97403,
97418
]
],
[
[
1048,
1065
],
[
25506,
25523
],
[
26234,
26251
],
[
27541,
27558
],
[
29774,
29791
],
[
30308,
30325
],
[
30915,
30932
],
[
31608,
31625
],
[
32435,
32452
],
[
97807,
97824
]
],
[
[
1067,
1088
],
[
97667,
97688
]
],
[
[
1090,
1100
],
[
71165,
71175
]
],
[
[
1102,
1110
],
[
124630,
124638
],
[
107515,
107523
]
],
[
[
1112,
1128
],
[
124719,
124735
],
[
107604,
107620
]
],
[
[
1130,
1145
],
[
124884,
124899
],
[
107690,
107705
]
],
[
[
1195,
1207
],
[
33447,
33459
],
[
91131,
91143
]
],
[
[
1257,
1269
],
[
43079,
43091
],
[
70562,
70574
]
],
[
[
1311,
1319
],
[
8033,
8041
],
[
9008,
9016
],
[
11037,
11045
],
[
36087,
36095
],
[
36883,
36891
],
[
37796,
37804
],
[
38357,
38365
],
[
39112,
39120
],
[
45121,
45129
],
[
46259,
46267
],
[
46690,
46698
],
[
58649,
58657
],
[
63990,
63998
],
[
68900,
68908
],
[
69349,
69357
]
],
[
[
1321,
1329
],
[
98791,
98799
],
[
26178,
26186
],
[
27075,
27083
]
],
[
[
1331,
1339
],
[
51385,
51393
]
],
[
[
1398,
1413
],
[
65122,
65137
],
[
8273,
8288
],
[
10015,
10030
],
[
11403,
11418
],
[
11496,
11511
],
[
12214,
12229
],
[
13008,
13023
],
[
42762,
42777
],
[
63469,
63484
],
[
68682,
68697
]
],
[
[
1467,
1492
],
[
75850,
75875
]
],
[
[
1536,
1550
],
[
7454,
7468
]
],
[
[
1552,
1566
],
[
7668,
7682
]
],
[
[
1568,
1578
],
[
9144,
9154
],
[
9736,
9746
],
[
16652,
16662
],
[
44605,
44615
],
[
49711,
49721
],
[
50026,
50036
]
],
[
[
1580,
1597
],
[
10031,
10048
]
],
[
[
1605,
1618
],
[
12852,
12865
],
[
65138,
65151
],
[
6729,
6742
],
[
43784,
43797
]
],
[
[
1620,
1633
],
[
45841,
45854
],
[
64065,
64078
]
],
[
[
1635,
1650
],
[
47869,
47884
],
[
48824,
48839
],
[
66313,
66328
],
[
67302,
67317
],
[
67375,
67390
]
],
[
[
1694,
1712
],
[
107926,
107944
]
],
[
[
1714,
1732
],
[
71242,
71260
]
],
[
[
1734,
1752
],
[
73102,
73120
],
[
73146,
73164
],
[
73254,
73272
]
],
[
[
1754,
1774
],
[
108131,
108151
],
[
121501,
121521
],
[
121537,
121557
],
[
123095,
123115
],
[
123131,
123151
],
[
125253,
125273
],
[
125300,
125320
],
[
125427,
125447
],
[
125474,
125494
],
[
125794,
125814
],
[
125841,
125861
],
[
125968,
125988
],
[
126015,
126035
],
[
126215,
126235
],
[
126336,
126356
],
[
126383,
126403
],
[
126673,
126693
],
[
126720,
126740
],
[
126847,
126867
],
[
126894,
126914
],
[
127119,
127139
],
[
127240,
127260
],
[
127287,
127307
],
[
127570,
127590
],
[
127617,
127637
],
[
127744,
127764
],
[
127791,
127811
]
],
[
[
1813,
1823
],
[
9443,
9453
],
[
13393,
13403
],
[
16577,
16587
],
[
16703,
16713
],
[
16765,
16775
],
[
17783,
17793
],
[
18604,
18614
],
[
19092,
19102
],
[
19836,
19846
],
[
20402,
20412
],
[
20879,
20889
],
[
21278,
21288
],
[
21785,
21795
],
[
22290,
22300
],
[
22738,
22748
],
[
23196,
23206
],
[
23689,
23699
],
[
24355,
24365
],
[
24808,
24818
],
[
25610,
25620
],
[
26338,
26348
],
[
27648,
27658
],
[
29878,
29888
],
[
30412,
30422
],
[
31019,
31029
],
[
31712,
31722
],
[
32216,
32226
],
[
32539,
32549
],
[
41639,
41649
],
[
44659,
44669
],
[
63088,
63098
],
[
63402,
63412
]
],
[
[
1825,
1840
],
[
10649,
10664
],
[
13573,
13588
],
[
13629,
13644
],
[
13736,
13751
],
[
14210,
14225
]
],
[
[
1842,
1853
],
[
15093,
15104
],
[
15307,
15318
],
[
15652,
15663
]
],
[
[
1855,
1874
],
[
17079,
17098
]
],
[
[
1882,
1900
],
[
5102,
5120
],
[
33011,
33029
],
[
70517,
70535
],
[
90940,
90958
],
[
99703,
99721
],
[
100367,
100385
],
[
100588,
100606
],
[
103537,
103555
],
[
106635,
106653
],
[
112767,
112785
],
[
119660,
119678
],
[
120122,
120140
],
[
120431,
120449
],
[
120913,
120931
]
],
[
[
1902,
1922
],
[
37120,
37140
]
],
[
[
1924,
1944
],
[
37459,
37479
]
],
[
[
1946,
1962
],
[
17233,
17249
]
],
[
[
1964,
1984
],
[
34686,
34706
]
],
[
[
1992,
2016
],
[
40525,
40549
],
[
40833,
40857
],
[
41098,
41122
],
[
41358,
41382
]
],
[
[
2018,
2039
],
[
52583,
52604
],
[
52944,
52965
],
[
53677,
53698
],
[
53959,
53980
]
],
[
[
2041,
2078
],
[
120172,
120209
]
],
[
[
2080,
2093
],
[
58980,
58993
],
[
91457,
91470
],
[
91611,
91624
],
[
112254,
112267
]
],
[
[
2101,
2120
],
[
8897,
8916
]
],
[
[
2122,
2139
],
[
125048,
125065
]
],
[
[
2141,
2157
],
[
81934,
81950
],
[
90203,
90219
]
],
[
[
2191,
2209
],
[
4644,
4662
],
[
65900,
65918
],
[
99737,
99755
],
[
99985,
100003
],
[
100622,
100640
],
[
101663,
101681
],
[
107853,
107871
],
[
112635,
112653
]
],
[
[
2250,
2259
],
[
4588,
4597
],
[
80733,
80742
],
[
83701,
83710
],
[
85525,
85534
],
[
88444,
88453
],
[
107872,
107881
]
],
[
[
2261,
2283
],
[
76179,
76201
]
],
[
[
2308,
2321
],
[
6343,
6356
],
[
90620,
90633
],
[
99540,
99553
],
[
108372,
108385
],
[
110749,
110762
],
[
119055,
119068
],
[
124400,
124413
],
[
6467,
6480
],
[
7195,
7208
],
[
90665,
90678
],
[
111057,
111070
],
[
111185,
111198
]
],
[
[
2323,
2331
],
[
6990,
6998
]
],
[
[
2333,
2340
],
[
3258,
3265
]
],
[
[
2342,
2347
],
[
17588,
17593
],
[
18359,
18364
],
[
18429,
18434
],
[
18845,
18850
],
[
18915,
18920
],
[
19489,
19494
],
[
19559,
19564
],
[
20077,
20082
],
[
20147,
20152
],
[
20633,
20638
],
[
20703,
20708
],
[
21033,
21038
],
[
21103,
21108
],
[
21546,
21551
],
[
21616,
21621
],
[
22046,
22051
],
[
22116,
22121
],
[
22492,
22497
],
[
22562,
22567
],
[
22942,
22947
],
[
23012,
23017
],
[
23434,
23439
],
[
23504,
23509
],
[
24096,
24101
],
[
24166,
24171
],
[
24952,
24957
],
[
25022,
25027
],
[
25088,
25093
],
[
25731,
25736
],
[
25801,
25806
],
[
25867,
25872
],
[
26536,
26541
],
[
26606,
26611
],
[
26672,
26677
],
[
26730,
26735
],
[
29412,
29417
],
[
30023,
30028
],
[
30589,
30594
],
[
30650,
30655
],
[
31194,
31199
],
[
31255,
31260
],
[
31314,
31319
],
[
31887,
31892
],
[
31948,
31953
],
[
32007,
32012
],
[
37310,
37315
],
[
42908,
42913
],
[
47133,
47138
],
[
47577,
47582
],
[
47893,
47898
],
[
57871,
57876
],
[
58045,
58050
],
[
69778,
69783
],
[
70311,
70316
],
[
70360,
70365
],
[
90285,
90290
],
[
90358,
90363
],
[
90428,
90433
],
[
90489,
90494
],
[
90545,
90550
],
[
4801,
4806
],
[
4900,
4905
],
[
5002,
5007
],
[
5584,
5589
],
[
5675,
5680
],
[
28087,
28092
],
[
28692,
28697
],
[
32933,
32938
],
[
34735,
34740
],
[
35597,
35602
],
[
35655,
35660
],
[
36583,
36588
],
[
36971,
36976
],
[
46040,
46045
],
[
46334,
46339
],
[
46512,
46517
],
[
46765,
46770
],
[
50405,
50410
],
[
50514,
50519
],
[
50583,
50588
],
[
55850,
55855
],
[
56586,
56591
],
[
57380,
57385
],
[
58725,
58730
],
[
58874,
58879
],
[
58967,
58972
],
[
59127,
59132
],
[
59243,
59248
],
[
59340,
59345
],
[
59459,
59464
],
[
59600,
59605
],
[
59742,
59747
],
[
59876,
59881
],
[
60010,
60015
],
[
60102,
60107
],
[
60441,
60446
],
[
62169,
62174
],
[
64784,
64789
],
[
66017,
66022
],
[
66098,
66103
],
[
68999,
69004
],
[
69459,
69464
],
[
69558,
69563
],
[
72581,
72586
],
[
73679,
73684
],
[
73834,
73839
],
[
73950,
73955
],
[
74882,
74887
],
[
75008,
75013
],
[
75106,
75111
],
[
76255,
76260
],
[
77781,
77786
],
[
79257,
79262
],
[
79891,
79896
],
[
79989,
79994
],
[
80090,
80095
],
[
80198,
80203
],
[
80356,
80361
],
[
81491,
81496
],
[
87688,
87693
],
[
90718,
90723
],
[
91390,
91395
],
[
91444,
91449
],
[
91598,
91603
],
[
91758,
91763
],
[
91852,
91857
],
[
91953,
91958
],
[
92075,
92080
],
[
92233,
92238
],
[
92359,
92364
],
[
92429,
92434
],
[
110885,
110890
],
[
110959,
110964
],
[
112241,
112246
],
[
117742,
117747
],
[
118047,
118052
],
[
120012,
120017
],
[
120796,
120801
],
[
121236,
121241
],
[
121361,
121366
],
[
122830,
122835
],
[
122955,
122960
],
[
102179,
102184
],
[
102280,
102285
]
],
[
[
2349,
2362
],
[
25357,
25370
],
[
29625,
29638
],
[
81123,
81136
]
],
[
[
2364,
2368
],
[
96328,
96332
],
[
98754,
98758
],
[
99030,
99034
],
[
4760,
4764
],
[
33051,
33055
],
[
33094,
33098
],
[
33113,
33117
],
[
33121,
33125
],
[
33174,
33178
],
[
33238,
33242
],
[
35434,
35438
],
[
37508,
37512
],
[
37639,
37643
],
[
44200,
44204
],
[
58415,
58419
],
[
58850,
58854
],
[
59075,
59079
],
[
62021,
62025
],
[
62586,
62590
],
[
62860,
62864
],
[
65431,
65435
],
[
67586,
67590
],
[
67768,
67772
],
[
68473,
68477
],
[
70190,
70194
],
[
90831,
90835
],
[
90874,
90878
],
[
90893,
90897
],
[
90901,
90905
],
[
91005,
91009
],
[
91069,
91073
],
[
91552,
91556
],
[
91706,
91710
],
[
96401,
96405
],
[
96494,
96498
],
[
96571,
96575
],
[
98852,
98856
],
[
99087,
99091
],
[
106724,
106728
],
[
107989,
107993
],
[
108096,
108100
],
[
108237,
108241
],
[
108681,
108685
],
[
108728,
108732
],
[
112865,
112869
],
[
121636,
121640
],
[
123236,
123240
]
],
[
[
2370,
2379
],
[
4711,
4720
],
[
60216,
60225
],
[
92636,
92645
],
[
92669,
92678
],
[
92707,
92716
],
[
92763,
92772
],
[
93073,
93082
],
[
93110,
93119
]
],
[
[
2387,
2412
],
[
33421,
33446
],
[
91105,
91130
]
],
[
[
2414,
2424
],
[
5626,
5636
],
[
110927,
110937
]
],
[
[
2426,
2448
],
[
115460,
115482
]
],
[
[
2476,
2488
],
[
65789,
65801
],
[
99756,
99768
],
[
100004,
100016
],
[
100641,
100653
],
[
101682,
101694
],
[
112568,
112580
]
],
[
[
2538,
2559
],
[
99517,
99538
],
[
113579,
113600
],
[
81038,
81059
],
[
83939,
83960
],
[
85763,
85784
],
[
88682,
88703
]
],
[
[
2561,
2569
],
[
10277,
10285
],
[
10408,
10416
],
[
10568,
10576
],
[
14395,
14403
],
[
14464,
14472
],
[
14535,
14543
],
[
15748,
15756
],
[
15843,
15851
],
[
15881,
15889
],
[
15920,
15928
]
],
[
[
2649,
2660
]
],
[
[
2741,
2751
],
[
13697,
13707
],
[
13901,
13911
],
[
13972,
13982
],
[
14045,
14055
],
[
14171,
14181
],
[
16025,
16035
],
[
16122,
16132
],
[
16162,
16172
],
[
16203,
16213
]
],
[
[
2833,
2847
],
[
24007,
24021
]
],
[
[
3159,
3170
],
[
55016,
55027
]
],
[
[
3299,
3318
],
[
71383,
71402
],
[
72501,
72520
],
[
73755,
73774
],
[
74809,
74828
],
[
76149,
76168
],
[
80793,
80812
]
],
[
[
6328,
6342
],
[
13490,
13504
],
[
16304,
16318
],
[
32833,
32847
],
[
79653,
79667
],
[
6752,
6766
],
[
6861,
6875
],
[
6816,
6830
],
[
6929,
6943
],
[
7054,
7068
],
[
7247,
7261
],
[
7388,
7402
],
[
7602,
7616
],
[
7814,
7828
],
[
7933,
7947
],
[
8051,
8065
],
[
8175,
8189
],
[
8289,
8303
],
[
16350,
16364
],
[
32879,
32893
]
],
[
[
13470,
13489
]
],
[
[
16289,
16303
]
],
[
[
32822,
32832
]
],
[
[
79636,
79652
]
],
[
[
90602,
90619
]
],
[
[
96318,
96327
],
[
50314,
50323
],
[
52510,
52519
],
[
52874,
52883
],
[
53235,
53244
],
[
53572,
53581
],
[
54994,
55003
],
[
55989,
55998
],
[
56725,
56734
],
[
57570,
57579
]
],
[
[
96636,
96648
],
[
34591,
34603
],
[
68347,
68359
]
],
[
[
98741,
98753
],
[
25458,
25470
],
[
26158,
26170
],
[
27042,
27054
],
[
29726,
29738
],
[
81214,
81226
]
],
[
[
99021,
99029
],
[
50380,
50388
],
[
52559,
52567
],
[
52920,
52928
],
[
53282,
53290
],
[
53653,
53661
],
[
53935,
53943
]
],
[
[
99490,
99516
]
],
[
[
106497,
106519
],
[
108620,
108642
],
[
117635,
117657
],
[
121681,
121703
],
[
123281,
123303
]
],
[
[
106808,
106833
],
[
108539,
108564
],
[
117673,
117698
],
[
121474,
121499
],
[
123068,
123093
]
],
[
[
108347,
108371
]
],
[
[
110728,
110748
],
[
113924,
113944
],
[
114819,
114839
],
[
114914,
114934
],
[
114973,
114993
],
[
115398,
115418
],
[
115977,
115997
],
[
116080,
116100
],
[
116169,
116189
],
[
117476,
117496
]
],
[
[
119029,
119054
]
],
[
[
124374,
124399
],
[
125183,
125208
],
[
125357,
125382
],
[
125724,
125749
],
[
125898,
125923
],
[
126145,
126170
],
[
126266,
126291
],
[
126603,
126628
],
[
126777,
126802
],
[
127049,
127074
],
[
127170,
127195
],
[
127500,
127525
],
[
127674,
127699
]
]
] |
from ptypes import *
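# ptypes structure definitions for the GIF file format: header, logical screen
# descriptor, optional color tables, image descriptor, and the raw image data blocks.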
class Header(pstruct.type):
_fields_ = [
(dyn.block(3), 'Signature'),
(dyn.block(3), 'Version'),
]
class LogicalScreenDescriptor(pstruct.type):
class _Flags(pbinary.struct):
_fields_ = [(1, 'Global Color Table'), (3, 'Color Resolution'), (1, 'Sort'), (3, 'Size')]
def optional(self):
if self['Flags'].li['Global Color Table'] > 0:
return dyn.clone(ColorTable, length=pow(2, self['Flags']['Size'] + 1))
return dyn.clone(ColorTable, length=0)
_fields_ = [
(pint.uint16_t, 'Width'),
(pint.uint16_t, 'Height'),
(_Flags, 'Flags'),
(pint.uint8_t, 'BackgroundColorIndex'),
(pint.uint8_t, 'PixelAspectRatio'),
(optional, 'Global Color Table')
]
class Color(pstruct.type):
_fields_ = [
(pint.uint8_t, 'r'),
(pint.uint8_t, 'g'),
(pint.uint8_t, 'b'),
]
class ColorTable(parray.type):
length = 0
_object_ = Color
class ImageDescriptor(pstruct.type):
class _Flags(pbinary.struct):
_fields_ = [(1, 'Local Color Table'), (1, 'Interlace'), (1, 'Sort'), (2, 'Reserved'), (3, 'Size')]
def optional(self):
if self['Flags'].li['Local Color Table'] > 0:
return dyn.clone(ColorTable, length=pow(2, self['Flags']['Size'] + 1))
return dyn.clone(ColorTable, length=0)
_fields_ = [
(pint.uint8_t, 'Separator'),
(pint.uint16_t, 'Left'),
(pint.uint16_t, 'Top'),
(pint.uint16_t, 'Width'),
(pint.uint16_t, 'Height'),
(_Flags, 'Flags'),
(optional, 'Color Table')
]
class Trailer(pint.uint8_t): pass
# value == 0x3b
class ImageTableData_Chunk(pstruct.type):
_fields_ = [
(pint.uint8_t, 'CodeSize'),
(ptype.type, 'something')
]
class ImageData_Chunk(pstruct.type):
_fields_ = [
(pint.uint8_t, 'Block Size'),
(lambda s: dyn.block(int(s['Block Size'].li)), 'Data Values')
]
class ImageData( parray.type ):
length = 1
_object_ = ImageData_Chunk
def isTerminator(self, v):
if int(v['Block Size']) == 0:
return True
return False
class File(pstruct.type):
_fields_ = [
(Header, 'header'),
(LogicalScreenDescriptor, 'screen'),
(ImageDescriptor, 'image'),
(ImageData, 'data')
]
if __name__ == '__main__':
import ptypes,image.gif as gif
ptypes.setsource( ptypes.provider.file('./poc.gif') )
z = gif.File()
print(z.l)
| [
[
[
19,
20
],
[
35,
42
],
[
76,
79
],
[
113,
116
],
[
176,
183
],
[
208,
215
],
[
560,
564
],
[
594,
598
],
[
656,
660
],
[
704,
708
],
[
799,
806
],
[
840,
844
],
[
869,
873
],
[
898,
902
],
[
942,
948
],
[
1015,
1022
],
[
1047,
1054
],
[
1407,
1411
],
[
1444,
1448
],
[
1477,
1481
],
[
1509,
1513
],
[
1543,
1547
],
[
1651,
1655
],
[
1719,
1726
],
[
1760,
1764
],
[
1796,
1801
],
[
1850,
1857
],
[
1891,
1895
],
[
2014,
2020
],
[
2201,
2208
],
[
422,
425
],
[
501,
504
],
[
1269,
1272
],
[
1348,
1351
],
[
1939,
1942
]
],
[
[
28,
34
],
[
2242,
2248
]
],
[
[
152,
175
],
[
2270,
2293
]
],
[
[
793,
798
],
[
986,
991
]
],
[
[
931,
941
],
[
432,
442
],
[
511,
521
],
[
1279,
1289
],
[
1358,
1368
]
],
[
[
999,
1014
],
[
2315,
2330
]
],
[
[
1643,
1650
]
],
[
[
1698,
1718
]
],
[
[
1834,
1849
],
[
2059,
2074
]
],
[
[
2003,
2012
],
[
2351,
2360
]
],
[
[
2196,
2200
]
],
[
[
2415,
2421
],
[
2443,
2449
],
[
2461,
2467
]
],
[
[
2422,
2438
],
[
2506,
2509
]
],
[
[
2502,
2503
],
[
2527,
2528
]
]
] |
import dis
import re
import sys
import textwrap
import unittest
from test.support import cpython_only
from test.bytecode_helper import BytecodeTestCase
class TestTranforms(BytecodeTestCase):
def test_unot(self):
        # UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE
def unot(x):
if not x == 2:
del x
self.assertNotInBytecode(unot, 'UNARY_NOT')
self.assertNotInBytecode(unot, 'POP_JUMP_IF_FALSE')
self.assertInBytecode(unot, 'POP_JUMP_IF_TRUE')
def test_elim_inversion_of_is_or_in(self):
for line, cmp_op in (
('not a is b', 'is not',),
('not a in b', 'not in',),
('not a is not b', 'is',),
('not a not in b', 'in',),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'COMPARE_OP', cmp_op)
def test_global_as_constant(self):
# LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False
def f():
x = None
x = None
return x
def g():
x = True
return x
def h():
x = False
return x
for func, elem in ((f, None), (g, True), (h, False)):
self.assertNotInBytecode(func, 'LOAD_GLOBAL')
self.assertInBytecode(func, 'LOAD_CONST', elem)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertNotInBytecode(f, 'LOAD_GLOBAL')
self.assertInBytecode(f, 'LOAD_CONST', None)
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotInBytecode(f, elem)
for elem in ('JUMP_ABSOLUTE',):
self.assertInBytecode(f, elem)
def test_pack_unpack(self):
# On PyPy, "a, b = ..." is even more optimized, by removing
# the ROT_TWO. But the ROT_TWO is not removed if assigning
# to more complex expressions, so check that.
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a[1], b = a, b', 'ROT_TWO',),
('a, b[2], c = a, b, c', 'ROT_THREE',),
):
code = compile(line,'','single')
self.assertInBytecode(code, elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.assertNotInBytecode(code, 'UNPACK_TUPLE')
def test_folding_of_tuples_of_constants(self):
# On CPython, "a,b,c=1,2,3" turns into "a,b,c=<constant (1,2,3)>"
# but on PyPy, it turns into "a=1;b=2;c=3".
for line, elem in (
('a = 1,2,3', (1, 2, 3)),
('("a","b","c")', ('a', 'b', 'c')),
('(None, 1, None)', (None, 1, None)),
('((1, 2), 3, 4)', ((1, 2), 3, 4)),
):
code = compile(line,'','single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# Long tuples should be folded too.
code = compile(repr(tuple(range(10000))),'','single')
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# One LOAD_CONST for the tuple, one for the None return value
load_consts = [instr for instr in dis.get_instructions(code)
if instr.opname == 'LOAD_CONST']
self.assertEqual(len(load_consts), 2)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
def test_folding_of_lists_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_LIST should be folded to a tuple:
('a in [1,2,3]', (1, 2, 3)),
('a not in ["a","b","c"]', ('a', 'b', 'c')),
('a in [None, 1, None]', (None, 1, None)),
('a not in [(1, 2), 3, 4]', ((1, 2), 3, 4)),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_LIST')
def test_folding_of_sets_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_SET should be folded to a frozenset:
('a in {1,2,3}', frozenset({1, 2, 3})),
('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})),
('a in {None, 1, None}', frozenset({1, None})),
('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})),
('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})),
):
code = compile(line, '', 'single')
self.assertNotInBytecode(code, 'BUILD_SET')
self.assertInBytecode(code, 'LOAD_CONST', elem)
# Ensure that the resulting code actually works:
def f(a):
return a in {1, 2, 3}
def g(a):
return a not in {1, 2, 3}
self.assertTrue(f(3))
self.assertTrue(not f(4))
self.assertTrue(not g(3))
self.assertTrue(g(4))
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', 9), # chained fold
('"@"*4', '@@@@'), # check string ops
('a="abc" + "def"', 'abcdef'), # check string ops
('a = 3**4', 81), # binary power
('a = 3*4', 12), # binary multiply
('a = 13//4', 3), # binary floor divide
('a = 14%4', 2), # binary modulo
('a = 2+3', 5), # binary add
('a = 13-4', 9), # binary subtract
('a = (12,13)[1]', 13), # binary subscr
('a = 13 << 2', 52), # binary lshift
('a = 13 >> 2', 3), # binary rshift
('a = 13 & 7', 5), # binary and
('a = 13 ^ 7', 10), # binary xor
('a = 13 | 7', 15), # binary or
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('BINARY_'))
# Verify that unfoldables are skipped
code = compile('a=2+"b"', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 2)
self.assertInBytecode(code, 'LOAD_CONST', 'b')
# Verify that large sequences do not result from folding
code = compile('a="x"*10000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 10000)
self.assertNotIn("x"*10000, code.co_consts)
code = compile('a=1<<1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(1<<1000, code.co_consts)
# difference to CPython: PyPy allows slightly larger constants to be
# created
code = compile('a=2**10000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 10000)
self.assertNotIn(2**10000, code.co_consts)
@cpython_only # we currently not bother to implement that
def test_binary_subscr_on_unicode(self):
# valid code get optimized
code = compile('"foo"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 'f')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
code = compile('"\u0061\uffff"[1]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\uffff')
self.assertNotInBytecode(code,'BINARY_SUBSCR')
# With PEP 393, non-BMP char get optimized
code = compile('"\U00012345"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\U00012345')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
# invalid code doesn't get optimized
# out of range
code = compile('"fuu"[10]', '', 'single')
self.assertInBytecode(code, 'BINARY_SUBSCR')
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('-0.5', -0.5), # unary negative
('-0.0', -0.0), # -0.0
('-(1.0-1.0)', -0.0), # -0.0 after folding
('-0', 0), # -0
('~-2', 1), # unary invert
('+1', 1), # unary positive
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
# Check that -0.0 works after marshaling
def negzero():
return -(1.0-1.0)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
# Verify that unfoldables are skipped
for line, elem, opname in (
('-"abc"', 'abc', 'UNARY_NEGATIVE'),
('~"abc"', 'abc', 'UNARY_INVERT'),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertInBytecode(code, opname)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
self.assertNotInBytecode(f, 'LOAD_CONST', None)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 1)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
return true_value if cond else false_value
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 6)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
self.assertNotInBytecode(f, 'JUMP_FORWARD')
# There should be one jump for the while loop.
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'JUMP_ABSOLUTE']
self.assertEqual(len(returns), 1)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
def test_make_function_doesnt_bail(self):
def f():
def g()->1+1:
pass
return g
self.assertNotInBytecode(f, 'BINARY_ADD')
def test_constant_folding(self):
# Issue #11244: aggressive constant folding.
exprs = [
'3 * -5',
'-3 * 5',
'2 * (3 * 4)',
'(2 * 3) * 4',
'(-1, 2, 3)',
'(1, -2, 3)',
'(1, 2, -3)',
'(1, 2, -3) * 6',
'lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}',
]
for e in exprs:
code = compile(e, '', 'single')
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.assertFalse(instr.opname.startswith('BINARY_'))
self.assertFalse(instr.opname.startswith('BUILD_'))
class TestBuglets(unittest.TestCase):
def test_bug_11510(self):
# folded constant set optimization was commingled with the tuple
# unpacking optimization which would fail if the set had duplicate
# elements so that the set length was unexpected
def f():
x, y = {1, 1}
return x, y
with self.assertRaises(ValueError):
f()
if __name__ == "__main__":
unittest.main()
| [
[
[
7,
10
],
[
3395,
3398
],
[
6936,
6939
],
[
9344,
9347
],
[
9565,
9568
],
[
10227,
10230
],
[
10684,
10687
],
[
11318,
11321
],
[
11800,
11803
],
[
11966,
11969
],
[
12767,
12770
]
],
[
[
18,
20
]
],
[
[
28,
31
]
],
[
[
39,
47
]
],
[
[
55,
63
],
[
13020,
13028
],
[
13436,
13444
]
],
[
[
89,
101
],
[
7879,
7891
]
],
[
[
136,
152
],
[
174,
190
]
],
[
[
160,
173
]
],
[
[
13008,
13019
]
]
] |
# -*- coding: utf-8 -*-
__author__ = 'Matt Makai'
__email__ = '[email protected]'
__version__ = '0.1.0' | [
[
[
25,
35
]
],
[
[
51,
60
]
],
[
[
83,
94
]
]
] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
from .. import Parameter, Parameters, optimize_iminuit
pytest.importorskip("iminuit")
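# Quadratic, chi-square style objective with its minimum at (x_opt, y_opt, z_opt);
# the tests below check that the iminuit wrapper recovers these factor values.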
def fcn(parameters):
x = parameters["x"].value
y = parameters["y"].value
z = parameters["z"].value
x_opt, y_opt, z_opt = 2, 3e5, 4e-5
x_err, y_err, z_err = 0.2, 3e4, 4e-6
return ((x - x_opt) / x_err) ** 2 + ((y - y_opt) / y_err) ** 2 + ((z - z_opt) / z_err) ** 2
@pytest.fixture()
def pars():
x = Parameter("x", 2.1)
y = Parameter("y", 3.1, scale=1e5)
z = Parameter("z", 4.1, scale=1e-5)
return Parameters([x, y, z])
def test_iminuit_basic(pars):
factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)
assert info["success"]
assert_allclose(fcn(pars), 0, atol=1e-5)
# Check the result in parameters is OK
assert_allclose(pars["x"].value, 2, rtol=1e-3)
assert_allclose(pars["y"].value, 3e5, rtol=1e-3)
# Precision of estimate on "z" is very poor (0.040488). Why is it so bad?
assert_allclose(pars["z"].value, 4e-5, rtol=2e-2)
# Check that minuit sees the parameter factors correctly
assert_allclose(factors, [2, 3, 4], rtol=1e-3)
assert_allclose(minuit.values["par_000_x"], 2, rtol=1e-3)
assert_allclose(minuit.values["par_001_y"], 3, rtol=1e-3)
assert_allclose(minuit.values["par_002_z"], 4, rtol=1e-3)
def test_iminuit_frozen(pars):
pars["y"].frozen = True
factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)
assert info["success"]
assert_allclose(pars["x"].value, 2, rtol=1e-4)
assert_allclose(pars["y"].value, 3.1e5)
assert_allclose(pars["z"].value, 4.e-5, rtol=1e-4)
assert_allclose(fcn(pars), 0.111112, rtol=1e-5)
assert minuit.list_of_fixed_param() == ["par_001_y"]
def test_iminuit_limits(pars):
pars["y"].min = 301000
factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)
assert info["success"]
# Check the result in parameters is OK
assert_allclose(pars["x"].value, 2, rtol=1e-2)
assert_allclose(pars["y"].value, 301000, rtol=1e-3)
# Check that minuit sees the limit factors correctly
states = minuit.get_param_states()
assert not states[0]["has_limits"]
y = states[1]
assert y["has_limits"]
assert_allclose(y["lower_limit"], 3.01)
# The next assert can be added when we no longer test on iminuit 1.2
# See https://github.com/gammapy/gammapy/pull/1771
# assert states[1]["upper_limit"] is None
| [
[
[
71,
77
],
[
176,
182
],
[
499,
505
]
],
[
[
104,
119
],
[
808,
823
],
[
897,
912
],
[
948,
963
],
[
1079,
1094
],
[
1195,
1210
],
[
1246,
1261
],
[
1308,
1323
],
[
1370,
1385
],
[
1599,
1614
],
[
1650,
1665
],
[
1694,
1709
],
[
1749,
1764
],
[
2068,
2083
],
[
2119,
2134
],
[
2357,
2372
]
],
[
[
135,
144
],
[
536,
545
],
[
564,
573
],
[
603,
612
]
],
[
[
146,
156
],
[
646,
656
]
],
[
[
158,
174
],
[
728,
744
],
[
1518,
1534
],
[
1944,
1960
]
],
[
[
213,
216
],
[
754,
757
],
[
824,
827
],
[
1544,
1547
],
[
1765,
1768
],
[
1970,
1973
]
],
[
[
520,
524
]
],
[
[
674,
692
]
],
[
[
1434,
1453
]
],
[
[
1861,
1880
]
]
] |
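# Read a distance in metres, convert it to kilometres, and print a two-digit code
# chosen by which band the value falls into; anything beyond the last band prints '89'.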
m = int(input())
m /= 1000
if m < 0.1:
print('00')
elif 0.1 <= m and m <= 5:
m = str(int(10 * m))
if len(m) == 1:
m = '0' + m
print(m)
elif 6 <= m and m <= 30:
print(int(m) + 50)
elif 35 <= m and m <= 70:
print((int(m) - 30) // 5 + 80)
else:
print('89')
| [
[
[
0,
1
],
[
17,
18
]
],
[
[
85,
86
],
[
117,
118
],
[
144,
145
],
[
156,
157
]
],
[
[
134,
135
],
[
156,
157
]
]
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td
class Flow(nn.Module):
"""
Building both normalizing flows and neural flows.
Example:
>>> import stribor as st
>>> torch.manual_seed(123)
>>> dim = 2
>>> flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])
>>> x = torch.rand(1, dim)
>>> y, ljd = flow(x)
>>> y_inv, ljd_inv = flow.inverse(y)
Args:
base_dist (Type[torch.distributions]): Base distribution
transforms (List[st.flows]): List of invertible transformations
"""
def __init__(self, base_dist=None, transforms=[]):
super().__init__()
self.base_dist = base_dist
self.transforms = nn.ModuleList(transforms)
def forward(self, x, latent=None, mask=None, t=None, reverse=False, **kwargs):
"""
Args:
x (tensor): Input sampled from base density with shape (..., dim)
latent (tensor, optional): Conditional vector with shape (..., latent_dim)
Default: None
mask (tensor): Masking tensor with shape (..., 1)
Default: None
t (tensor, optional): Flow time end point. Default: None
reverse (bool, optional): Whether to perform an inverse. Default: False
Returns:
y (tensor): Output that follows target density (..., dim)
log_jac_diag (tensor): Log-Jacobian diagonal (..., dim)
"""
transforms = self.transforms[::-1] if reverse else self.transforms
_mask = 1 if mask is None else mask
log_jac_diag = torch.zeros_like(x).to(x)
for f in transforms:
if reverse:
x, ld = f.inverse(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
else:
x, ld = f.forward(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
log_jac_diag += ld * _mask
return x, log_jac_diag
def inverse(self, y, latent=None, mask=None, t=None, **kwargs):
""" Inverse of forward function with the same arguments. """
return self.forward(y, latent=latent, mask=mask, t=t, reverse=True, **kwargs)
def log_prob(self, x, **kwargs):
"""
Calculates log-probability of a sample.
Args:
x (tensor): Input with shape (..., dim)
Returns:
log_prob (tensor): Log-probability of the input with shape (..., 1)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need log-probability')
x, log_jac_diag = self.inverse(x, **kwargs)
log_prob = self.base_dist.log_prob(x) + log_jac_diag.sum(-1)
return log_prob.unsqueeze(-1)
def sample(self, num_samples, latent=None, mask=None, **kwargs):
"""
Transforms samples from the base to the target distribution.
Uses reparametrization trick.
Args:
num_samples (tuple or int): Shape of samples
latent (tensor): Latent conditioning vector with shape (..., latent_dim)
Returns:
x (tensor): Samples from target distribution with shape (*num_samples, dim)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need sampling')
if isinstance(num_samples, int):
num_samples = (num_samples,)
x = self.base_dist.rsample(num_samples)
x, log_jac_diag = self.forward(x, **kwargs)
return x
| [
[
[
7,
12
],
[
1665,
1670
]
],
[
[
21,
35
],
[
117,
119
],
[
758,
760
]
],
[
[
44,
68
]
],
[
[
77,
102
]
],
[
[
112,
116
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test external signer.
Verify that a blinkhashd node can use an external signer command.
See also wallet_signer.py for tests that require wallet context.
"""
import os
import platform
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class RPCSignerTest(BlinkhashTestFramework):
def mock_signer_path(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py')
if platform.system() == "Windows":
return "py " + path
else:
return path
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [
[],
[f"-signer={self.mock_signer_path()}", '-keypool=10'],
[f"-signer={self.mock_signer_path()}", '-keypool=10'],
["-signer=fake.py"],
]
def skip_test_if_missing_module(self):
self.skip_if_no_external_signer()
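    # Helpers that write/remove a "mock_result" file in the node's working directory;
    # the mock signer script presumably reads it to decide what to report back.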
def set_mock_result(self, node, res):
with open(os.path.join(node.cwd, "mock_result"), "w", encoding="utf8") as f:
f.write(res)
def clear_mock_result(self, node):
os.remove(os.path.join(node.cwd, "mock_result"))
def run_test(self):
self.log.debug(f"-signer={self.mock_signer_path()}")
assert_raises_rpc_error(-1, 'Error: restart blinkhashd with -signer=<cmd>',
self.nodes[0].enumeratesigners
)
# Handle script missing:
assert_raises_rpc_error(-1, 'execve failed: No such file or directory',
self.nodes[3].enumeratesigners
)
# Handle error thrown by script
self.set_mock_result(self.nodes[1], "2")
assert_raises_rpc_error(-1, 'RunCommandParseJSON error',
self.nodes[1].enumeratesigners
)
self.clear_mock_result(self.nodes[1])
self.set_mock_result(self.nodes[1], '0 [{"type": "trezor", "model": "trezor_t", "error": "fingerprint not found"}]')
assert_raises_rpc_error(-1, 'fingerprint not found',
self.nodes[1].enumeratesigners
)
self.clear_mock_result(self.nodes[1])
result = self.nodes[1].enumeratesigners()
assert_equal(len(result['signers']), 2)
assert_equal(result['signers'][0]["fingerprint"], "00000001")
assert_equal(result['signers'][0]["name"], "trezor_t")
if __name__ == '__main__':
RPCSignerTest().main()
| [
[
[
382,
384
],
[
644,
646
],
[
657,
659
],
[
673,
675
],
[
1265,
1267
],
[
1405,
1407
],
[
1415,
1417
]
],
[
[
392,
400
],
[
735,
743
]
],
[
[
444,
466
],
[
572,
594
]
],
[
[
505,
517
],
[
2444,
2456
],
[
2492,
2504
],
[
2562,
2574
]
],
[
[
523,
546
],
[
1549,
1572
],
[
1720,
1743
],
[
1943,
1966
],
[
2233,
2256
]
],
[
[
558,
571
],
[
2649,
2662
]
]
] |
import pandas as pd
import pathlib
from fairness.results import local_results_path
BASE_DIR = local_results_path()
PACKAGE_DIR = pathlib.Path(__file__).parents[2]
RAW_DATA_DIR = PACKAGE_DIR / 'data' / 'raw'
PROCESSED_DATA_DIR = BASE_DIR / 'data' / 'preprocessed' # Joosje: BASE_DIR used to be PACKAGE_DIR
RESULT_DIR = BASE_DIR / "results"
ANALYSIS_DIR = BASE_DIR / "analysis"
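# Base class for dataset descriptions: subclasses are expected to set attributes such as
# dataset_name, class_attr, sensitive_attrs and positive_class_val, which the accessors
# below expose and use to build file paths for raw, preprocessed and result data.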
class Data():
def __init__(self):
pass
def get_dataset_name(self):
"""
This is the stub name that will be used to generate the processed filenames and is the
assumed stub for the raw data filename.
"""
return self.dataset_name
def get_class_attribute(self):
"""
Returns the name of the class attribute to be used for classification.
"""
return self.class_attr
def get_positive_class_val(self, tag):
"""
Returns the value used in the dataset to indicate the positive classification choice.
"""
# FIXME this dependence between tags and metadata is bad; don't know how to fix it right now
if tag == 'numerical-binsensitive':
return 1
else:
return self.positive_class_val
def get_sensitive_attributes(self):
"""
Returns a list of the names of any sensitive / protected attribute(s) that will be used
for a fairness analysis and should not be used to train the model.
"""
return self.sensitive_attrs
def get_sensitive_attributes_with_joint(self):
"""
Same as get_sensitive_attributes, but also includes the joint sensitive attribute if there
is more than one sensitive attribute.
"""
# Joosje: skip joint
# if len(self.get_sensitive_attributes()) > 1:
# return self.get_sensitive_attributes() + ['-'.join(self.get_sensitive_attributes())]
return self.get_sensitive_attributes()
def get_privileged_class_names(self, tag):
"""
Returns a list in the same order as the sensitive attributes list above of the
privileged class name (exactly as it appears in the data) of the associated sensitive
attribute.
"""
# FIXME this dependence between tags and privileged class names is bad; don't know how to
# fix it right now
if tag == 'numerical-binsensitive':
return [1 for x in self.get_sensitive_attributes()]
else:
return self.privileged_class_names
def get_privileged_class_names_with_joint(self, tag):
"""
Same as get_privileged_class_names, but also includes the joint sensitive attribute if there
is more than one sensitive attribute.
"""
priv_class_names = self.get_privileged_class_names(tag)
if len(priv_class_names) > 1:
return priv_class_names + ['-'.join(str(v) for v in priv_class_names)]
return priv_class_names
def get_categorical_features(self):
"""
Returns a list of features that should be expanded to one-hot versions for
numerical-only algorithms. This should not include the protected features
or the outcome class variable.
"""
return self.categorical_features
def get_features_to_keep(self):
return self.features_to_keep
def get_missing_val_indicators(self):
return self.missing_val_indicators
def load_raw_dataset(self):
data_path = self.get_raw_filename()
data_frame = pd.read_csv(data_path, error_bad_lines=False,
na_values=self.get_missing_val_indicators(),
encoding = 'ISO-8859-1')
return data_frame
def get_raw_filename(self):
RAW_DATA_DIR.mkdir(parents=True, exist_ok=True)
return RAW_DATA_DIR / (self.get_dataset_name() + '.csv')
def get_filename(self, tag):
PROCESSED_DATA_DIR.mkdir(parents=True, exist_ok=True)
return PROCESSED_DATA_DIR / (self.get_dataset_name() + "_" + tag + '.csv')
def get_results_filename(self, sensitive_attr, tag):
RESULT_DIR.mkdir(parents=True, exist_ok=True)
return RESULT_DIR / (self.get_dataset_name() + "_" + sensitive_attr + "_" + tag + '.csv')
def get_param_results_filename(self, sensitive_attr, tag, algname):
RESULT_DIR.mkdir(parents=True, exist_ok=True)
return RESULT_DIR / (algname + '_' + self.get_dataset_name() + "_" + sensitive_attr + \
"_" + tag + '.csv')
def get_analysis_filename(self, sensitive_attr, tag):
ANALYSIS_DIR.mkdir(parents=True, exist_ok=True)
return ANALYSIS_DIR / (self.get_dataset_name() + "_" + sensitive_attr + "_" + tag + '.csv')
def data_specific_processing(self, dataframe):
"""
Takes a pandas dataframe and modifies it to do any data specific processing. This should
include any ordered categorical replacement by numbers. The resulting pandas dataframe is
returned.
"""
return dataframe
def handle_missing_data(self, dataframe):
"""
This method implements any data specific missing data processing. Any missing data
not replaced by values in this step will be removed by the general preprocessing
script.
"""
return dataframe
def get_class_balance_statistics(self, data_frame=None):
if data_frame is None:
data_frame = self.load_raw_dataset()
r = data_frame.groupby(self.get_class_attribute()).size()
return r
def get_sensitive_attribute_balance_statistics(self, data_frame=None):
if data_frame is None:
data_frame = self.load_raw_dataset()
return [data_frame.groupby(a).size()
for a in self.get_sensitive_attributes()]
##########################################################################
def get_results_data_frame(self, sensitive_attr, tag):
return pd.read_csv(self.get_results_filename(sensitive_attr, tag))
def get_param_results_data_frame(self, sensitive_attr, tag):
return pd.read_csv(self.get_param_results_filename(sensitive_attr, tag))
| [
[
[
7,
19
],
[
3523,
3525
],
[
5995,
5997
],
[
6136,
6138
]
],
[
[
27,
34
],
[
130,
137
]
],
[
[
64,
82
],
[
95,
113
]
],
[
[
84,
92
],
[
229,
237
],
[
319,
327
],
[
355,
363
]
],
[
[
116,
127
],
[
179,
190
]
],
[
[
164,
176
],
[
3772,
3784
],
[
3835,
3847
]
],
[
[
208,
226
],
[
3927,
3945
],
[
3996,
4014
]
],
[
[
306,
316
],
[
4130,
4140
],
[
4191,
4201
],
[
4355,
4365
],
[
4416,
4426
]
],
[
[
340,
352
],
[
4599,
4611
],
[
4662,
4674
]
],
[
[
385,
389
]
]
] |
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test import TestCase, override_settings
import mock
from rest_framework.exceptions import APIException, PermissionDenied
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.routers import SimpleRouter
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
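# Tests for the custom API exception handler: unlike stock DRF it must turn any
# exception into a JSON 500 response (with a traceback when DEBUG is on), while still
# propagating exceptions when DEBUG_PROPAGATE_EXCEPTIONS is set.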
class DummyViewSet(GenericViewSet):
"""Dummy test viewset that raises an exception when calling list()."""
def list(self, *args, **kwargs):
raise Exception('something went wrong')
test_exception = SimpleRouter()
test_exception.register('testexcept', DummyViewSet, base_name='test-exception')
@override_settings(ROOT_URLCONF=test_exception.urls)
class TestExceptionHandlerWithViewSet(TestCase):
# The test client connects to got_request_exception, so we need to mock it
# otherwise it would immediately re-raise the exception.
@mock.patch('olympia.api.exceptions.got_request_exception')
def test_view_exception(self, got_request_exception_mock):
url = reverse('test-exception-list')
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False, DEBUG=False):
response = self.client.get(url)
assert response.status_code == 500
assert response.data == {'detail': 'Internal Server Error'}
assert got_request_exception_mock.send.call_count == 1
assert got_request_exception_mock.send.call_args[0][0] == DummyViewSet
assert isinstance(
got_request_exception_mock.send.call_args[1]['request'], Request)
# The test client connects to got_request_exception, so we need to mock it
# otherwise it would immediately re-raise the exception.
@mock.patch('olympia.api.exceptions.got_request_exception')
def test_view_exception_debug(self, got_request_exception_mock):
url = reverse('test-exception-list')
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False, DEBUG=True):
response = self.client.get(url)
assert response.status_code == 500
data = response.data
assert set(data.keys()) == set(['detail', 'traceback'])
assert data['detail'] == 'Internal Server Error'
assert 'Traceback (most recent call last):' in data['traceback']
assert got_request_exception_mock.send.call_count == 1
assert got_request_exception_mock.send.call_args[0][0] == DummyViewSet
assert isinstance(
got_request_exception_mock.send.call_args[1]['request'], Request)
class TestExceptionHandler(TestCase):
def test_api_exception_handler_returns_response(self):
exception_handler = api_settings.EXCEPTION_HANDLER
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False):
try:
raise APIException()
except Exception as exc:
response = exception_handler(exc, {})
assert isinstance(response, Response)
assert response.status_code == 500
def test_exception_handler_returns_response_for_404(self):
exception_handler = api_settings.EXCEPTION_HANDLER
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False):
try:
raise Http404()
except Exception as exc:
response = exception_handler(exc, {})
assert isinstance(response, Response)
assert response.status_code == 404
def test_exception_handler_returns_response_for_403(self):
exception_handler = api_settings.EXCEPTION_HANDLER
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False):
try:
raise PermissionDenied()
except Exception as exc:
response = exception_handler(exc, {})
assert isinstance(response, Response)
assert response.status_code == 403
def test_non_api_exception_handler_returns_response(self):
# Regular DRF exception handler does not return a Response for non-api
# exceptions, but we do.
exception_handler = api_settings.EXCEPTION_HANDLER
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=False):
try:
raise Exception()
except Exception as exc:
response = exception_handler(exc, {})
assert isinstance(response, Response)
assert response.status_code == 500
def test_api_exception_handler_with_propagation(self):
exception_handler = api_settings.EXCEPTION_HANDLER
with self.assertRaises(APIException):
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
try:
raise APIException()
except Exception as exc:
exception_handler(exc, {})
def test_exception_handler_404_with_propagation(self):
exception_handler = api_settings.EXCEPTION_HANDLER
with self.assertRaises(Http404):
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
try:
raise Http404()
except Exception as exc:
exception_handler(exc, {})
def test_exception_handler_403_with_propagation(self):
exception_handler = api_settings.EXCEPTION_HANDLER
with self.assertRaises(PermissionDenied):
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
try:
raise PermissionDenied()
except Exception as exc:
exception_handler(exc, {})
def test_non_api_exception_handler_with_propagation(self):
# Regular DRF exception handler does not return a Response for non-api
# exceptions, but we do.
exception_handler = api_settings.EXCEPTION_HANDLER
with self.assertRaises(KeyError):
with self.settings(DEBUG_PROPAGATE_EXCEPTIONS=True):
try:
raise KeyError()
except Exception as exc:
exception_handler(exc, {})
| [
[
[
37,
44
],
[
1145,
1152
],
[
1950,
1957
]
],
[
[
69,
76
],
[
3329,
3336
],
[
5052,
5059
],
[
5174,
5181
]
],
[
[
101,
109
],
[
853,
861
],
[
2662,
2670
]
],
[
[
111,
128
],
[
763,
780
]
],
[
[
137,
141
],
[
1009,
1013
],
[
1808,
1812
]
],
[
[
181,
193
],
[
2893,
2905
],
[
4671,
4683
],
[
4798,
4810
]
],
[
[
195,
211
],
[
3760,
3776
],
[
5423,
5439
],
[
5554,
5570
]
],
[
[
247,
254
],
[
1653,
1660
],
[
2624,
2631
]
],
[
[
291,
299
],
[
3043,
3051
],
[
3474,
3482
],
[
3914,
3922
],
[
4459,
4467
]
],
[
[
335,
347
],
[
665,
677
]
],
[
[
384,
396
],
[
2760,
2772
],
[
3196,
3208
],
[
3627,
3639
],
[
4179,
4191
],
[
4608,
4620
],
[
4989,
5001
],
[
5360,
5372
],
[
5865,
5877
]
],
[
[
433,
447
],
[
469,
483
]
],
[
[
456,
468
],
[
718,
730
],
[
1544,
1556
],
[
2515,
2527
]
],
[
[
648,
662
],
[
680,
694
],
[
794,
808
]
],
[
[
821,
852
]
],
[
[
2641,
2661
]
]
] |
import os, sys, urllib.request
from tkinter import *
from tkinter.messagebox import *
__version__ = 3
__filename__ = "ImageRenaming"
__basename__ = os.path.basename(sys.argv[0])
__savepath__ = os.path.join(os.environ['APPDATA'], "QuentiumPrograms")
__iconpath__ = __savepath__ + "/{}.ico".format(__filename__)
try:urllib.request.urlopen("https://www.google.fr/", timeout=1); connection = True
except:connection = False
if not os.path.exists(__iconpath__):
try:os.mkdir(__savepath__)
except:pass
if connection == True:
try:urllib.request.urlretrieve("https://quentium.fr/+++PythonDL/{}.ico".format(__filename__), __iconpath__)
except:pass
if connection == True:
try:script_version = int(urllib.request.urlopen("https://quentium.fr/programs/index.php").read().decode().split(__filename__ + "<!-- Version: ")[1].split(" --></h2>")[0])
except:script_version = __version__
if script_version > __version__:
if os.path.exists(__iconpath__):popup = Tk(); popup.attributes("-topmost", 1); popup.iconbitmap(__iconpath__); popup.withdraw()
ask_update = askquestion(__filename__ + " V" + str(script_version), "Une mise à jour à été trouvée, souhaitez vous la télécharger puis l'éxécuter ?", icon="question")
if ask_update == "yes":
try:os.rename(__basename__, __filename__ + "-old.exe")
except:os.remove(__filename__ + "-old.exe"); os.rename(__basename__, __filename__ + "-old.exe")
if "-32" in str(__basename__):urllib.request.urlretrieve("https://quentium.fr/download.php?file={}-32.exe".format(__filename__), __filename__ + ".exe")
else:urllib.request.urlretrieve("https://quentium.fr/download.php?file={}.exe".format(__filename__), __filename__ + ".exe")
showwarning(__filename__, "Le programme va redémarrer pour fonctionner sous la nouvelle version.", icon="warning")
os.system("start " + __filename__ + ".exe"); os._exit(1)
__filename__ = __filename__ + " V" + str(__version__)
from datetime import datetime
from tkinter.filedialog import *
from tkinter import *
def start_rename():
directory = askdirectory()
if directory:
if askyesno(__filename__, "Êtes-vous sûr de renommer toutes les images dans ce dossier ? Cette action ne peux pas être annulée !"):
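            # Two-pass rename: first give every image a unique timestamp+size based name
            # (presumably to avoid collisions), then renumber them below as Image-NNNNN.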
files1 = [f for f in os.listdir(directory) if f[-4:].lower() in (".jpg",".JPG",".png",".PNG",".jpeg",".JPEG",".bmp",".gif")]
for (index, filename) in enumerate(files1):
file = directory + "/" + filename
extension = os.path.splitext(filename)[1]
if check_var.get() == 0:
time1 = os.path.getctime(file)
elif check_var.get() == 1:
time1 = os.path.getmtime(file)
time2 = datetime.fromtimestamp(time1)
time = time2.strftime("%Y%m%d%H%M%S%f")
newname = time + "_" + str(os.path.getsize(file)) + extension
os.rename(file, directory + "/" + newname)
files2 = [f for f in os.listdir(directory) if f[-4:].lower() in (".jpg",".JPG",".png",".PNG",".jpeg",".JPEG",".bmp",".gif")]
for (index, filename) in enumerate(files2):
file = directory + "/" + filename
extension = os.path.splitext(filename)[1]
newname = "Image-%05d%s" % (index + 1, extension)
if os.path.exists(newname):
continue
                os.rename(file, directory + "/" + newname)
imagerenaming.destroy()
os._exit(0)
else:
showwarning(__filename__, "Erreur : Aucun dossier n'a été sélectionné !")
imagerenaming = Tk()
width = 800
height = 500
imagerenaming.update_idletasks()
x = (imagerenaming.winfo_screenwidth() - width) // 2
y = (imagerenaming.winfo_screenheight() - height) // 2
imagerenaming.geometry("{}x{}+{}+{}".format(width , height, int(x), int(y)))
imagerenaming.resizable(width=False, height=False)
imagerenaming.configure(bg = "lightgray")
if os.path.exists(__iconpath__):
imagerenaming.iconbitmap(__iconpath__)
imagerenaming.title(__filename__)
Label(imagerenaming, text="Bienvenue dans le programme de renommage !", font="impact 30", fg="red", bg="lightgray").pack(pady=60)
check_var = IntVar()
check_var.set(0)
Radiobutton(imagerenaming, text="Date de création", variable=check_var, value=0, font="impact 20", bg="lightgray").pack(pady=10)
Radiobutton(imagerenaming, text="Date de modification", variable=check_var, value=1, font="impact 20", bg="lightgray").pack()
Button(imagerenaming, text="Renommer des images", command=start_rename, relief=GROOVE, width=25, font="impact 20", fg="black").pack(pady=50)
imagerenaming.mainloop()
| [
[
[
7,
9
],
[
149,
151
],
[
194,
196
],
[
207,
209
],
[
428,
430
],
[
466,
468
],
[
955,
957
],
[
1303,
1305
],
[
1373,
1375
],
[
1411,
1413
],
[
1901,
1903
],
[
1946,
1948
],
[
4081,
4083
],
[
2342,
2344
],
[
2580,
2582
],
[
2679,
2681
],
[
2773,
2775
],
[
2949,
2951
],
[
3000,
3002
],
[
3077,
3079
],
[
3315,
3317
],
[
3430,
3432
],
[
3529,
3531
],
[
3616,
3618
]
],
[
[
11,
14
],
[
166,
169
]
],
[
[
16,
30
],
[
316,
322
],
[
544,
550
],
[
721,
727
],
[
1504,
1510
],
[
1643,
1649
]
],
[
[
51,
52
]
],
[
[
84,
85
],
[
992,
994
],
[
1101,
1112
],
[
1774,
1785
]
],
[
[
87,
98
],
[
895,
906
],
[
931,
942
],
[
2000,
2011
]
],
[
[
103,
115
],
[
297,
309
],
[
619,
631
],
[
808,
820
],
[
1113,
1125
],
[
1327,
1339
],
[
1383,
1395
],
[
1435,
1447
],
[
1588,
1600
],
[
1603,
1615
],
[
1724,
1736
],
[
1739,
1751
],
[
1786,
1798
],
[
1922,
1934
],
[
1974,
1986
]
],
[
[
134,
146
],
[
1313,
1325
],
[
1421,
1433
],
[
1490,
1502
]
],
[
[
179,
191
],
[
265,
277
],
[
475,
487
]
],
[
[
250,
262
],
[
443,
455
],
[
634,
646
],
[
970,
982
],
[
1048,
1060
],
[
4096,
4108
],
[
4140,
4152
]
],
[
[
377,
387
],
[
512,
522
],
[
672,
682
]
],
[
[
402,
412
],
[
512,
522
],
[
672,
682
]
],
[
[
700,
714
],
[
914,
928
],
[
1139,
1153
]
],
[
[
878,
892
],
[
914,
928
],
[
1139,
1153
]
],
[
[
984,
989
],
[
998,
1003
],
[
1031,
1036
],
[
1063,
1068
]
],
[
[
1088,
1098
],
[
1266,
1276
]
],
[
[
1959,
1971
],
[
4174,
4186
],
[
2189,
2201
],
[
3658,
3670
]
],
[
[
2035,
2043
],
[
2820,
2828
]
],
[
[
2075,
2076
]
],
[
[
2097,
2098
],
[
3737,
3739
],
[
4188,
4193
],
[
4330,
4336
],
[
4356,
4367
],
[
4485,
4496
],
[
4611,
4617
],
[
4690,
4696
],
[
2136,
2148
],
[
2180,
2188
],
[
3646,
3657
]
],
[
[
2104,
2116
],
[
4669,
4681
]
],
[
[
3721,
3734
],
[
3767,
3780
],
[
3805,
3818
],
[
3858,
3871
],
[
3908,
3921
],
[
3985,
3998
],
[
4036,
4049
],
[
4115,
4128
],
[
4154,
4167
],
[
4194,
4207
],
[
4368,
4381
],
[
4497,
4510
],
[
4618,
4631
],
[
4752,
4765
],
[
3584,
3597
]
],
[
[
3742,
3747
],
[
3841,
3846
],
[
3952,
3957
]
],
[
[
3754,
3760
],
[
3895,
3901
],
[
3960,
3966
]
],
[
[
3800,
3801
],
[
3972,
3973
]
],
[
[
3853,
3854
],
[
3980,
3981
]
],
[
[
4318,
4327
],
[
4339,
4348
],
[
4418,
4427
],
[
4550,
4559
],
[
2629,
2638
],
[
2723,
2732
]
]
] |
# Generated by Django 3.1.4 on 2021-01-24 04:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exam', '0006_exam_duration'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='duration',
field=models.CharField(default=0, max_length=4, verbose_name='Durasi Ujian'),
),
]
| [
[
[
71,
81
],
[
108,
118
],
[
227,
237
]
],
[
[
83,
89
],
[
328,
334
]
],
[
[
98,
107
]
]
] |
import asyncio
import inspect
import json
import os
import random
import unittest
from unittest.mock import Mock
import aiohttp
import aiohttp.web
from aiohttp.test_utils import unittest_run_loop, setup_test_loop, teardown_test_loop
import pep8
import jsonrpc_base
import jsonrpc_websocket.jsonrpc
from jsonrpc_websocket import Server, ProtocolError, TransportError
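# In-memory stand-ins for an aiohttp client session and its websocket connection,
# so the JSON-RPC-over-websocket client can be exercised without real network I/O.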
class JsonTestClient():
def __init__(self, loop=None):
self.test_server = None
self.loop = loop
self.connect_side_effect = None
async def ws_connect(self, *args, **kwargs):
if self.connect_side_effect:
self.connect_side_effect()
self.test_server = JsonTestServer(self.loop)
return self.test_server
class JsonTestServer():
def __init__(self, loop=None):
self.loop = loop
self.send_handler = None
self.receive_queue = asyncio.Queue(loop=loop)
self._closed = False
self.receive_side_effect = None
async def send_str(self, data):
self.send_handler(self, data)
def test_receive(self, data):
self.receive_queue.put_nowait(aiohttp.WSMessage(aiohttp.WSMsgType.TEXT, data, ''))
def test_binary(self, data=bytes()):
self.receive_queue.put_nowait(aiohttp.WSMessage(aiohttp.WSMsgType.BINARY, data, ''))
def test_error(self):
self.receive_queue.put_nowait(aiohttp.WSMessage(aiohttp.WSMsgType.ERROR, 0, ''))
def test_close(self):
self.receive_queue.put_nowait(aiohttp.WSMessage(aiohttp.WSMsgType.CLOSED, 0, ''))
def test_ping(self):
self.receive_queue.put_nowait(aiohttp.WSMessage(aiohttp.WSMsgType.PING, 0, ''))
async def receive(self):
value = await self.receive_queue.get()
if self.receive_side_effect:
self.receive_side_effect()
return (value)
async def close(self):
if not self._closed:
self._closed = True
self.receive_queue.put_nowait(aiohttp.WSMessage(aiohttp.WSMsgType.CLOSED, 0, ''))
class TestCase(unittest.TestCase):
def assertSameJSON(self, json1, json2):
"""Tells whether two json strings, once decoded, are the same dictionary"""
return self.assertDictEqual(json.loads(json1), json.loads(json2))
def assertRaisesRegex(self, *args, **kwargs):
return super(TestCase, self).assertRaisesRegex(*args, **kwargs)
class TestJSONRPCClient(TestCase):
def setUp(self):
self.loop = setup_test_loop()
self.client = JsonTestClient(self.loop)
random.randint = Mock(return_value=1)
self.server = Server('/xmlrpc', session=self.client, timeout=0.2)
self.ws_loop_future = self.loop.run_until_complete(self.server.ws_connect())
def tearDown(self):
if self.server.connected:
self.client.test_server.test_close()
self.loop.run_until_complete(self.ws_loop_future)
teardown_test_loop(self.loop)
@property
def handler(self):
return self.client.test_server.send_handler
@handler.setter
def handler(self, value):
self.client.test_server.send_handler = value
def receive(self, data):
self.client.test_server.test_receive(data)
def receive_binary(self, data):
self.client.test_server.test_binary(data)
def test_pep8_conformance(self):
"""Test that we conform to PEP8."""
source_files = []
project_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.join(project_dir, 'jsonrpc_async')
for root, directories, filenames in os.walk(package_dir):
source_files.extend([os.path.join(root, f) for f in filenames if f.endswith('.py')])
pep8style = pep8.StyleGuide(quiet=False, max_line_length=120)
result = pep8style.check_files(source_files)
self.assertEqual(result.total_errors, 0, "Found code style errors (and warnings).")
def test_pending_message_response(self):
pending_message = jsonrpc_websocket.jsonrpc.PendingMessage(loop=self.loop)
pending_message.response = 10
self.assertEqual(pending_message.response, 10)
@unittest_run_loop
async def test_send_message(self):
# catch timeout responses
with self.assertRaises(TransportError) as transport_error:
def handler(server, data):
try:
asyncio.wait(asyncio.sleep(10, loop=self.loop))
except asyncio.CancelledError:
# event loop will be terminated before sleep finishes
pass
self.handler = handler
await self.server.send_message(jsonrpc_base.Request('my_method', params=None, msg_id=1))
self.assertIsInstance(transport_error.exception.args[1], asyncio.TimeoutError)
@unittest_run_loop
async def test_client_closed(self):
await self.server.close()
with self.assertRaisesRegex(TransportError, 'Client is not connected.'):
def handler(server, data):
pass
self.handler = handler
await self.server.send_message(jsonrpc_base.Request('my_method', params=None, msg_id=1))
@unittest_run_loop
async def test_double_connect(self):
with self.assertRaisesRegex(TransportError, 'Connection already open.'):
await self.server.ws_connect()
@unittest_run_loop
async def test_ws_error(self):
self.client.test_server.test_error()
with self.assertRaisesRegex(TransportError, 'Websocket error detected. Connection closed.'):
await self.ws_loop_future
@unittest_run_loop
async def test_binary(self):
self.client.test_server.test_binary()
@unittest_run_loop
async def test_message_not_json(self):
with self.assertRaises(TransportError) as transport_error:
self.receive('not json')
await self.ws_loop_future
self.assertIsInstance(transport_error.exception.args[1], ValueError)
@unittest_run_loop
async def test_message_binary_not_utf8(self):
# If we get a binary message, we should try to decode it as JSON, but
# if it's not valid we should just ignore it, and an exception should
# not be thrown
self.receive_binary(bytes((0xE0, 0x80, 0x80)))
self.client.test_server.test_close()
await self.ws_loop_future
@unittest_run_loop
async def test_message_binary_not_json(self):
# If we get a binary message, we should try to decode it as JSON, but
# if it's not valid we should just ignore it, and an exception should
# not be thrown
self.receive_binary('not json'.encode())
self.client.test_server.test_close()
await self.ws_loop_future
@unittest_run_loop
async def test_message_ping_ignored(self):
self.client.test_server.test_ping()
self.client.test_server.test_close()
await self.ws_loop_future
@unittest_run_loop
async def test_connection_timeout(self):
def bad_connect():
raise aiohttp.ClientError("Test Error")
self.client.connect_side_effect = bad_connect
await self.server.close()
with self.assertRaises(TransportError) as transport_error:
await self.server.ws_connect()
self.assertIsInstance(transport_error.exception.args[1], aiohttp.ClientError)
@unittest_run_loop
async def test_server_request(self):
def test_method():
return 1
self.server.test_method = test_method
def handler(server, data):
response = json.loads(data)
self.assertEqual(response["result"], 1)
self.handler = handler
self.receive('{"jsonrpc": "2.0", "method": "test_method", "id": 1}')
@unittest_run_loop
async def test_server_request_binary(self):
# Test that if the server sends a binary websocket message, that's a
# UTF-8 encoded JSON request we process it
def test_method():
return 1
self.server.test_method = test_method
def handler(server, data):
response = json.loads(data)
self.assertEqual(response["result"], 1)
self.handler = handler
self.receive_binary('{"jsonrpc": "2.0", "method": "test_method", "id": 1}'.encode())
@unittest_run_loop
async def test_server_notification(self):
def test_method():
pass
self.server.test_method = test_method
self.receive('{"jsonrpc": "2.0", "method": "test_method"}')
@unittest_run_loop
async def test_server_response_error(self):
def test_method():
return 1
self.server.test_method = test_method
def receive_side_effect():
raise aiohttp.ClientError("Test Error")
self.client.test_server.receive_side_effect = receive_side_effect
self.receive('{"jsonrpc": "2.0", "method": "test_method", "id": 1}')
with self.assertRaises(TransportError) as transport_error:
await self.ws_loop_future
self.assertIsInstance(transport_error.exception.args[1], aiohttp.ClientError)
@unittest_run_loop
async def test_calls(self):
# rpc call with positional parameters:
def handler1(server, data):
request = json.loads(data)
self.assertEqual(request["params"], [42, 23])
server.test_receive('{"jsonrpc": "2.0", "result": 19, "id": 1}')
self.handler = handler1
self.assertEqual((await self.server.subtract(42, 23)), 19)
def handler2(server, data):
request = json.loads(data)
self.assertEqual(request["params"], {'y': 23, 'x': 42})
server.test_receive('{"jsonrpc": "2.0", "result": 19, "id": 1}')
self.handler = handler2
self.assertEqual((await self.server.subtract(x=42, y=23)), 19)
def handler3(server, data):
request = json.loads(data)
self.assertEqual(request["params"], {'foo': 'bar'})
self.handler = handler3
await self.server.foobar({'foo': 'bar'}, _notification=True)
@unittest_run_loop
async def test_simultaneous_calls(self):
# Test that calls can be delivered simultaneously, and can return out
# of order
def handler(server, data):
pass
self.handler = handler
random.randint = Mock(return_value=1)
task1 = self.loop.create_task(self.server.call1())
random.randint = Mock(return_value=2)
task2 = self.loop.create_task(self.server.call2())
self.assertFalse(task1.done())
self.assertFalse(task2.done())
self.receive('{"jsonrpc": "2.0", "result": 2, "id": 2}')
await task2
self.assertFalse(task1.done())
self.assertTrue(task2.done())
self.receive('{"jsonrpc": "2.0", "result": 1, "id": 1}')
await task1
self.assertTrue(task1.done())
self.assertTrue(task2.done())
self.assertEqual(1, task1.result())
self.assertEqual(2, task2.result())
@unittest_run_loop
async def test_notification(self):
# Verify that we ignore the server response
def handler(server, data):
pass
self.handler = handler
self.assertIsNone((await self.server.subtract(42, 23, _notification=True)))
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
14
],
[
884,
891
],
[
4782,
4789
],
[
4385,
4392
],
[
4398,
4405
],
[
4456,
4463
]
],
[
[
22,
29
]
],
[
[
37,
41
],
[
2220,
2224
],
[
2239,
2243
],
[
7614,
7618
],
[
8145,
8149
],
[
9326,
9330
],
[
9637,
9641
],
[
9962,
9966
]
],
[
[
49,
51
],
[
3432,
3434
],
[
3448,
3450
],
[
3497,
3499
],
[
3584,
3586
],
[
3639,
3641
]
],
[
[
59,
65
],
[
2534,
2540
],
[
10404,
10410
],
[
10509,
10515
]
],
[
[
73,
81
],
[
2036,
2044
],
[
11416,
11424
]
],
[
[
108,
112
],
[
2551,
2555
],
[
10421,
10425
],
[
10526,
10530
]
],
[
[
121,
128
]
],
[
[
136,
147
],
[
1126,
1133
],
[
1144,
1151
],
[
1259,
1266
],
[
1277,
1284
],
[
1379,
1386
],
[
1397,
1404
],
[
1495,
1502
],
[
1513,
1520
],
[
1611,
1618
],
[
1629,
1636
],
[
1968,
1975
],
[
1986,
1993
],
[
7375,
7382
],
[
9144,
9151
],
[
7078,
7085
],
[
8788,
8795
]
],
[
[
179,
196
],
[
4147,
4164
],
[
4810,
4827
],
[
5185,
5202
],
[
5374,
5391
],
[
5617,
5634
],
[
5720,
5737
],
[
6006,
6023
],
[
6394,
6411
],
[
6776,
6793
],
[
6970,
6987
],
[
7402,
7419
],
[
7798,
7815
],
[
8346,
8363
],
[
8574,
8591
],
[
9171,
9188
],
[
10151,
10168
],
[
11107,
11124
]
],
[
[
198,
213
],
[
2460,
2475
]
],
[
[
215,
233
],
[
2909,
2927
]
],
[
[
241,
245
],
[
3724,
3728
]
],
[
[
254,
266
],
[
4658,
4670
],
[
5121,
5133
]
],
[
[
274,
299
],
[
3991,
4008
]
],
[
[
330,
336
],
[
2594,
2600
]
],
[
[
338,
351
]
],
[
[
353,
367
],
[
4269,
4283
],
[
4938,
4952
],
[
5280,
5294
],
[
5508,
5522
],
[
5812,
5826
],
[
7231,
7245
],
[
9005,
9019
]
],
[
[
376,
390
],
[
2500,
2514
]
],
[
[
744,
758
],
[
679,
693
]
],
[
[
2027,
2035
],
[
2407,
2415
],
[
2330,
2338
]
],
[
[
2389,
2406
]
]
] |
r"""
Incidence structures (i.e. hypergraphs, i.e. set systems)
An incidence structure is specified by a list of points and blocks, or by an
incidence matrix ([1]_, [2]_). :class:`IncidenceStructure` instances have the following methods:
{METHODS_OF_IncidenceStructure}
REFERENCES:
.. [1] Block designs and incidence structures from wikipedia,
:wikipedia:`Block_design`
:wikipedia:`Incidence_structure`
.. [2] \E. Assmus, J. Key, Designs and their codes, CUP, 1992.
AUTHORS:
- Peter Dobcsanyi and David Joyner (2007-2008)
This is a significantly modified form of part of the module block_design.py
(version 0.6) written by Peter Dobcsanyi [email protected].
- Vincent Delecroix (2014): major rewrite
Methods
-------
"""
#***************************************************************************
# Copyright (C) 2007 #
# #
# Peter Dobcsanyi and David Joyner #
# <[email protected]> <[email protected]> #
# #
# #
# Distributed under the terms of the GNU General Public License (GPL) #
# as published by the Free Software Foundation; either version 2 of #
# the License, or (at your option) any later version. #
# http://www.gnu.org/licenses/ #
#***************************************************************************
from __future__ import print_function
import six
from six import itervalues
from six.moves import range
from sage.rings.integer import Integer
from sage.misc.latex import latex
from sage.sets.set import Set
class IncidenceStructure(object):
r"""
A base class for incidence structures (i.e. hypergraphs, i.e. set systems)
An incidence structure (i.e. hypergraph, i.e. set system) can be defined
from a collection of blocks (i.e. sets, i.e. edges), optionally with an
explicit ground set (i.e. point set, i.e. vertex set). Alternatively they
can be defined from a binary incidence matrix.
INPUT:
- ``points`` -- (i.e. ground set, i.e. vertex set) the underlying set. If
``points`` is an integer `v`, then the set is considered to be `\{0, ...,
v-1\}`.
.. NOTE::
        The following syntax, where ``points`` is omitted, automatically
defines the ground set as the union of the blocks::
sage: H = IncidenceStructure([['a','b','c'],['c','d','e']])
sage: H.ground_set()
['a', 'b', 'c', 'd', 'e']
- ``blocks`` -- (i.e. edges, i.e. sets) the blocks defining the incidence
structure. Can be any iterable.
- ``incidence_matrix`` -- a binary incidence matrix. Each column represents
a set.
- ``name`` (a string, such as "Fano plane").
- ``check`` -- whether to check the input
- ``copy`` -- (use with caution) if set to ``False`` then ``blocks`` must be
a list of lists of integers. The list will not be copied but will be
modified in place (each block is sorted, and the whole list is
sorted). Your ``blocks`` object will become the
:class:`IncidenceStructure` instance's internal data.
EXAMPLES:
An incidence structure can be constructed by giving the number of points and
the list of blocks::
sage: IncidenceStructure(7, [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
Incidence structure with 7 points and 7 blocks
Only providing the set of blocks is sufficient. In this case, the ground set
is defined as the union of the blocks::
sage: IncidenceStructure([[1,2,3],[2,3,4]])
Incidence structure with 4 points and 2 blocks
Or by its adjacency matrix (a `\{0,1\}`-matrix in which rows are indexed by
points and columns by blocks)::
sage: m = matrix([[0,1,0],[0,0,1],[1,0,1],[1,1,1]])
sage: IncidenceStructure(m)
Incidence structure with 4 points and 3 blocks
The points can be any (hashable) object::
sage: V = [(0,'a'),(0,'b'),(1,'a'),(1,'b')]
sage: B = [(V[0],V[1],V[2]), (V[1],V[2]), (V[0],V[2])]
sage: I = IncidenceStructure(V, B)
sage: I.ground_set()
[(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')]
sage: I.blocks()
[[(0, 'a'), (0, 'b'), (1, 'a')], [(0, 'a'), (1, 'a')], [(0, 'b'), (1, 'a')]]
The order of the points and blocks does not matter as they are sorted on
input (see :trac:`11333`)::
sage: A = IncidenceStructure([0,1,2], [[0],[0,2]])
sage: B = IncidenceStructure([1,0,2], [[0],[2,0]])
sage: B == A
True
sage: C = BlockDesign(2, [[0], [1,0]])
sage: D = BlockDesign(2, [[0,1], [0]])
sage: C == D
True
If you care for speed, you can set ``copy`` to ``False``, but in that
case, your input must be a list of lists and the ground set must be `{0,
..., v-1}`::
sage: blocks = [[0,1],[2,0],[1,2]] # a list of lists of integers
sage: I = IncidenceStructure(3, blocks, copy=False)
sage: I._blocks is blocks
True
"""
def __init__(self, points=None, blocks=None, incidence_matrix=None,
name=None, check=True, copy=True):
r"""
TESTS::
sage: IncidenceStructure(3, [[4]])
Traceback (most recent call last):
...
ValueError: Block [4] is not contained in the point set
sage: IncidenceStructure(3, [[0,1],[0,2]], check=True)
Incidence structure with 3 points and 2 blocks
sage: IncidenceStructure(2, [[0,1,2,3,4,5]], check=False)
Incidence structure with 2 points and 1 blocks
        We avoid converting to integers when the points are not integers (but
        compare equal to integers because of coercion)::
sage: V = GF(5)
sage: e0,e1,e2,e3,e4 = V
sage: [e0,e1,e2,e3,e4] == list(range(5)) # coercion makes them equal
True
sage: blocks = [[e0,e1,e2],[e0,e1],[e2,e4]]
sage: I = IncidenceStructure(V, blocks)
sage: type(I.ground_set()[0])
<... 'sage.rings.finite_rings.integer_mod.IntegerMod_int'>
sage: type(I.blocks()[0][0])
<... 'sage.rings.finite_rings.integer_mod.IntegerMod_int'>
TESTS::
sage: IncidenceStructure([])
Incidence structure with 0 points and 0 blocks
"""
from sage.matrix.constructor import matrix
from sage.structure.element import Matrix
# Reformatting input
if isinstance(points, Matrix):
assert incidence_matrix is None, "'incidence_matrix' cannot be defined when 'points' is a matrix"
assert blocks is None, "'blocks' cannot be defined when 'points' is a matrix"
incidence_matrix = points
points = blocks = None
elif (points is not None and
blocks is None):
blocks = points
points = set().union(*blocks)
if points:
assert incidence_matrix is None, "'incidence_matrix' cannot be defined when 'points' is defined"
if incidence_matrix:
M = matrix(incidence_matrix)
v = M.nrows()
self._points = list(range(v))
self._point_to_index = None
self._blocks = sorted(M.nonzero_positions_in_column(i) for i in range(M.ncols()))
else:
if isinstance(points, (int,Integer)):
self._points = list(range(points))
self._point_to_index = None
else:
self._points = sorted(points)
if self._points == list(range(len(points))) and all(isinstance(x,(int,Integer)) for x in self._points):
self._point_to_index = None
else:
self._point_to_index = {e:i for i,e in enumerate(self._points)}
if check:
for block in blocks:
if any(x not in self._points for x in block):
raise ValueError("Block {} is not contained in the point set".format(block))
if len(block) != len(set(block)):
raise ValueError("Repeated element in block {}".format(block))
if self._point_to_index:
# translate everything to integers between 0 and v-1
blocks = [sorted(self._point_to_index[e] for e in block) for block in blocks]
elif copy:
# create a new list made of sorted blocks
blocks = [sorted(block) for block in blocks]
else:
# sort the data but avoid copying it
for b in blocks:
b.sort()
blocks.sort()
self._blocks = blocks
self._name = str(name) if name is not None else 'IncidenceStructure'
self._classes = None
self._canonical_label = None
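    # Summary of the internal representation built by __init__ (descriptive
    # comment): ``self._points`` is the sorted list of points,
    # ``self._point_to_index`` maps each point to its index in 0..v-1 (or is
    # None when the points already are 0,...,v-1), and ``self._blocks`` is a
    # sorted list of sorted lists of point indices.  For instance,
    # IncidenceStructure(['b','a'], [['b','a']]) is stored as
    # _points=['a','b'], _point_to_index={'a': 0, 'b': 1} and _blocks=[[0, 1]].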
def __iter__(self):
"""
Iterator over the blocks.
EXAMPLES::
sage: sts = designs.steiner_triple_system(9)
sage: list(sts)
[[0, 1, 5], [0, 2, 4], [0, 3, 6], [0, 7, 8], [1, 2, 3], [1, 4, 7],
[1, 6, 8], [2, 5, 8], [2, 6, 7], [3, 4, 8], [3, 5, 7], [4, 5, 6]]
sage: b = IncidenceStructure('ab', ['a','ab'])
sage: it = iter(b)
sage: next(it)
['a']
sage: next(it)
['a', 'b']
"""
if self._point_to_index is None:
for b in self._blocks:
yield b[:]
else:
for b in self._blocks:
yield [self._points[i] for i in b]
def __repr__(self):
"""
A print method.
EXAMPLES::
sage: BD = IncidenceStructure(7,[[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: BD
Incidence structure with 7 points and 7 blocks
"""
return 'Incidence structure with {} points and {} blocks'.format(
self.num_points(), self.num_blocks())
__str__ = __repr__
def __eq__(self, other):
"""
Test whether the two incidence structures are equal.
TESTS::
sage: blocks = [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]]
sage: BD1 = IncidenceStructure(7, blocks)
sage: M = BD1.incidence_matrix()
sage: BD2 = IncidenceStructure(incidence_matrix=M)
sage: BD1 == BD2
True
sage: e1 = frozenset([0,1])
sage: e2 = frozenset([2])
sage: sorted([e1,e2]) == [e1,e2]
True
sage: sorted([e2,e1]) == [e2,e1]
True
sage: I1 = IncidenceStructure([e1,e2], [[e1],[e1,e2]])
sage: I2 = IncidenceStructure([e1,e2], [[e2,e1],[e1]])
sage: I3 = IncidenceStructure([e2,e1], [[e1,e2],[e1]])
sage: I1 == I2 and I2 == I1 and I1 == I3 and I3 == I1 and I2 == I3 and I3 == I2
True
"""
# We are extra careful in this method since we cannot assume that a
# total order is defined on the point set.
if not isinstance(other, IncidenceStructure):
return False
if self._points == other._points:
return self._blocks == other._blocks
if (self.num_points() != other.num_points() or
self.num_blocks() != other.num_blocks()):
return False
p_to_i = self._point_to_index if self._point_to_index else list(range(self.num_points()))
if any(p not in p_to_i for p in other.ground_set()):
return False
other_blocks = sorted(sorted(p_to_i[p] for p in b) for b in other.blocks())
return self._blocks == other_blocks
def __ne__(self, other):
r"""
Difference test.
EXAMPLES::
sage: BD1 = IncidenceStructure(7, [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: M = BD1.incidence_matrix()
sage: BD2 = IncidenceStructure(incidence_matrix=M)
sage: BD1 != BD2
False
"""
return not self == other
def __contains__(self, block):
r"""
        Test whether a block belongs to the incidence structure.
INPUT:
- ``block`` -- a block.
EXAMPLES::
sage: [1,2,3,4] in IncidenceStructure([[1,2,3,4]])
True
sage: [1,2,4,3] in IncidenceStructure([[1,2,3,4]])
True
sage: [1,2,"3",4] in IncidenceStructure([[1,2,3,4]])
False
sage: [1,2,"3",4] in IncidenceStructure([[1,2,"3",4]])
True
More complicated examples::
sage: str="I had a dream of a time when a 3-lines patch does not kill one hour"
sage: sets = Subsets(str.split(), 4)
sage: IS = IncidenceStructure(sets) # a complete 4-uniform hypergraph
sage: ["I", "dream", "of", "one"] in IS
True
sage: ["does", "patch", "kill", "dream"] in IS
True
sage: ["Am", "I", "finally", "done ?"] in IS
False
sage: IS = designs.ProjectiveGeometryDesign(3, 1, GF(2), point_coordinates=False)
sage: [3,8,7] in IS
True
sage: [3,8,9] in IS
False
"""
try:
iter(block)
except TypeError:
return False
# Relabel to 0,...,n-1 if necessary
if self._point_to_index is not None:
try:
block = [self._point_to_index[x] for x in block]
except KeyError:
return False
return sorted(block) in self._blocks
def canonical_label(self):
r"""
Return a canonical label for the incidence structure.
        A canonical label is a relabeling of the points into integers
`\{0,...,n-1\}` such that isomorphic incidence structures are
relabelled to equal objects.
EXAMPLES::
sage: fano1 = designs.balanced_incomplete_block_design(7,3)
sage: fano2 = designs.projective_plane(2)
sage: fano1 == fano2
False
sage: fano1.relabel(fano1.canonical_label())
sage: fano2.relabel(fano2.canonical_label())
sage: fano1 == fano2
True
"""
if self._canonical_label is None:
from sage.graphs.graph import Graph
g = Graph()
n = self.num_points()
g.add_edges((i+n,x) for i,b in enumerate(self._blocks) for x in b)
canonical_label = g.canonical_label([list(range(n)),list(range(n,n+self.num_blocks()))],certificate=True)[1]
canonical_label = [canonical_label[x] for x in range(n)]
self._canonical_label = canonical_label
return dict(zip(self._points,self._canonical_label))
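    # How canonical_label() works (descriptive comment): the incidence
    # structure is turned into a bipartite graph whose vertices 0..n-1 are the
    # points and whose vertices n..n+b-1 are the blocks, with an edge joining
    # a block to each point it contains.  Passing the partition
    # [points, blocks] to Graph.canonical_label() keeps the two sides apart,
    # so the restriction of the certificate to 0..n-1 yields a canonical
    # relabelling of the ground set.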
def is_isomorphic(self, other, certificate=False):
r"""
Return whether the two incidence structures are isomorphic.
INPUT:
- ``other`` -- an incidence structure.
- ``certificate`` (boolean) -- whether to return an
isomorphism from ``self`` to ``other`` instead of a boolean
answer.
EXAMPLES::
sage: fano1 = designs.balanced_incomplete_block_design(7,3)
sage: fano2 = designs.projective_plane(2)
sage: fano1.is_isomorphic(fano2)
True
sage: fano1.is_isomorphic(fano2,certificate=True)
{0: 0, 1: 1, 2: 2, 3: 6, 4: 4, 5: 3, 6: 5}
TESTS::
sage: IS = IncidenceStructure([["A",5,pi],["A",5,"Wouhou"],["A","Wouhou",(9,9)],[pi,12]])
sage: IS2 = IS.copy()
sage: IS2.relabel(IS2.canonical_label())
sage: IS.is_isomorphic(IS2)
True
sage: canon = IS.is_isomorphic(IS2,certificate=True)
sage: IS.relabel(canon)
sage: IS==IS2
True
sage: IS2 = IncidenceStructure([[1,2]])
sage: IS2.is_isomorphic(IS)
False
sage: IS2.is_isomorphic(IS,certificate=True)
{}
Checking whether two :class:`IncidenceStructure` are isomorphic
incidentally computes their canonical label (if necessary). Thus,
subsequent calls to :meth:`is_isomorphic` will be faster::
sage: IS1 = designs.projective_plane(3)
sage: IS2 = IS1.relabel(Permutations(IS1.ground_set()).random_element(),inplace=False)
sage: IS2 = IncidenceStructure(IS2.blocks())
sage: IS1._canonical_label is None and IS2._canonical_label is None
True
sage: IS1.is_isomorphic(IS2)
True
sage: IS1._canonical_label is None or IS2._canonical_label is None
False
"""
if (self.num_points() != other.num_points() or
self.num_blocks() != other.num_blocks() or
sorted(self.block_sizes()) != sorted(other.block_sizes())):
return {} if certificate else False
A_canon = self.canonical_label()
B_canon = other.canonical_label()
A = self.relabel(A_canon,inplace=False)
B = other.relabel(B_canon,inplace=False)
if A == B:
if certificate:
B_canon_rev = {y:x for x,y in six.iteritems(B_canon)}
return {x:B_canon_rev[xint] for x,xint in six.iteritems(A_canon)}
else:
return True
else:
return {} if certificate else False
def isomorphic_substructures_iterator(self, H2,induced=False):
r"""
Iterates over all copies of ``H2`` contained in ``self``.
A hypergraph `H_1` contains an isomorphic copy of a hypergraph `H_2` if
there exists an injection `f:V(H_2)\mapsto V(H_1)` such that for any set
        `S_2\in E(H_2)` the set `S_1=f(S_2)` belongs to `E(H_1)`.
        It is an *induced* copy if no other set of `E(H_1)` is contained in
        `f(V(H_2))`, i.e. `|E(H_2)|=|\{S:S\in E(H_1)\text{ and }S\subseteq f(V(H_2))\}|`.
This function lists all such injections. In particular, the number of
copies of `H` in itself is equal to *the size of its automorphism
group*.
See :mod:`~sage.combinat.designs.subhypergraph_search` for more information.
INPUT:
- ``H2`` an :class:`IncidenceStructure` object.
- ``induced`` (boolean) -- whether to require the copies to be
induced. Set to ``False`` by default.
EXAMPLES:
How many distinct `C_5` in Petersen's graph ? ::
sage: P = graphs.PetersenGraph()
sage: C = graphs.CycleGraph(5)
sage: IP = IncidenceStructure(P.edges(labels=False))
sage: IC = IncidenceStructure(C.edges(labels=False))
sage: sum(1 for _ in IP.isomorphic_substructures_iterator(IC))
120
As the automorphism group of `C_5` has size 10, the number of distinct
unlabelled copies is 12. Let us check that all functions returned
correspond to an actual `C_5` subgraph::
sage: for f in IP.isomorphic_substructures_iterator(IC):
....: assert all(P.has_edge(f[x],f[y]) for x,y in C.edges(labels=False))
The number of induced copies, in this case, is the same::
sage: sum(1 for _ in IP.isomorphic_substructures_iterator(IC,induced=True))
120
They begin to differ if we make one vertex universal::
sage: P.add_edges([(0,x) for x in P], loops=False)
sage: IP = IncidenceStructure(P.edges(labels=False))
sage: IC = IncidenceStructure(C.edges(labels=False))
sage: sum(1 for _ in IP.isomorphic_substructures_iterator(IC))
420
sage: sum(1 for _ in IP.isomorphic_substructures_iterator(IC,induced=True))
60
The number of copies of `H` in itself is the size of its automorphism
group::
sage: H = designs.projective_plane(3)
sage: sum(1 for _ in H.isomorphic_substructures_iterator(H))
5616
sage: H.automorphism_group().cardinality()
5616
"""
from sage.combinat.designs.subhypergraph_search import SubHypergraphSearch
return SubHypergraphSearch(self,H2,induced=induced)
def copy(self):
r"""
Return a copy of the incidence structure.
EXAMPLES::
sage: IS = IncidenceStructure([[1,2,3,"e"]],name="Test")
sage: IS
Incidence structure with 4 points and 1 blocks
sage: copy(IS)
Incidence structure with 4 points and 1 blocks
sage: [1, 2, 3, 'e'] in copy(IS)
True
sage: copy(IS)._name
'Test'
"""
IS = IncidenceStructure(self._blocks,
name=self._name,
check=False)
IS.relabel(dict(zip(range(self.num_points()),self._points)))
IS._canonical_label = None if self._canonical_label is None else self._canonical_label[:]
return IS
__copy__ = copy
def induced_substructure(self, points):
r"""
Return the substructure induced by a set of points.
The substructure induced in `\mathcal H` by a set `X\subseteq V(\mathcal
H)` of points is the incidence structure `\mathcal H_X` defined on `X`
whose sets are all `S\in \mathcal H` such that `S\subseteq X`.
INPUT:
- ``points`` -- a set of points.
.. NOTE::
This method goes over all sets of ``self`` before building a new
:class:`IncidenceStructure` (which involves some relabelling and
            sorting). It probably should not be called in performance-critical
code.
EXAMPLES:
A Fano plane with one point removed::
sage: F = designs.steiner_triple_system(7)
sage: F.induced_substructure([0..5])
Incidence structure with 6 points and 4 blocks
TESTS::
sage: F.induced_substructure([0..50])
Traceback (most recent call last):
...
ValueError: 7 is not a point of the incidence structure
sage: F.relabel(dict(enumerate("abcdefg")))
sage: F.induced_substructure("abc")
Incidence structure with 3 points and ...
sage: F.induced_substructure("Y")
Traceback (most recent call last):
...
ValueError: 'Y' is not a point of the incidence structure
"""
# Checking the input
if self._point_to_index is None:
n = self.num_points()
for x in points:
x = int(x)
if x < 0 or x >= n:
raise ValueError("{} is not a point of the incidence structure".format(x))
int_points = points
else:
try:
int_points = [self._point_to_index[x] for x in points]
except KeyError as bad_pt:
raise ValueError("{} is not a point of the incidence structure".format(bad_pt))
int_points = set(int_points)
return IncidenceStructure(points,
[[self._points[x] for x in S]
for S in self._blocks
if int_points.issuperset(S)])
def trace(self, points, min_size=1, multiset=True):
r"""
Return the trace of a set of points.
        Given a hypergraph `\mathcal H`, the *trace* of a set `X` of points in
`\mathcal H` is the hypergraph whose blocks are all non-empty `S \cap X`
where `S \in \mathcal H`.
INPUT:
- ``points`` -- a set of points.
- ``min_size`` (integer; default 1) -- minimum size of the sets to
keep. By default all empty sets are discarded, i.e. ``min_size=1``.
- ``multiset`` (boolean; default ``True``) -- whether to keep multiple
copies of the same set.
.. NOTE::
This method goes over all sets of ``self`` before building a new
:class:`IncidenceStructure` (which involves some relabelling and
            sorting). It probably should not be called in performance-critical
code.
EXAMPLES:
A Baer subplane of order 2 (i.e. a Fano plane) in a projective plane of order 4::
sage: P4 = designs.projective_plane(4)
sage: F = designs.projective_plane(2)
sage: for x in Subsets(P4.ground_set(),7):
....: if P4.trace(x,min_size=2).is_isomorphic(F):
....: break
sage: subplane = P4.trace(x,min_size=2); subplane
Incidence structure with 7 points and 7 blocks
sage: subplane.is_isomorphic(F)
True
TESTS::
sage: F.trace([0..50])
Traceback (most recent call last):
...
ValueError: 7 is not a point of the incidence structure
sage: F.relabel(dict(enumerate("abcdefg")))
sage: F.trace("abc")
Incidence structure with 3 points and ...
sage: F.trace("Y")
Traceback (most recent call last):
...
ValueError: 'Y' is not a point of the incidence structure
"""
# Checking the input
if self._point_to_index is None:
n = self.num_points()
int_points = frozenset(int(x) for x in points)
for x in int_points:
if x < 0 or x >= n:
raise ValueError("{} is not a point of the incidence structure".format(x))
else:
try:
int_points = frozenset(self._point_to_index[x] for x in points)
except KeyError as bad_pt:
raise ValueError("{} is not a point of the incidence structure".format(bad_pt))
blocks = [int_points.intersection(S) for S in self._blocks]
if min_size:
blocks = [S for S in blocks if len(S)>=min_size]
if not multiset:
blocks = set(blocks)
IS = IncidenceStructure(blocks)
IS.relabel({i:self._points[i] for i in int_points})
return IS
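    # Worked instance of trace() (descriptive comment): tracing the blocks
    # [[0,1,2],[2,3]] on the point set {0,2,3} intersects every block with
    # {0,2,3}, giving the blocks [[0,2],[2,3]]; with min_size=3 both
    # intersections would be discarded, and with multiset=False duplicate
    # intersections would be merged.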
def ground_set(self):
r"""
        Return the ground set (i.e. the list of points).
EXAMPLES::
sage: IncidenceStructure(3, [[0,1],[0,2]]).ground_set()
[0, 1, 2]
"""
return self._points[:]
def num_points(self):
r"""
Return the size of the ground set.
EXAMPLES::
sage: designs.DesarguesianProjectivePlaneDesign(2).num_points()
7
sage: B = IncidenceStructure(4, [[0,1],[0,2],[0,3],[1,2], [1,2,3]])
sage: B.num_points()
4
"""
return len(self._points)
def num_blocks(self):
r"""
Return the number of blocks.
EXAMPLES::
sage: designs.DesarguesianProjectivePlaneDesign(2).num_blocks()
7
sage: B = IncidenceStructure(4, [[0,1],[0,2],[0,3],[1,2], [1,2,3]])
sage: B.num_blocks()
5
"""
return len(self._blocks)
def blocks(self):
"""
Return the list of blocks.
EXAMPLES::
sage: BD = IncidenceStructure(7,[[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: BD.blocks()
[[0, 1, 2], [0, 3, 4], [0, 5, 6], [1, 3, 5], [1, 4, 6], [2, 3, 6], [2, 4, 5]]
"""
if self._point_to_index is None:
return [b[:] for b in self._blocks]
else:
return [[self._points[i] for i in b] for b in self._blocks]
def block_sizes(self):
r"""
Return the set of block sizes.
EXAMPLES::
sage: BD = IncidenceStructure(8, [[0,1,3],[1,4,5,6],[1,2],[5,6,7]])
sage: BD.block_sizes()
[3, 2, 4, 3]
sage: BD = IncidenceStructure(7,[[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: BD.block_sizes()
[3, 3, 3, 3, 3, 3, 3]
"""
return [len(_) for _ in self._blocks]
def degree(self, p=None, subset=False):
r"""
Return the degree of a point ``p`` (or a set of points).
The degree of a point (or set of points) is the number of blocks that
contain it.
INPUT:
- ``p`` -- a point (or a set of points) of the incidence structure.
        - ``subset`` (boolean) -- whether to interpret the argument as a set of
          points (``subset=True``) or as a point (``subset=False``, default).
EXAMPLES::
sage: designs.steiner_triple_system(9).degree(3)
4
sage: designs.steiner_triple_system(9).degree({1,2},subset=True)
1
TESTS::
sage: designs.steiner_triple_system(9).degree()
doctest:...: DeprecationWarning: Please use degrees() instead of degree(None)
See http://trac.sagemath.org/17108 for details.
{0: 4, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4, 7: 4, 8: 4}
sage: designs.steiner_triple_system(9).degree(subset=True)
Traceback (most recent call last):
...
ValueError: subset must be False when p is None
"""
if p is None:
if subset is True:
raise ValueError("subset must be False when p is None")
from sage.misc.superseded import deprecation
deprecation(17108, "Please use degrees() instead of degree(None)")
return self.degrees()
# degree of a point
if not subset:
if self._point_to_index:
p = self._point_to_index.get(p,-1)
else:
p = p if (p>=0 and p<len(self._points)) else -1
return sum((p in b) for b in self._blocks) if p != -1 else 0
# degree of a set
else:
if self._point_to_index:
p = set(self._point_to_index.get(x,-1) for x in p)
else:
p = set(p) if all(x>=0 and x<len(self._points) for x in p) else set([-1])
return sum(p.issubset(b) for b in self._blocks) if -1 not in p else 0
def degrees(self, size=None):
r"""
Return the degree of all sets of given size, or the degree of all points.
        The degree of a point (or set of points) is the number of blocks that
contain it.
INPUT:
- ``size`` (integer) -- return the degree of all subsets of points of
cardinality ``size``. When ``size=None``, the function outputs the
degree of all points.
.. NOTE::
When ``size=None`` the output is indexed by the points. When
``size=1`` it is indexed by tuples of size 1. This is the same
information, stored slightly differently.
OUTPUT:
A dictionary whose values are degrees and keys are either:
- the points of the incidence structure if ``size=None`` (default)
- the subsets of size ``size`` of the points stored as tuples
EXAMPLES::
sage: IncidenceStructure([[1,2,3],[1,4]]).degrees(2)
{(1, 2): 1, (1, 3): 1, (1, 4): 1, (2, 3): 1, (2, 4): 0, (3, 4): 0}
In a Steiner triple system, all pairs have degree 1::
sage: S13 = designs.steiner_triple_system(13)
sage: all(v == 1 for v in S13.degrees(2).values())
True
"""
if size is None:
d = [0]*self.num_points()
for b in self._blocks:
for x in b:
d[x] += 1
return {p: d[i] for i, p in enumerate(self._points)}
else:
from itertools import combinations
d = {t:0 for t in combinations(range(self.num_points()),size)}
for b in self._blocks:
for s in combinations(b,size):
d[s]+=1
if self._point_to_index:
return {tuple([self._points[x] for x in s]):v for s,v in six.iteritems(d)}
else:
return d
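    # Worked instance of degrees() (descriptive comment): for the blocks
    # [[0,1,2],[0,3]] on the points 0..3, degrees() returns
    # {0: 2, 1: 1, 2: 1, 3: 1}, while degrees(2) returns
    # {(0,1): 1, (0,2): 1, (0,3): 1, (1,2): 1, (1,3): 0, (2,3): 0}.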
def rank(self):
r"""
Return the rank of the hypergraph (the maximum size of a block).
EXAMPLES::
sage: h = Hypergraph(8, [[0,1,3],[1,4,5,6],[1,2]])
sage: h.rank()
4
"""
return max(len(b) for b in self._blocks)
def is_regular(self,r=None):
r"""
Test whether the incidence structure is `r`-regular.
An incidence structure is said to be `r`-regular if all its points are
incident with exactly `r` blocks.
INPUT:
- ``r`` (integer)
OUTPUT:
If ``r`` is defined, a boolean is returned. If ``r`` is set to ``None``
(default), the method returns either ``False`` or the integer ``r`` such
that the incidence structure is `r`-regular.
.. WARNING::
            In the case of a `0`-regular incidence structure this method returns
            ``0``, so beware that the test ``if not H.is_regular()`` succeeds even
            though the structure is regular.
EXAMPLES::
sage: designs.balanced_incomplete_block_design(7,3).is_regular()
3
sage: designs.balanced_incomplete_block_design(7,3).is_regular(r=3)
True
sage: designs.balanced_incomplete_block_design(7,3).is_regular(r=4)
False
TESTS::
sage: IncidenceStructure([]).is_regular()
Traceback (most recent call last):
...
ValueError: This incidence structure has no points.
"""
if self.num_points() == 0:
raise ValueError("This incidence structure has no points.")
count = [0]*self.num_points()
for b in self._blocks:
for x in b:
count[x] += 1
count = set(count)
if len(count) != 1:
return False
elif r is None:
return count.pop()
else:
return count.pop() == r
def is_uniform(self,k=None):
r"""
Test whether the incidence structure is `k`-uniform
An incidence structure is said to be `k`-uniform if all its blocks have
size `k`.
INPUT:
- ``k`` (integer)
OUTPUT:
If ``k`` is defined, a boolean is returned. If ``k`` is set to ``None``
(default), the method returns either ``False`` or the integer ``k`` such
that the incidence structure is `k`-uniform.
.. WARNING::
            In the case of a `0`-uniform incidence structure this method returns
            ``0``, so beware that the test ``if not H.is_uniform()`` succeeds even
            though the structure is uniform.
EXAMPLES::
sage: designs.balanced_incomplete_block_design(7,3).is_uniform()
3
sage: designs.balanced_incomplete_block_design(7,3).is_uniform(k=3)
True
sage: designs.balanced_incomplete_block_design(7,3).is_uniform(k=4)
False
TESTS::
sage: IncidenceStructure([]).is_uniform()
Traceback (most recent call last):
...
ValueError: This incidence structure has no blocks.
"""
if self.num_blocks() == 0:
raise ValueError("This incidence structure has no blocks.")
sizes = set(self.block_sizes())
if len(sizes) != 1:
return False
elif k is None:
return sizes.pop()
else:
return sizes.pop() == k
def is_connected(self):
r"""
Test whether the design is connected.
EXAMPLES::
sage: IncidenceStructure(3, [[0,1],[0,2]]).is_connected()
True
sage: IncidenceStructure(4, [[0,1],[2,3]]).is_connected()
False
"""
from sage.sets.disjoint_set import DisjointSet
D = DisjointSet(self.num_points())
for B in self._blocks:
x = B[0]
for i in range(1,len(B)):
D.union(x,B[i])
return D.number_of_subsets() == 1
def is_simple(self):
r"""
Test whether this design is simple (i.e. no repeated block).
EXAMPLES::
sage: IncidenceStructure(3, [[0,1],[1,2],[0,2]]).is_simple()
True
sage: IncidenceStructure(3, [[0],[0]]).is_simple()
False
sage: V = [(0,'a'),(0,'b'),(1,'a'),(1,'b')]
sage: B = [[V[0],V[1]], [V[1],V[2]]]
sage: I = IncidenceStructure(V, B)
sage: I.is_simple()
True
sage: I2 = IncidenceStructure(V, B*2)
sage: I2.is_simple()
False
"""
B = self._blocks
return all(B[i] != B[i+1] for i in range(len(B)-1))
def _gap_(self):
"""
Return the GAP string describing the design.
EXAMPLES::
sage: BD = IncidenceStructure(7,[[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: BD._gap_()
'BlockDesign(7,[[1, 2, 3], [1, 4, 5], [1, 6, 7], [2, 4, 6], [2, 5, 7], [3, 4, 7], [3, 5, 6]])'
"""
B = self.blocks()
v = self.num_points()
gB = [[x+1 for x in b] for b in self._blocks]
return "BlockDesign("+str(v)+","+str(gB)+")"
def intersection_graph(self,sizes=None):
r"""
Return the intersection graph of the incidence structure.
The vertices of this graph are the :meth:`blocks` of the incidence
structure. Two of them are adjacent if the size of their intersection
belongs to the set ``sizes``.
INPUT:
- ``sizes`` -- a list/set of integers. For convenience, setting
``sizes`` to ``5`` has the same effect as ``sizes=[5]``. When set to
``None`` (default), behaves as ``sizes=PositiveIntegers()``.
EXAMPLES:
The intersection graph of a
:func:`~sage.combinat.designs.bibd.balanced_incomplete_block_design` is
a :meth:`strongly regular graph <Graph.is_strongly_regular>` (when it is
not trivial)::
sage: BIBD = designs.balanced_incomplete_block_design(19,3)
sage: G = BIBD.intersection_graph(1)
sage: G.is_strongly_regular(parameters=True)
(57, 24, 11, 9)
"""
from sage.sets.positive_integers import PositiveIntegers
from sage.graphs.graph import Graph
from sage.sets.set import Set
if sizes is None:
sizes = PositiveIntegers()
elif sizes in PositiveIntegers():
sizes = (sizes,)
V = [Set(v) for v in self]
return Graph([V, lambda x,y: len(x & y) in sizes], loops=False)
def incidence_matrix(self):
r"""
Return the incidence matrix `A` of the design. A is a `(v \times b)`
matrix defined by: ``A[i,j] = 1`` if ``i`` is in block ``B_j`` and 0
otherwise.
EXAMPLES::
sage: BD = IncidenceStructure(7, [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: BD.block_sizes()
[3, 3, 3, 3, 3, 3, 3]
sage: BD.incidence_matrix()
[1 1 1 0 0 0 0]
[1 0 0 1 1 0 0]
[1 0 0 0 0 1 1]
[0 1 0 1 0 1 0]
[0 1 0 0 1 0 1]
[0 0 1 1 0 0 1]
[0 0 1 0 1 1 0]
sage: I = IncidenceStructure('abc', ('ab','abc','ac','c'))
sage: I.incidence_matrix()
[1 1 1 0]
[1 1 0 0]
[0 1 1 1]
"""
from sage.matrix.constructor import Matrix
from sage.rings.all import ZZ
A = Matrix(ZZ, self.num_points(), self.num_blocks(), sparse=True)
for j, b in enumerate(self._blocks):
for i in b:
A[i, j] = 1
return A
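    # Small worked instance of incidence_matrix() (descriptive comment): for
    # the points 0,1,2 and the blocks [[0,1],[1,2]] the matrix has one row per
    # point and one column per block,
    #     [1 0]
    #     [1 1]
    #     [0 1]
    # since A[i,j] = 1 exactly when point i lies in block j.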
def incidence_graph(self,labels=False):
r"""
Return the incidence graph of the incidence structure
A point and a block are adjacent in this graph whenever they are
incident.
INPUT:
- ``labels`` (boolean) -- whether to return a graph whose vertices are
integers, or labelled elements.
- ``labels is False`` (default) -- in this case the first vertices
of the graphs are the elements of :meth:`ground_set`, and appear
in the same order. Similarly, the following vertices represent the
elements of :meth:`blocks`, and appear in the same order.
- ``labels is True``, the points keep their original labels, and the
blocks are :func:`Set <Set>` objects.
Note that the labelled incidence graph can be incorrect when
blocks are repeated, and on some (rare) occasions when the
elements of :meth:`ground_set` mix :func:`Set` and non-:func:`Set
<Set>` objects.
EXAMPLES::
sage: BD = IncidenceStructure(7, [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]])
sage: BD.incidence_graph()
Bipartite graph on 14 vertices
sage: A = BD.incidence_matrix()
sage: Graph(block_matrix([[A*0,A],[A.transpose(),A*0]])) == BD.incidence_graph()
True
TESTS:
With ``labels = True``::
sage: BD.incidence_graph(labels=True).has_edge(0,Set([0,1,2]))
True
"""
if labels:
from sage.graphs.graph import Graph
from sage.sets.set import Set
G = Graph()
G.add_vertices(self.ground_set())
for b in self.blocks():
b = Set(b)
G.add_vertex(b)
G.add_edges((b,x) for x in b)
return G
else:
from sage.graphs.bipartite_graph import BipartiteGraph
A = self.incidence_matrix()
return BipartiteGraph(A)
def complement(self,uniform=False):
r"""
Return the complement of the incidence structure.
Two different definitions of "complement" are made available, according
to the value of ``uniform``.
INPUT:
- ``uniform`` (boolean) --
- if set to ``False`` (default), returns the incidence structure whose
blocks are the complements of all blocks of the incidence structure.
- If set to ``True`` and the incidence structure is `k`-uniform,
returns the incidence structure whose blocks are all `k`-sets of the
ground set that do not appear in ``self``.
EXAMPLES:
The complement of a
:class:`~sage.combinat.designs.bibd.BalancedIncompleteBlockDesign` is
also a `2`-design::
sage: bibd = designs.balanced_incomplete_block_design(13,4)
sage: bibd.is_t_design(return_parameters=True)
(True, (2, 13, 4, 1))
sage: bibd.complement().is_t_design(return_parameters=True)
(True, (2, 13, 9, 6))
The "uniform" complement of a graph is a graph::
sage: g = graphs.PetersenGraph()
sage: G = IncidenceStructure(g.edges(labels=False))
sage: H = G.complement(uniform=True)
sage: h = Graph(H.blocks())
sage: g == h
False
sage: g == h.complement()
True
TESTS::
sage: bibd.relabel({i:str(i) for i in bibd.ground_set()})
sage: bibd.complement().ground_set()
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
sage: I = IncidenceStructure('abc', ['ab','ac','bc'])
sage: I.is_t_design(return_parameters=True)
(True, (2, 3, 2, 1))
"""
if uniform:
k = self.is_uniform()
if k is False:
raise ValueError("The incidence structure is not uniform.")
blocks = []
num_blocks = self.num_blocks()
i = 0
from itertools import combinations
for B in combinations(range(self.num_points()),k):
B = list(B)
while i<num_blocks and self._blocks[i] < B:
i += 1
if i<num_blocks and self._blocks[i] == B:
i += 1
continue
blocks.append(B)
I = IncidenceStructure(blocks,copy=False)
else:
X = set(range(self.num_points()))
I = IncidenceStructure([X.difference(B) for B in self._blocks])
I.relabel({i:self._points[i] for i in range(self.num_points())})
return I
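    # Note on the uniform complement computed above (descriptive comment):
    # when uniform=True the loop enumerates all k-subsets of {0,...,v-1} in
    # lexicographic order and keeps those that are not blocks; the running
    # index i relies on self._blocks being sorted.  For v=3, k=2 and blocks
    # [[0,1]], the uniform complement has blocks [[0,2],[1,2]].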
def relabel(self, perm=None, inplace=True):
r"""
Relabel the ground set
INPUT:
- ``perm`` -- can be one of
            - a dictionary -- then each point ``p`` (which should be a key of
              ``perm``) is relabeled to ``perm[p]``
- a list or a tuple of length ``n`` -- the first point returned by
:meth:`ground_set` is relabeled to ``l[0]``, the second to
``l[1]``, ...
- ``None`` -- the incidence structure is relabeled to be on
`\{0,1,...,n-1\}` in the ordering given by :meth:`ground_set`.
        - ``inplace`` -- if ``True`` (default), relabel ``self`` in place; if
          ``False``, leave ``self`` untouched and return a relabeled copy instead.
EXAMPLES::
sage: TD=designs.transversal_design(5,5)
sage: TD.relabel({i:chr(97+i) for i in range(25)})
sage: TD.ground_set()
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y']
sage: TD.blocks()[:3]
[['a', 'f', 'k', 'p', 'u'], ['a', 'g', 'm', 's', 'y'], ['a', 'h', 'o', 'q', 'x']]
Relabel to integer points::
sage: TD.relabel()
sage: TD.blocks()[:3]
[[0, 5, 10, 15, 20], [0, 6, 12, 18, 24], [0, 7, 14, 16, 23]]
TESTS:
Check that the relabel is consistent on a fixed incidence structure::
sage: I = IncidenceStructure([0,1,2,3,4],
....: [[0,1,3],[0,2,4],[2,3,4],[0,1]])
sage: I.relabel()
sage: from itertools import permutations
sage: for p in permutations([0,1,2,3,4]):
....: J = I.relabel(p,inplace=False)
....: if I == J: print(p)
(0, 1, 2, 3, 4)
(0, 1, 4, 3, 2)
And one can also verify that we have exactly two automorphisms::
sage: I.automorphism_group()
Permutation Group with generators [(2,4)]
"""
if not inplace:
from copy import copy
G = copy(self)
G.relabel(perm=perm, inplace=True)
return G
if perm is None:
self._points = list(range(self.num_points()))
self._point_to_index = None
return
if isinstance(perm, (list,tuple)):
perm = dict(zip(self._points, perm))
if not isinstance(perm, dict):
raise ValueError("perm argument must be None, a list or a dictionary")
if len(set(perm.values())) != len(perm):
raise ValueError("Two points are getting relabelled with the same name !")
self._points = [perm[x] for x in self._points]
if self._points == list(range(self.num_points())):
self._point_to_index = None
else:
self._point_to_index = {v:i for i,v in enumerate(self._points)}
def __hash__(self):
r"""
Not Implemented
This object is mutable because of .relabel()
EXAMPLES::
sage: TD=designs.transversal_design(5,5)
sage: hash(TD)
Traceback (most recent call last):
...
RuntimeError: This object is mutable !
"""
raise RuntimeError("This object is mutable !")
#####################
# real computations #
#####################
def packing(self, solver=None, verbose=0):
r"""
Return a maximum packing
        A maximum packing in a hypergraph is a collection of disjoint sets/blocks
of maximal cardinality. This problem is NP-complete in general, and in
particular on 3-uniform hypergraphs. It is solved here with an Integer
Linear Program.
For more information, see the :wikipedia:`Packing_in_a_hypergraph`.
INPUT:
- ``solver`` -- (default: ``None``) Specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method
:meth:`solve <sage.numerical.mip.MixedIntegerLinearProgram.solve>`
of the class
:class:`MixedIntegerLinearProgram <sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``). Sets the level of
verbosity. Set to 0 by default, which means quiet.
EXAMPLES::
sage: P = IncidenceStructure([[1,2],[3,4],[2,3]]).packing()
sage: sorted(sorted(b) for b in P)
[[1, 2], [3, 4]]
sage: len(designs.steiner_triple_system(9).packing())
3
"""
from sage.numerical.mip import MixedIntegerLinearProgram
# List of blocks containing a given point x
d = [[] for x in self._points]
for i, B in enumerate(self._blocks):
for x in B:
d[x].append(i)
p = MixedIntegerLinearProgram(solver=solver)
b = p.new_variable(binary=True)
for x, L in enumerate(d): # Set of disjoint blocks
p.add_constraint(p.sum([b[i] for i in L]) <= 1)
# Maximum number of blocks
p.set_objective(p.sum([b[i] for i in range(self.num_blocks())]))
p.solve(log=verbose)
return [[self._points[x] for x in self._blocks[i]]
for i, v in six.iteritems(p.get_values(b)) if v]
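    # The integer linear program solved by packing() above, written out
    # (descriptive comment), with a binary variable b_i per block:
    #   maximize    sum_i b_i
    #   subject to  sum_{i : x in B_i} b_i <= 1   for every point x
    # so the selected blocks are pairwise disjoint and as numerous as possible.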
def is_t_design(self, t=None, v=None, k=None, l=None, return_parameters=False):
r"""
Test whether ``self`` is a `t-(v,k,l)` design.
        A `t-(v,k,\lambda)` design (sometimes called a `t`-design for short) is a block
design in which:
- the underlying set has cardinality `v`
- the blocks have size `k`
- each `t`-subset of points is covered by `\lambda` blocks
INPUT:
- ``t,v,k,l`` (integers) -- their value is set to ``None`` by
default. The function tests whether the design is a ``t-(v,k,l)``
design using the provided values and guesses the others. Note that
          ``l`` cannot be specified if ``t`` is not.
- ``return_parameters`` (boolean)-- whether to return the parameters of
the `t`-design. If set to ``True``, the function returns a pair
``(boolean_answer,(t,v,k,l))``.
EXAMPLES::
sage: fano_blocks = [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]]
sage: BD = IncidenceStructure(7, fano_blocks)
sage: BD.is_t_design()
True
sage: BD.is_t_design(return_parameters=True)
(True, (2, 7, 3, 1))
sage: BD.is_t_design(2, 7, 3, 1)
True
sage: BD.is_t_design(1, 7, 3, 3)
True
sage: BD.is_t_design(0, 7, 3, 7)
True
sage: BD.is_t_design(0,6,3,7) or BD.is_t_design(0,7,4,7) or BD.is_t_design(0,7,3,8)
False
sage: BD = designs.AffineGeometryDesign(3, 1, GF(2))
sage: BD.is_t_design(1)
True
sage: BD.is_t_design(2)
True
Steiner triple and quadruple systems are other names for `2-(v,3,1)` and
`3-(v,4,1)` designs::
sage: S3_9 = designs.steiner_triple_system(9)
sage: S3_9.is_t_design(2,9,3,1)
True
sage: blocks = designs.steiner_quadruple_system(8)
sage: S4_8 = IncidenceStructure(8, blocks)
sage: S4_8.is_t_design(3,8,4,1)
True
sage: blocks = designs.steiner_quadruple_system(14)
sage: S4_14 = IncidenceStructure(14, blocks)
sage: S4_14.is_t_design(3,14,4,1)
True
Some examples of Witt designs that need the gap database::
sage: BD = designs.WittDesign(9) # optional - gap_packages
sage: BD.is_t_design(2,9,3,1) # optional - gap_packages
True
sage: W12 = designs.WittDesign(12) # optional - gap_packages
sage: W12.is_t_design(5,12,6,1) # optional - gap_packages
True
sage: W12.is_t_design(4) # optional - gap_packages
True
Further examples::
sage: D = IncidenceStructure(4,[[],[]])
sage: D.is_t_design(return_parameters=True)
(True, (0, 4, 0, 2))
sage: D = IncidenceStructure(4, [[0,1],[0,2],[0,3]])
sage: D.is_t_design(return_parameters=True)
(True, (0, 4, 2, 3))
sage: D = IncidenceStructure(4, [[0],[1],[2],[3]])
sage: D.is_t_design(return_parameters=True)
(True, (1, 4, 1, 1))
sage: D = IncidenceStructure(4,[[0,1],[2,3]])
sage: D.is_t_design(return_parameters=True)
(True, (1, 4, 2, 1))
sage: D = IncidenceStructure(4, [list(range(4))])
sage: D.is_t_design(return_parameters=True)
(True, (4, 4, 4, 1))
TESTS::
sage: blocks = designs.steiner_quadruple_system(8)
sage: S4_8 = IncidenceStructure(8, blocks)
sage: R = list(range(15))
sage: [(v,k,l) for v in R for k in R for l in R if S4_8.is_t_design(3,v,k,l)]
[(8, 4, 1)]
sage: [(v,k,l) for v in R for k in R for l in R if S4_8.is_t_design(2,v,k,l)]
[(8, 4, 3)]
sage: [(v,k,l) for v in R for k in R for l in R if S4_8.is_t_design(1,v,k,l)]
[(8, 4, 7)]
sage: [(v,k,l) for v in R for k in R for l in R if S4_8.is_t_design(0,v,k,l)]
[(8, 4, 14)]
sage: A = designs.AffineGeometryDesign(3, 1, GF(2))
sage: A.is_t_design(return_parameters=True)
(True, (2, 8, 2, 1))
sage: A = designs.AffineGeometryDesign(4, 2, GF(2))
sage: A.is_t_design(return_parameters=True)
(True, (3, 16, 4, 1))
sage: I = IncidenceStructure(2, [])
sage: I.is_t_design(return_parameters=True)
(True, (0, 2, 0, 0))
sage: I = IncidenceStructure(2, [[0],[0,1]])
sage: I.is_t_design(return_parameters=True)
(False, (0, 0, 0, 0))
"""
from sage.arith.all import binomial
# Missing parameters ?
if v is None:
v = self.num_points()
if k is None:
k = len(self._blocks[0]) if self._blocks else 0
if l is not None and t is None:
            raise ValueError("t must be set when l is not None")
b = self.num_blocks()
# Trivial wrong answers
if (any(len(block) != k for block in self._blocks) or # non k-uniform
v != self.num_points()):
return (False, (0,0,0,0)) if return_parameters else False
# Trivial case t>k
if (t is not None and t>k):
if (l is None or l == 0):
return (True, (t,v,k,0)) if return_parameters else True
else:
return (False, (0,0,0,0)) if return_parameters else False
# Trivial case k=0
if k==0:
if (l is None or l == 0):
return (True, (0,v,k,b)) if return_parameters else True
else:
return (False, (0,0,0,0)) if return_parameters else False
# Trivial case k=v (includes v=0)
if k == v:
if t is None:
t = v
if l is None or b == l:
return (True, (t,v,k,b)) if return_parameters else True
else:
                return (False, (0,0,0,0)) if return_parameters else False
# Handbook of combinatorial design theorem II.4.8:
#
# a t-(v,k,l) is also a t'-(v,k,l')
# for t' < t and l' = l* binomial(v-t',t-t') / binomial(k-t',t-t')
#
# We look for the largest t such that self is a t-design
from itertools import combinations
for tt in (range(1,k+1) if t is None else [t]):
# is lambda an integer?
if (b*binomial(k,tt)) % binomial(v,tt) != 0:
tt -= 1
break
s = {}
for block in self._blocks:
for i in combinations(block,tt):
s[i] = s.get(i,0) + 1
if len(set(s.values())) != 1:
tt -= 1
break
ll = b*binomial(k,tt) // binomial(v,tt)
if ((t is not None and t!=tt) or
(l is not None and l!=ll)):
return (False, (0,0,0,0)) if return_parameters else False
else:
if tt == 0:
ll = b
return (True, (tt,v,k,ll)) if return_parameters else True
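    # Worked instance of the Handbook theorem used above (descriptive
    # comment): the Fano plane is a 2-(7,3,1) design, hence for t'=1 it is a
    # 1-(7,3,l') design with l' = 1*binomial(7-1,2-1)/binomial(3-1,2-1) = 6/2 = 3,
    # and for t'=0 a 0-(7,3,7) design, which matches the doctests of
    # is_t_design() above.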
def is_generalized_quadrangle(self, verbose=False, parameters=False):
r"""
Test if the incidence structure is a generalized quadrangle.
An incidence structure is a generalized quadrangle iff (see [BH12]_,
section 9.6):
- two blocks intersect on at most one point.
- For every point `p` not in a block `B`, there is a unique block `B'`
intersecting both `\{p\}` and `B`
It is a *regular* generalized quadrangle if furthermore:
- it is `s+1`-:meth:`uniform <is_uniform>` for some positive integer `s`.
- it is `t+1`-:meth:`regular <is_regular>` for some positive integer `t`.
For more information, see the :wikipedia:`Generalized_quadrangle`.
.. NOTE::
Some references (e.g. [PT09]_ or [GQwiki]_) only allow *regular*
generalized quadrangles. To use such a definition, see the
``parameters`` optional argument described below, or the methods
:meth:`is_regular` and :meth:`is_uniform`.
INPUT:
- ``verbose`` (boolean) -- whether to print an explanation when the
instance is not a generalized quadrangle.
- ``parameters`` (boolean; ``False``) -- if set to ``True``, the
          function returns a pair ``(s,t)`` instead of a plain ``True`` answer. In this
case, `s` and `t` are the integers defined above if they exist (each
can be set to ``False`` otherwise).
EXAMPLES::
sage: h = designs.CremonaRichmondConfiguration()
sage: h.is_generalized_quadrangle()
True
This is actually a *regular* generalized quadrangle::
sage: h.is_generalized_quadrangle(parameters=True)
(2, 2)
TESTS::
sage: H = IncidenceStructure((2*graphs.CompleteGraph(3)).edges(labels=False))
sage: H.is_generalized_quadrangle(verbose=True)
Some point is at distance >3 from some block.
False
sage: G = graphs.CycleGraph(5)
sage: B = list(G.subgraph_search_iterator(graphs.PathGraph(3)))
sage: H = IncidenceStructure(B)
sage: H.is_generalized_quadrangle(verbose=True)
Two blocks intersect on >1 points.
False
sage: hypergraphs.CompleteUniform(4,2).is_generalized_quadrangle(verbose=1)
Some point has two projections on some line.
False
"""
# The distance between a point and a line in the incidence graph is odd
# and must be <= 3. Thus, the diameter is at most 4
g = self.incidence_graph()
if g.diameter() > 4:
if verbose:
print("Some point is at distance >3 from some block.")
return False
# There is a unique projection of a point on a line. Thus, the girth of
# g is at least 7
girth = g.girth()
if girth == 4:
if verbose:
print("Two blocks intersect on >1 points.")
return False
elif girth == 6:
if verbose:
print("Some point has two projections on some line.")
return False
if parameters:
s = self.is_uniform()
t = self.is_regular()
s = s-1 if (s is not False and s>=2) else False
t = t-1 if (t is not False and t>=2) else False
return (s,t)
else:
return True
def dual(self, algorithm=None):
"""
Return the dual of the incidence structure.
INPUT:
- ``algorithm`` -- whether to use Sage's implementation
(``algorithm=None``, default) or use GAP's (``algorithm="gap"``).
.. NOTE::
The ``algorithm="gap"`` option requires GAP's Design package
(included in the gap_packages Sage spkg).
EXAMPLES:
The dual of a projective plane is a projective plane::
sage: PP = designs.DesarguesianProjectivePlaneDesign(4)
sage: PP.dual().is_t_design(return_parameters=True)
(True, (2, 21, 5, 1))
TESTS::
sage: D = IncidenceStructure(4, [[0,2],[1,2,3],[2,3]])
sage: D
Incidence structure with 4 points and 3 blocks
sage: D.dual()
Incidence structure with 3 points and 4 blocks
sage: print(D.dual(algorithm="gap")) # optional - gap_packages
Incidence structure with 3 points and 4 blocks
sage: blocks = [[0,1,2],[0,3,4],[0,5,6],[1,3,5],[1,4,6],[2,3,6],[2,4,5]]
sage: BD = IncidenceStructure(7, blocks, name="FanoPlane")
sage: BD
Incidence structure with 7 points and 7 blocks
sage: print(BD.dual(algorithm="gap")) # optional - gap_packages
Incidence structure with 7 points and 7 blocks
sage: BD.dual()
Incidence structure with 7 points and 7 blocks
REFERENCE:
- Soicher, Leonard, Design package manual, available at
http://www.gap-system.org/Manuals/pkg/design/htm/CHAP003.htm
"""
if algorithm == "gap":
from sage.interfaces.gap import gap
gap.load_package("design")
gD = self._gap_()
gap.eval("DD:=DualBlockDesign("+gD+")")
v = eval(gap.eval("DD.v"))
gblcks = eval(gap.eval("DD.blocks"))
gB = []
for b in gblcks:
gB.append([x-1 for x in b])
return IncidenceStructure(list(range(v)), gB, name=None, check=False)
else:
return IncidenceStructure(
incidence_matrix=self.incidence_matrix().transpose(),
check=False)
def automorphism_group(self):
r"""
Return the subgroup of the automorphism group of the incidence graph
        which respects the partition into points and blocks. It is (isomorphic
        to) the automorphism group of the block design, although the degrees differ.
EXAMPLES::
sage: P = designs.DesarguesianProjectivePlaneDesign(2); P
(7,3,1)-Balanced Incomplete Block Design
sage: G = P.automorphism_group()
sage: G.is_isomorphic(PGL(3,2))
True
sage: G
Permutation Group with generators [...]
sage: G.cardinality()
168
A non self-dual example::
sage: IS = IncidenceStructure(list(range(4)), [[0,1,2,3],[1,2,3]])
sage: IS.automorphism_group().cardinality()
6
sage: IS.dual().automorphism_group().cardinality()
1
Examples with non-integer points::
sage: I = IncidenceStructure('abc', ('ab','ac','bc'))
sage: I.automorphism_group()
Permutation Group with generators [('b','c'), ('a','b')]
sage: IncidenceStructure([[(1,2),(3,4)]]).automorphism_group()
Permutation Group with generators [((1,2),(3,4))]
"""
from sage.graphs.graph import Graph
from sage.groups.perm_gps.permgroup import PermutationGroup
g = Graph()
n = self.num_points()
g.add_edges((i+n,x) for i,b in enumerate(self._blocks) for x in b)
ag = g.automorphism_group(partition=[list(range(n)),
list(range(n,n+self.num_blocks()))])
if self._point_to_index:
gens = [[tuple([self._points[i] for i in cycle if (not cycle or cycle[0]<n)])
for cycle in g.cycle_tuples()]
for g in ag.gens()]
else:
gens = [[tuple(cycle) for cycle in g.cycle_tuples() if (not cycle or cycle[0]<n)]
for g in ag.gens()]
return PermutationGroup(gens, domain=self._points)
def is_resolvable(self, certificate=False, solver=None, verbose=0, check=True):
r"""
Test whether the hypergraph is resolvable
        A hypergraph is said to be resolvable if its sets can be partitioned
into classes, each of which is a partition of the ground set.
.. NOTE::
This problem is solved using an Integer Linear Program, and GLPK
(the default LP solver) has been reported to be very slow on some
instances. If you hit this wall, consider installing a more powerful
LP solver (CPLEX, Gurobi, ...).
INPUT:
- ``certificate`` (boolean) -- whether to return the classes along with
the binary answer (see examples below).
- ``solver`` -- (default: ``None``) Specify a Linear Program (LP) solver
to be used. If set to ``None``, the default one is used. For more
information on LP solvers and which default solver is used, see the
method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``). Sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``check`` (boolean) -- whether to check that output is correct before
returning it. As this is expected to be useless (but we are cautious
guys), you may want to disable it whenever you want speed. Set to ``True``
by default.
EXAMPLES:
Some resolvable designs::
sage: TD = designs.transversal_design(2,2,resolvable=True)
sage: TD.is_resolvable()
True
sage: AG = designs.AffineGeometryDesign(3,1,GF(2))
sage: AG.is_resolvable()
True
Their classes::
sage: b,cls = TD.is_resolvable(True)
sage: b
True
sage: cls # random
[[[0, 3], [1, 2]], [[1, 3], [0, 2]]]
sage: b,cls = AG.is_resolvable(True)
sage: b
True
sage: cls # random
[[[6, 7], [4, 5], [0, 1], [2, 3]],
[[5, 7], [0, 4], [3, 6], [1, 2]],
[[0, 2], [4, 7], [1, 3], [5, 6]],
[[3, 4], [0, 7], [1, 5], [2, 6]],
[[3, 7], [1, 6], [0, 5], [2, 4]],
[[0, 6], [2, 7], [1, 4], [3, 5]],
[[4, 6], [0, 3], [2, 5], [1, 7]]]
A non-resolvable design::
sage: Fano = designs.balanced_incomplete_block_design(7,3)
sage: Fano.is_resolvable()
False
sage: Fano.is_resolvable(True)
(False, [])
TESTS::
sage: _,cls1 = AG.is_resolvable(certificate=True)
sage: _,cls2 = AG.is_resolvable(certificate=True)
sage: cls1 is cls2
False
"""
if self._classes is None:
degrees = set(itervalues(self.degrees()))
if len(degrees) != 1:
self._classes = False
else:
from sage.numerical.mip import MixedIntegerLinearProgram
from sage.numerical.mip import MIPSolverException
n_classes = degrees.pop()
p = MixedIntegerLinearProgram(solver=solver)
b = p.new_variable(binary=True)
domain = list(range(self.num_points()))
# Lists of blocks containing i for every i
dual = [[] for i in domain]
for i,B in enumerate(self._blocks):
for x in B:
dual[x].append(i)
# Each class is a partition
for t in range(n_classes):
for x in domain:
p.add_constraint(p.sum(b[t,i] for i in dual[x]) == 1)
# Each set appears exactly once
for i in range(len(self._blocks)):
p.add_constraint(p.sum(b[t,i] for t in range(n_classes)) == 1)
try:
p.solve(log=verbose)
except MIPSolverException:
self._classes = False
else:
# each class is stored as the list of indices of its blocks
self._classes = [[] for _ in range(n_classes)]
for (t,i),v in six.iteritems(p.get_values(b)):
if v:
self._classes[t].append(self._blocks[i])
if check and self._classes is not False:
assert sorted(id(c) for cls in self._classes for c in cls) == sorted(id(b) for b in self._blocks), "some set does not appear exactly once"
domain = list(range(self.num_points()))
for i,c in enumerate(self._classes):
assert sorted(sum(c,[])) == domain, "class {} is not a partition".format(i)
if self._classes is False:
return (False, []) if certificate else False
if certificate:
if self._point_to_index is None:
classes = [[block[:] for block in classs] for classs in self._classes]
else:
classes = [[[self._points[i] for i in block] for block in classs] for classs in self._classes]
return (True, classes)
else:
return True
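    # The integer linear program solved by is_resolvable() above, written out
    # (descriptive comment), with binary variables b[t,i] meaning "block i is
    # assigned to class t" and n_classes equal to the common point degree:
    #   sum_{i : x in B_i} b[t,i] == 1   for every class t and point x
    #   sum_t b[t,i] == 1                for every block i
    # i.e. every class partitions the ground set and every block is used once.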
def coloring(self, k=None, solver=None, verbose=0):
r"""
Compute a (weak) `k`-coloring of the hypergraph
A weak coloring of a hypergraph `\mathcal H` is an assignment of colors
to its vertices such that no set is monochromatic.
INPUT:
- ``k`` (integer) -- compute a coloring with `k` colors if an integer is
provided, otherwise returns an optimal coloring (i.e. with the minimum
possible number of colors).
- ``solver`` -- (default: ``None``) Specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method
:meth:`~sage.numerical.mip.MixedIntegerLinearProgram.solve`
of the class
:class:`~sage.numerical.mip.MixedIntegerLinearProgram`.
- ``verbose`` -- non-negative integer (default: ``0``). Set the level
of verbosity you want from the linear program solver. Since the
problem is `NP`-complete, its solving may take some time depending on
the graph. A value of 0 means that there will be no message printed by
the solver.
EXAMPLES:
The Fano plane has chromatic number 3::
sage: len(designs.steiner_triple_system(7).coloring())
3
One admissible 3-coloring::
sage: designs.steiner_triple_system(7).coloring() # not tested - architecture-dependent
[[0, 2, 5, 1], [4, 3], [6]]
The chromatic number of a graph is equal to the chromatic number of its
2-uniform corresponding hypergraph::
sage: g = graphs.PetersenGraph()
sage: H = IncidenceStructure(g.edges(labels=False))
sage: len(g.coloring())
3
sage: len(H.coloring())
3
"""
if k is None:
for k in range(self.num_points()+1):
try:
return self.coloring(k)
except ValueError:
pass
if k == 0:
if self.num_points():
raise ValueError("Only empty hypergraphs are 0-chromatic")
return []
elif any(len(x) == 1 for x in self._blocks):
raise RuntimeError("No coloring can be defined "
"when there is a set of size 1")
elif k == 1:
if any(x for x in self._blocks):
raise ValueError("This hypergraph contains a set. "
"It is not 1-chromatic")
return [self.ground_set()]
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
b = p.new_variable(binary=True)
for x in range(self.num_points()):
p.add_constraint(p.sum(b[x,i] for i in range(k)) == 1)
for s in self._blocks:
for i in range(k):
p.add_constraint(p.sum(b[x,i] for x in s) <= len(s)-1)
try:
p.solve(log=verbose)
except MIPSolverException:
raise ValueError("This hypergraph is not {}-colorable".format(k))
col = [[] for i in range(k)]
for (x,i),v in six.iteritems(p.get_values(b)):
if v:
col[i].append(self._points[x])
return col
def edge_coloring(self):
r"""
Compute a proper edge-coloring.
A proper edge-coloring is an assignment of colors to the sets of the
incidence structure such that two sets with non-empty intersection
receive different colors. The coloring returned minimizes the number of
colors.
OUTPUT:
A partition of the sets into color classes.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Incidence structure with 6 points and 4 blocks
sage: C = H.edge_coloring()
sage: C # random
[[[3, 4, 5]], [[2, 3, 4]], [[4, 5, 6], [1, 2, 3]]]
sage: Set(map(Set,sum(C,[]))) == Set(map(Set,H.blocks()))
True
"""
from sage.graphs.graph import Graph
blocks = self.blocks()
blocks_sets = [frozenset(_) for _ in blocks]
g = Graph([list(range(self.num_blocks())), lambda x,y: len(blocks_sets[x]&blocks_sets[y])], loops = False)
return [[blocks[i] for i in C] for C in g.coloring(algorithm="MILP")]
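# A minimal illustration (toy blocks, illustrative assumption only) of the
# reduction used above: two blocks are adjacent in ``g`` exactly when they share
# a point, so a proper vertex coloring of ``g`` is a proper edge coloring of the
# incidence structure.
#
#     blocks = [[1, 2, 3], [2, 3, 4], [4, 5, 6]]
#     # blocks 0 and 1 intersect, blocks 1 and 2 intersect, blocks 0 and 2 do not,
#     # so blocks 0 and 2 may share a color while block 1 needs a second color.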
def _spring_layout(self):
r"""
Return a spring layout for the points.
The layout is computed by creating a graph `G` on the points *and* sets
of the incidence structure. Each set is then made adjacent in `G` with
all points it contains before a spring layout is computed for this
graph. The position of the points in the graph gives the position of the
points in the final drawing.
.. NOTE::
This method also returns the position of the "fake" points,
i.e. those representing the sets.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Incidence structure with 6 points and 4 blocks
sage: L = H._spring_layout()
sage: L # random
{1: (0.238, -0.926),
2: (0.672, -0.518),
3: (0.449, -0.225),
4: (0.782, 0.225),
5: (0.558, 0.518),
6: (0.992, 0.926),
{3, 4, 5}: (0.504, 0.173),
{2, 3, 4}: (0.727, -0.173),
{4, 5, 6}: (0.838, 0.617),
{1, 2, 3}: (0.393, -0.617)}
sage: all(v in L for v in H.ground_set())
True
sage: all(v in L for v in map(Set,H.blocks()))
True
"""
from sage.graphs.graph import Graph
g = Graph()
for s in map(Set, self.blocks()):
for x in s:
g.add_edge((0, s), (1, x))
_ = g.plot(iterations = 50000,save_pos=True)
# The coordinates are rounded because TikZ does not cope well with very precise values.
return {k[1]: (round(x, 3), round(y, 3))
for k, (x, y) in g.get_pos().items()}
def _latex_(self):
r"""
Return a TikZ representation of the incidence structure.
EXAMPLES::
sage: H = Hypergraph([{1,2,3},{2,3,4},{3,4,5},{4,5,6}]); H
Incidence structure with 6 points and 4 blocks
sage: view(H) # not tested
With sets of size 4::
sage: g = graphs.Grid2dGraph(5,5)
sage: C4 = graphs.CycleGraph(4)
sage: sets = Set(map(Set,list(g.subgraph_search_iterator(C4))))
sage: H = Hypergraph(sets)
sage: view(H) # not tested
"""
from sage.functions.trig import arctan2
from sage.misc.misc import warn
warn("\nThe hypergraph is drawn as a set of closed curves. The curve "
"representing a set S go **THROUGH** the points contained "
"in S.\n A point which is encircled by a curve but is not located "
"on its boundary is **NOT** included in the corresponding set.\n"
"\n"
"The colors are picked for readability and have no other meaning.")
latex.add_package_to_preamble_if_available("tikz")
latex.add_to_mathjax_avoid_list("tikz")
if not latex.has_file("tikz.sty"):
raise RuntimeError("You must have TikZ installed in order "
"to draw a hypergraph.")
domain = self.ground_set()
pos = self._spring_layout()
tex = "\\begin{tikzpicture}[scale=3]\n"
colors = ["black", "red", "green", "blue", "cyan", "magenta", "yellow","pink","brown"]
colored_sets = [(s,i) for i,S in enumerate(self.edge_coloring()) for s in S]
# Prints each set with its color
for s,i in colored_sets:
current_color = colors[i%len(colors)]
if len(s) == 2:
s = list(s)
tex += ("\\draw[color="+str(current_color)+","+
"line width=.1cm,opacity = .6] "+
str(pos[s[0]])+" -- "+str(pos[s[1]])+";\n")
continue
tex += ("\\draw[color="+str(current_color)+","
"line width=.1cm,opacity = .6,"
"line cap=round,"
"line join=round]"
"plot [smooth cycle,tension=1] coordinates {")
# Reorders the vertices of s according to their angle with the
# "center", i.e. the vertex representing the set s
cx, cy = pos[Set(s)]
s = [pos[_] for _ in s]
s = sorted(s, key = lambda x_y: arctan2(x_y[0] - cx, x_y[1] - cy))
for x in s:
tex += str(x)+" "
tex += "};\n"
# Prints each vertex
for v in domain:
tex += "\\draw node[fill,circle,scale=.5,label={90:$"+latex(v)+"$}] at "+str(pos[v])+" {};\n"
tex += "\\end{tikzpicture}"
return tex
from sage.misc.rest_index_of_methods import gen_rest_table_index
__doc__ = __doc__.format(METHODS_OF_IncidenceStructure=gen_rest_table_index(IncidenceStructure))
| [
[
[
1683,
1697
]
],
[
[
1706,
1709
],
[
17658,
17661
],
[
17740,
17743
],
[
32528,
32531
],
[
50420,
50423
],
[
70054,
70057
],
[
74343,
74346
]
],
[
[
1726,
1736
],
[
68622,
68632
]
],
[
[
1759,
1764
],
[
7516,
7521
],
[
7642,
7647
],
[
7761,
7766
],
[
7924,
7929
],
[
11816,
11821
],
[
14946,
14951
],
[
14961,
14966
],
[
15072,
15077
],
[
21318,
21323
],
[
32276,
32281
],
[
36401,
36406
],
[
37175,
37180
],
[
44459,
44464
],
[
44838,
44843
],
[
44987,
44992
],
[
47273,
47278
],
[
47799,
47804
],
[
50274,
50279
],
[
56976,
56981
],
[
63314,
63319
],
[
65086,
65091
],
[
65147,
65152
],
[
69060,
69065
],
[
69386,
69391
],
[
69593,
69598
],
[
69678,
69683
],
[
70001,
70006
],
[
70412,
70417
],
[
72984,
72989
],
[
73894,
73899
],
[
73971,
73976
],
[
74040,
74045
],
[
74309,
74314
],
[
75396,
75401
]
],
[
[
1797,
1804
],
[
7714,
7721
],
[
7970,
7977
]
],
[
[
1833,
1838
],
[
78369,
78374
],
[
78428,
78433
],
[
78484,
78489
],
[
80087,
80092
]
],
[
[
1865,
1868
],
[
76970,
76973
],
[
79758,
79761
]
],
[
[
1877,
1895
],
[
80325,
80343
],
[
11470,
11488
],
[
21163,
21181
],
[
23569,
23587
],
[
26537,
26555
],
[
44766,
44784
],
[
44880,
44898
],
[
63290,
63308
],
[
63386,
63404
]
],
[
[
80228,
80248
],
[
80304,
80324
]
],
[
[
80249,
80256
]
]
] |
# -*- coding: utf-8 -*-
# Define source file encoding to support raw unicode characters in Python 2
import sys
# Third party
import pytest
# Project
from ddtrace.compat import to_unicode, PY2, reraise, get_connection_response
# Use different test suites for each Python version; this allows us to test the expected
# results for each Python version rather than writing a generic "works for both" test suite
if PY2:
class TestCompatPY2(object):
def test_to_unicode_string(self):
# Calling `compat.to_unicode` on a non-unicode string
res = to_unicode('test')
assert type(res) == unicode
assert res == 'test'
def test_to_unicode_unicode_encoded(self):
# Calling `compat.to_unicode` on a unicode encoded string
res = to_unicode('\xc3\xbf')
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_unicode_double_decode(self):
# Calling `compat.to_unicode` on a unicode decoded string
# This represents the double-decode issue, which can cause a `UnicodeEncodeError`
# `'\xc3\xbf'.decode('utf-8').decode('utf-8')`
res = to_unicode('\xc3\xbf'.decode('utf-8'))
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_unicode_string(self):
# Calling `compat.to_unicode` on a unicode string
res = to_unicode(u'ÿ')
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_bytearray(self):
# Calling `compat.to_unicode` with a `bytearray` containing unicode
res = to_unicode(bytearray('\xc3\xbf'))
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_bytearray_double_decode(self):
# Calling `compat.to_unicode` with an already decoded `bytearray`
# This represents the double-decode issue, which can cause a `UnicodeEncodeError`
# `bytearray('\xc3\xbf').decode('utf-8').decode('utf-8')`
res = to_unicode(bytearray('\xc3\xbf').decode('utf-8'))
assert type(res) == unicode
assert res == u'ÿ'
def test_to_unicode_non_string(self):
# Calling `compat.to_unicode` on non-string types
assert to_unicode(1) == u'1'
assert to_unicode(True) == u'True'
assert to_unicode(None) == u'None'
assert to_unicode(dict(key='value')) == u'{\'key\': \'value\'}'
def test_get_connection_response(self):
"""Ensure that buffering is in kwargs."""
class MockConn(object):
def getresponse(self, *args, **kwargs):
assert 'buffering' in kwargs
mock = MockConn()
get_connection_response(mock)
else:
class TestCompatPY3(object):
def test_to_unicode_string(self):
# Calling `compat.to_unicode` on a non-unicode string
res = to_unicode('test')
assert type(res) == str
assert res == 'test'
def test_to_unicode_unicode_encoded(self):
# Calling `compat.to_unicode` on a unicode encoded string
res = to_unicode('\xff')
assert type(res) == str
assert res == 'ÿ'
def test_to_unicode_unicode_string(self):
# Calling `compat.to_unicode` on a unicode string
res = to_unicode('ÿ')
assert type(res) == str
assert res == 'ÿ'
def test_to_unicode_bytearray(self):
# Calling `compat.to_unicode` with a `bytearray` containing unicode
res = to_unicode(bytearray('\xff', 'utf-8'))
assert type(res) == str
assert res == 'ÿ'
def test_to_unicode_non_string(self):
# Calling `compat.to_unicode` on non-string types
assert to_unicode(1) == '1'
assert to_unicode(True) == 'True'
assert to_unicode(None) == 'None'
assert to_unicode(dict(key='value')) == '{\'key\': \'value\'}'
def test_get_connection_response(self):
"""Ensure that buffering is NOT in kwargs."""
class MockConn(object):
def getresponse(self, *args, **kwargs):
assert 'buffering' not in kwargs
mock = MockConn()
get_connection_response(mock)
class TestPy2Py3Compat(object):
"""Common tests to ensure functions are both Python 2 and
Python 3 compatible.
"""
def test_reraise(self):
# ensure the `raise` function is Python 2/3 compatible
with pytest.raises(Exception) as ex:
try:
raise Exception('Ouch!')
except Exception:
# original exception we want to re-raise
(typ, val, tb) = sys.exc_info()
try:
# this exception doesn't allow a re-raise, and we need
# to use the previous one collected via `exc_info()`
raise Exception('Obfuscate!')
except Exception:
pass
# this call must be Python 2 and 3 compatible
raise reraise(typ, val, tb)
assert ex.value.args[0] == 'Ouch!'
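# A minimal sketch (illustrative only, not part of the test suite) of the pattern
# test_reraise exercises: capture sys.exc_info() for the original exception, then
# re-raise it even if another exception was raised and handled in between.
#
#     from ddtrace.compat import reraise
#     import sys
#     try:
#         raise ValueError('original')
#     except ValueError:
#         typ, val, tb = sys.exc_info()
#         try:
#             raise RuntimeError('later failure')
#         except RuntimeError:
#             pass
#         reraise(typ, val, tb)   # re-raises ValueError('original')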
| [
[
[
107,
110
],
[
4899,
4902
]
],
[
[
133,
139
],
[
4689,
4695
]
],
[
[
178,
188
],
[
581,
591
],
[
813,
823
],
[
1208,
1218
],
[
1449,
1459
],
[
1681,
1691
],
[
2109,
2119
],
[
2359,
2369
],
[
2400,
2410
],
[
2447,
2457
],
[
2494,
2504
],
[
3035,
3045
],
[
3263,
3273
],
[
3479,
3489
],
[
3709,
3719
],
[
3942,
3952
],
[
3982,
3992
],
[
4028,
4038
],
[
4074,
4084
]
],
[
[
190,
193
],
[
416,
419
]
],
[
[
195,
202
],
[
5276,
5283
]
],
[
[
204,
227
],
[
2839,
2862
],
[
4426,
4449
]
],
[
[
431,
444
]
],
[
[
2886,
2899
]
],
[
[
4464,
4480
]
]
] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import importlib.util
import logging
import math
import os
import sys
import warnings
from collections import defaultdict
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.logging.meters import safe_round
from fairseq.modules import gelu, gelu_accurate, sin, swish
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
logger = logging.getLogger(__name__)
def split_paths(paths: str) -> List[str]:
return paths.split(os.pathsep) if "://" not in paths else paths.split("|")
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, '__len__') and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample):
def _move_to_cuda(tensor):
return tensor.cuda()
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def get_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None
):
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore)
if align_dict is not None:
hypo_str = replace_unk(
hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
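# A minimal worked example (illustrative values, not from the fairseq docs):
# >>> make_positions(torch.tensor([[7, 9, 1, 1]]), padding_idx=1)
# tensor([[2, 3, 1, 1]])
# positions start at padding_idx + 1 and padding positions keep padding_idx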
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
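# A minimal worked example (illustrative values):
# >>> src = torch.tensor([[5, 6, 1, 1]])   # right-padded, padding_idx = 1
# >>> convert_padding_direction(src, padding_idx=1, right_to_left=True)
# tensor([[1, 1, 5, 6]])
# the remainder trick rotates each row so its padding moves to the other side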
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048*32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(chunk_size, has_inf, [cur_device_grads], False)
norms.append(norm[0])
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.)
else:
return torch.tensor(0.)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
total_norm = torch.norm(
torch.stack([torch.norm(g, p=2, dtype=torch.float32) for g in grads])
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads:
g.mul_(clip_coef)
return total_norm
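# A minimal worked example (illustrative values) of the clipping step above:
# if total_norm = 10 and max_norm = 5, clip_coef ~= 0.5 and every gradient is halved;
# if total_norm = 2 and max_norm = 5, clip_coef clamps to 1 and gradients are unchanged.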
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
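# Minimal worked examples (illustrative values):
# >>> _match_types(1024, (512, 256))
# ((1024, 1024), (512, 256))
# >>> _match_types({'a': 100}, 3)
# ({'a': 100}, {'a': 3})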
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
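# Minimal worked examples (illustrative values):
# >>> resolve_max_positions(None, (1024, 1024), (512, 2048))
# (512, 1024)
# >>> resolve_max_positions((1024, 1024), 256)
# (256, 256)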
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path):
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
if loss is None:
return 0.
try:
return safe_round(base ** loss, round)
except OverflowError:
return float('inf')
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == 'sin':
return sin
elif activation == 'swish':
return swish
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"sin",
"swish",
"tanh",
"linear",
]
@contextlib.contextmanager
def eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def set_torch_seed(seed):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
assert isinstance(seed, int)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@contextlib.contextmanager
def with_torch_seed(seed):
assert isinstance(seed, int)
rng_state = torch.get_rng_state()
cuda_rng_state = torch.cuda.get_rng_state()
set_torch_seed(seed)
yield
torch.set_rng_state(rng_state)
torch.cuda.set_rng_state(cuda_rng_state)
def parse_alignment(line):
"""
Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
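# A minimal worked example (illustrative values):
# >>> parse_alignment("0-0 1-2 2-1")
# tensor([0, 0, 1, 2, 2, 1], dtype=torch.int32)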
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
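# A minimal worked example (illustrative values; 2 plays the role of eos):
# >>> get_token_to_word_mapping([4, 7, 9, 2], exclude_list=[2])
# {0: 1, 1: 2, 2: 3, 3: 3}
# word indices are 1-based and excluded tokens inherit the preceding word's index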
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero().squeeze(dim=-1)
src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero().squeeze(dim=-1)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
If size is empty, using the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
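# A minimal worked example (illustrative values):
# >>> x = torch.zeros(2, 3)
# >>> new_arange(x)
# tensor([[0, 1, 2],
#         [0, 1, 2]])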
def get_tpu_device(args):
import torch_xla.core.xla_model as xm
return xm.xla_device()
def logging_multiple_line_messages(msg):
msg_arr = msg.split("\n")
for line in msg_arr:
logger.info(line)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
Given a list of CudaEnvironments, pretty print them.
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
msg_arr = [first_line]
for r, env in enumerate(cuda_env_list):
msg_arr.append(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
msg_arr.append(first_line)
logging_multiple_line_messages("\n".join(msg_arr))
| [
[
[
185,
195
],
[
15270,
15280
],
[
15796,
15806
]
],
[
[
203,
207
],
[
11285,
11289
],
[
11853,
11857
]
],
[
[
215,
229
],
[
13475,
13484
]
],
[
[
237,
244
],
[
838,
845
]
],
[
[
252,
256
]
],
[
[
264,
266
],
[
933,
935
],
[
13016,
13018
],
[
13062,
13064
],
[
13122,
13124
],
[
13152,
13154
],
[
13228,
13230
],
[
13346,
13348
]
],
[
[
274,
277
],
[
13404,
13407
],
[
13429,
13432
]
],
[
[
285,
293
],
[
10188,
10196
],
[
14210,
14218
]
],
[
[
318,
329
]
],
[
[
352,
362
],
[
16948,
16958
]
],
[
[
382,
390
],
[
14300,
14308
]
],
[
[
392,
396
],
[
2628,
2632
],
[
2559,
2563
],
[
2569,
2573
],
[
2989,
2993
],
[
2999,
3003
],
[
2880,
2884
],
[
2890,
2894
],
[
2946,
2950
]
],
[
[
398,
402
],
[
899,
903
],
[
15084,
15088
]
],
[
[
404,
412
],
[
2619,
2627
],
[
2638,
2646
],
[
2550,
2558
],
[
2579,
2587
],
[
2980,
2988
],
[
3009,
3017
],
[
2871,
2879
],
[
2900,
2908
],
[
2956,
2964
]
],
[
[
421,
432
]
],
[
[
440,
445
],
[
4798,
4803
],
[
7042,
7047
],
[
7277,
7282
],
[
7387,
7392
],
[
8024,
8029
],
[
8074,
8079
],
[
8267,
8272
],
[
8336,
8341
],
[
8641,
8646
],
[
9159,
9164
],
[
9185,
9190
],
[
9228,
9233
],
[
9420,
9425
],
[
9445,
9450
],
[
9504,
9509
],
[
9515,
9520
],
[
9624,
9629
],
[
9664,
9669
],
[
9944,
9949
],
[
10007,
10012
],
[
10039,
10044
],
[
10410,
10415
],
[
10438,
10443
],
[
10451,
10456
],
[
10476,
10481
],
[
13683,
13688
],
[
13881,
13886
],
[
14881,
14886
],
[
15736,
15741
],
[
15764,
15769
],
[
15898,
15903
],
[
15941,
15946
],
[
16007,
16012
],
[
16042,
16047
],
[
16519,
16524
],
[
18164,
18169
],
[
18530,
18535
],
[
18573,
18578
],
[
1525,
1530
],
[
2304,
2309
],
[
2320,
2325
],
[
2373,
2378
]
],
[
[
453,
477
],
[
13600,
13601
],
[
13655,
13656
],
[
13790,
13791
],
[
13849,
13850
],
[
14428,
14429
]
],
[
[
513,
523
],
[
14004,
14014
]
],
[
[
552,
556
],
[
14481,
14485
]
],
[
[
558,
571
],
[
14650,
14663
],
[
14719,
14732
]
],
[
[
573,
576
],
[
14778,
14781
]
],
[
[
578,
583
],
[
14829,
14834
]
],
[
[
632,
650
],
[
2507,
2525
],
[
2828,
2846
]
],
[
[
669,
675
],
[
2647,
2653
],
[
2588,
2594
],
[
3018,
3024
],
[
2909,
2915
],
[
2965,
2971
]
],
[
[
704,
723
],
[
9278,
9297
]
],
[
[
728,
757
],
[
10075,
10104
]
],
[
[
789,
818
],
[
10075,
10104
]
],
[
[
829,
835
],
[
4115,
4121
],
[
18434,
18440
]
],
[
[
872,
883
]
],
[
[
995,
1022
]
],
[
[
1392,
1407
],
[
2066,
2081
],
[
2428,
2443
]
],
[
[
1972,
1984
]
],
[
[
2111,
2122
]
],
[
[
2472,
2493
]
],
[
[
2793,
2814
]
],
[
[
3315,
3330
]
],
[
[
3942,
3961
]
],
[
[
4205,
4220
]
],
[
[
4911,
4925
]
],
[
[
5136,
5147
],
[
6079,
6090
]
],
[
[
5791,
5814
]
],
[
[
6515,
6529
]
],
[
[
7117,
7126
]
],
[
[
7181,
7196
],
[
7222,
7237
],
[
7255,
7270
],
[
7309,
7324
],
[
7346,
7361
],
[
7409,
7424
],
[
7441,
7456
]
],
[
[
7473,
7498
]
],
[
[
8424,
8428
]
],
[
[
8587,
8610
],
[
10131,
10154
]
],
[
[
9563,
9578
]
],
[
[
10818,
10835
]
],
[
[
10965,
10977
],
[
12493,
12505
]
],
[
[
11698,
11719
]
],
[
[
12886,
12904
]
],
[
[
13518,
13525
]
],
[
[
13704,
13715
]
],
[
[
13902,
13916
]
],
[
[
14096,
14115
],
[
1113,
1132
],
[
14530,
14549
]
],
[
[
14262,
14279
]
],
[
[
15050,
15078
]
],
[
[
15300,
15304
]
],
[
[
15408,
15422
]
],
[
[
15548,
15562
],
[
15972,
15986
]
],
[
[
15826,
15841
]
],
[
[
16089,
16104
]
],
[
[
16789,
16814
],
[
17308,
17333
],
[
17380,
17405
]
],
[
[
17058,
17080
]
],
[
[
17933,
17943
]
],
[
[
18237,
18251
]
],
[
[
18334,
18364
],
[
19596,
19626
]
],
[
[
18460,
18475
]
]
] |
from numpy.core.fromnumeric import reshape
import torch
import numpy as np
import pickle
from itertools import combinations, permutations
from sklearn.decomposition import PCA
from sklearn.manifold import MDS, TSNE
from scipy.stats import pearsonr, ttest_ind
import statsmodels.api as sm
from dataset import get_loaders, WineGrid
def analyze_episodic(model, test_data, args):
# Collect attention weights for each sample in test set
model.eval()
m, x_ = test_data[0] # only 1 episode in test data
m = m.to(args.device) # m: [1, n_train, sample_dim]
x = x_[:,:,:-1].to(args.device) # x: [1, n_test, sample_dim]
y = x_[:,:,-1].type(torch.long).to(args.device)
y = y.squeeze() # y: [1, n_test]
with torch.no_grad():
y_hat, attention = model(x, m)
attention = attention[0] # first (only) memory layer
attention = np.squeeze(attention)
# attention: [n_train, n_test]
# Check the retrieval weights of relevant vs. irrelevant training samples
grid = test_data.grid
train = grid.train # train *samples* in test *episode*
test = grid.test # test *samples* in test *episode*
n_train = len(train)
n_test = len(test)
rel_ids = grid.hub_sample_ids # relevant memory ids (train samples)
attn_ranks = np.zeros_like(attention)
for i in range(n_test):
argsorted_attn = np.argsort(attention[i])
ranks = np.zeros([n_train])
ranks[argsorted_attn] = np.arange(n_train)
attn_ranks[i] = ranks
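# A minimal worked example (illustrative values) of the rank computation above:
# attention[i] = [0.1, 0.7, 0.2]  -> argsorted_attn = [0, 2, 1]
# ranks[argsorted_attn] = arange(3) -> ranks = [0, 2, 1]
# so the most-attended memory receives the highest rank (n_train - 1).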
relevant = []
irrelevant = []
for i in range(n_test):
for j in range(n_train):
if j in rel_ids[i]:
relevant.append(attn_ranks[i,j])
else:
irrelevant.append(attn_ranks[i,j])
rank_data = {"relevant": relevant, "irrelevant": irrelevant}
# Check how often a legitimate "path" was retrieved in the top 5%
k = 8 # top k memories with highest weights (k = 8 means 5 percent)
used_hub = []
for i in range(n_test):
highest_attn = np.argsort(attention[i])[-k:]
test_f1, test_f2, test_ctx, test_y = test[i]
# Get relevant hubs for current test sample
hubs = []
for rel_id in rel_ids[i]:
train_sample = train[rel_id]
train_f1, train_f2 = train_sample[0], train_sample[1]
if train_f1 in [test_f1, test_f2]:
hubs.append(train_f2)
if train_f2 in [test_f1, test_f2]:
hubs.append(train_f1)
hubs = list(set(hubs))
hubs_dict = {h:[] for h in hubs}
assert len(hubs) == 2, "shouldn't be more than 2 hubs?"
# Check if one of the hubs appears with f1 and f2
attended_train = [train[idx] for idx in highest_attn]
for sample in attended_train:
train_f1, train_f2, train_ctx, train_y = sample
if train_ctx != test_ctx:
continue # must be samples testing the same axis to be relevant
if hubs[0] == train_f1:
hubs_dict[hubs[0]].append(sample[1])
if hubs[1] == sample[0]:
hubs_dict[hubs[1]].append(sample[1])
if hubs[0] == sample[1]:
hubs_dict[hubs[0]].append(sample[0])
if hubs[1] == sample[1]:
hubs_dict[hubs[1]].append(sample[0])
if test_f1 in hubs_dict[hubs[0]] and test_f2 in hubs_dict[hubs[0]]:
used_hub.append(True)
elif test_f1 in hubs_dict[hubs[1]] and test_f2 in hubs_dict[hubs[1]]:
used_hub.append(True)
else:
used_hub.append(False)
p_used_hub = np.mean(used_hub)
print("Proportion that episodic system retrieved a hub path:", p_used_hub)
results = {"rank_data":rank_data, "p_used_hub": p_used_hub}
return results
def analyze_cortical(model, test_data, analyze_loader, args):
# Useful dictionaries from test dataset
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
# locs = [idx2loc[idx] for idx in idxs]
idx2tensor = test_data.idx2tensor
model.eval()
# Get embeddings from model for each face
face_embedding = model.face_embedding
face_embedding.to(args.device)
embeddings = []
# Get hiddens from the recurrent model for each face
# if the model was stepwisemlp
if args.cortical_model=='stepwisemlp':
hiddens = [[] for i in range(2)]
hiddens_cong = [[] for i in range(2)]
hiddens_incong = [[] for i in range(2)]
hiddens_ctxs = [[[] for j in range(args.N_contexts)] for i in range(2)]
else:
hiddens = [] # hidden reps. for both contexts
hiddens_incong = []
hiddens_cong = []
hiddens_ctxs = [[] for i in range(args.N_contexts)]
idxs1 = []
idxs2 = []
idxs1_ctxs = [[] for i in range(args.N_contexts)]
idxs2_ctxs = [[] for i in range(args.N_contexts)]
samples = []
samples_ctxs = [[] for i in range(args.N_contexts)]
samples_cong = []
samples_incong = []
with torch.no_grad():
for idx in range(n_states):
face_tensor = idx2tensor[idx].unsqueeze(0).to(args.device)
embedding = face_embedding(face_tensor) # [1, state_dim]
embedding = embedding.cpu().numpy()
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0) # [n_states, state_dim]
for batch in analyze_loader:
if args.cortical_task == 'face_task':
f1, f2, ctx, out, idx1, idx2 = batch
elif args.cortical_task == 'wine_task':
f1, f2, ctx, out1, out2, idx1, idx2 = batch
idx1 = idx1[0]
idx2 = idx2[0]
samples.append(batch)
(x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]
f1 = f1.to(args.device)
f2 = f2.to(args.device)
ctx = ctx.to(args.device)
# create congruent and incongruent groups
grid_angle = np.arctan2((y2-y1),(x2-x1))
phi = np.sin(2*grid_angle)
if np.abs(phi)<1e-5:
# for congruent/incongruent grouping,
# zero out angles very close to zero
# so sign() does not turn them into 1 or -1
cong = 0
else:
cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none
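# A minimal worked example (illustrative grid locations) of this congruency rule:
# (x1,y1)=(0,0), (x2,y2)=(1,1) -> angle = pi/4,  phi = sin(pi/2) = 1 -> cong = +1 (on-diagonal)
# (x1,y1)=(0,1), (x2,y2)=(1,0) -> angle = -pi/4, phi = -1           -> cong = -1 (off-diagonal)
# (x1,y1)=(0,0), (x2,y2)=(1,0) -> angle = 0,     phi = 0            -> cong = 0  (axis-aligned tie)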
# get the hidden reps.
y_hat, out = model(f1, f2, ctx)
# y_hat: [1, 2]
# rnn_out: [seq_length, 1, hidden_dim]: [3, 1, 128]
# mlp_out: [1, hidden_dim]: [1, 128]
if args.order_ctx == 'first':
f1_ind = 1
f2_ind = 2
elif args.order_ctx == 'last':
f1_ind = 0
f2_ind = 1
if args.cortical_model=='stepwisemlp':
out1, out2 = out
out1 = out1.cpu().numpy()
out2 = out2.cpu().numpy()
hiddens[0].append(out1)
hiddens[1].append(out2)
hiddens_ctxs[0][ctx].append(out1)
hiddens_ctxs[1][ctx].append(out2)
else:
out = out.cpu().numpy()
hiddens.append(out)
hiddens_ctxs[ctx].append(out)
ctx = ctx[0].cpu().numpy()
idxs1.append(idx1)
idxs2.append(idx2)
idxs1_ctxs[ctx].append(idx1)
idxs2_ctxs[ctx].append(idx2)
samples_ctxs[ctx].append(batch)
if ((cong==1) and ((ctx==0) or (ctx==1))):
if args.cortical_model=='stepwisemlp':
hiddens_cong[0].append(out1)
hiddens_cong[1].append(out2)
else:
hiddens_cong.append(out)
samples_cong.append(batch)
elif ((cong==-1) and ((ctx==0) or (ctx==1))):
if args.cortical_model=='stepwisemlp':
hiddens_incong[0].append(out1)
hiddens_incong[1].append(out2)
else:
hiddens_incong.append(out)
samples_incong.append(batch)
hiddens = np.asarray(hiddens).squeeze()
# for n_ctx=2, data_len = 16*12*2=384 (n_states:16, n_states-ties:12, permutation:2)
# rnn hiddens: [data_len, seq_length, hidden_dim] : [384, 3, 128]
# mlp hiddens: [data_len, hidden_dim]: [384, 128]
# stepwisemlp hiddens: [num_hidds, data_len, hidden_dim]: [2, 384, 128]
# with diagonals - wine task = data_len = (n_ctx-n_diag)*192+n_diag*212
# [n_ctx:2, data_len:384], [n_ctx:4, data_len:768], [n_ctx:8, data_len: 1616]
hiddens_incong = np.asarray(hiddens_incong).squeeze()
hiddens_cong = np.asarray(hiddens_cong).squeeze()
# rnn hiddens_cong/incong: [144, 3, 128]
# mlp hiddens_cong/incong: [144, 128]
# stepwise mlp hiddens_cong/incong: [2, 144, 128]
# hiddens_ctx: even tho it is 384, but it is ordered based on the contexts
if args.cortical_model=='stepwisemlp':
hiddens_ctx = np.concatenate(np.asarray(hiddens_ctxs).squeeze(), axis=1)
# hiddens_ctxs: [n_hidds=2, n_ctx, 192, 1, 128]
# hiddens_ctx: [n_hidds=2, 384, 128]
hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=1)
# hiddens_inc_c: [n_hidds, 384-ties, 128]: [2, 288, 128]
else:
hiddens_ctx = np.concatenate(hiddens_ctxs, axis = 0).squeeze()
# mlp hiddens_ctxs: [n_ctx, 192, 1, 128]
# rnn hiddens_ctxs: [n_ctx, n_trials=192, 3, 1, 128]
# rnn hiddens_ctx: [384, 3, 128]
# mlp hiddens_ctx: [384, 128]
hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=0)
# rnn hiddens_inc_c: [384-ties, seq_length, 128]: [288, 3, 128]
# mlp hiddens_inc_c: [384-ties, 128]: [288, 128]
if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):
hiddens_ctx = hiddens_ctx[:, -1, :] # [384, 128]
hiddens_inc_c = hiddens_inc_c[:, -1, :] #[288, 128]
samples_inc_c = np.concatenate((samples_incong, samples_cong), axis=0)
if args.cortical_model=='stepwisemlp':
avg_hidden = np.zeros([2, n_states, hiddens.shape[-1]])
avg_hidden_ctxs = np.zeros([2, args.N_contexts, n_states, hiddens.shape[-1]])
else:
avg_hidden = np.zeros([n_states, hiddens.shape[-1]])
avg_hidden_ctxs = np.zeros([args.N_contexts, n_states, hiddens.shape[-1]])
if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):
hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze() # [n_ctx, n_tirals=192, seq_len=3, hidd_dim=128]
# Take average for each face based on its location
for f in range(n_states):
temp1 = [np.expand_dims(hiddens[i,f1_ind,:], axis=0)
for i, idx1 in enumerate(idxs1) if idx1==f]
temp2 = [np.expand_dims(hiddens[i,f2_ind,:], axis=0)
for i, idx2 in enumerate(idxs2) if idx2==f]
if len(temp1 + temp2)>1:
avg_hidden[f] = np.concatenate(temp1 + temp2, axis=0).mean(axis=0)
for ctx in range(args.N_contexts):
temp1_ctxs = [hiddens_ctxs[ctx,i,f1_ind,:]
for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]
temp2_ctxs = [hiddens_ctxs[ctx,i,f2_ind,:]
for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f]
if len(temp1_ctxs + temp2_ctxs)>1:
m = np.zeros([2,hiddens_ctxs.shape[-1]])
m[0] = np.mean(np.asarray(temp1_ctxs), axis=0)
m[1] = np.mean(np.asarray(temp2_ctxs), axis=0)
avg_hidden_ctxs[ctx, f, :] = np.mean(m, axis=0)
# avg_hidden_ctxs[ctx, f, :] = np.concatenate(temp1_ctxs + temp2_ctxs, axis=0).mean(axis=0)
# avg_hidden_ctxs: [n_ctx, n_states, hidden_dim]: [2, 16, 128]
avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)
elif args.cortical_model in ['mlp', 'mlp_cc']:
for f in range(n_states):
temp = [hiddens[i,:]
for i, (idx1, idx2) in enumerate(zip(idxs1, idxs2))
if ((idx1==f) | (idx2==f))]
if len(temp)>1:
avg_hidden[f] = np.mean(temp, axis=0)
for ctx in range(args.N_contexts):
temp_ctxs = [hiddens_ctxs[ctx][i]
for i, (idx1, idx2) in enumerate(zip(idxs1_ctxs[ctx], idxs2_ctxs[ctx]))
if ((idx1==f) | (idx2==f))]
if len(temp_ctxs)>1:
avg_hidden_ctxs[ctx, f, :] = np.mean(temp_ctxs, axis=0)
# avg_hidden_ctxs: [n_contexts, n_states, hidden_dim]: [2, 16, 128]
avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)
elif args.cortical_model=='stepwisemlp':
# todo: how to do the averaging? over both hidden reps?
# hiddens_ctxs and hiddens_inc_c should have two dimensions for the PCA results
hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze()
for f in range(n_states):
temp1 = [hiddens[0,i,:]
for i, idx1 in enumerate(idxs1) if idx1==f]
temp2 = [hiddens[1,i,:]
for i, idx2 in enumerate(idxs2) if idx2==f]
if len(temp1)>1:
avg_hidden[0,f,:] = np.mean(temp1, axis=0)
if len(temp2)>1:
avg_hidden[1,f,:] = np.mean(temp2, axis=0)
# avg_hidden: [n_hidd, n_states, hidd_dim]: [2,16,128]
for ctx in range(args.N_contexts):
temp1_ctxs = [hiddens_ctxs[0,ctx,i,:]
for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]
temp2_ctxs = [hiddens_ctxs[1,ctx,i,:]
for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f]
if len(temp1_ctxs)>1:
avg_hidden_ctxs[0,ctx,f,:] = np.mean(temp1_ctxs, axis=0)
if len(temp2_ctxs)>1:
avg_hidden_ctxs[1,ctx,f,:] = np.mean(temp2_ctxs, axis=0)
# avg_hidden_ctxs: [n_hidd, n_contexts, n_states, hidden_dim]: [2, 2, 16, 128]
avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=1)
samples_res = {'samples': samples,
'samples_ctxs': samples_ctxs,
'samples_inc_c': samples_inc_c}
results = {'samples_res':samples_res,
'idxs1': idxs1, 'idxs2': idxs2,
'embeddings': embeddings, # [16, 32]
'hiddens_ctx':hiddens_ctx, # mlp/rnn: [384,128] or in stepwisedmlp: [2,384,128]
'hiddens_ctxs':hiddens_ctxs, # mlp: [n_ctx, 192, 1, 128], rnn: [n_ctx, 192, 3, 128]
'avg_hidden':avg_hidden, # [16, 128] or [n_hidd=2, 16, 128]
'avg_hidden_ctx':avg_hidden_ctx, # mlp/rnn: [32, 128] or stepwisedmlp: [n_hidd=2, 32, 128]
# the reason to keep these is that the concatenation differs for each model, and we want to handle that here
'avg_hidden_ctxs':avg_hidden_ctxs, # [mlp/rnn: n_ctx, 16, 128] or stepwisedmlp: [n_hidd=2, n_ctx, 16, 128]
'hiddens_inc_c': hiddens_inc_c} # mlp/rnn: [288, 128] or stepwisedmlp: [n_hidd=2, 288, 128]
return results
def analyze_accs(args, test_data, cortical_result, dist_results):
results = {'train_acc': cortical_result['train_acc'],
'test_acc': cortical_result['test_acc'],
'cong_train_acc': cortical_result['cong_train_acc'],
'incong_train_acc': cortical_result['incong_train_acc'],
'cong_test_acc': cortical_result['cong_test_acc'],
'incong_test_acc': cortical_result['incong_test_acc']}
return results
# cortical_analyze_acc = cortical_result['analyze_acc']
# cortical_analyze_correct = cortical_result['analyze_correct']
def analyze_credit_assignment(args, test_data, cortical_result, dist_results):
results = {'grad_ctx': cortical_result['grad_ctx'],
'grad_f1': cortical_result['grad_f1'],
'grad_f2': cortical_result['grad_f2'],
'grad_ctx_cong': cortical_result['grad_ctx_cong'],
'grad_f1_cong': cortical_result['grad_f1_cong'],
'grad_f2_cong': cortical_result['grad_f2_cong'],
'grad_ctx_incong': cortical_result['grad_ctx_incong'],
'grad_f1_incong': cortical_result['grad_f1_incong'],
'grad_f2_incong': cortical_result['grad_f2_incong']
}
return results
def proportions(args, test_data, cortical_result, dist_results):
hiddens_ctxs = cortical_result['hiddens_ctxs'] # list of len [n_ctx]
hiddens_ctxs = [np.concatenate(h, axis=0) for h in hiddens_ctxs] # list of len [n_ctx] each has either [192,128] or [224,128]
# when n_ctx=8, we have diff number of ties, therefore,
# in the first 4 contexts we have [192, 128], and in
# the second 4 contexts (diagonals) we have [224, 128]
# that is why we go over each of the hiddens in hiddens_ctxs
# and then concat them to create [n_trials, hidden_dim] for each
ps = []
p_pies = []
for h in hiddens_ctxs: # h: [n_trials, hidden_dim]
p_pies.append(np.any(h>0, axis=0)) # list of len [n_ctx], each shape [128,]
ps.append(np.mean(h>0, axis=0)) # [n_ctx, 128]
ps = np.asarray(ps)
# ps: [n_ctx, 128]
# avg num of the trials that were active for each unit, and for each context
s = np.sum(ps, axis=0, keepdims=True)
# s: [1, hidden_dim], overall activity of each hidden unit,
# if that unit was active at all, over all trials (regardless of the context)
n = ps / s
# n: [n_ctx, hidden_dim]
# normalized - how much each unit is active for each ctx over trials
# normalized by the overall activity of that unit for all ctx and trials
# f = n > threshold
# there are some NaNs
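# A minimal worked example (illustrative numbers): with 2 contexts and a unit that
# is active on 8/192 trials in context 0 and 24/192 trials in context 1,
# ps = [[8/192], [24/192]], s = [[32/192]], n = ps / s = [[0.25], [0.75]],
# i.e. 25% of that unit's activity falls in context 0 and 75% in context 1.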
prop_results = {'hiddens_ctxs': hiddens_ctxs,
'p_pies': p_pies, # which trials are active for each hidden unit,
'ps': ps, # on average, how many trials were active for each hidden unit
'n': n}
return prop_results
def calc_dist_ctx(args, test_data, cortical_result, dist_results):
N_contexts = 2 #ToDo: for now it works only for x and y, because of the angles
# Useful dictionaries from test dataset
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
N_contexts = args.N_contexts
N_responses = args.N_responses
avg_hidden_ctxs = cortical_result['avg_hidden_ctxs'] # [2, 16, 128]
# Correlation
grid_dists = []
hidd_dists_ctxs = [[] for i in range(N_contexts)]
grid_1ds_ctxs = [[] for i in range(N_contexts)]
grid_angles = []
samples = []
for idx1, idx2 in combinations(idxs, 2):
(x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]
samples.append((idx1, idx2))
grid_dist = np.sqrt((x1-x2)**2 + (y1-y2)**2)
grid_dists.append(grid_dist)
for ctx in range(N_contexts):
# Euclidean distance between hidden reps. in context ctx
if args.cortical_model=='stepwisemlp':
hidd_dist = np.zeros([2])
hidd1, hidd2 = avg_hidden_ctxs[0,ctx,idx1,:], avg_hidden_ctxs[0,ctx,idx2,:]
hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)
hidd1, hidd2 = avg_hidden_ctxs[1,ctx,idx1,:], avg_hidden_ctxs[1,ctx,idx2,:]
hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)
else:
hidd1, hidd2 = avg_hidden_ctxs[ctx][idx1], avg_hidden_ctxs[ctx][idx2]
hidd_dist = np.linalg.norm(hidd1 - hidd2)
hidd_dists_ctxs[ctx].append(hidd_dist)
# 1D rank - Manhattan distance
loc1 = [x1, y1]
loc2 = [x2, y2]
winegrid = WineGrid(N_responses, N_contexts)
r1, r2 = winegrid.ctx_to_r(ctx, loc1, loc2)
grid_1ds_ctxs[ctx].append(np.abs(r1-r2))
# create on and off diagonal groups
grid_angle = np.arctan2((y2-y1),(x2-x1))
grid_angles.append(grid_angle)
grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]
grid_angles = np.array(grid_angles) # [120]
samples = np.array(samples)
hidd_dists_ctxs = np.array(hidd_dists_ctxs) # [n_ctx, sampels, n_hidds]: in mlp: [2,120], in stepwisemlp: [2,120,2]
phi = np.sin(2*grid_angles)
binary_phi = np.sign(phi)
for i, p in enumerate(phi):
if np.abs(p)<1e-5:
binary_phi[i] = 0
angle_results = {'grid_angles': grid_angles,
'phi': phi,
'binary_phi': binary_phi}
dist_results = {'samples': samples,
'hidd_dists_ctxs': hidd_dists_ctxs,
'grid_1ds_ctxs': grid_1ds_ctxs,
'grid_dists': grid_dists,
'angle_results': angle_results}
return dist_results
def calc_dist(args, test_data, cortical_result, dist_results=None):
# Useful dictionaries from test dataset
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
# Correlation
grid_dists = []
cong_grid_dists = []
incong_grid_dists = []
embed_dists = []
hidd_dists = []
cong_hidd_dists = []
incong_hidd_dists = []
cong_embed_dists = []
incong_embed_dists = []
grid_angles = []
cong_grid_angles = []
incong_grid_angles = []
samples = []
embeddings = cortical_result['embeddings']
avg_hidden = cortical_result['avg_hidden'] # [16, 128]
for idx1, idx2 in combinations(idxs, 2):
(x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]
samples.append((idx1, idx2))
grid_dist = np.sqrt((x1-x2)**2 + (y1-y2)**2)
grid_dists.append(grid_dist)
# Euclidean distance between embeddings
emb1, emb2 = embeddings[idx1], embeddings[idx2]
embed_dist = np.linalg.norm(emb1 - emb2)
embed_dists.append(embed_dist)
# Euclidean distance between hidden reps.
if args.cortical_model=='stepwisemlp':
hidd_dist = np.zeros([2])
hidd1, hidd2 = avg_hidden[0,idx1], avg_hidden[0,idx2]
hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)
hidd1, hidd2 = avg_hidden[1,idx1], avg_hidden[1,idx2]
hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)
else:
hidd1, hidd2 = avg_hidden[idx1], avg_hidden[idx2]
hidd_dist = np.linalg.norm(hidd1 - hidd2)
hidd_dists.append(hidd_dist)
# create on and off diagonal groups
grid_angle = np.arctan2((y2-y1),(x2-x1))
grid_angles.append(grid_angle)
phi = np.sin(2*grid_angle)
if np.abs(phi)<1e-5:
# for congruent/incongruent grouping,
# zero out angles very close to zero
# so sign() does not turn them into 1 or -1
cong = 0
else:
cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none
if cong==1:
cong_hidd_dists.append(hidd_dist)
cong_grid_dists.append(grid_dist)
cong_embed_dists.append(embed_dist)
cong_grid_angles.append(grid_angle)
if cong==-1:
incong_hidd_dists.append(hidd_dist)
incong_grid_dists.append(grid_dist)
incong_embed_dists.append(embed_dist)
incong_grid_angles.append(grid_angle)
grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]
embed_dists = np.array(embed_dists)
hidd_dists = np.array(hidd_dists)
cong_grid_dists = np.array(cong_grid_dists) # [36]
incong_grid_dists = np.array(incong_grid_dists) # [36]
cong_hidd_dists = np.array(cong_hidd_dists)
incong_hidd_dists = np.array(incong_hidd_dists)
cong_embed_dists = np.array(cong_embed_dists)
incong_embed_dists = np.array(incong_embed_dists)
grid_angles = np.array(grid_angles) # [120]
cong_grid_angles = np.array(cong_grid_angles) # [36]
incong_grid_angles = np.array(incong_grid_angles) # [36]
samples = np.array(samples)
phi = np.sin(2*grid_angles)
binary_phi = np.sign(phi)
for i, p in enumerate(phi):
if np.abs(p)<1e-5:
binary_phi[i] = 0
cong_dist_results = {'cong_grid_dists': cong_grid_dists,
'cong_hidd_dists': cong_hidd_dists,
'cong_embed_dists': cong_embed_dists}
incong_dist_results = {'incong_grid_dists': incong_grid_dists,
'incong_hidd_dists': incong_hidd_dists,
'incong_embed_dists': incong_embed_dists}
angle_results = {'grid_angles': grid_angles,
'cong_grid_angles': cong_grid_angles,
'incong_grid_angles': incong_grid_angles,
'phi': phi,
'binary_phi': binary_phi}
dist_results = {'samples': samples,
'grid_dists': grid_dists,
'embed_dists': embed_dists,
'hidd_dists':hidd_dists,
'cong_dist_results': cong_dist_results,
'incong_dist_results': incong_dist_results,
'angle_results': angle_results}
return dist_results
def analyze_dim_red(args, test_data, cortical_result, dist_results, n_components=2):
method = args.dimred_method
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
locs = [idx2loc[idx] for idx in idxs]
embeddings = cortical_result['embeddings'] # [16, 32]
hiddens_ctx = cortical_result['hiddens_ctx'] # [384, 128] or in stepwisemlp: [2,384,128]
avg_hidden = cortical_result['avg_hidden'] # [16, 128] or in stepwisemlp: [2,16,128]
avg_hidden_ctx = cortical_result['avg_hidden_ctx'] # [32, 128] or in stepwisemlp: [2,32,128]
hiddens_inc_c = cortical_result['hiddens_inc_c'] # [288, 128] or in stepwisemlp: [2,288,128]
# hiddens_ctx = np.asarray(hiddens_ctxs)
# hiddens_ctxs = np.concatenate(hiddens_ctxs, axis=0).squeeze() # [384, 128] or [384, 3, 128]
# if ((args.cortical_model == 'rnn') or (args.cortical_model == 'rnncell')):
# hiddens_ctx = hiddens_ctx[:,-1, :]
# avg_hidden_ctxs = np.concatenate(avg_hidden_ctxs, axis=0) # [32, 128]
results = {}
# PCA
if method == 'pca':
pca = PCA(n_components=n_components)
pca_2d_embed = pca.fit_transform(embeddings)
if args.cortical_model=='stepwisemlp':
pca_2d_hidd = np.zeros([hiddens_ctx.shape[0], hiddens_ctx.shape[1], n_components])
pca_2d_avg_hidd = np.zeros([avg_hidden.shape[0], avg_hidden.shape[1], n_components])
pca_2d_ctx_hidd = np.zeros([avg_hidden_ctx.shape[0], avg_hidden_ctx.shape[1], n_components])
pca_2d_incong_cong = np.zeros([hiddens_inc_c.shape[0], hiddens_inc_c.shape[1], n_components])
for h in range(hiddens_ctx.shape[0]):
pca_2d_hidd[h,:,:] = pca.fit_transform(hiddens_ctx[h,:,:]) # this is all the hiddens, no averaging for each face
pca_2d_avg_hidd[h,:,:] = pca.fit_transform(avg_hidden[h,:,:])
pca_2d_ctx_hidd[h,:,:] = pca.fit_transform(avg_hidden_ctx[h,:,:])
pca_2d_incong_cong[h,:,:] = pca.fit_transform(hiddens_inc_c[h,:,:])
else:
pca_2d_hidd = pca.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face
pca_2d_avg_hidd = pca.fit_transform(avg_hidden) # may not need to save this at all
pca_2d_ctx_hidd = pca.fit_transform(avg_hidden_ctx)
pca_2d_incong_cong = pca.fit_transform(hiddens_inc_c)
results = {'embed_2d': pca_2d_embed,
'hidd_2d': pca_2d_hidd,
'avg_hidd_2d': pca_2d_avg_hidd,
'ctx_hidd_2d': pca_2d_ctx_hidd,
'incong_cong_2d': pca_2d_incong_cong,
'grid_locations': locs,
'samples_res': cortical_result['samples_res']}
elif method == 'mds':
# MDS
mds = MDS(n_components=n_components)
mds_2d_embed = mds.fit_transform(embeddings)
mds_2d_hidd = mds.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face
mds_2d_avg_hidd = mds.fit_transform(avg_hidden) # may not need to save this at all
mds_2d_ctx_hidd = mds.fit_transform(avg_hidden_ctx)
mds_2d_incong_cong = mds.fit_transform(hiddens_inc_c)
results = {'embed_2d': mds_2d_embed,
'hidd_2d': mds_2d_hidd,
'avg_hidd_2d': mds_2d_avg_hidd,
'ctx_hidd_2d': mds_2d_ctx_hidd,
'incong_cong_2d': mds_2d_incong_cong}
elif method == 'tsne':
# tSNE
tsne = TSNE(n_components=n_components)
tsne_2d_embed = tsne.fit_transform(embeddings)
tsne_2d_hidd = tsne.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face
tsne_2d_avg_hidd = tsne.fit_transform(avg_hidden) # may not need to save this at all
tsne_2d_ctx_hidd = tsne.fit_transform(avg_hidden_ctx)
tsne_2d_incong_cong = tsne.fit_transform(hiddens_inc_c)
results = {'embed_2d': tsne_2d_embed,
'hidd_2d': tsne_2d_hidd,
'avg_hidd_2d': tsne_2d_avg_hidd,
'ctx_hidd_2d': tsne_2d_ctx_hidd,
'incong_cong_2d': tsne_2d_incong_cong}
return results
def hist_data(args, test_data, cortical_result, dist_results):
# embeddings
cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']
incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']
# hiddens
cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']
incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']
dist_c_inc_results = {'cong_embed_dist': cong_embed_dists,
'incong_embed_dist': incong_embed_dists,
'cong_hidd_dist': cong_hidd_dists,
'incong_hidd_dist': incong_hidd_dists}
return dist_c_inc_results
def calc_ratio(args, test_data, cortical_result, dist_results):
# embeddings
cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']
incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']
avg_cong_embed = np.mean(cong_embed_dists)
avg_incong_embed = np.mean(incong_embed_dists)
ratio_embed = (avg_cong_embed/avg_incong_embed)
# hiddens
cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']
incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']
avg_cong_hidd = np.mean(cong_hidd_dists, axis=0)
avg_incong_hidd = np.mean(incong_hidd_dists, axis=0)
# ratio_hidd = (avg_cong_hidd/avg_incong_hidd)
ratio_hidd = (avg_incong_hidd/avg_cong_hidd)
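    # ratio > 1 means incongruent pairs sit farther apart in hidden space than congruent pairs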
ratio_results = {'ratio_embed': ratio_embed, 'ratio_hidd': ratio_hidd,\
'avg_cong_hidd': avg_cong_hidd, 'avg_incong_hidd': avg_incong_hidd}
return ratio_results
def extract_hidd_dist(dist_results):
# hiddens
cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']
incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']
dist_result_hidd = {'cong_hidd_dists': cong_hidd_dists, 'incong_hidd_dists': incong_hidd_dists}
return dist_result_hidd
def analyze_ttest(args, test_data, cortical_result, dist_results):
cong_res = dist_results['cong_dist_results']
incong_res = dist_results['incong_dist_results']
incong_hidd_dists = incong_res['incong_hidd_dists']
cong_hidd_dists = cong_res['cong_hidd_dists']
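    # stepwisemlp keeps two hidden representations (one per step), so run the t-test separately for each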
if args.cortical_model == 'stepwisemlp':
t_hidd, t_p_val_hidd = np.zeros([2]), np.zeros([2])
for h in range(2):
t_hidd[h], t_p_val_hidd[h] = ttest_ind(cong_hidd_dists[:,h], incong_hidd_dists[:,h])
else:
t_hidd, t_p_val_hidd = ttest_ind(cong_res['cong_hidd_dists'],
incong_res['incong_hidd_dists'])
t_embed, t_p_val_embed = ttest_ind(cong_res['cong_embed_dists'],
incong_res['incong_embed_dists'])
t_grid, t_p_val_grid = ttest_ind(cong_res['cong_grid_dists'],
incong_res['incong_grid_dists'])
ttest_results = {'t_stat_hidd':t_hidd, 't_p_val_hidd': t_p_val_hidd,
't_stat_embed':t_embed, 't_p_val_embed': t_p_val_embed,
't_grid':t_grid, 't_p_val_grid': t_p_val_grid}
return ttest_results
def analyze_corr(args, test_data, cortical_result, dist_results):
grid_dists = dist_results['grid_dists']
embed_dists = dist_results['embed_dists']
hidd_dists = dist_results['hidd_dists']
cong_res = dist_results['cong_dist_results']
incong_res = dist_results['incong_dist_results']
r_embed, p_val_embed = pearsonr(grid_dists, embed_dists)
if args.cortical_model == 'stepwisemlp':
r_hidd, p_val_hidd = np.zeros([2]), np.zeros([2])
r_cong_hidd, p_val_cong_hidd, r_incong_hidd, p_val_incong_hidd = \
np.zeros([2]), np.zeros([2]), np.zeros([2]), np.zeros([2])
cong_hidd_dists, incong_hidd_dists = cong_res['cong_hidd_dists'], \
incong_res['incong_hidd_dists']
for h in range(2):
r_hidd[h], p_val_hidd[h] = pearsonr(grid_dists, hidd_dists[:,h])
r_cong_hidd[h], p_val_cong_hidd[h] = pearsonr(cong_res['cong_grid_dists'],
cong_hidd_dists[:,h])
r_incong_hidd[h], p_val_incong_hidd[h] = pearsonr(incong_res['incong_grid_dists'],
incong_hidd_dists[:,h])
else:
r_hidd, p_val_hidd = pearsonr(grid_dists, hidd_dists)
r_cong_hidd, p_val_cong_hidd = pearsonr(cong_res['cong_grid_dists'],
cong_res['cong_hidd_dists'])
r_incong_hidd, p_val_incong_hidd = pearsonr(incong_res['incong_grid_dists'],
incong_res['incong_hidd_dists'])
r_cong_embed, p_val_cong_embed = pearsonr(cong_res['cong_grid_dists'],
cong_res['cong_embed_dists'])
r_incong_embed, p_val_incong_embed = pearsonr(incong_res['incong_grid_dists'],
incong_res['incong_embed_dists'])
corr_results = {'r_embed': r_embed, 'p_val_embed': p_val_embed,
'r_cong_embed': r_cong_embed,
'p_val_cong_embed': p_val_cong_embed,
'r_incong_embed': r_incong_embed,
'p_val_incong_embed': p_val_incong_embed,
'r_hidd': r_hidd, 'p_val_hidd': p_val_hidd,
'r_cong_hidd': r_cong_hidd,
'p_val_cong_hidd': p_val_cong_hidd,
'r_incong_hidd': r_incong_hidd,
'p_val_incong_hidd': p_val_incong_hidd}
return corr_results
def analyze_regression(args, test_data, cortical_result, dist_results):
hidd_dists = dist_results['hidd_dists']
grid_dists = dist_results['grid_dists']
phi = dist_results['angle_results']['phi']
binary_phi = dist_results['angle_results']['binary_phi']
# prepare data for the regression analysis
x_cat = np.concatenate((grid_dists.reshape((-1,1)), binary_phi.reshape((-1,1))),axis=1)
x_con = np.concatenate((grid_dists.reshape((-1,1)), phi.reshape((-1,1))),axis=1)
# categorical regression analysis
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))
y_hat_E = np.zeros(hidd_dists.shape)
y = np.zeros(hidd_dists.shape)
for h in range(2):
y[:,h] = hidd_dists[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists)
else:
y = hidd_dists
y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
cat_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
# continuous regression analysis
x_con = sm.add_constant(x_con)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))
y_hat_E = np.zeros(hidd_dists.shape)
y = np.zeros(hidd_dists.shape)
for h in range(2):
y[:,h] = hidd_dists[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists)
else:
y = hidd_dists
y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists)
con_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
reg_results = {'cat_reg': cat_reg,
'con_reg': con_reg}
return reg_results
def run_regression(x,y,grid_dist):
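    # OLS fit; y_hat_E is the prediction using only the intercept and the grid-distance coefficient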
stats_model = sm.OLS(y,x).fit()
y_hat_E = stats_model.params[0] + (stats_model.params[1]*grid_dist)
p_val, t_val, param, bse = stats_model.pvalues, stats_model.tvalues, \
stats_model.params, stats_model.bse
return y_hat_E, p_val, t_val, param, bse
def analyze_regression_1D(args, test_data, cortical_result, dist_results):
# make sure dist_results is dist_ctx_results
hidd_dists_ctxs = dist_results['hidd_dists_ctxs']
hidd_dists_ctx0 = hidd_dists_ctxs[0]
hidd_dists_ctx1 = hidd_dists_ctxs[1]
grid_1ds_ctxs = dist_results['grid_1ds_ctxs']
grid_1ds_ctx0 = grid_1ds_ctxs[0]
grid_1ds_ctx1 = grid_1ds_ctxs[1]
grid_dists = dist_results['grid_dists']
phi = dist_results['angle_results']['phi']
binary_phi = dist_results['angle_results']['binary_phi']
hidd_dists_ctx = np.concatenate((hidd_dists_ctx0, hidd_dists_ctx1), axis=0)
grid_1ds_ctx = np.concatenate((grid_1ds_ctx0, grid_1ds_ctx1), axis=0)
grid_dists_ctx = np.concatenate((grid_dists, grid_dists), axis=0)
binary_phi_ctx = np.concatenate((binary_phi, binary_phi), axis=0)
phi_ctx = np.concatenate((phi, phi), axis=0)
# prepare data for the regression analysis
x_cat = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),
binary_phi_ctx.reshape((-1,1))),axis=1) # [240, 3]
x_con = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),
phi_ctx.reshape((-1,1))),axis=1)
# categorical regression analysis
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, y_hat_E, y, bse = ([[] for i in range(2)] for i in range(6))
y_hat_E = np.zeros(hidd_dists_ctx.shape)
y = np.zeros(hidd_dists_ctx.shape)
for h in range(2):
y[:,h] = hidd_dists_ctx[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists_ctx)
else:
y = hidd_dists_ctx
y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists_ctx)
cat_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
# continuous regression analysis
x_con = sm.add_constant(x_con)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))
y_hat_E = np.zeros(hidd_dists_ctx.shape)
y = np.zeros(hidd_dists_ctx.shape)
for h in range(2):
y[:,h] = hidd_dists_ctx[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists_ctx)
else:
y = hidd_dists_ctx
y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists_ctx)
con_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
reg_results = {'cat_reg': cat_reg,
'con_reg': con_reg}
return reg_results
def analyze_regression_exc(args, test_data, cortical_result, dist_results):
# Useful dictionaries from test dataset
n_states = test_data.n_states
hidd_dists = dist_results['hidd_dists'] #[n_combinations]: [120]
grid_dists = dist_results['grid_dists']
binary_phi = dist_results['angle_results']['binary_phi'] # [120]
samples = dist_results['samples'] # [120, 2]
states=[]
if args.cortical_model=='stepwisemlp':
p_vals, t_vals, params, bses = ([[] for i in range(2)] for i in range(4))
else:
p_vals, t_vals, params, bses = ([] for i in range(4))
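    # leave-one-state-out: rerun the regression with every sample pair that contains `state` removed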
for state in range(n_states):
s_idxs = [i for i, sample in enumerate(samples) if state not in sample] # [105]
# prepare data for the regression analysis
x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)
# regression analysis
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
for h in range(2):
y = hidd_dists[s_idxs,h]
_ , p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals[h].append(p_val)
t_vals[h].append(t_val)
params[h].append(param)
bses[h].append(bse)
else:
y = hidd_dists[s_idxs]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals.append(p_val)
t_vals.append(t_val)
params.append(param)
bses.append(bse)
states.append(state)
# regression analysis - after removing (0,0) and (3,3)
s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample))] # [91]
x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
for h in range(2):
y = hidd_dists[s_idxs,h]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals[h].append(p_val)
t_vals[h].append(t_val)
params[h].append(param)
bses[h].append(bse)
else:
y = hidd_dists[s_idxs]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals.append(p_val)
t_vals.append(t_val)
params.append(param)
bses.append(bse)
states.append(16)
# regression analysis - after removing (0,0) and (3,3), (3,0) and (0.3)
s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample) &
(3 not in sample) & (12 not in sample))] #[66]
x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
for h in range(2):
y = hidd_dists[s_idxs,h]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals[h].append(p_val)
t_vals[h].append(t_val)
params[h].append(param)
bses[h].append(bse)
else:
y = hidd_dists[s_idxs]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals.append(p_val)
t_vals.append(t_val)
params.append(param)
bses.append(bse)
states.append(17)
states = np.array(states)
p_vals = np.array(p_vals)
t_vals = np.array(t_vals)
params = np.array(params)
bses = np.array(bses)
exc_reg_results = {'excluded_states': states,
'p_vals': p_vals,
't_vals': t_vals,
'params': params,
'bses': bses}
return exc_reg_results
def analyze_test_seq(args, test_data, cortical_result, dist_results):
import sys
sys.path.append("..")
data = get_loaders(batch_size=32, meta=False,
use_images=True, image_dir='./images/',
n_episodes=None,
N_responses=args.N_responses, N_contexts=args.N_contexts,
cortical_task = args.cortical_task, #ToDo:check why it was set to cortical_task='face_task',
balanced = args.balanced)
train_data, train_loader, test_data, test_loader, analyze_data, analyze_loader = data
idx2loc = {idx:loc for loc, idx in test_data.loc2idx.items()}
# ctx_order = 'first'
# ctx_order_str = 'ctxF'
analyze_correct = cortical_result['analyze_correct'] # [n_trials, time_steps]: [384, 3]
analyze_correct = np.asarray(analyze_correct).squeeze()
hidd_t_idx = 1 # at what time step, t = 1 means at the time of face1
# and t = 2 means at the time of face2
# in axis First (axis is at t=0), it should be t = 1
    # create groups based on the rows or columns
    # e.g., for context0 (x axis), the first column is group 1, the second column is group 2, and so on.
# 4 groups for each axis/context; total 8 groups
# ToDo: why it is always loc1???
ctx0_g0=[]
ctx0_g1=[]
ctx0_g2=[]
ctx0_g3=[]
ctx1_g0=[]
ctx1_g1=[]
ctx1_g2=[]
ctx1_g3=[]
for i, batch in enumerate(analyze_loader):
if args.cortical_task == 'face_task':
f1, f2, ctx, y, idx1, idx2 = batch # face1, face2, context, y, index1, index2
elif args.cortical_task == 'wine_task':
f1, f2, ctx, y1, y2, idx1, idx2 = batch # face1, face2, context, y1, y2, index1, index2
msg = 'analyze_test_seq is only implemented for one response, two contexts'
assert args.N_responses == 'one' and args.N_contexts == 2, msg
if args.N_responses == 'one':
y = y1
# f1, f2, ax, y, idx1, idx2 = batch
acc = analyze_correct[i][hidd_t_idx]
ctx = ctx.cpu().numpy().squeeze()
idx1 = idx1[0]
idx2 = idx2[0]
loc1 = idx2loc[idx1]
loc2 = idx2loc[idx2]
if ctx==0:
if loc1[ctx]==0: ctx0_g0.append(acc) # (len(all_perms)/2) / 4 = [48]
elif loc1[ctx]==1: ctx0_g1.append(acc)
elif loc1[ctx]==2: ctx0_g2.append(acc)
elif loc1[ctx]==3: ctx0_g3.append(acc)
elif ctx==1:
if loc1[ctx]==0: ctx1_g0.append(acc)
elif loc1[ctx]==1: ctx1_g1.append(acc)
elif loc1[ctx]==2: ctx1_g2.append(acc)
elif loc1[ctx]==3: ctx1_g3.append(acc)
ctx0_accs = [np.mean(ctx0_g0), np.mean(ctx0_g1),
np.mean(ctx0_g2), np.mean(ctx0_g3) ]
ctx1_accs = [np.mean(ctx1_g0), np.mean(ctx1_g1),
np.mean(ctx1_g2), np.mean(ctx1_g3) ]
# print('Accuracy at t=%s (face%s) contex 0:' %(hidd_t_idx,hidd_t_idx), ctx0_accs)
# print('Accuracy at t=%s (face%s) contex 1:' %(hidd_t_idx,hidd_t_idx), ctx1_accs)
return ctx0_accs, ctx1_accs | [
[
[
35,
42
]
],
[
[
50,
55
],
[
655,
660
],
[
729,
734
],
[
5129,
5134
]
],
[
[
64,
75
],
[
867,
869
],
[
1291,
1293
],
[
1369,
1371
],
[
1410,
1412
],
[
1462,
1464
],
[
2037,
2039
],
[
3623,
3625
],
[
5433,
5435
],
[
6086,
6088
],
[
6132,
6134
],
[
6168,
6170
],
[
6409,
6411
],
[
8270,
8272
],
[
8770,
8772
],
[
8827,
8829
],
[
9153,
9155
],
[
9168,
9170
],
[
9338,
9340
],
[
9491,
9493
],
[
9754,
9756
],
[
10152,
10154
],
[
10276,
10278
],
[
10345,
10347
],
[
10436,
10438
],
[
10502,
10504
],
[
10662,
10664
],
[
10860,
10862
],
[
10994,
10996
],
[
11175,
11177
],
[
11642,
11644
],
[
11706,
11708
],
[
11714,
11716
],
[
11773,
11775
],
[
11781,
11783
],
[
11862,
11864
],
[
12102,
12104
],
[
12453,
12455
],
[
12816,
12818
],
[
12957,
12959
],
[
13219,
13221
],
[
13563,
13565
],
[
13651,
13653
],
[
14154,
14156
],
[
14269,
14271
],
[
14418,
14420
],
[
16913,
16915
],
[
17440,
17442
],
[
17520,
17522
],
[
17566,
17568
],
[
17694,
17696
],
[
19253,
19255
],
[
19509,
19511
],
[
19646,
19648
],
[
19799,
19801
],
[
19961,
19963
],
[
20293,
20295
],
[
20386,
20388
],
[
20479,
20481
],
[
20554,
20556
],
[
20598,
20600
],
[
20638,
20640
],
[
20747,
20749
],
[
20786,
20788
],
[
20842,
20844
],
[
22169,
22171
],
[
22364,
22366
],
[
22552,
22554
],
[
22659,
22661
],
[
22782,
22784
],
[
22912,
22914
],
[
23044,
23046
],
[
23125,
23127
],
[
23157,
23159
],
[
23373,
23375
],
[
23873,
23875
],
[
23948,
23950
],
[
23987,
23989
],
[
24030,
24032
],
[
24087,
24089
],
[
24144,
24146
],
[
24194,
24196
],
[
24245,
24247
],
[
24297,
24299
],
[
24344,
24346
],
[
24397,
24399
],
[
24456,
24458
],
[
24506,
24508
],
[
24535,
24537
],
[
24574,
24576
],
[
24630,
24632
],
[
27007,
27009
],
[
27106,
27108
],
[
27203,
27205
],
[
27311,
27313
],
[
30959,
30961
],
[
31008,
31010
],
[
31283,
31285
],
[
31338,
31340
],
[
32361,
32363
],
[
32376,
32378
],
[
33639,
33641
],
[
33654,
33656
],
[
33757,
33759
],
[
33772,
33774
],
[
33787,
33789
],
[
33802,
33804
],
[
36164,
36166
],
[
36256,
36258
],
[
36544,
36546
],
[
36583,
36585
],
[
37290,
37292
],
[
37329,
37331
],
[
38833,
38835
],
[
38911,
38913
],
[
38987,
38989
],
[
39057,
39059
],
[
39120,
39122
],
[
39214,
39216
],
[
39383,
39385
],
[
39753,
39755
],
[
39796,
39798
],
[
40515,
40517
],
[
40558,
40560
],
[
41955,
41957
],
[
42946,
42948
],
[
43944,
43946
],
[
44665,
44667
],
[
44695,
44697
],
[
44725,
44727
],
[
44755,
44757
],
[
44783,
44785
],
[
45898,
45900
],
[
47814,
47816
],
[
47832,
47834
],
[
47867,
47869
],
[
47885,
47887
],
[
47921,
47923
],
[
47939,
47941
],
[
47974,
47976
],
[
47992,
47994
]
],
[
[
83,
89
]
],
[
[
112,
124
],
[
19115,
19127
],
[
22031,
22043
]
],
[
[
126,
138
]
],
[
[
173,
176
],
[
26850,
26853
]
],
[
[
206,
209
],
[
28582,
28585
]
],
[
[
211,
215
],
[
29295,
29299
]
],
[
[
240,
248
],
[
33531,
33539
],
[
34035,
34043
],
[
34122,
34130
],
[
34299,
34307
],
[
34498,
34506
],
[
34570,
34578
],
[
34729,
34737
],
[
34893,
34901
],
[
35047,
35055
]
],
[
[
250,
259
],
[
32460,
32469
],
[
32559,
32568
],
[
32700,
32709
],
[
32843,
32852
]
],
[
[
267,
288
],
[
36380,
36382
],
[
37126,
37128
],
[
37979,
37981
],
[
39577,
39579
],
[
40351,
40353
],
[
42097,
42099
],
[
43054,
43056
],
[
44052,
44054
]
],
[
[
309,
320
],
[
45183,
45194
]
],
[
[
322,
330
],
[
20164,
20172
]
],
[
[
336,
352
]
],
[
[
3809,
3825
]
],
[
[
15485,
15497
]
],
[
[
16089,
16114
]
],
[
[
16759,
16770
]
],
[
[
18411,
18424
]
],
[
[
21293,
21302
]
],
[
[
25678,
25693
]
],
[
[
29991,
30000
]
],
[
[
30701,
30711
]
],
[
[
31666,
31683
]
],
[
[
32007,
32020
]
],
[
[
33201,
33213
]
],
[
[
35841,
35859
]
],
[
[
37930,
37944
],
[
36739,
36753
],
[
36856,
36870
],
[
37485,
37499
],
[
37602,
37616
],
[
39960,
39974
],
[
40085,
40099
],
[
40722,
40736
],
[
40847,
40861
],
[
42288,
42302
],
[
42570,
42584
],
[
43228,
43242
],
[
43482,
43496
],
[
44228,
44242
],
[
44482,
44496
]
],
[
[
38270,
38291
]
],
[
[
41172,
41194
]
],
[
[
45065,
45081
]
]
] |
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_validators import (
validate_exchange,
validate_market_trading_pair,
)
from hummingbot.client.settings import (
required_exchanges,
EXAMPLE_PAIRS,
)
from typing import Optional
def symbol_prompt():
exchange = dev_5_vwap_config_map.get("exchange").value
example = EXAMPLE_PAIRS.get(exchange)
return "Enter the trading pair you would like to trade on %s%s >>> " \
% (exchange, f" (e.g. {example})" if example else "")
def str2bool(value: str):
return str(value).lower() in ("yes", "true", "t", "1")
# checks if the symbol pair is valid
def validate_market_trading_pair_tuple(value: str) -> Optional[str]:
market = dev_5_vwap_config_map.get("exchange").value
return validate_market_trading_pair(market, value)
def order_percent_of_volume_prompt():
percent_slippage = dev_5_vwap_config_map.get("percent_slippage").value
return ("What percent of open order volume up to %s percent slippage do you want" % percent_slippage
+ "each order to be? (default is 100 percent)? >>> ")
dev_5_vwap_config_map = {
"strategy":
ConfigVar(key="strategy",
prompt="",
default="dev_5_vwap"),
"exchange":
ConfigVar(key="exchange",
prompt="Enter the name of the exchange >>> ",
validator=validate_exchange,
on_validated=lambda value: required_exchanges.append(value),
prompt_on_new=True),
"market":
ConfigVar(key="market",
prompt=symbol_prompt,
validator=validate_market_trading_pair_tuple,
prompt_on_new=True),
"order_type":
ConfigVar(key="order_type",
prompt="Enter type of order (limit/market) default is market >>> ",
type_str="str",
validator=lambda v: None if v in {"limit", "market", ""} else "Invalid order type.",
default="market"),
"order_amount":
ConfigVar(key="order_amount",
prompt="What is your preferred quantity (denominated in the base asset, default is 1)? "
">>> ",
default=1.0,
type_str="float"),
"is_buy":
ConfigVar(key="is_buy",
prompt="Enter True for Buy order and False for Sell order (default is Buy Order) >>> ",
type_str="bool",
default=True),
"is_vwap":
ConfigVar(key="is_vwap",
prompt="Would you like to use VWAP or TWAP? (default is VWAP) >>> ",
type_str="bool",
default=True),
"num_individual_orders":
ConfigVar(key="num_individual_orders",
prompt="Into how many individual orders do you want to split this order? (Enter 10 to indicate 10 individual orders. "
"Default is 1)? >>> ",
required_if=lambda: dev_5_vwap_config_map.get("is_vwap").value is False,
type_str="float",
default=1),
"percent_slippage":
ConfigVar(key="percent_slippage",
prompt="What percent of price do you want to calculate open order volume? (default is 0 percent slippage) >>> ",
required_if=lambda: dev_5_vwap_config_map.get("is_vwap").value is True,
type_str="float",
default=0.1),
"order_percent_of_volume":
ConfigVar(key="order_percent_of_volume",
prompt=order_percent_of_volume_prompt,
required_if=lambda: dev_5_vwap_config_map.get("is_vwap").value is True,
type_str="float",
default=0.01),
"time_delay":
ConfigVar(key="time_delay",
prompt="How many seconds do you want to wait between each individual order? (Enter 10 to indicate 10 seconds. "
"Default is 10)? >>> ",
type_str="float",
default=10),
"order_price":
ConfigVar(key="order_price",
prompt="What is the price of the limit order ? >>> ",
required_if=lambda: dev_5_vwap_config_map.get("order_type").value == "limit",
type_str="float"),
"cancel_order_wait_time":
ConfigVar(key="cancel_order_wait_time",
prompt="How long do you want to wait before cancelling your limit order (in seconds). "
"(Default is 60 seconds) ? >>> ",
required_if=lambda: dev_5_vwap_config_map.get("order_type").value == "limit",
type_str="float",
default=60),
}
| [
[
[
48,
57
],
[
1197,
1206
],
[
1317,
1326
],
[
1594,
1603
],
[
1787,
1796
],
[
2103,
2112
],
[
2363,
2372
],
[
2584,
2593
],
[
2801,
2810
],
[
3214,
3223
],
[
3576,
3585
],
[
3859,
3868
],
[
4160,
4169
],
[
4432,
4441
]
],
[
[
119,
136
],
[
1435,
1452
]
],
[
[
142,
170
],
[
815,
843
]
],
[
[
219,
237
],
[
1499,
1517
]
],
[
[
243,
256
],
[
384,
397
]
],
[
[
279,
287
],
[
732,
740
]
],
[
[
294,
307
],
[
1643,
1656
]
],
[
[
558,
566
]
],
[
[
682,
716
],
[
1686,
1720
]
],
[
[
865,
895
],
[
3642,
3672
]
],
[
[
1147,
1168
],
[
326,
347
],
[
760,
781
],
[
922,
943
],
[
3063,
3084
],
[
3417,
3438
],
[
3712,
3733
],
[
4299,
4320
],
[
4675,
4696
]
]
] |
from django.views.generic import View
from django.http import HttpResponse
from django.conf import settings
import os
class ReactAppView(View):
def get(self, request):
try:
with open(os.path.join(str(settings.ROOT_DIR), 'frontend', 'build', 'index.html')) as file:
return HttpResponse(file.read())
except:
return HttpResponse(
"""
                index.html not found! Build your React App!
""",
status=501,
)
| [
[
[
33,
37
],
[
139,
143
]
],
[
[
62,
74
],
[
315,
327
],
[
377,
389
]
],
[
[
99,
107
],
[
227,
235
]
],
[
[
115,
117
],
[
210,
212
]
],
[
[
126,
138
]
]
] |
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='ProductMain']/div[@class='product-title']/h1",
'price' : "//div[@class='Row Price']/div[@class='ProductPrice VariationProductPrice']",
'category' : "//div[@id='Breadcrumb']/ul/li/a",
'description' : "//div[@id='ProductDescription']/div[@class='ProductDescriptionContainer']",
'images' : "//div[@class='ProductThumbImage']/a/@href",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'dcmobile.vn'
allowed_domains = ['dcmobile.vn']
start_urls = ['http://dcmobile.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
    Rule(LinkExtractor(allow = [r'/[a-zA-Z0-9-]+-\d+\.html$']), 'parse_item'),
    Rule(LinkExtractor(deny = ['/ban-tin'], allow = [r'/[a-zA-Z0-9-]+-b+\d+\.html']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
[
[
104,
108
],
[
837,
841
],
[
915,
919
]
],
[
[
143,
156
],
[
842,
855
],
[
920,
933
]
],
[
[
158,
163
]
],
[
[
636,
640
]
],
[
[
657,
672
]
],
[
[
691,
701
]
],
[
[
728,
740
]
],
[
[
746,
758
]
],
[
[
766,
779
]
],
[
[
803,
817
]
],
[
[
823,
828
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017 The Ravencoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test using named arguments for RPCs."""
from test_framework.test_framework import BayemcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class NamedArgumentTest(BayemcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
node = self.nodes[0]
h = node.help(command='getinfo')
assert(h.startswith('getinfo\n'))
        assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getinfo')
h = node.getblockhash(height=0)
node.getblock(blockhash=h)
assert_equal(node.echo(), [])
assert_equal(node.echo(arg0=0,arg9=9), [0] + [None]*8 + [9])
assert_equal(node.echo(arg1=1), [None, 1])
assert_equal(node.echo(arg9=None), [None]*10)
assert_equal(node.echo(arg0=0,arg3=3,arg9=9), [0] + [None]*2 + [3] + [None]*5 + [9])
if __name__ == '__main__':
NamedArgumentTest().main()
| [
[
[
346,
368
],
[
477,
499
]
],
[
[
407,
419
],
[
873,
885
],
[
911,
923
],
[
980,
992
],
[
1031,
1043
],
[
1085,
1097
]
],
[
[
425,
448
]
],
[
[
459,
476
],
[
1202,
1219
]
]
] |
# coding: utf-8
"""
"""
import pandas as pd
import numpy as np
import cv2 # Used to manipulated the images
from scipy.signal import wiener
np.random.seed(1207) # The seed I used - pick your own or comment out for a random seed. A constant seed allows for better comparisons though
# Import Keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
# ## Load Training Data
# In[2]:
df_train = pd.read_json('./input/train.json') # this is a dataframe
# Need to reshape and feature scale the images:
# In[3]:
def get_scaled_imgs(df):
imgs = []
for i, row in df.iterrows():
band_1 = np.array(row['band_1'])
band_2 = np.array(row['band_2'])
#make 75x75 image
band_1 = band_1.reshape(75, 75)
band_2 = band_2.reshape(75, 75)
#band_3 = band_1 + band_2 # plus since log(x*y) = log(x) + log(y)
# Rescale
a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())
b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())
#c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())
imgs.append(np.dstack((a, b)))
return np.array(imgs)
def get_more_images(imgs):
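    # triple the data set: the original two-band images plus a vertical and a horizontal flip of each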
more_images = []
vert_flip_imgs = []
hori_flip_imgs = []
for i in range(0,imgs.shape[0]):
a=imgs[i,:,:,0]
b=imgs[i,:,:,1]
#c=imgs[i,:,:,2]
av=cv2.flip(a,1)
ah=cv2.flip(a,0)
bv=cv2.flip(b,1)
bh=cv2.flip(b,0)
#cv=cv2.flip(c,1)
#ch=cv2.flip(c,0)
#vert_flip_imgs.append(np.dstack((av, bv, cv)))
#hori_flip_imgs.append(np.dstack((ah, bh, ch)))
vert_flip_imgs.append(np.dstack((av, bv)))
hori_flip_imgs.append(np.dstack((ah, bh)))
v = np.array(vert_flip_imgs)
h = np.array(hori_flip_imgs)
more_images = np.concatenate((imgs,v,h))
return more_images
def getModel():
#Build keras model
model=Sequential()
# CNN 1
model.add(Conv2D(64, kernel_size=(3, 3),activation='relu', input_shape=(75, 75, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu' ))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
# CNN 2
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu' ))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
#model.add(Dropout(0.2))
# CNN 3
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
#model.add(Dropout(0.2))
#CNN 4
model.add(Conv2D(256, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# You must flatten the data for the dense layers
model.add(Flatten())
#Dense 1
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
#Dense 2
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
# Output
model.add(Dense(1, activation="sigmoid"))
optimizer = Adam(lr=0.0001, decay=0.0)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
Xtrain = get_scaled_imgs(df_train)
Ytrain = np.array(df_train['is_iceberg'])
df_train.inc_angle = df_train.inc_angle.replace('na',0)
idx_tr = np.where(df_train.inc_angle>0)
Ytrain = Ytrain[idx_tr[0]]
Xtrain = Xtrain[idx_tr[0],...]
#Xtr_more = get_more_images(Xtrain)
#Ytr_more = np.concatenate((Ytrain,Ytrain,Ytrain))
X_train, X_valid, y_train, y_valid = train_test_split(Xtrain, Ytrain, test_size=0.1)
X_train_more = get_more_images(X_train)
y_train_more = np.concatenate([y_train, y_train, y_train])
X_valid_more = get_more_images(X_valid)
y_valid_more = np.concatenate([y_valid, y_valid, y_valid])
model = getModel()
model.summary()
batch_size = 32
model_file = '.mdl_2l2_wts.hdf5'
early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='min')
mcp_save = ModelCheckpoint(model_file, save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-6, mode='min')
#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=50, verbose=1, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.25)
#model.fit(Xtr_more, Ytr_more, batch_size=batch_size, epochs=60, verbose=1, callbacks=[mcp_save, reduce_lr_loss], validation_split=0.2)
model.fit(X_train_more, y_train_more, batch_size=32, epochs=60, verbose=1,
callbacks=[mcp_save, reduce_lr_loss],
validation_data=(X_valid, y_valid))
model.load_weights(filepath = model_file)
score = model.evaluate(Xtrain, Ytrain, verbose=1)
print('Train score:', score[0])
print('Train accuracy:', score[1])
df_test = pd.read_json('./input/test.json')
df_test.inc_angle = df_test.inc_angle.replace('na',0)
Xtest = (get_scaled_imgs(df_test))
pred_test = model.predict(Xtest)
submission = pd.DataFrame({'id': df_test["id"], 'is_iceberg': pred_test.reshape((pred_test.shape[0]))})
print(submission.head(10))
submission.to_csv('sub-2bands-nodrop-aug.csv', index=False)
| [
[
[
36,
48
],
[
722,
724
],
[
5353,
5355
],
[
5523,
5525
]
],
[
[
57,
68
],
[
148,
150
],
[
3752,
3754
],
[
3850,
3852
],
[
4171,
4173
],
[
4270,
4272
],
[
935,
937
],
[
976,
978
],
[
1437,
1439
],
[
1468,
1470
],
[
2017,
2019
],
[
2068,
2070
],
[
2104,
2106
],
[
2137,
2139
],
[
2188,
2190
]
],
[
[
77,
80
],
[
1725,
1728
],
[
1750,
1753
],
[
1775,
1778
],
[
1800,
1803
]
],
[
[
140,
146
]
],
[
[
332,
342
],
[
2299,
2309
]
],
[
[
368,
373
],
[
3342,
3347
],
[
3430,
3435
],
[
3518,
3523
]
],
[
[
375,
382
],
[
3388,
3395
],
[
3475,
3482
]
],
[
[
384,
391
],
[
3303,
3310
]
],
[
[
393,
403
]
],
[
[
429,
435
],
[
2343,
2349
],
[
2432,
2438
],
[
2498,
2504
],
[
2643,
2649
],
[
2710,
2716
],
[
2777,
2783
],
[
2948,
2954
],
[
3117,
3123
]
],
[
[
437,
449
],
[
2564,
2576
],
[
2844,
2856
],
[
3014,
3026
],
[
3183,
3195
]
],
[
[
478,
491
],
[
4419,
4432
]
],
[
[
493,
508
],
[
4500,
4515
]
],
[
[
510,
527
],
[
4598,
4615
]
],
[
[
567,
585
]
],
[
[
615,
619
],
[
3567,
3571
]
],
[
[
656,
672
],
[
4067,
4083
]
],
[
[
711,
719
],
[
3733,
3741
],
[
3761,
3769
],
[
3806,
3814
],
[
3785,
3793
],
[
3859,
3867
]
],
[
[
845,
860
],
[
3717,
3732
],
[
5450,
5465
]
],
[
[
1491,
1506
],
[
4131,
4146
],
[
4230,
4245
]
],
[
[
2249,
2257
],
[
4324,
4332
]
],
[
[
3708,
3714
],
[
3918,
3924
]
],
[
[
3743,
3749
],
[
3891,
3897
]
],
[
[
3841,
3847
],
[
3898,
3904
],
[
3925,
3931
]
],
[
[
3882,
3888
],
[
4092,
4098
],
[
5255,
5261
]
],
[
[
3909,
3915
],
[
4084,
4090
],
[
5247,
5253
]
],
[
[
4030,
4037
],
[
4147,
4154
]
],
[
[
4039,
4046
],
[
4246,
4253
],
[
5160,
5167
]
],
[
[
4048,
4055
],
[
4187,
4194
],
[
4196,
4203
],
[
4205,
4212
]
],
[
[
4057,
4064
],
[
4286,
4293
],
[
4295,
4302
],
[
4304,
4311
],
[
5169,
5176
]
],
[
[
4116,
4128
],
[
4998,
5010
]
],
[
[
4156,
4168
],
[
5012,
5024
]
],
[
[
4215,
4227
]
],
[
[
4255,
4267
]
],
[
[
4316,
4321
],
[
4335,
4340
],
[
4988,
4993
],
[
5181,
5186
],
[
5232,
5237
],
[
5488,
5493
]
],
[
[
4352,
4362
]
],
[
[
4368,
4378
],
[
4516,
4526
],
[
5211,
5221
]
],
[
[
4402,
4416
]
],
[
[
4489,
4497
],
[
5095,
5103
]
],
[
[
4581,
4595
],
[
5105,
5119
]
],
[
[
5224,
5229
],
[
5296,
5301
],
[
5331,
5336
]
],
[
[
5343,
5350
],
[
5407,
5414
],
[
5387,
5394
],
[
5466,
5473
],
[
5543,
5550
]
],
[
[
5441,
5446
],
[
5502,
5507
]
],
[
[
5476,
5485
],
[
5572,
5581
],
[
5591,
5600
]
],
[
[
5510,
5520
],
[
5620,
5630
],
[
5642,
5652
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from concurrent import futures
def naehere_pi_an(n):
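    # Wallis product: pi/2 = (2/1)*(2/3)*(4/3)*(4/5)*..., truncated after n factors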
pi_halbe = 1
zaehler, nenner = 2.0, 1.0
for i in range(n):
pi_halbe *= zaehler / nenner
if i % 2:
zaehler += 2
else:
nenner += 2
return 2*pi_halbe
N = (
12345678,
1234567,
123456,
12345,
1234
)
with futures.ThreadPoolExecutor(max_workers=4) as ex:
print(list(ex.map(naehere_pi_an, N)))
| [
[
[
70,
77
],
[
405,
412
]
],
[
[
83,
96
],
[
483,
496
]
],
[
[
332,
333
],
[
498,
499
]
],
[
[
450,
452
],
[
476,
478
]
]
] |
#!/usr/bin/env python
__all__ = ['nicovideo_download']
from ..common import *
def nicovideo_login(user, password):
data = "current_form=login&mail=" + user +"&password=" + password + "&login_submit=Log+In"
response = request.urlopen(request.Request("https://secure.nicovideo.jp/secure/login?site=niconico", headers=fake_headers, data=data.encode('utf-8')))
return response.headers
def nicovideo_download(url, output_dir='.', merge=True, info_only=False):
import ssl
ssl_context = request.HTTPSHandler(
context=ssl.SSLContext(ssl.PROTOCOL_TLSv1))
cookie_handler = request.HTTPCookieProcessor()
opener = request.build_opener(ssl_context, cookie_handler)
request.install_opener(opener)
import netrc, getpass
try:
info = netrc.netrc().authenticators('nicovideo')
except FileNotFoundError:
info = None
if info is None:
user = input("User: ")
password = getpass.getpass("Password: ")
else:
user, password = info[0], info[2]
print("Logging in...")
nicovideo_login(user, password)
html = get_html(url) # necessary!
title = unicodize(r1(r'<span class="videoHeaderTitle"[^>]*>([^<]+)</span>', html))
vid = url.split('/')[-1].split('?')[0]
api_html = get_html('http://www.nicovideo.jp/api/getflv?v=%s' % vid)
real_url = parse.unquote(r1(r'url=([^&]+)&', api_html))
type, ext, size = url_info(real_url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Nicovideo.jp"
download = nicovideo_download
download_playlist = playlist_not_supported('nicovideo')
| [
[
[
23,
30
]
],
[
[
78,
79
],
[
1652,
1674
],
[
228,
235
],
[
244,
251
],
[
326,
338
],
[
504,
511
],
[
591,
598
],
[
634,
641
],
[
688,
695
],
[
1094,
1102
],
[
1133,
1142
],
[
1143,
1145
],
[
1267,
1275
],
[
1340,
1345
],
[
1354,
1356
],
[
1408,
1416
],
[
1432,
1442
],
[
1503,
1516
]
],
[
[
85,
100
],
[
1050,
1065
]
],
[
[
401,
419
],
[
1613,
1631
]
],
[
[
1575,
1584
],
[
1443,
1452
]
],
[
[
1602,
1610
]
],
[
[
1632,
1649
]
]
] |
import pytest
from .base import TestBaseClass
# flake8: noqa W291 - we want to explicitly test trailing whitespace here
class TestClassOelintVarsValueQuoted(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.valuequoted'])
@pytest.mark.parametrize('occurrence', [2])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
A = "a
D = a"
''',
},
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.valuequoted'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'A = "a"',
},
{
'oelint_adv_test.bb':
'A += "b"',
},
{
'oelint_adv_test.bb':
'PACKAGECONFIG[foo] = "-DFOO=ON,-DFOO=OFF,"',
},
{
'oelint_adv_test.bb':
'EXTRA_OEMAKE = \'CROSS_COMPILE=${TARGET_PREFIX} CC="${TARGET_PREFIX}gcc ${TOOLCHAIN_OPTIONS}" V=1\'',
},
{
'oelint_adv_test.bb':
'''
EXTRA_OECMAKE += "\\
-DBUILD_TESTS=OFF \\
"
''',
},
{
'oelint_adv_test.bb':
'''
DEPENDS += "\\
a \\
b \\
c \\
"
''',
},
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
| [
[
[
7,
13
],
[
181,
187
],
[
245,
251
],
[
293,
299
],
[
842,
848
],
[
906,
912
],
[
954,
960
]
],
[
[
33,
46
],
[
159,
172
]
],
[
[
128,
158
]
]
] |
# See: https://packaging.python.org/en/latest/distributing/#standards-compliance-for-interoperability
__version__ = '0.9.0'
| [
[
[
102,
113
]
]
] |
#!/usr/bin/python3
# --- 001 > U5W1P1_Task1_w1
def solution(s):
# print( ''.join(reversed(s)) )
    return s == ''.join(reversed(s))
if __name__ == "__main__":
print('----------start------------')
s = "zork"
print(solution( s ))
print('------------end------------')
| [
[
[
52,
60
],
[
280,
288
]
],
[
[
259,
260
],
[
290,
291
]
]
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 9 10:51:35 2019
@author: levy.he
"""
import ctypes
from . import vxlapy
def stringify(cobj, indent=2):
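    # recursively render a ctypes Union/Structure (or raw buffer/scalar) as an indented string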
s = "%s\n" % type(cobj)
if issubclass(type(cobj), ctypes.Union):
cobj = getattr(cobj, cobj._fields_[0][0])
if issubclass(type(cobj), ctypes.Structure):
for field in cobj._fields_:
s += "%s%s=%s\n" % (indent * ' ', field[0], stringify(getattr(cobj, field[0]), indent + 2))
return s
try:
return bytearray(cobj[:])
except TypeError:
return "%d (0x%x)" % (cobj, cobj)
def debugwrap(func):
def caller(*args, **kwargs):
if hasattr(args[0], 'debug') and args[0].debug:
print(args[0].__class__.__name__, repr(func), repr(args), repr(kwargs))
return func(*args, **kwargs)
return caller
class VxlBaseException(Exception):
pass
class VxlBaseEvent(object):
def __str__(self):
return stringify(getattr(self.event.tagData, self.tagDataAttr))
class VxlBase(object):
def __init__(self, debug=False, debugapi=False):
self.api = vxlapy.vxlapy(trace=debugapi)
# self._app_name = None
self.debug = debug
self.initAccess = False
self.portIsOpen = False
self.portHandle = vxlapy.XLportHandle(vxlapy.XL_INVALID_PORTHANDLE)
self.accessMask = vxlapy.XLaccess(0)
self.permissionMask = vxlapy.XLaccess(0)
self.api.xlOpenDriver()
@debugwrap
def __del__(self):
self.api.xlCloseDriver()
@debugwrap
def getchannelIdx(self, channel=0, app_name=None, busType=vxlapy.XL_INTERFACE_VERSION):
if app_name is not None:
hw_type = ctypes.c_uint(0)
hw_index = ctypes.c_uint(0)
hw_channel = ctypes.c_uint(0)
self.api.xlGetApplConfig(
                app_name, channel, hw_type, hw_index, hw_channel, busType)
channelIdx = self.api.xlGetChannelIndex(hw_type.value, hw_index.value,
hw_channel.value)
if self.debug:
print('Channel %d idex %d found'%(channel,channelIdx))
if channelIdx < 0:
raise VxlBaseException("No HW port available")
else:
channelIdx = channel
return channelIdx
@debugwrap
def getChannelMask(self, busType, channelIdx=1, xlInterfaceVersion=vxlapy.XL_INTERFACE_VERSION):
driverConfig = vxlapy.XLdriverConfig()
self.api.xlGetDriverConfig(ctypes.byref(driverConfig))
for i in range(driverConfig.channelCount):
if self.debug:
print("Channel %d cap 0x%x ifver %d" % (i, driverConfig.channel[i].channelBusCapabilities, driverConfig.channel[i].interfaceVersion))
if (driverConfig.channel[i].channelBusCapabilities & busType and # eg. XL_BUS_COMPATIBLE_*
driverConfig.channel[i].interfaceVersion >= xlInterfaceVersion): # eg. XL_INTERFACE_VERSION*
if self.accessMask.value == 0 and channelIdx == i:
if self.debug:
print("Using %s, (sn=%06d, mask=0x%04x)" % (stringify(driverConfig.channel[i].name), driverConfig.channel[i].serialNumber,
driverConfig.channel[i].channelMask))
self.accessMask.value |= driverConfig.channel[i].channelMask
return True
#channelIdx -= 1
return False
@debugwrap
def openPort(self, busType, userName='vxlapy', accessMask=None, permissionMask=None, rxQueueSize=32768, xlInterfaceVersion=vxlapy.XL_INTERFACE_VERSION_V4):
if accessMask is not None:
self.accessMask.value = accessMask
if permissionMask is not None:
self.permissionMask.value = permissionMask
if permissionMask is None and self.accessMask.value != 0:
self.permissionMask.value = self.accessMask.value
self.api.xlOpenPort(ctypes.byref(self.portHandle), userName, self.accessMask.value, ctypes.byref(self.permissionMask), rxQueueSize, xlInterfaceVersion, busType)
self.portIsOpen = True
self.initAccess = self.permissionMask.value == self.accessMask.value and self.accessMask.value != 0
else:
raise VxlBaseException("No HW port available")
@debugwrap
def activateChannel(self, bustype):
return self.api.xlActivateChannel(self.portHandle, self.accessMask, bustype, 0)
@debugwrap
def deactivateChannel(self):
return self.api.xlDeactivateChannel(self.portHandle, self.accessMask)
@debugwrap
def flush_rx_buffer(self):
self.api.xlFlushReceiveQueue(self.portHandle)
@debugwrap
def flush_tx_buffer(self):
self.api.xlCanFlushTransmitQueue(self.portHandle, self.accessMask)
@debugwrap
def closePort(self):
if self.portIsOpen:
self.api.xlDeactivateChannel(self.portHandle, self.accessMask)
self.api.xlClosePort(self.portHandle)
self.api.xlCloseDriver()
self.portIsOpen = False
@debugwrap
def receive(self):
        raise NotImplementedError
if __name__ == "__main__":
b = VxlBase()
| [
[
[
94,
100
],
[
212,
218
],
[
307,
313
],
[
1701,
1707
],
[
1741,
1747
],
[
1783,
1789
],
[
2538,
2544
],
[
4063,
4069
],
[
4127,
4133
]
],
[
[
115,
121
],
[
1616,
1622
],
[
2426,
2432
],
[
3694,
3700
],
[
1110,
1116
],
[
1289,
1295
],
[
1309,
1315
],
[
1365,
1371
],
[
1414,
1420
],
[
2479,
2485
]
],
[
[
127,
136
],
[
418,
427
],
[
955,
964
],
[
3199,
3208
]
],
[
[
596,
605
],
[
1472,
1481
],
[
1544,
1553
],
[
2345,
2354
],
[
3557,
3566
],
[
4431,
4440
],
[
4575,
4584
],
[
4702,
4711
],
[
4803,
4812
],
[
4925,
4934
],
[
5192,
5201
]
],
[
[
848,
864
],
[
2225,
2241
],
[
4384,
4400
]
],
[
[
894,
906
]
],
[
[
1020,
1027
],
[
5291,
5298
]
],
[
[
5287,
5288
]
]
] |
import pandas as pd
import numpy as np
from time import time
import sys
class StateBasedBucketer(object):
def __init__(self, encoder):
self.encoder = encoder
self.dt_states = None
self.n_states = 0
def fit(self, X, y=None):
dt_encoded = self.encoder.fit_transform(X)
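        # every distinct encoded row becomes one state with a consecutive integer id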
self.dt_states = dt_encoded.drop_duplicates()
self.dt_states = self.dt_states.assign(state = range(len(self.dt_states)))
self.n_states = len(self.dt_states)
return self
def predict(self, X, y=None):
dt_encoded = self.encoder.transform(X)
dt_transformed = pd.merge(dt_encoded, self.dt_states, how='left')
dt_transformed.fillna(-1, inplace=True)
return dt_transformed["state"].astype(int).as_matrix()
def fit_predict(self, X, y=None):
self.fit(X)
return self.predict(X) | [
[
[
7,
19
],
[
707,
709
]
],
[
[
27,
38
]
],
[
[
56,
60
]
],
[
[
68,
71
]
],
[
[
79,
97
]
]
] |
""" Test cases for .hist method """
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series, to_datetime
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = "ts"
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
def test_hist_layout_with_by(self):
df = self.hist_df
# _check_plot_works adds an `ax` kwarg to the method call
# so we get a warning about an axis being cleared, even
        # though we don't explicitly pass one, see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
def test_hist_no_overlap(self):
from matplotlib.pyplot import gcf, subplot
x = Series(np.random.randn(2))
y = Series(np.random.randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes
assert len(axes) == 2
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender) # noqa
assert len(self.plt.get_fignums()) == 1
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with pytest.raises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@pytest.mark.parametrize(
"histtype, expected",
[
("bar", True),
("barstacked", True),
("step", False),
("stepfilled", True),
],
)
def test_histtype_argument(self, histtype, expected):
# GH23992 Verify functioning of histtype argument
ser = Series(np.random.randint(1, 10))
ax = ser.hist(histtype=histtype)
self._check_patches_all_filled(ax, filled=expected)
@pytest.mark.parametrize(
"by, expected_axes_num, expected_layout", [(None, 1, (1, 1)), ("b", 2, (1, 2))]
)
def test_hist_with_legend(self, by, expected_axes_num, expected_layout):
# GH 6279 - Series histogram can have a legend
index = 15 * ["1"] + 15 * ["2"]
s = Series(np.random.randn(30), index=index, name="a")
s.index.name = "b"
# Use default_axes=True when plotting method generate subplots itself
axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by)
self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
self._check_legend_labels(axes, "a")
@pytest.mark.parametrize("by", [None, "b"])
def test_hist_with_legend_raises(self, by):
# GH 6279 - Series histogram with legend and label raises
index = 15 * ["1"] + 15 * ["2"]
s = Series(np.random.randn(30), index=index, name="a")
s.index.name = "b"
with pytest.raises(ValueError, match="Cannot use both legend and label"):
s.hist(legend=True, by=by, label="c")
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
df = DataFrame(np.random.randn(100, 2))
df[2] = to_datetime(
np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=100,
dtype=np.int64,
)
)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, grid=False)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert not axes[1, 1].get_visible()
_check_plot_works(df[[2]].hist)
df = DataFrame(np.random.randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(np.random.randn(100, 5))
df[5] = to_datetime(
np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=100,
dtype=np.int64,
)
)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, layout=(4, 2))
self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
# make sure sharex, sharey is handled
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, figsize=(8, 10))
# check bins argument
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, bins=5)
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(
axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot
)
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(
axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot
)
tm.close()
ax = ser.hist(cumulative=True, bins=4, density=True)
# height of last bin (index 5) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self._check_ax_scales(ax, yaxis="log")
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
ser.hist(foo="bar")
def test_hist_non_numerical_or_datetime_raises(self):
# gh-10444, GH32590
df = DataFrame(
{
"a": np.random.rand(10),
"b": np.random.randint(0, 10, 10),
"c": to_datetime(
np.random.randint(
1582800000000000000, 1583500000000000000, 10, dtype=np.int64
)
),
"d": to_datetime(
np.random.randint(
1582800000000000000, 1583500000000000000, 10, dtype=np.int64
),
utc=True,
),
}
)
df_o = df.astype(object)
msg = "hist method requires numerical or datetime columns, nothing to plot."
with pytest.raises(ValueError, match=msg):
df_o.hist()
def test_hist_layout(self):
df = DataFrame(np.random.randn(100, 2))
df[2] = to_datetime(
np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=100,
dtype=np.int64,
)
)
layout_to_expected_size = (
{"layout": None, "expected_size": (2, 2)}, # default is 2x2
{"layout": (2, 2), "expected_size": (2, 2)},
{"layout": (4, 1), "expected_size": (4, 1)},
{"layout": (1, 4), "expected_size": (1, 4)},
{"layout": (3, 3), "expected_size": (3, 3)},
{"layout": (-1, 4), "expected_size": (1, 4)},
{"layout": (4, -1), "expected_size": (4, 1)},
{"layout": (-1, 2), "expected_size": (2, 2)},
{"layout": (2, -1), "expected_size": (2, 2)},
)
for layout_test in layout_to_expected_size:
axes = df.hist(layout=layout_test["layout"])
expected = layout_test["expected_size"]
self._check_axes_shape(axes, axes_num=3, layout=expected)
# layout too small for all 4 plots
with pytest.raises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with pytest.raises(ValueError):
df.hist(layout=(1,))
with pytest.raises(ValueError):
df.hist(layout=(-1, -1))
# GH 9351
def test_tight_layout(self):
df = DataFrame(np.random.randn(100, 2))
df[2] = to_datetime(
np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=100,
dtype=np.int64,
)
)
# Use default_axes=True when plotting method generate subplots itself
_check_plot_works(df.hist, default_axes=True)
self.plt.tight_layout()
tm.close()
def test_hist_subplot_xrot(self):
# GH 30288
df = DataFrame(
{
"length": [1.5, 0.5, 1.2, 0.9, 3],
"animal": ["pig", "rabbit", "pig", "pig", "rabbit"],
}
)
# Use default_axes=True when plotting method generate subplots itself
axes = _check_plot_works(
df.hist,
default_axes=True,
filterwarnings="always",
column="length",
by="animal",
bins=5,
xrot=0,
)
self._check_ticks_props(axes, xrot=0)
@pytest.mark.parametrize(
"column, expected",
[
(None, ["width", "length", "height"]),
(["length", "width", "height"], ["length", "width", "height"]),
],
)
def test_hist_column_order_unchanged(self, column, expected):
# GH29235
df = DataFrame(
{
"width": [0.7, 0.2, 0.15, 0.2, 1.1],
"length": [1.5, 0.5, 1.2, 0.9, 3],
"height": [3, 0.5, 3.4, 2, 1],
},
index=["pig", "rabbit", "duck", "chicken", "horse"],
)
# Use default_axes=True when plotting method generate subplots itself
axes = _check_plot_works(
df.hist,
default_axes=True,
column=column,
layout=(1, 3),
)
result = [axes[0, i].get_title() for i in range(3)]
assert result == expected
@pytest.mark.parametrize(
"histtype, expected",
[
("bar", True),
("barstacked", True),
("step", False),
("stepfilled", True),
],
)
def test_histtype_argument(self, histtype, expected):
# GH23992 Verify functioning of histtype argument
df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=["a", "b"])
ax = df.hist(histtype=histtype)
self._check_patches_all_filled(ax, filled=expected)
@pytest.mark.parametrize("by", [None, "c"])
@pytest.mark.parametrize("column", [None, "b"])
def test_hist_with_legend(self, by, column):
# GH 6279 - DataFrame histogram can have a legend
expected_axes_num = 1 if by is None and column is not None else 2
expected_layout = (1, expected_axes_num)
expected_labels = column or ["a", "b"]
if by is not None:
expected_labels = [expected_labels] * 2
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
# Use default_axes=True when plotting method generate subplots itself
axes = _check_plot_works(
df.hist,
default_axes=True,
legend=True,
by=by,
column=column,
)
self._check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
if by is None and column is None:
axes = axes[0]
for expected_label, ax in zip(expected_labels, axes):
self._check_legend_labels(ax, expected_label)
@pytest.mark.parametrize("by", [None, "c"])
@pytest.mark.parametrize("column", [None, "b"])
def test_hist_with_legend_raises(self, by, column):
# GH 6279 - DataFrame histogram with legend and label raises
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
with pytest.raises(ValueError, match="Cannot use both legend and label"):
df.hist(legend=True, by=by, column=column, label="d")
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
from pandas.plotting._matplotlib.hist import _grouped_hist
df = DataFrame(np.random.randn(500, 1), columns=["A"])
df["B"] = to_datetime(
np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=500,
dtype=np.int64,
)
)
df["C"] = np.random.randint(0, 4, 500)
df["D"] = ["X"] * 500
axes = _grouped_hist(df.A, by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
axes = df.hist(by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
# group by a key with single value
axes = df.hist(by="D", rot=30)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self._check_ticks_props(axes, xrot=30)
tm.close()
# make sure kwargs to hist are handled
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = _grouped_hist(
df.A,
by=df.C,
cumulative=True,
bins=4,
xlabelsize=xf,
xrot=xrot,
ylabelsize=yf,
yrot=yrot,
density=True,
)
# height of last bin (index 5) must be 1.0
for ax in axes.ravel():
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
height = rects[-1].get_height()
tm.assert_almost_equal(height, 1.0)
self._check_ticks_props(
axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot
)
tm.close()
axes = _grouped_hist(df.A, by=df.C, log=True)
# scale of y must be 'log'
self._check_ax_scales(axes, yaxis="log")
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
_grouped_hist(df.A, by=df.C, foo="bar")
msg = "Specify figure size by tuple instead"
with pytest.raises(ValueError, match=msg):
df.hist(by="C", figsize="default")
def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender_int = np.random.choice([0, 1], size=n)
df_int = DataFrame({"height": height, "weight": weight, "gender": gender_int})
gb = df_int.groupby("gender")
axes = gb.hist()
assert len(axes) == 2
assert len(self.plt.get_fignums()) == 2
tm.close()
def test_grouped_hist_layout(self):
df = self.hist_df
msg = "Layout of 1x1 must be larger than required size 2"
with pytest.raises(ValueError, match=msg):
df.hist(column="weight", by=df.gender, layout=(1, 1))
msg = "Layout of 1x3 must be larger than required size 4"
with pytest.raises(ValueError, match=msg):
df.hist(column="height", by=df.category, layout=(1, 3))
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.hist(column="height", by=df.category, layout=(-1, -1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.hist, column="height", by=df.gender, layout=(2, 1)
)
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.hist, column="height", by=df.gender, layout=(2, -1)
)
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = df.hist(column="height", by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column="height", by=df.category, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column="height", by=df.category, layout=(4, 2), figsize=(12, 8))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
tm.close()
# GH 6769
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.hist, column="height", by="classroom", layout=(2, 2)
)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
# without column
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, by="classroom")
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.hist(by="gender", layout=(3, 5))
self._check_axes_shape(axes, axes_num=2, layout=(3, 5))
axes = df.hist(column=["height", "weight", "category"])
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
def test_grouped_hist_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
fig, axes = self.plt.subplots(2, 3)
returned = df.hist(column=["height", "weight", "category"], ax=axes[0])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
returned = df.hist(by="classroom", ax=axes[1])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
            # pass a different number of axes than required
axes = df.hist(column="height", ax=axes)
def test_axis_share_x(self):
df = self.hist_df
# GH4089
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True)
# share x
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
# don't share y
assert not ax1._shared_y_axes.joined(ax1, ax2)
assert not ax2._shared_y_axes.joined(ax1, ax2)
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True)
# share y
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
# don't share x
assert not ax1._shared_x_axes.joined(ax1, ax2)
assert not ax2._shared_x_axes.joined(ax1, ax2)
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True)
# share both x and y
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
@pytest.mark.parametrize(
"histtype, expected",
[
("bar", True),
("barstacked", True),
("step", False),
("stepfilled", True),
],
)
def test_histtype_argument(self, histtype, expected):
# GH23992 Verify functioning of histtype argument
df = DataFrame(np.random.randint(1, 10, size=(100, 2)), columns=["a", "b"])
ax = df.hist(by="a", histtype=histtype)
self._check_patches_all_filled(ax, filled=expected)
| [
[
[
44,
55
],
[
1625,
1627
],
[
3891,
3893
],
[
3930,
3932
],
[
4887,
4889
],
[
5330,
5332
],
[
5916,
5918
],
[
6431,
6433
],
[
6497,
6499
],
[
6646,
6648
],
[
6964,
6966
],
[
7086,
7088
],
[
7152,
7154
],
[
7301,
7303
],
[
9211,
9213
],
[
9252,
9254
],
[
9336,
9338
],
[
9431,
9433
],
[
9535,
9537
],
[
9630,
9632
],
[
9985,
9987
],
[
10051,
10053
],
[
10200,
10202
],
[
11423,
11425
],
[
11489,
11491
],
[
11638,
11640
],
[
13698,
13700
],
[
14397,
14399
],
[
15285,
15287
],
[
15740,
15742
],
[
15823,
15825
],
[
15972,
15974
],
[
16024,
16026
],
[
17829,
17831
],
[
17888,
17890
],
[
17979,
17981
],
[
22871,
22873
]
],
[
[
63,
69
],
[
287,
293
],
[
4544,
4550
],
[
5020,
5026
],
[
5700,
5706
],
[
12453,
12459
],
[
13353,
13359
],
[
13865,
13871
],
[
13913,
13919
],
[
14985,
14991
],
[
15033,
15039
],
[
22526,
22532
],
[
1482,
1488
],
[
1792,
1798
],
[
1875,
1881
],
[
4461,
4467
],
[
6001,
6007
],
[
9002,
9008
],
[
9867,
9873
],
[
11103,
11109
],
[
11215,
11221
],
[
11288,
11294
],
[
15356,
15362
],
[
17513,
17519
],
[
17663,
17669
],
[
18405,
18411
],
[
18589,
18595
],
[
18775,
18781
],
[
21163,
21169
]
],
[
[
78,
112
],
[
307,
309
],
[
6123,
6125
],
[
15494,
15496
]
],
[
[
133,
142
],
[
1615,
1624
],
[
6421,
6430
],
[
6954,
6963
],
[
7076,
7085
],
[
9165,
9174
],
[
9975,
9984
],
[
11413,
11422
],
[
11927,
11936
],
[
12758,
12767
],
[
13688,
13697
],
[
14387,
14396
],
[
15275,
15284
],
[
15730,
15739
],
[
18029,
18038
],
[
22861,
22870
]
],
[
[
144,
149
],
[
14333,
14338
],
[
15221,
15226
]
],
[
[
151,
157
],
[
3884,
3890
],
[
3923,
3929
],
[
4880,
4886
],
[
5323,
5329
],
[
5909,
5915
],
[
17822,
17828
],
[
17881,
17887
]
],
[
[
159,
170
],
[
6472,
6483
],
[
7127,
7138
],
[
9303,
9314
],
[
9502,
9513
],
[
10026,
10037
],
[
11464,
11475
],
[
15798,
15809
]
],
[
[
178,
199
],
[
524,
526
],
[
838,
840
],
[
960,
962
],
[
1286,
1288
],
[
2215,
2217
],
[
2416,
2418
],
[
2618,
2620
],
[
2821,
2823
],
[
3025,
3027
],
[
3229,
3231
],
[
3433,
3435
],
[
6279,
6281
],
[
6693,
6695
],
[
7348,
7350
],
[
7574,
7576
],
[
7723,
7725
],
[
7864,
7866
],
[
8514,
8516
],
[
8721,
8723
],
[
8782,
8784
],
[
8916,
8918
],
[
11845,
11847
],
[
16201,
16203
],
[
16317,
16319
],
[
16530,
16532
],
[
17113,
17115
],
[
17270,
17272
],
[
17428,
17430
],
[
17935,
17937
],
[
18248,
18250
],
[
18897,
18899
],
[
19138,
19140
],
[
19817,
19819
],
[
19860,
19862
],
[
20128,
20130
],
[
20842,
20844
],
[
21061,
21063
]
],
[
[
241,
253
],
[
347,
359
],
[
6166,
6178
],
[
15544,
15556
],
[
406,
418
]
],
[
[
255,
272
],
[
613,
630
],
[
653,
670
],
[
705,
722
],
[
891,
908
],
[
1013,
1030
],
[
1128,
1145
],
[
1175,
1192
],
[
1234,
1251
],
[
1356,
1373
],
[
1416,
1433
],
[
2275,
2292
],
[
2476,
2493
],
[
2678,
2695
],
[
2881,
2898
],
[
3085,
3102
],
[
3289,
3306
],
[
3493,
3510
],
[
5495,
5512
],
[
6332,
6349
],
[
6753,
6770
],
[
6909,
6926
],
[
6997,
7014
],
[
7408,
7425
],
[
7627,
7644
],
[
7776,
7793
],
[
7917,
7934
],
[
11758,
11775
],
[
12189,
12206
],
[
13118,
13135
],
[
14548,
14565
],
[
18957,
18974
],
[
19198,
19215
],
[
19920,
19937
],
[
20188,
20205
]
],
[
[
274,
284
]
],
[
[
331,
346
]
],
[
[
6147,
6165
]
],
[
[
15518,
15543
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BillDingBizOrderSum(object):
def __init__(self):
self._biz_date = None
self._expenses = None
self._income = None
@property
def biz_date(self):
return self._biz_date
@biz_date.setter
def biz_date(self, value):
self._biz_date = value
@property
def expenses(self):
return self._expenses
@expenses.setter
def expenses(self, value):
self._expenses = value
@property
def income(self):
return self._income
@income.setter
def income(self, value):
self._income = value
def to_alipay_dict(self):
params = dict()
if self.biz_date:
if hasattr(self.biz_date, 'to_alipay_dict'):
params['biz_date'] = self.biz_date.to_alipay_dict()
else:
params['biz_date'] = self.biz_date
if self.expenses:
if hasattr(self.expenses, 'to_alipay_dict'):
params['expenses'] = self.expenses.to_alipay_dict()
else:
params['expenses'] = self.expenses
if self.income:
if hasattr(self.income, 'to_alipay_dict'):
params['income'] = self.income.to_alipay_dict()
else:
params['income'] = self.income
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BillDingBizOrderSum()
if 'biz_date' in d:
o.biz_date = d['biz_date']
if 'expenses' in d:
o.expenses = d['expenses']
if 'income' in d:
o.income = d['income']
return o
| [
[
[
53,
57
]
],
[
[
110,
111
]
],
[
[
120,
139
],
[
1537,
1556
]
]
] |
class Calculator:
def __init__(self):
pass
def add(self, a, b):
return a + b
    def divide(self, a, b):
        return a / b
    def subtract(self, a, b):
        return a - b
# def root(a):
#     return math.sqrt(a)
def greetings(name):
print('Hello ' + name + '!')
def goodbye():
print('Goodbye!')
myCalculator = Calculator()
print(myCalculator.subtract(3, 1))
# execfile('console.py')
# exec('console.py')
| [
[
[
6,
16
],
[
344,
354
]
],
[
[
238,
247
]
],
[
[
294,
301
]
],
[
[
329,
341
],
[
355,
367
]
]
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import enum
import fcntl
import logging
import os
import os.path
import pathlib
import queue
import re
import select
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import json
import serial
import serial.tools.list_ports
import yaml
from tvm.micro.project_api import server
_LOG = logging.getLogger(__name__)
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.path.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
BOARDS = API_SERVER_DIR / "boards.json"
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
try:
with open(BOARDS) as boards:
BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
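# Illustrative note (assumed shape, not copied from the repo): BOARD_PROPERTIES maps each
# zephyr_board name to a dict of board properties. The only key this script reads directly
# is "fpu" (see Handler._has_fpu below); the board names themselves become the choices for
# the "zephyr_board" project option, e.g. {"qemu_x86": {"fpu": true, ...}, ...}.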
def check_call(cmd_args, *args, **kwargs):
cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})"
_LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
return subprocess.check_call(cmd_args, *args, **kwargs)
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
class CMakeCache(collections.abc.Mapping):
def __init__(self, path):
self._path = path
self._dict = None
def __iter__(self):
return iter(self._dict)
def __getitem__(self, key):
if self._dict is None:
self._dict = self._read_cmake_cache()
return self._dict[key]
def __len__(self):
return len(self._dict)
def _read_cmake_cache(self):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(self._path, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
"""Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
def _get_flash_runner():
flash_runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return _get_nrf_device_args(options)
if flash_runner == "openocd":
return _get_openocd_device_args(options)
raise BoardError(
f"Don't know how to find serial terminal for board {CMAKE_CACHE['BOARD']} with flash "
f"runner {flash_runner}"
)
# kwargs passed to usb.core.find to find attached boards for the openocd flash runner.
BOARD_USB_FIND_KW = {
"nucleo_l4r5zi": {"idVendor": 0x0483, "idProduct": 0x374B},
"nucleo_f746zg": {"idVendor": 0x0483, "idProduct": 0x374B},
"stm32f746g_disco": {"idVendor": 0x0483, "idProduct": 0x374B},
"mimxrt1050_evk": {"idVendor": 0x1366, "idProduct": 0x0105},
}
def openocd_serial(options):
"""Find the serial port to use for a board with OpenOCD flash strategy."""
if "openocd_serial" in options:
return options["openocd_serial"]
import usb # pylint: disable=import-outside-toplevel
find_kw = BOARD_USB_FIND_KW[CMAKE_CACHE["BOARD"]]
boards = usb.core.find(find_all=True, **find_kw)
serials = []
for b in boards:
serials.append(b.serial_number)
if len(serials) == 0:
raise BoardAutodetectFailed(f"No attached USB devices matching: {find_kw!r}")
serials.sort()
autodetected_openocd_serial = serials[0]
_LOG.debug("zephyr openocd driver: autodetected serial %s", serials[0])
return autodetected_openocd_serial
def _get_openocd_device_args(options):
return ["--serial", openocd_serial(options)]
def _get_nrf_device_args(options):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if options["nrfjprog_snr"] is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
)
if str(options["nrfjprog_snr"]) not in boards:
raise BoardError(
f"nrfjprog_snr ({options['nrfjprog_snr']}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", options["nrfjprog_snr"]]
if not boards:
return []
return ["--snr", boards[0]]
PROJECT_TYPES = []
if IS_TEMPLATE:
for d in (API_SERVER_DIR / "src").iterdir():
if d.is_dir():
PROJECT_TYPES.append(d.name)
PROJECT_OPTIONS = [
server.ProjectOption(
"extra_files_tar",
help="If given, during generate_project, uncompress the tarball at this path into the project dir.",
),
server.ProjectOption(
"gdbserver_port", help=("If given, port number to use when running the local gdbserver.")
),
server.ProjectOption(
"nrfjprog_snr",
help=("When used with nRF targets, serial # of the attached board to use, from nrfjprog."),
),
server.ProjectOption(
"openocd_serial",
help=("When used with OpenOCD targets, serial # of the attached board to use."),
),
server.ProjectOption(
"project_type",
help="Type of project to generate.",
choices=tuple(PROJECT_TYPES),
),
server.ProjectOption("verbose", help="Run build with verbose output.", choices=(True, False)),
server.ProjectOption(
"west_cmd",
help=(
"Path to the west tool. If given, supersedes both the zephyr_base "
"option and ZEPHYR_BASE environment variable."
),
),
server.ProjectOption("zephyr_base", help="Path to the zephyr base directory."),
server.ProjectOption(
"zephyr_board",
choices=list(BOARD_PROPERTIES),
help="Name of the Zephyr board to build for.",
),
server.ProjectOption(
"config_main_stack_size",
help="Sets CONFIG_MAIN_STACK_SIZE for Zephyr board.",
),
]
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
    # Maps an extra prj.conf line to the tuple or list of zephyr_board values that need it.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
with open(project_dir / "prj.conf", "w") as f:
f.write(
"# For UART used from main().\n"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
if options["project_type"] == "host_driven":
f.write("# For RPC server C++ bindings.\n" "CONFIG_CPLUSPLUS=y\n" "\n")
f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
if self._has_fpu(options["zephyr_board"]):
f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
# Set main stack size, if needed.
if options.get("config_main_stack_size") is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={options['config_main_stack_size']}\n")
f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
f.write("\n# Extra prj.conf directives\n")
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if options["zephyr_board"] in board_list:
f.write(f"{line}\n")
f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common common",
"aot_demo": "memory microtvm_rpc_common common",
}
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
project_dir = pathlib.Path(project_dir)
# Make project directory.
project_dir.mkdir()
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Copy boards.json file to generated project.
shutil.copy2(BOARDS, project_dir / BOARDS.name)
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
        # Extract Model Library Format tarball into <project_dir>/model.
extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(options):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
# Populate CRT.
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate Makefile.
with open(API_SERVER_DIR / "CMakeLists.txt.template", "r") as cmake_template_f:
with open(project_dir / "CMakeLists.txt", "w") as cmake_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
cmake_f.write(line)
self._create_prj_conf(project_dir, options)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
# Populate src/
src_dir = project_dir / "src"
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
# Populate extra_files
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
BUILD_DIR.mkdir()
cmake_args = ["cmake", ".."]
if options.get("verbose"):
cmake_args.append("-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE")
if options.get("zephyr_base"):
cmake_args.append(f"-DZEPHYR_BASE:STRING={options['zephyr_base']}")
if options.get("west_cmd"):
cmake_args.append(f"-DWEST={options['west_cmd']}")
cmake_args.append(f"-DBOARD:STRING={options['zephyr_board']}")
check_call(cmake_args, cwd=BUILD_DIR)
args = ["make", "-j2"]
if options.get("verbose"):
args.append("VERBOSE=1")
check_call(args, cwd=BUILD_DIR)
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ("mps2_an521",)
@classmethod
def _is_qemu(cls, options):
return (
"qemu" in options["zephyr_board"]
or options["zephyr_board"] in cls._KNOWN_QEMU_ZEPHYR_BOARDS
)
@classmethod
def _has_fpu(cls, zephyr_board):
fpu_boards = [name for name, board in BOARD_PROPERTIES.items() if board["fpu"]]
return zephyr_board in fpu_boards
def flash(self, options):
if self._is_qemu(options):
return # NOTE: qemu requires no flash step--it is launched from open_transport.
zephyr_board = options["zephyr_board"]
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["make", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
if self._is_qemu(options):
transport = ZephyrQemuTransport(options)
else:
transport = ZephyrSerialTransport(options)
to_return = transport.open()
self._transport = transport
atexit.register(lambda: self.close_transport())
return to_return
def close_transport(self):
if self._transport is not None:
self._transport.close()
self._transport = None
def read_transport(self, n, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
    assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
@classmethod
def _lookup_baud_rate(cls, options):
zephyr_base = options.get("zephyr_base", os.environ["ZEPHYR_BASE"])
sys.path.insert(0, os.path.join(zephyr_base, "scripts", "dts"))
try:
import dtlib # pylint: disable=import-outside-toplevel
finally:
sys.path.pop(0)
dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
return uart_baud
@classmethod
def _find_nrf_serial_port(cls, options):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
)
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
return ports_by_vcom["VCOM2"]
@classmethod
def _find_openocd_serial_port(cls, options):
serial_number = openocd_serial(options)
ports = [p for p in serial.tools.list_ports.grep(serial_number)]
if len(ports) != 1:
raise Exception(
f"_find_openocd_serial_port: expected 1 port to match {serial_number}, "
f"found: {ports!r}"
)
return ports[0].device
@classmethod
def _find_jlink_serial_port(cls, options):
return cls._find_openocd_serial_port(options)
@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(options)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)
if flash_runner == "jlink":
return cls._find_jlink_serial_port(options)
raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")
def __init__(self, options):
self._options = options
self._port = None
def open(self):
port_path = self._find_serial_port(self._options)
self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
self._port.close()
self._port = None
def read(self, n, timeout_sec):
self._port.timeout = timeout_sec
to_return = self._port.read(n)
if not to_return:
raise server.IoTimeoutError()
return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
QEMU_STARTED = "qemu_started"
MAKE_FAILED = "make_failed"
EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
def __init__(self, options):
self.options = options
self.proc = None
self.pipe_dir = None
self.read_fd = None
self.write_fd = None
self._queue = queue.Queue()
def open(self):
self.pipe_dir = pathlib.Path(tempfile.mkdtemp())
self.pipe = self.pipe_dir / "fifo"
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
if "gdbserver_port" in self.options:
if "env" in self.kwargs:
self.kwargs["env"] = copy.copy(self.kwargs["env"])
else:
self.kwargs["env"] = os.environ.copy()
self.kwargs["env"]["TVM_QEMU_GDBSERVER_PORT"] = str(self.options["gdbserver_port"])
self.proc = subprocess.Popen(
["make", "run", f"QEMU_PIPE={self.pipe}"],
cwd=BUILD_DIR,
stdout=subprocess.PIPE,
)
self._wait_for_qemu()
# NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
# limitation on linux. Without this, non-blocking I/O can't use timeouts because named
        # FIFOs are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
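        # Note: 0x01 is the control byte this transport uses (close() sends b"\x01x" to
        # terminate QEMU), so any literal 0x01 in the payload is doubled here, which
        # presumably escapes it for the receiver.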
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
while to_write:
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
to_write = to_write[num_written:]
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
if __name__ == "__main__":
server.main(Handler())
| [
[
[
793,
799
],
[
16168,
16174
]
],
[
[
807,
818
]
],
[
[
826,
841
],
[
2332,
2343
],
[
2831,
2842
]
],
[
[
849,
853
],
[
20198,
20202
]
],
[
[
861,
866
],
[
16796,
16801
],
[
16812,
16817
],
[
16831,
16836
],
[
16847,
16852
],
[
16899,
16904
],
[
16915,
16920
]
],
[
[
874,
881
],
[
1196,
1203
]
],
[
[
889,
891
]
],
[
[
899,
906
],
[
1256,
1258
],
[
1285,
1287
],
[
11082,
11084
],
[
11673,
11675
],
[
11819,
11821
],
[
12164,
12166
],
[
12257,
12259
],
[
16869,
16871
],
[
16953,
16955
],
[
17162,
17164
],
[
17216,
17218
],
[
20821,
20823
],
[
20856,
20858
],
[
21087,
21089
],
[
21706,
21708
],
[
21730,
21732
],
[
21742,
21744
],
[
21781,
21783
],
[
21806,
21808
],
[
21818,
21820
],
[
22476,
22478
],
[
22794,
22796
]
],
[
[
914,
921
],
[
1243,
1250
],
[
10803,
10810
],
[
20633,
20640
]
],
[
[
929,
934
],
[
20574,
20579
]
],
[
[
942,
944
],
[
2081,
2083
],
[
23773,
23775
],
[
23888,
23890
],
[
23913,
23915
]
],
[
[
952,
958
]
],
[
[
966,
971
],
[
1967,
1972
]
],
[
[
979,
985
],
[
11045,
11051
],
[
11173,
11179
],
[
11497,
11503
],
[
11938,
11944
],
[
12298,
12304
],
[
12368,
12374
],
[
13086,
13092
],
[
13273,
13279
],
[
22900,
22906
]
],
[
[
993,
1003
],
[
2013,
2023
],
[
5598,
5608
],
[
17842,
17852
],
[
21223,
21233
],
[
21342,
21352
],
[
22696,
22706
]
],
[
[
1011,
1014
],
[
17197,
17200
],
[
17371,
17374
]
],
[
[
1022,
1029
],
[
11745,
11752
],
[
13440,
13447
]
],
[
[
1037,
1045
],
[
20646,
20654
]
],
[
[
1053,
1062
],
[
24088,
24097
]
],
[
[
1070,
1074
]
],
[
[
1082,
1086
],
[
1686,
1690
]
],
[
[
1095,
1101
]
],
[
[
1109,
1132
],
[
18294,
18300
],
[
19365,
19371
]
],
[
[
1140,
1144
],
[
3849,
3853
],
[
3869,
3873
]
],
[
[
1180,
1186
],
[
6545,
6551
],
[
6714,
6720
],
[
6845,
6851
],
[
7002,
7008
],
[
7150,
7156
],
[
7290,
7296
],
[
7389,
7395
],
[
7607,
7613
],
[
7691,
7697
],
[
7843,
7849
],
[
7986,
7992
],
[
24647,
24653
],
[
8165,
8171
],
[
16485,
16491
],
[
16673,
16679
],
[
19453,
19459
],
[
19866,
19872
],
[
21922,
21928
],
[
22220,
22226
],
[
22420,
22426
],
[
23014,
23020
],
[
23382,
23388
]
],
[
[
1189,
1193
],
[
1926,
1930
],
[
5298,
5302
],
[
17650,
17654
],
[
23606,
23610
]
],
[
[
1226,
1240
],
[
1317,
1331
],
[
1408,
1422
],
[
1475,
1489
],
[
6420,
6434
],
[
8343,
8357
],
[
11954,
11968
],
[
12449,
12463
],
[
13112,
13126
],
[
13289,
13303
],
[
15794,
15808
],
[
15863,
15877
]
],
[
[
1305,
1314
],
[
3337,
3346
],
[
13582,
13591
],
[
14069,
14078
],
[
14213,
14222
],
[
17415,
17424
],
[
21312,
21321
]
],
[
[
1344,
1372
],
[
1425,
1453
],
[
8360,
8388
],
[
11460,
11488
]
],
[
[
1389,
1400
],
[
6393,
6404
],
[
8244,
8255
],
[
8313,
8324
]
],
[
[
1466,
1472
],
[
1640,
1646
],
[
1774,
1780
],
[
11186,
11192
],
[
11208,
11214
]
],
[
[
1651,
1657
],
[
1696,
1702
]
],
[
[
1667,
1683
],
[
7758,
7774
],
[
14831,
14847
]
],
[
[
1808,
1818
],
[
14042,
14052
],
[
14192,
14202
],
[
15765,
15775
],
[
15829,
15839
]
],
[
[
2064,
2078
],
[
2958,
2972
]
],
[
[
2144,
2158
],
[
3128,
3142
]
],
[
[
2321,
2331
],
[
3326,
3336
]
],
[
[
3312,
3323
],
[
3672,
3683
],
[
3793,
3804
],
[
4237,
4248
],
[
4963,
4974
]
],
[
[
3375,
3385
],
[
4165,
4175
],
[
5921,
5931
],
[
6122,
6132
]
],
[
[
3495,
3516
],
[
5157,
5178
],
[
5706,
5727
]
],
[
[
3632,
3649
],
[
3969,
3986
],
[
15602,
15619
],
[
18768,
18785
]
],
[
[
3923,
3939
],
[
17903,
17919
]
],
[
[
4400,
4417
],
[
4945,
4962
]
],
[
[
4690,
4704
],
[
5475,
5489
],
[
18242,
18256
]
],
[
[
5416,
5440
],
[
4120,
4144
]
],
[
[
5506,
5526
],
[
4040,
4060
],
[
15722,
15742
]
],
[
[
6371,
6384
],
[
6490,
6503
],
[
7263,
7276
]
],
[
[
6414,
6415
],
[
6466,
6467
],
[
6511,
6512
]
],
[
[
6521,
6536
],
[
8419,
8434
]
],
[
[
7978,
7985
],
[
24659,
24666
],
[
8051,
8058
]
],
[
[
16766,
16779
],
[
21841,
21854
],
[
21877,
21890
]
],
[
[
17032,
17053
],
[
16055,
16076
]
],
[
[
20177,
20197
],
[
23697,
23717
],
[
23965,
23985
],
[
24023,
24043
],
[
24359,
24379
],
[
24441,
24461
],
[
24475,
24495
]
],
[
[
20300,
20319
],
[
15988,
16007
]
]
] |
"""
MIT License
Copyright (c) 2019 Yoga Suhas Kuruba Manjunath
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# raw images should be saved in the "images" folder
image_folder = './images'
# final preprocessed images will be stored
extracted_folder = './extracted_images'
# to store model files
models = './models'
# to store graphs
graphs = './graphs'
# vertical and horizontal size to be used
image_size_vertical = 100
image_size_horizontal = 100
# number of epochs to train a model
epoch = 100
# batch size used to train a model
batch_size = 64
# data set split ratio
train_ratio = 0.6
test_ratio = 0.2
validation_ratio = 0.2
# input data shape, this will be updated
# accordingly in the code for GREY_SCALE
# or RGB images if used.
x_shape = ()
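# Illustrative sketch (assumption: the training code fills this in as described above), e.g.
#   x_shape = (image_size_vertical, image_size_horizontal, GREY)  # grey-scale input
#   x_shape = (image_size_vertical, image_size_horizontal, RGB)   # RGB input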
# type of channels
GREY = 1
RGB = 3
# this config represents the image fusion
# in a vertical or horizontal way
vertical = "VERTICAL"
horizontal = "HORIZONTAL"
# number of classes, this will be updated
# in code
num_classes = 0
# labeling of classes, this will be updated
# in code
person_label = {} | [
[
[
1140,
1152
]
],
[
[
1210,
1226
]
],
[
[
1274,
1280
]
],
[
[
1313,
1319
]
],
[
[
1377,
1396
]
],
[
[
1403,
1424
]
],
[
[
1468,
1473
]
],
[
[
1516,
1526
]
],
[
[
1556,
1567
]
],
[
[
1574,
1584
]
],
[
[
1591,
1607
]
],
[
[
1722,
1729
]
],
[
[
1755,
1759
]
],
[
[
1764,
1767
]
],
[
[
1847,
1855
]
],
[
[
1869,
1879
]
],
[
[
1948,
1959
]
],
[
[
2019,
2031
]
]
] |
import numpy as np
import theano
import theano.tensor as TT
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import krylov
from rllab.misc import logger
from rllab.misc.ext import sliced_fun
class PerlmutterHvp(Serializable):
def __init__(self, num_slices=1):
Serializable.quick_init(self, locals())
self.target = None
self.reg_coeff = None
self.opt_fun = None
self._num_slices = num_slices
def update_opt(self, f, target, inputs, reg_coeff):
self.target = target
self.reg_coeff = reg_coeff
params = target.get_params(trainable=True)
constraint_grads = theano.grad(
f, wrt=params, disconnected_inputs='warn')
xs = tuple([ext.new_tensor_like("%s x" % p.name, p) for p in params])
def Hx_plain():
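            # Pearlmutter's trick: for a scalar f with gradient g = df/dtheta, the
            # Hessian-vector product H x equals the gradient of (g . x), so taking a
            # second symbolic grad of sum_i g_i * x_i yields H x without forming H.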
Hx_plain_splits = TT.grad(
TT.sum([TT.sum(g * x)
for g, x in zip(constraint_grads, xs)]),
wrt=params,
disconnected_inputs='warn'
)
return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])
self.opt_fun = ext.lazydict(
f_Hx_plain=lambda: ext.compile_function(
inputs=inputs + xs,
outputs=Hx_plain(),
log_name="f_Hx_plain",
),
)
def build_eval(self, inputs):
def eval(x):
xs = tuple(self.target.flat_to_params(x, trainable=True))
ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
inputs, xs) + self.reg_coeff * x
return ret
return eval
class FiniteDifferenceHvp(Serializable):
def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):
Serializable.quick_init(self, locals())
self.base_eps = base_eps
self.symmetric = symmetric
self.grad_clip = grad_clip
self._num_slices = num_slices
def update_opt(self, f, target, inputs, reg_coeff):
self.target = target
self.reg_coeff = reg_coeff
params = target.get_params(trainable=True)
constraint_grads = theano.grad(
f, wrt=params, disconnected_inputs='warn')
flat_grad = ext.flatten_tensor_variables(constraint_grads)
def f_Hx_plain(*args):
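            # Finite-difference Hessian-vector product: perturb the parameters by
            # eps * x (with eps scaled as base_eps / ||theta||) and difference the gradients:
            #   symmetric: Hx ~ (g(theta + eps*x) - g(theta - eps*x)) / (2 * eps)
            #   one-sided: Hx ~ (g(theta + eps*x) - g(theta)) / eps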
inputs_ = args[:len(inputs)]
xs = args[len(inputs):]
flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])
param_val = self.target.get_param_values(trainable=True)
eps = np.cast['float32'](
self.base_eps / (np.linalg.norm(param_val) + 1e-8))
self.target.set_param_values(
param_val + eps * flat_xs, trainable=True)
flat_grad_dvplus = self.opt_fun["f_grad"](*inputs_)
if self.symmetric:
self.target.set_param_values(
param_val - eps * flat_xs, trainable=True)
flat_grad_dvminus = self.opt_fun["f_grad"](*inputs_)
hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
self.target.set_param_values(param_val, trainable=True)
else:
self.target.set_param_values(param_val, trainable=True)
flat_grad = self.opt_fun["f_grad"](*inputs_)
hx = (flat_grad_dvplus - flat_grad) / eps
return hx
self.opt_fun = ext.lazydict(
f_grad=lambda: ext.compile_function(
inputs=inputs,
outputs=flat_grad,
log_name="f_grad",
),
f_Hx_plain=lambda: f_Hx_plain,
)
def build_eval(self, inputs):
def eval(x):
xs = tuple(self.target.flat_to_params(x, trainable=True))
ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
inputs, xs) + self.reg_coeff * x
return ret
return eval
class ConjugateGradientOptimizer(Serializable):
"""
Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
of the loss function.
"""
def __init__(
self,
cg_iters=10,
reg_coeff=1e-5,
subsample_factor=1.,
backtrack_ratio=0.8,
max_backtracks=15,
accept_violation=False,
hvp_approach=None,
num_slices=1):
"""
:param cg_iters: The number of CG iterations used to calculate A^-1 g
:param reg_coeff: A small value so that A -> A + reg*I
        :param subsample_factor: Subsampling factor to reduce samples when using conjugate gradient. Since the
computation time for the descent direction dominates, this can greatly reduce the overall computation time.
:param accept_violation: whether to accept the descent step if it violates the line search condition after
exhausting all backtracking budgets
:return:
"""
Serializable.quick_init(self, locals())
self._cg_iters = cg_iters
self._reg_coeff = reg_coeff
self._subsample_factor = subsample_factor
self._backtrack_ratio = backtrack_ratio
self._max_backtracks = max_backtracks
self._num_slices = num_slices
self._opt_fun = None
self._target = None
self._max_constraint_val = None
self._constraint_name = None
self._accept_violation = accept_violation
if hvp_approach is None:
hvp_approach = PerlmutterHvp(num_slices)
self._hvp_approach = hvp_approach
def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name="constraint", *args,
**kwargs):
"""
:param loss: Symbolic expression for the loss function.
:param target: A parameterized object to optimize over. It should implement methods of the
:class:`rllab.core.paramerized.Parameterized` class.
:param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
:param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. It is assumed
that the first dimension of these inputs should correspond to the number of data points
:param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled
:return: No return value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
constraint_term, constraint_value = leq_constraint
params = target.get_params(trainable=True)
grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')
flat_grad = ext.flatten_tensor_variables(grads)
self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,
reg_coeff=self._reg_coeff)
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
self._opt_fun = ext.lazydict(
f_loss=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=loss,
log_name="f_loss",
),
f_grad=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=flat_grad,
log_name="f_grad",
),
f_constraint=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=constraint_term,
log_name="constraint",
),
f_loss_constraint=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=[loss, constraint_term],
log_name="f_loss_constraint",
),
)
def loss(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun["f_loss"], self._num_slices)(inputs, extra_inputs)
def constraint_val(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun["f_constraint"], self._num_slices)(inputs, extra_inputs)
def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
if self._subsample_factor < 1:
if subsample_grouped_inputs is None:
subsample_grouped_inputs = [inputs]
subsample_inputs = tuple()
for inputs_grouped in subsample_grouped_inputs:
n_samples = len(inputs_grouped[0])
inds = np.random.choice(
n_samples, int(n_samples * self._subsample_factor), replace=False)
subsample_inputs += tuple([x[inds] for x in inputs_grouped])
else:
subsample_inputs = inputs
logger.log("computing loss before")
loss_before = sliced_fun(self._opt_fun["f_loss"], self._num_slices)(
inputs, extra_inputs)
logger.log("performing update")
logger.log("computing descent direction")
flat_g = sliced_fun(self._opt_fun["f_grad"], self._num_slices)(
inputs, extra_inputs)
Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(
2.0 * self._max_constraint_val *
(1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8))
)
if np.isnan(initial_step_size):
initial_step_size = 1.
flat_descent_step = initial_step_size * descent_direction
logger.log("descent direction computed")
prev_param = np.copy(self._target.get_param_values(trainable=True))
n_iter = 0
for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
cur_step = ratio * flat_descent_step
cur_param = prev_param - cur_step
self._target.set_param_values(cur_param, trainable=True)
loss, constraint_val = sliced_fun(
self._opt_fun["f_loss_constraint"], self._num_slices)(inputs, extra_inputs)
if loss < loss_before and constraint_val <= self._max_constraint_val:
break
if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=
self._max_constraint_val) and not self._accept_violation:
logger.log("Line search condition violated. Rejecting the step!")
if np.isnan(loss):
logger.log("Violated because loss is NaN")
if np.isnan(constraint_val):
logger.log("Violated because constraint %s is NaN" %
self._constraint_name)
if loss >= loss_before:
logger.log("Violated because loss not improving")
if constraint_val >= self._max_constraint_val:
logger.log(
"Violated because constraint %s is violated" % self._constraint_name)
self._target.set_param_values(prev_param, trainable=True)
logger.log("backtrack iters: %d" % n_iter)
logger.log("computing loss after")
logger.log("optimization finished")
| [
[
[
7,
18
],
[
9147,
9149
],
[
9916,
9918
],
[
10064,
10066
],
[
10266,
10268
],
[
10404,
10406
],
[
10857,
10859
],
[
10875,
10877
],
[
11111,
11113
],
[
11201,
11203
],
[
2463,
2465
],
[
2479,
2481
],
[
2601,
2603
],
[
2654,
2656
]
],
[
[
26,
32
],
[
682,
688
],
[
2197,
2203
],
[
6929,
6935
]
],
[
[
40,
59
],
[
883,
885
],
[
908,
910
],
[
916,
918
],
[
1099,
1101
],
[
1115,
1117
]
],
[
[
97,
109
],
[
257,
269
],
[
1706,
1718
],
[
4024,
4036
],
[
319,
331
],
[
1815,
1827
],
[
5175,
5187
]
],
[
[
133,
136
],
[
770,
773
],
[
1180,
1183
],
[
2285,
2288
],
[
3462,
3465
],
[
7007,
7010
],
[
7367,
7370
],
[
1225,
1228
],
[
3503,
3506
],
[
7408,
7411
],
[
7583,
7586
],
[
7769,
7772
],
[
7970,
7973
]
],
[
[
160,
166
],
[
9840,
9846
]
],
[
[
190,
196
],
[
9390,
9396
],
[
9545,
9551
],
[
9585,
9591
],
[
10203,
10209
],
[
11030,
11036
],
[
11143,
11149
],
[
11243,
11249
],
[
11398,
11404
],
[
11523,
11529
],
[
11703,
11709
],
[
11754,
11760
],
[
11797,
11803
]
],
[
[
224,
234
],
[
8320,
8330
],
[
8568,
8578
],
[
9448,
9458
],
[
9645,
9655
],
[
10637,
10647
],
[
1527,
1537
],
[
3838,
3848
]
],
[
[
243,
256
],
[
5712,
5725
]
],
[
[
1686,
1705
]
],
[
[
3997,
4023
]
]
] |
# coding: utf-8
import pprint
import re
import six
class TemplateCddl:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'flow': 'FlowItem',
'states': 'dict(str, TemplateState)',
'workflow': 'Workflow'
}
attribute_map = {
'flow': 'flow',
'states': 'states',
'workflow': 'workflow'
}
def __init__(self, flow=None, states=None, workflow=None):
"""TemplateCddl - a model defined in huaweicloud sdk"""
self._flow = None
self._states = None
self._workflow = None
self.discriminator = None
self.flow = flow
self.states = states
self.workflow = workflow
@property
def flow(self):
"""Gets the flow of this TemplateCddl.
:return: The flow of this TemplateCddl.
:rtype: FlowItem
"""
return self._flow
@flow.setter
def flow(self, flow):
"""Sets the flow of this TemplateCddl.
:param flow: The flow of this TemplateCddl.
:type: FlowItem
"""
self._flow = flow
@property
def states(self):
"""Gets the states of this TemplateCddl.
        Sub-task states, provided as map-type data.
:return: The states of this TemplateCddl.
:rtype: dict(str, TemplateState)
"""
return self._states
@states.setter
def states(self, states):
"""Sets the states of this TemplateCddl.
        Sub-task states, provided as map-type data.
:param states: The states of this TemplateCddl.
:type: dict(str, TemplateState)
"""
self._states = states
@property
def workflow(self):
"""Gets the workflow of this TemplateCddl.
:return: The workflow of this TemplateCddl.
:rtype: Workflow
"""
return self._workflow
@workflow.setter
def workflow(self, workflow):
"""Sets the workflow of this TemplateCddl.
:param workflow: The workflow of this TemplateCddl.
:type: Workflow
"""
self._workflow = workflow
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateCddl):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
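# Minimal usage sketch (illustrative only; real code would pass FlowItem / TemplateState /
# Workflow instances from the SDK, plain dicts are used here just to show the to_dict()
# traversal):
#     t = TemplateCddl(flow={"name": "f1"}, states={}, workflow={"id": "w1"})
#     print(t.to_str())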
| [
[
[
24,
30
],
[
3377,
3383
]
],
[
[
38,
40
]
],
[
[
49,
52
],
[
2457,
2460
]
],
[
[
64,
76
],
[
3617,
3629
]
]
] |
from flask import Flask, render_template, redirect, url_for, flash, request, abort
from functions import UserLogin, UserRegistration, NewExpense
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from datetime import datetime, timedelta, date
from flask_bcrypt import Bcrypt
from flask_login import LoginManager, UserMixin, login_user, current_user, logout_user, login_required
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from itertools import zip_longest
import os
import io
import base64
import numpy as np
app = Flask(__name__)
SECRET_KEY = os.urandom(16)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = ' '
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(30), unique=True, nullable=False)
username = db.Column(db.String(10), unique=True, nullable=False)
password = db.Column(db.String(128), nullable=False)
expense_id = db.relationship('UserExpense', backref='expensedate', lazy='dynamic')
def __repr__(self):
return f"User('{self.username}', '{self.email}')"
class UserExpense(db.Model):
__tablename__ = 'user_expenses'
id = db.Column(db.Integer, primary_key=True)
userid = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
category = db.Column(db.String(30))
description = db.Column(db.String(50))
expense = db.Column(db.Numeric(scale=2, asdecimal=True))
    expense_date = db.Column(db.Date, default=date.today)  # pass the callable so each row gets its insert date, not the app start date
def __repr__(self):
return f"UserExpense('{self.category}', '{self.description}', '{self.expense}', '{self.expense_date}')"
@app.route('/', methods=['GET', 'POST'])
def login():
form = UserLogin()
if current_user.is_authenticated:
return redirect(url_for('overview'))
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('overview'))
else:
flash('Invalid login', 'danger')
return render_template('login.html', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('overview'))
form = UserRegistration()
if form.validate_on_submit():
password_hashed = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=password_hashed)
db.session.add(user)
db.session.commit()
flash('Account created!', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/logout')
def logout():
logout_user()
flash('Logged out!', 'success')
return redirect(url_for('login'))
@app.route('/overview', methods=['GET','POST'])
@login_required
def overview():
form = NewExpense()
userids = current_user.id
name = current_user.username
# Forms
if form.validate_on_submit():
expenses = UserExpense(category=form.category.data, description=form.description.data,
expense=form.expense.data, expensedate=current_user)
db.session.add(expenses)
db.session.commit()
# Queries
filters = db.session.query(UserExpense.expense_date).filter(UserExpense.userid==userids).distinct()
date_list=[] #List of distinct dates
for u in filters:
date_list.append(f'{u.expense_date}')
date_expense_list=[] #List of expenses for that specific date
for item in date_list:
date_expense = db.session.query(func.sum(UserExpense.expense)).filter(UserExpense.userid==userids, UserExpense.expense_date==item).scalar()
date_expense_list.append(f'{date_expense}')
item = list(zip_longest(date_list,date_expense_list,date_list, fillvalue=""))
# Matplotlib
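    # The figure below is rendered to an in-memory PNG (io.BytesIO + FigureCanvasAgg)
    # and embedded in the template as a base64 "data:image/png" URI, so no image file
    # is written to disk.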
fig, ax = plt.subplots(figsize=(11, 5))
ax.plot(date_list, [float(g) for g in date_expense_list], label="Expenses")
ax.legend()
fig.suptitle('Expense pattern')
patternpngImage = io.BytesIO()
FigureCanvas(fig).print_png(patternpngImage)
patternpngImageString = "data:image/png;base64,"
patternpngImageString += base64.b64encode(patternpngImage.getvalue()).decode('utf8')
return render_template('overview.html', normal='normal', title='Expenses',image=patternpngImageString,
form=form, name=name, item=item)
@app.route('/expense/<string:wkex_id>', methods=['GET','POST'])
@login_required
def userexpenses(wkex_id):
form = NewExpense()
userids = current_user.id
name = current_user.username
# Queries
items = db.session.query(UserExpense).filter(UserExpense.userid==userids, UserExpense.expense_date==wkex_id)
todays = str(date.today())
state="not"
    if wkex_id == todays:
        state="today"
    if wkex_id > todays:
abort(404)
# Forms
if form.validate_on_submit():
expenses = UserExpense(category=form.category.data, description=form.description.data,
expense=form.expense.data, expensedate=current_user)
db.session.add(expenses)
db.session.commit()
flash('Expense added!', 'success')
return redirect(url_for('userexpenses', wkex_id=wkex_id))
return render_template('expenses.html', normal='normal', title='Expenses',
form=form, items=items, name=name, ids=wkex_id, state=state)
@app.route('/expense/<string:wkex_id>/<int:ex_id>/delete', methods=['GET','POST'])
@login_required
def delete_expense(wkex_id, ex_id):
expenses = db.session.query(UserExpense).get_or_404(ex_id) # Query for valid access
if expenses.expensedate != current_user:
abort(403)
db.session.delete(expenses)
db.session.commit()
flash('Expense deleted', 'success')
return redirect(url_for('overview'))
@app.route("/expense/<string:wkex_id>/<int:ex_id>/update", methods=['GET', 'POST'])
@login_required
def update_expense(wkex_id, ex_id):
name = current_user.username
expenses = db.session.query(UserExpense).get_or_404(ex_id) # Query for valid access
if expenses.expensedate != current_user:
abort(403)
form = NewExpense()
if form.validate_on_submit():
expenses.category = form.category.data
expenses.description = form.description.data
expenses.expense = form.expense.data
db.session.commit()
flash('Expense updated', 'success')
return redirect(url_for('overview'))
elif request.method=='GET':
form.category.data = expenses.category
        form.description.data = expenses.description
form.expense.data = expenses.expense
return render_template('expenses.html', title='Expenses',form=form, name=name, wkex_id=wkex_id, state='today')
@app.route("/expense/<string:day_id>/charts", methods=['GET', 'POST'])
@login_required
def charts(day_id):
userids = current_user.id
name = current_user.username
# Queries
categories = db.session.query(UserExpense.category).filter(UserExpense.userid==userids,
UserExpense.expense_date==day_id).distinct()
cat_list=[]
for u in categories:
cat_list.append(f'{u.category}')
counts_list=[]
for item in cat_list:
counts = db.session.query(UserExpense.category).filter(UserExpense.userid==userids,
UserExpense.expense_date==day_id,
UserExpense.category==item).count()
counts_list.append(counts)
sum_list=[]
for item in cat_list:
Sums = db.session.query(func.sum(UserExpense.expense)).filter(UserExpense.userid==userids,
UserExpense.expense_date==day_id,
UserExpense.category==item).scalar()
sum_list.append(f'{Sums}')
# Highest expenditure graph
fig, axs = plt.subplots(figsize=(10, 5))
axs.bar(cat_list, [float(g) for g in sum_list])
fig.suptitle('Expenditure breakdown')
# Frequency graph
fig1, ax1 = plt.subplots(figsize=(10, 5), subplot_kw=dict(aspect="equal"))
wedges, texts = ax1.pie(counts_list, wedgeprops=dict(width=0.5), startangle=-40)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(arrowprops=dict(arrowstyle="-"),
bbox=bbox_props, zorder=0, va="top")
for i, p in enumerate(wedges):
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
horizontalalignment = {-1: "right", 1: "left"}[int(np.sign(x))]
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle})
ax1.annotate(cat_list[i], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),
horizontalalignment=horizontalalignment, **kw)
ax1.set_title("Expenses category frequency")
# Convert plot to PNG image
highpngImage = io.BytesIO()
freqpngImage = io.BytesIO()
FigureCanvas(fig).print_png(highpngImage)
FigureCanvas(fig1).print_png(freqpngImage)
# Encode PNG image to base64 string
highpngImageString = "data:image/png;base64,"
highpngImageString += base64.b64encode(highpngImage.getvalue()).decode('utf8')
freqpngImageString = "data:image/png;base64,"
freqpngImageString += base64.b64encode(freqpngImage.getvalue()).decode('utf8')
return render_template('charts.html',title ='History', name=name,
image1=highpngImageString, image2=freqpngImageString, day_id=day_id)
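# --- Illustrative helper, not part of the original app ----------------------
# The overview/charts views above repeat the same "Matplotlib figure -> base64
# data URI" conversion. A minimal sketch of that pattern, assuming the io,
# base64 and FigureCanvas names already imported earlier in this file:
def figure_to_data_uri(fig):
    buffer = io.BytesIO()
    FigureCanvas(fig).print_png(buffer)
    return "data:image/png;base64," + base64.b64encode(buffer.getvalue()).decode('utf8')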
if __name__ == '__main__':
app.run()
| [
[
[
18,
23
],
[
639,
644
]
],
[
[
25,
40
],
[
2547,
2562
],
[
3146,
3161
],
[
4834,
4849
],
[
5883,
5898
],
[
7295,
7310
],
[
10224,
10239
]
],
[
[
42,
50
],
[
2150,
2158
],
[
2447,
2455
],
[
2707,
2715
],
[
3108,
3116
],
[
3310,
3318
],
[
5820,
5828
],
[
6434,
6442
],
[
7077,
7085
]
],
[
[
52,
59
],
[
2159,
2166
],
[
2456,
2463
],
[
2716,
2723
],
[
3117,
3124
],
[
3319,
3326
],
[
5829,
5836
],
[
6443,
6450
],
[
7086,
7093
]
],
[
[
61,
66
],
[
2503,
2508
],
[
3056,
3061
],
[
3267,
3272
],
[
5770,
5775
],
[
6387,
6392
],
[
7026,
7031
]
],
[
[
68,
75
],
[
7117,
7124
]
],
[
[
77,
82
],
[
5463,
5468
],
[
6316,
6321
],
[
6775,
6780
]
],
[
[
105,
114
],
[
2085,
2094
]
],
[
[
116,
132
],
[
2748,
2764
]
],
[
[
134,
144
],
[
3429,
3439
],
[
5110,
5120
],
[
6797,
6807
]
],
[
[
174,
184
],
[
770,
780
]
],
[
[
208,
212
],
[
4156,
4160
],
[
8310,
8314
]
],
[
[
234,
242
]
],
[
[
244,
253
]
],
[
[
255,
259
],
[
1868,
1872
],
[
5332,
5336
]
],
[
[
285,
291
],
[
795,
801
]
],
[
[
316,
328
],
[
823,
835
]
],
[
[
330,
339
],
[
1036,
1045
]
],
[
[
341,
351
],
[
2382,
2392
]
],
[
[
353,
365
],
[
2104,
2116
],
[
2661,
2673
],
[
3456,
3468
],
[
3483,
3495
],
[
3718,
3730
],
[
5137,
5149
],
[
5164,
5176
],
[
5687,
5699
],
[
6294,
6306
],
[
6612,
6624
],
[
6753,
6765
],
[
7521,
7533
],
[
7548,
7560
]
],
[
[
367,
378
],
[
3249,
3260
]
],
[
[
380,
394
],
[
3387,
3401
],
[
5057,
5071
],
[
6124,
6138
],
[
6550,
6564
],
[
7472,
7486
]
],
[
[
418,
431
],
[
4431,
4434
],
[
8671,
8674
],
[
8834,
8837
]
],
[
[
476,
507
],
[
4633,
4645
],
[
9815,
9827
],
[
9861,
9873
]
],
[
[
538,
544
]
],
[
[
567,
578
],
[
4333,
4344
]
],
[
[
586,
588
],
[
668,
670
]
],
[
[
596,
598
],
[
4616,
4618
],
[
9766,
9768
],
[
9798,
9800
]
],
[
[
606,
612
],
[
4761,
4767
],
[
10021,
10027
],
[
10155,
10161
]
],
[
[
620,
631
],
[
9252,
9254
],
[
9259,
9261
],
[
9288,
9290
],
[
9295,
9297
],
[
9371,
9373
],
[
9577,
9579
]
],
[
[
633,
636
],
[
683,
686
],
[
721,
724
],
[
781,
784
],
[
802,
805
],
[
836,
839
],
[
2021,
2024
],
[
2590,
2593
],
[
3210,
3213
],
[
3339,
3342
],
[
4993,
4996
],
[
6041,
6044
],
[
6466,
6469
],
[
7401,
7404
],
[
10419,
10422
]
],
[
[
655,
665
],
[
710,
720
]
],
[
[
765,
767
],
[
1026,
1028
],
[
1084,
1086
],
[
1094,
1096
],
[
1136,
1138
],
[
1146,
1148
],
[
1205,
1207
],
[
1215,
1217
],
[
1274,
1276
],
[
1284,
1286
],
[
1333,
1335
],
[
1505,
1507
],
[
1561,
1563
],
[
1571,
1573
],
[
1614,
1616
],
[
1624,
1626
],
[
1636,
1638
],
[
1693,
1695
],
[
1703,
1705
],
[
1736,
1738
],
[
1746,
1748
],
[
1775,
1777
],
[
1785,
1787
],
[
1841,
1843
],
[
1851,
1853
],
[
2999,
3001
],
[
3028,
3030
],
[
3740,
3742
],
[
3773,
3775
],
[
3822,
3824
],
[
4139,
4141
],
[
5213,
5215
],
[
5709,
5711
],
[
5742,
5744
],
[
6190,
6192
],
[
6331,
6333
],
[
6363,
6365
],
[
6649,
6651
],
[
6998,
7000
],
[
7601,
7603
],
[
7929,
7931
],
[
8293,
8295
]
],
[
[
786,
792
],
[
2307,
2313
],
[
2827,
2833
]
],
[
[
807,
820
],
[
841,
854
],
[
876,
889
],
[
924,
937
]
],
[
[
954,
963
]
],
[
[
1021,
1025
],
[
985,
989
],
[
2229,
2233
],
[
2908,
2912
]
],
[
[
1493,
1504
],
[
3571,
3582
],
[
3839,
3850
],
[
3872,
3883
],
[
4165,
4176
],
[
4194,
4205
],
[
4223,
4234
],
[
5230,
5241
],
[
5250,
5261
],
[
5279,
5290
],
[
5540,
5551
],
[
6207,
6218
],
[
6666,
6677
],
[
7618,
7629
],
[
7647,
7658
],
[
7739,
7750
],
[
7946,
7957
],
[
7975,
7986
],
[
8067,
8078
],
[
8164,
8175
],
[
8319,
8330
],
[
8348,
8359
],
[
8447,
8458
],
[
8551,
8562
]
],
[
[
2065,
2070
]
],
[
[
2642,
2650
]
],
[
[
3235,
3241
]
],
[
[
3406,
3414
]
],
[
[
5076,
5088
]
],
[
[
6143,
6157
]
],
[
[
6569,
6583
]
],
[
[
7491,
7497
]
]
] |
import unittest
import mock
import numpy
import pytest
import cupy
from cupy import testing
from cupyx.scipy import sparse
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3, 2],
'k': [0, 1],
}))
@testing.with_requires('scipy')
class TestEye(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(
self.m, n=self.n, k=self.k, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
}))
@testing.with_requires('scipy')
class TestSpdiags(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_spdiags(self, xp, sp):
data = xp.arange(12, dtype=self.dtype).reshape(3, 4)
diags = xp.array([0, -1, 2], dtype='i')
x = sp.spdiags(data, diags, 3, 4)
return x
@testing.parameterize(*testing.product({
'random_method': ['random', 'rand'],
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
class TestRandom(unittest.TestCase):
def test_random(self):
x = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype)
self.assertEqual(x.shape, (3, 4))
self.assertEqual(x.dtype, self.dtype)
self.assertEqual(x.format, self.format)
def test_random_with_seed(self):
x = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=1)
self.assertEqual(x.shape, (3, 4))
self.assertEqual(x.dtype, self.dtype)
self.assertEqual(x.format, self.format)
y = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=1)
self.assertTrue((x.toarray() == y.toarray()).all())
def test_random_with_state(self):
state1 = cupy.random.RandomState(1)
x = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=state1)
self.assertEqual(x.shape, (3, 4))
self.assertEqual(x.dtype, self.dtype)
self.assertEqual(x.format, self.format)
state2 = cupy.random.RandomState(1)
y = getattr(sparse, self.random_method)(
3, 4, density=0.1,
format=self.format, dtype=self.dtype,
random_state=state2)
self.assertTrue((x.toarray() == y.toarray()).all())
def test_random_with_data_rvs(self):
if self.random_method == 'rand':
pytest.skip('cupyx.scipy.sparse.rand does not support data_rvs')
data_rvs = mock.MagicMock(side_effect=cupy.zeros)
x = getattr(sparse, self.random_method)(
3, 4, density=0.1, data_rvs=data_rvs,
format=self.format, dtype=self.dtype)
self.assertEqual(x.shape, (3, 4))
self.assertEqual(x.dtype, self.dtype)
self.assertEqual(x.format, self.format)
self.assertEqual(data_rvs.call_count, 1)
# Note that its value is generated randomly
self.assertIsInstance(data_rvs.call_args[0][0], int)
@testing.with_requires('scipy')
class TestRandomInvalidArgument(unittest.TestCase):
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_too_small_density(self, xp, sp):
sp.random(3, 4, density=-0.1)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_too_large_density(self, xp, sp):
sp.random(3, 4, density=1.1)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=NotImplementedError)
def test_invalid_dtype(self, xp, sp):
sp.random(3, 4, dtype='i')
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
'format': ['dia', 'csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestDiags(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_scalar_offset(self, xp, sp):
x = sp.diags(
xp.arange(16), offsets=0, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_single_element_lists(self, xp, sp):
x = sp.diags(
[xp.arange(16)], offsets=[0], dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_multiple(self, xp, sp):
x = sp.diags(
[xp.arange(15), xp.arange(16), xp.arange(15), xp.arange(13)],
offsets=[-1, 0, 1, 3],
dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_offsets_as_array(self, xp, sp):
x = sp.diags(
[xp.arange(15), xp.arange(16), xp.arange(15), xp.arange(13)],
offsets=xp.array([-1, 0, 1, 3]),
dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
@testing.numpy_cupy_allclose(sp_name='sp')
def test_diags_non_square(self, xp, sp):
x = sp.diags(
[xp.arange(5), xp.arange(3)],
offsets=[0, -2], shape=(5, 10),
dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x
| [
[
[
7,
15
],
[
389,
397
],
[
913,
921
],
[
1366,
1374
],
[
1825,
1833
],
[
4074,
4082
],
[
4779,
4787
]
],
[
[
24,
28
],
[
3521,
3525
]
],
[
[
36,
41
],
[
182,
187
],
[
197,
202
],
[
212,
217
],
[
229,
234
],
[
755,
760
],
[
770,
775
],
[
785,
790
],
[
802,
807
],
[
1246,
1251
],
[
1261,
1266
],
[
1276,
1281
],
[
1293,
1298
],
[
1736,
1741
],
[
1751,
1756
],
[
4617,
4622
],
[
4632,
4637
],
[
4647,
4652
],
[
4664,
4669
]
],
[
[
49,
55
],
[
3437,
3443
]
],
[
[
64,
68
],
[
2747,
2751
],
[
3091,
3095
],
[
3548,
3552
]
],
[
[
86,
93
],
[
128,
135
],
[
150,
157
],
[
344,
351
],
[
415,
422
],
[
701,
708
],
[
723,
730
],
[
863,
870
],
[
939,
946
],
[
1192,
1199
],
[
1214,
1221
],
[
1317,
1324
],
[
1392,
1399
],
[
1641,
1648
],
[
1663,
1670
],
[
4011,
4018
],
[
4100,
4107
],
[
4255,
4262
],
[
4409,
4416
],
[
4563,
4570
],
[
4585,
4592
],
[
4732,
4739
],
[
4805,
4812
],
[
5110,
5117
],
[
5426,
5433
],
[
5809,
5816
],
[
6210,
6217
]
],
[
[
118,
124
],
[
1893,
1899
],
[
2197,
2203
],
[
2492,
2498
],
[
2794,
2800
],
[
3138,
3144
],
[
3580,
3586
]
],
[
[
381,
388
]
],
[
[
900,
912
]
],
[
[
1354,
1365
]
],
[
[
1814,
1824
]
],
[
[
4048,
4073
]
],
[
[
4769,
4778
]
]
] |
import torch
import numpy as np;
from torch.autograd import Variable
def normal_std(x):
    # Rescale the unbiased (n-1 denominator) std returned by x.std() to the
    # population (n denominator) standard deviation.
    return x.std() * np.sqrt((len(x) - 1.) / (len(x)))
class Data_utility(object):
# train and valid is the ratio of training set and validation set. test = 1 - train - valid
def __init__(self, dSet, train, valid, cuda, horizon, window, normalize = 2):
self.cuda = cuda;
self.P = window;
self.h = horizon
self.rawdat = dSet
self.dat = np.zeros(self.rawdat.shape);
self.n, self.m = self.dat.shape;
        self.normalize = normalize
self.scale = np.ones(self.m);
self._normalized(normalize);
self._split(int(train * self.n), int((train+valid) * self.n), self.n);
self.scale = torch.from_numpy(self.scale).float();
tmp = self.test[1] * self.scale.expand(self.test[1].size(0), self.m);
if self.cuda:
self.scale = self.scale.cuda();
self.scale = Variable(self.scale);
self.rse = normal_std(tmp);
self.rae = torch.mean(torch.abs(tmp - torch.mean(tmp)));
def _normalized(self, normalize):
#normalized by the maximum value of entire matrix.
if (normalize == 0):
self.dat = self.rawdat
if (normalize == 1):
self.dat = self.rawdat / np.max(self.rawdat);
#normlized by the maximum value of each row(sensor).
if (normalize == 2):
for i in range(self.m):
self.scale[i] = np.max(np.abs(self.rawdat[:,i]));
self.dat[:,i] = self.rawdat[:,i] / np.max(np.abs(self.rawdat[:,i]));
def _split(self, train, valid, test):
train_set = range(self.P+self.h-1, train);
valid_set = range(train, valid);
test_set = range(valid, self.n);
self.train = self._batchify(train_set, self.h);
self.valid = self._batchify(valid_set, self.h);
self.test = self._batchify(test_set, self.h);
def _batchify(self, idx_set, horizon):
n = len(idx_set);
X = torch.zeros((n,self.P,self.m));
Y = torch.zeros((n,self.m));
for i in range(n):
end = idx_set[i] - self.h + 1;
start = end - self.P;
X[i,:,:] = torch.from_numpy(self.dat[start:end, :]);
Y[i,:] = torch.from_numpy(self.dat[idx_set[i], :]);
return [X, Y];
def get_batches(self, inputs, targets, batch_size, shuffle=True):
length = len(inputs)
if shuffle:
index = torch.randperm(length)
else:
index = torch.LongTensor(range(length))
start_idx = 0
while (start_idx < length):
end_idx = min(length, start_idx + batch_size)
excerpt = index[start_idx:end_idx]
X = inputs[excerpt]; Y = targets[excerpt];
# if (self.cuda):
# X = X.cuda();
# Y = Y.cuda();
yield Variable(X), Variable(Y);
start_idx += batch_size
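# Hedged usage sketch (not from the original file); the array shape and the
# split/window/horizon values below are illustrative only.
if __name__ == "__main__":
    dSet = np.random.rand(1000, 8)  # 1000 time steps, 8 sensors
    data = Data_utility(dSet, 0.6, 0.2, cuda=False, horizon=12, window=24 * 7)
    for X, Y in data.get_batches(data.train[0], data.train[1], batch_size=128):
        # X: [batch, window, sensors] inputs; Y: [batch, sensors] targets `horizon` steps ahead
        print(X.size(), Y.size())
        break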
| [
[
[
7,
12
],
[
753,
758
],
[
1055,
1060
],
[
1066,
1071
],
[
1082,
1087
],
[
2145,
2150
],
[
2189,
2194
],
[
2350,
2355
],
[
2413,
2418
],
[
2620,
2625
],
[
2677,
2682
]
],
[
[
20,
31
],
[
111,
113
],
[
472,
474
],
[
590,
592
],
[
1354,
1356
],
[
1546,
1548
],
[
1553,
1555
],
[
1631,
1633
],
[
1638,
1640
]
],
[
[
60,
68
],
[
969,
977
],
[
3039,
3047
],
[
3052,
3060
]
],
[
[
75,
85
],
[
1019,
1029
]
],
[
[
150,
162
]
]
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: device_administration_dictionary_attributes_policy_set_info
short_description: Information module for Device Administration Dictionary Attributes Policy Set
description:
- Get all Device Administration Dictionary Attributes Policy Set.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options: {}
requirements:
- ciscoisesdk
seealso:
# Reference by Internet resource
- name: Device Administration Dictionary Attributes Policy Set reference
description: Complete reference of the Device Administration Dictionary Attributes Policy Set object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Get all Device Administration Dictionary Attributes Policy Set
cisco.ise.device_administration_dictionary_attributes_policy_set_info:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
register: result
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: dict
sample: >
{
"response": [
{
"allowedValues": [
{
"isDefault": true,
"key": "string",
"value": "string"
}
],
"dataType": "string",
"description": "string",
"dictionaryName": "string",
"directionType": "string",
"id": "string",
"internalName": "string",
"name": "string"
}
],
"version": "string"
}
"""
| [
[
[
173,
186
]
],
[
[
850,
858
]
],
[
[
1179,
1185
]
]
] |
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
## This class is used by the CompoundNumericPlugTest.
class CompoundNumericNode( Gaffer.Node ) :
def __init__( self, name="CompoundNumericNode" ) :
Gaffer.Node.__init__( self, name )
self.addChild( Gaffer.V3fPlug( "p", Gaffer.Plug.Direction.In ) )
IECore.registerRunTimeTyped( CompoundNumericNode, typeName = "GafferTest::CompoundNumericNode" )
| [
[
[
1867,
1873
],
[
2147,
2153
]
],
[
[
1882,
1888
],
[
1971,
1977
],
[
2043,
2049
],
[
2096,
2102
],
[
2117,
2123
]
],
[
[
1950,
1969
],
[
2176,
2195
]
]
] |
# buildifier: disable=module-docstring
load("@rules_foreign_cc//tools/build_defs/shell_toolchain/toolchains:function_and_call.bzl", "FunctionAndCall")
_REPLACE_VALUE = "BAZEL_GEN_ROOT"
def os_name():
return "Fancy"
def pwd():
return "$(pwd)"
def echo(text):
return "printf \"{text}\"".format(text = text)
def export_var(name, value):
return "export {name}={value}".format(name = name, value = value)
def local_var(name, value):
return "local {name}={value}".format(name = name, value = value)
def use_var(name):
return "$" + name
def env():
return "env"
def path(expression):
return "export PATH=\"{expression}:$PATH\"".format(expression = expression)
def touch(path):
return "touch " + path
def mkdirs(path):
return "mkdir -p " + path
def if_else(condition, if_text, else_text):
return """
if [ {condition} ]; then
{if_text}
else
{else_text}
fi
""".format(condition = condition, if_text = if_text, else_text = else_text)
# buildifier: disable=function-docstring
def define_function(name, text):
lines = []
lines.append("function " + name + "() {")
for line_ in text.splitlines():
lines.append(" " + line_)
lines.append("}")
return "\n".join(lines)
def replace_in_files(dir, from_, to_):
return FunctionAndCall(
text = """if [ -d "$1" ]; then
find -L $1 -print -type f \\( -name "*.pc" -or -name "*.la" -or -name "*-config" -or -name "*.cmake" \\) -exec sed -i 's@'"$2"'@'"$3"'@g' {} ';'
fi
""",
)
def copy_dir_contents_to_dir(source, target):
return """cp -L -r --no-target-directory "{}" "{}" """.format(source, target)
def symlink_contents_to_dir(source, target):
text = """local target="$2"
mkdir -p $target
if [[ -f $1 ]]; then
##symlink_to_dir## $1 $target
return 0
fi
local children=$(find $1 -maxdepth 1 -mindepth 1)
for child in $children; do
##symlink_to_dir## $child $target
done
"""
return FunctionAndCall(text = text)
def symlink_to_dir(source, target):
text = """local target="$2"
mkdir -p ${target}
if [[ -d $1 ]]; then
ln -s -t ${target} $1
elif [[ -f $1 ]]; then
ln -s -t ${target} $1
elif [[ -L $1 ]]; then
cp --no-target-directory $1 ${target}
else
echo "Can not copy $1"
fi
"""
return FunctionAndCall(text = text)
def script_prelude():
return "set -euo pipefail"
def increment_pkg_config_path(source):
text = """local children=$(find $1 -mindepth 1 -name '*.pc')
# assume there is only one directory with pkg config
for child in $children; do
export PKG_CONFIG_PATH="$${PKG_CONFIG_PATH:-}$$:$(dirname $child)"
return
done
"""
return FunctionAndCall(text = text)
def cat(filepath):
return "cat \"{}\"".format(filepath)
def redirect_out_err(from_process, to_file):
return from_process + " &> " + to_file
def assert_script_errors():
return "set -e"
def cleanup_function(on_success, on_failure):
text = "\n".join([
"local ecode=$?",
"if [ $ecode -eq 0 ]; then",
on_success,
"else",
on_failure,
"fi",
])
return FunctionAndCall(text = text, call = "trap \"cleanup_function\" EXIT")
def children_to_path(dir_):
text = """if [ -d {dir_} ]; then
local tools=$(find $EXT_BUILD_DEPS/bin -maxdepth 1 -mindepth 1)
for tool in $tools;
do
if [[ -d \"$tool\" ]] || [[ -L \"$tool\" ]]; then
export PATH=$PATH:$tool
fi
done
fi""".format(dir_ = dir_)
return FunctionAndCall(text = text)
def define_absolute_paths(dir_, abs_path):
return "##replace_in_files## {dir_} {REPLACE_VALUE} {abs_path}".format(
dir_ = dir_,
REPLACE_VALUE = _REPLACE_VALUE,
abs_path = abs_path,
)
def replace_absolute_paths(dir_, abs_path):
return "##replace_in_files## {dir_} {abs_path} {REPLACE_VALUE}".format(
dir_ = dir_,
REPLACE_VALUE = _REPLACE_VALUE,
abs_path = abs_path,
)
| [
[
[
152,
166
],
[
3631,
3645
],
[
3848,
3862
]
],
[
[
191,
198
]
],
[
[
226,
229
]
],
[
[
258,
262
]
],
[
[
326,
336
]
],
[
[
426,
435
]
],
[
[
524,
531
]
],
[
[
566,
569
]
],
[
[
595,
599
]
],
[
[
698,
703
]
],
[
[
743,
749
]
],
[
[
792,
799
]
],
[
[
1028,
1043
]
],
[
[
1244,
1260
]
],
[
[
1516,
1540
]
],
[
[
1645,
1668
]
],
[
[
1970,
1984
]
],
[
[
2291,
2305
]
],
[
[
2345,
2370
]
],
[
[
2657,
2660
]
],
[
[
2718,
2734
]
],
[
[
2807,
2827
]
],
[
[
2856,
2872
]
],
[
[
3147,
3163
]
],
[
[
3471,
3492
]
],
[
[
3687,
3709
]
]
] |
import re
import uuid
from django.db import transaction
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.files import File
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from froide.account.services import AccountService
from froide.helper.text_utils import redact_subject
from froide.helper.storage import add_number_to_filename
from froide.helper.db_utils import save_obj_with_slug
from froide.problem.models import ProblemReport
from .models import FoiRequest, FoiMessage, RequestDraft, FoiProject, FoiAttachment
from .models.message import (
BOUNCE_TAG,
HAS_BOUNCED_TAG,
AUTO_REPLY_TAG,
BOUNCE_RESENT_TAG,
)
from .utils import (
generate_secret_address,
construct_initial_message_body,
get_publicbody_for_email,
redact_plaintext_with_request,
)
from .hooks import registry
from .tasks import create_project_requests, convert_attachment_task
User = get_user_model()
class BaseService(object):
def __init__(self, data, **kwargs):
self.data = data
self.kwargs = kwargs
def execute(self, request=None):
return self.process(request=request)
def generate_unique_secret_address(user):
while True:
address = generate_secret_address(user)
try:
FoiRequest.objects.get(secret_address=address)
except FoiRequest.DoesNotExist:
break
return address
class CreateRequestService(BaseService):
def process(self, request=None):
data = self.data
user = data["user"]
user_created = False
user_auth = user.is_authenticated
if not user_auth:
user, user_created = AccountService.create_user(**self.data)
self.data["user"] = user
if not user_created and not user_auth:
return self.create_token_draft(user)
if request is not None:
extra = registry.run_hook(
"pre_request_creation", request, user=user, data=data
)
if extra is not None:
data.update(extra)
if len(self.data["publicbodies"]) > 1:
foi_object = self.create_project()
else:
foi_object = self.create_request(self.data["publicbodies"][0])
if user_created:
AccountService(user).send_confirmation_mail(
request_id=foi_object.pk,
reference=foi_object.reference,
redirect_url=self.data.get("redirect_url"),
)
self.post_creation(foi_object)
return foi_object
def create_token_draft(self, user):
"""
User is not authenticated, but has given valid email.
Create a draft object with a token, send token to email.
"""
from .views import MakeRequestView
data = self.data
additional_kwargs = dict(
subject=data.get("subject", ""),
body=data.get("body", ""),
full_text=data.get("full_text", False),
public=data["public"],
reference=data.get("reference", ""),
law_type=data.get("law_type", ""),
)
flag_keys = set(MakeRequestView.FORM_CONFIG_PARAMS) | {"redirect_url"}
flags = {k: v for k, v in data.items() if k in flag_keys}
additional_kwargs["flags"] = flags
draft = RequestDraft.objects.create(
user=None, token=uuid.uuid4(), **additional_kwargs
)
draft.publicbodies.set(data["publicbodies"])
claim_url = reverse("foirequest-claim_draft", kwargs={"token": draft.token})
AccountService(user).send_confirm_action_mail(
claim_url,
draft.subject,
reference=draft.reference,
redirect_url=self.data.get("redirect_url"),
)
return draft
def create_project(self):
data = self.data
user = data["user"]
project = FoiProject(
title=data["subject"],
description=data["body"],
status=FoiProject.STATUS_PENDING,
public=data["public"],
user=user,
site=Site.objects.get_current(),
reference=data.get("reference", ""),
language=data.get("language", ""),
request_count=len(self.data["publicbodies"]),
)
save_obj_with_slug(project)
project.publicbodies.add(*data["publicbodies"])
if "tags" in data and data["tags"]:
project.tags.add(*data["tags"])
FoiProject.project_created.send(sender=project)
publicbody_ids = [pb.pk for pb in data["publicbodies"]]
extra = {"full_text": data.get("full_text", False)}
create_project_requests.delay(project.id, publicbody_ids, **extra)
return project
def create_request(self, publicbody, sequence=0):
data = self.data
user = data["user"]
now = timezone.now()
request = FoiRequest(
title=data["subject"],
public_body=publicbody,
user=data["user"],
description=data["body"],
public=data["public"],
language=data.get("language", ""),
site=Site.objects.get_current(),
reference=data.get("reference", ""),
first_message=now,
last_message=now,
project=data.get("project"),
project_order=data.get("project_order"),
)
send_now = False
if not user.is_active:
request.status = FoiRequest.STATUS.AWAITING_USER_CONFIRMATION
request.visibility = FoiRequest.VISIBILITY.INVISIBLE
else:
request.status = FoiRequest.STATUS.AWAITING_RESPONSE
request.determine_visibility()
send_now = True
request.secret_address = generate_unique_secret_address(user)
foilaw = None
if data.get("law_type"):
law_type = data["law_type"]
foilaw = publicbody.get_applicable_law(law_type=law_type)
if foilaw is None:
foilaw = publicbody.default_law
request.law = foilaw
request.jurisdiction = foilaw.jurisdiction
if send_now:
request.due_date = request.law.calculate_due_date()
if data.get("blocked"):
send_now = False
request.is_blocked = True
self.pre_save_request(request)
save_obj_with_slug(request, count=sequence)
if "tags" in data and data["tags"]:
request.tags.add(*data["tags"])
subject = "%s [#%s]" % (request.title, request.pk)
user_replacements = user.get_redactions()
message = FoiMessage(
request=request,
sent=False,
is_response=False,
sender_user=user,
sender_email=request.secret_address,
sender_name=user.display_name(),
timestamp=now,
status="awaiting_response",
subject=subject,
subject_redacted=redact_subject(subject, user_replacements),
)
send_address = bool(self.data.get("address"))
message.plaintext = construct_initial_message_body(
request,
text=data["body"],
foilaw=foilaw,
full_text=data.get("full_text", False),
send_address=send_address,
)
message.plaintext_redacted = redact_plaintext_with_request(
message.plaintext,
request,
)
message.recipient_public_body = publicbody
message.recipient = publicbody.name
message.recipient_email = publicbody.get_email(data.get("law_type"))
FoiRequest.request_to_public_body.send(sender=request)
message.save()
FoiRequest.request_created.send(
sender=request, reference=data.get("reference", "")
)
if send_now:
message.send()
message.save()
FoiRequest.message_sent.send(
sender=request,
message=message,
)
FoiRequest.request_sent.send(
sender=request, reference=data.get("reference", "")
)
return request
def pre_save_request(self, request):
pass
def post_creation(self, foi_object):
data = self.data
draft = data.get("draft")
if draft:
if isinstance(foi_object, FoiRequest):
draft.request = foi_object
draft.project = None
else:
draft.project = foi_object
draft.request = None
draft.save()
class CreateRequestFromProjectService(CreateRequestService):
def process(self, request=None):
data = self.data
pb = data["publicbody"]
return self.create_request(pb, sequence=data["project_order"])
class CreateSameAsRequestService(CreateRequestService):
def create_request(self, publicbody, sequence=0):
original_request = self.data["original_foirequest"]
sequence = original_request.same_as_count + 1
return super().create_request(publicbody, sequence=sequence)
def pre_save_request(self, request):
original_request = self.data["original_foirequest"]
request.same_as = original_request
request.campaign = original_request.campaign
request.not_publishable = original_request.not_publishable
class SaveDraftService(BaseService):
def process(self, request=None):
data = self.data
request_form = data["request_form"]
draft = request_form.cleaned_data.get("draft", None)
additional_kwargs = dict(
subject=request_form.cleaned_data.get("subject", ""),
body=request_form.cleaned_data.get("body", ""),
full_text=request_form.cleaned_data.get("full_text", False),
public=request_form.cleaned_data["public"],
reference=request_form.cleaned_data.get("reference", ""),
law_type=request_form.cleaned_data.get("law_type", ""),
)
if draft is None:
draft = RequestDraft.objects.create(user=request.user, **additional_kwargs)
else:
RequestDraft.objects.filter(id=draft.id).update(**additional_kwargs)
draft.publicbodies.set(data["publicbodies"])
return draft
class ReceiveEmailService(BaseService):
def process(self, request=None):
foirequest = self.kwargs["foirequest"]
publicbody = self.kwargs.get("publicbody", None)
email = self.data
subject = email.subject or ""
subject = subject[:250]
message_id = email.message_id or ""
if message_id:
message_id = message_id[:512]
recipient_name, recipient_email = self.get_recipient_name_email()
message = FoiMessage(
request=foirequest,
subject=subject,
email_message_id=message_id,
is_response=True,
sender_name=email.from_[0],
sender_email=email.from_[1],
recipient=recipient_name,
recipient_email=recipient_email,
plaintext=email.body,
html=email.html,
)
message.update_email_headers(email)
is_bounce = email.bounce_info.is_bounce
if not is_bounce:
if publicbody is None:
publicbody = get_publicbody_for_email(message.sender_email, foirequest)
if publicbody is None:
publicbody = foirequest.public_body
else:
publicbody = None
message.sender_public_body = publicbody
message.content_hidden = self.should_hide_content(email, foirequest, publicbody)
if email.date is None:
message.timestamp = timezone.now()
else:
message.timestamp = email.date
user_replacements = foirequest.user.get_redactions()
message.subject_redacted = redact_subject(message.subject, user_replacements)
message.plaintext_redacted = redact_plaintext_with_request(
message.plaintext,
foirequest,
redact_closing=True,
)
if is_bounce:
self.process_bounce_message(message)
return
message.save()
if email.is_auto_reply:
message.tags.add(AUTO_REPLY_TAG)
foirequest._messages = None
foirequest.status = FoiRequest.STATUS.AWAITING_CLASSIFICATION
foirequest.save()
self.add_attachments(foirequest, message, email.attachments)
foirequest.message_received.send(sender=foirequest, message=message)
def get_recipient_name_email(self):
foirequest = self.kwargs["foirequest"]
email = self.data
recipient_name, recipient_email = "", ""
if email.is_direct_recipient(foirequest.secret_address):
recipient_name = foirequest.user.display_name()
recipient_email = foirequest.secret_address
else:
try:
recipient_name = email.to[0][0]
recipient_email = email.to[0][1]
except IndexError:
pass
return recipient_name, recipient_email
def should_hide_content(self, email, foirequest, publicbody):
# Hide auto replies and bounces as they may expose sensitive info
if email.is_auto_reply or email.bounce_info.is_bounce:
return True
# Hide mediatior replies so it stays confidential by default
if (
foirequest.law
and foirequest.law.mediator
and publicbody == foirequest.law.mediator
):
return True
funcs = settings.FROIDE_CONFIG["hide_content_funcs"]
for func in funcs:
if func(email):
return True
return False
def process_bounce_message(self, message):
email = self.data
foirequest = self.kwargs["foirequest"]
# Find message
for mes in reversed(foirequest.messages):
if mes.recipient_email and mes.recipient_email in message.plaintext:
break
else:
mes = None
message.original = mes
message.save()
message.tags.add(BOUNCE_TAG)
if mes:
mes.tags.add(HAS_BOUNCED_TAG)
ProblemReport.objects.report(
message=mes or message,
kind="bounce_publicbody",
description=email.bounce_info.diagnostic_code or "",
auto_submitted=True,
)
foirequest._messages = None
foirequest.save()
self.add_attachments(foirequest, message, email.attachments)
def add_attachments(self, foirequest, message, attachments):
account_service = AccountService(foirequest.user)
names = set()
for i, attachment in enumerate(attachments):
att = FoiAttachment(
belongs_to=message,
name=attachment.name,
size=attachment.size,
filetype=attachment.content_type,
)
if not att.name:
att.name = _("attached_file_%d") % i
# Translators: replacement for person name in filename
repl = str(_("NAME"))
att.name = account_service.apply_name_redaction(att.name, repl)
att.name = re.sub(r"[^A-Za-z0-9_\.\-]", "", att.name)
att.name = att.name[:250]
# Assure name is unique
if att.name in names:
att.name = add_number_to_filename(att.name, i)
names.add(att.name)
if foirequest.not_publishable:
att.can_approve = False
attachment._committed = False
att.file = File(attachment)
att.save()
if att.can_convert_to_pdf():
self.trigger_convert_pdf(att.id)
def trigger_convert_pdf(self, att_id):
transaction.on_commit(lambda: convert_attachment_task.delay(att_id))
class ActivatePendingRequestService(BaseService):
def process(self, request=None):
if "request_id" in self.data:
try:
foirequest = FoiRequest.objects.get(id=self.data["request_id"])
except FoiRequest.DoesNotExist:
return None
else:
foirequest = self.data["foirequest"]
if request is not None and request.user != foirequest.user:
return
send_now = foirequest.set_status_after_change()
if send_now and foirequest.law:
foirequest.due_date = foirequest.law.calculate_due_date()
foirequest.save()
if send_now:
foirequest.safe_send_first_message()
FoiRequest.request_sent.send(sender=foirequest)
return foirequest
class ResendBouncedMessageService(BaseService):
def process(self, request=None):
message = self.data
if message.original:
message.tags.add(BOUNCE_RESENT_TAG)
return self.resend_message(message.original)
return self.resend_message(message)
def resend_message(self, sent_message):
sent_message.tags.remove(HAS_BOUNCED_TAG)
foirequest = sent_message.request
sent_message.recipient_email = foirequest.public_body.email
sent_message.sent = False
sent_message.save()
sent_message.force_resend()
return sent_message
| [
[
[
7,
9
],
[
15535,
15537
]
],
[
[
17,
21
],
[
3555,
3559
]
],
[
[
45,
56
],
[
16115,
16126
]
],
[
[
82,
90
],
[
5055,
5063
],
[
11939,
11947
]
],
[
[
115,
122
],
[
3673,
3680
]
],
[
[
155,
169
],
[
1055,
1069
]
],
[
[
210,
214
],
[
4279,
4283
],
[
5339,
5343
]
],
[
[
245,
249
],
[
15932,
15936
]
],
[
[
287,
304
],
[
15308,
15309
],
[
15425,
15426
]
],
[
[
329,
337
],
[
13854,
13862
]
],
[
[
375,
389
],
[
1799,
1813
],
[
2428,
2442
],
[
3746,
3760
],
[
14935,
14949
]
],
[
[
427,
441
],
[
7157,
7171
],
[
12107,
12121
]
],
[
[
476,
498
],
[
15714,
15736
]
],
[
[
534,
552
],
[
4479,
4497
],
[
6551,
6569
]
],
[
[
587,
600
],
[
14498,
14511
]
],
[
[
622,
632
],
[
1411,
1421
],
[
1473,
1483
],
[
5088,
5098
],
[
5668,
5678
],
[
5746,
5756
],
[
5821,
5831
],
[
7818,
7828
],
[
7905,
7915
],
[
8099,
8109
],
[
8220,
8230
],
[
8567,
8577
],
[
12582,
12592
],
[
16357,
16367
],
[
16427,
16437
],
[
16906,
16916
]
],
[
[
634,
644
],
[
6812,
6822
],
[
10981,
10991
]
],
[
[
646,
658
],
[
3497,
3509
],
[
10260,
10272
],
[
10354,
10366
]
],
[
[
660,
670
],
[
4073,
4083
],
[
4177,
4187
],
[
4661,
4671
]
],
[
[
672,
685
],
[
15061,
15074
]
],
[
[
720,
730
],
[
14419,
14429
]
],
[
[
736,
751
],
[
14472,
14487
],
[
17353,
17368
]
],
[
[
757,
771
],
[
12501,
12515
]
],
[
[
777,
794
],
[
17154,
17171
]
],
[
[
823,
846
],
[
1356,
1379
]
],
[
[
852,
882
],
[
7294,
7324
]
],
[
[
888,
912
],
[
11546,
11570
]
],
[
[
918,
947
],
[
7543,
7572
],
[
12195,
12224
]
],
[
[
970,
978
],
[
2026,
2034
]
],
[
[
998,
1021
],
[
4842,
4865
]
],
[
[
1023,
1046
],
[
16145,
16168
]
],
[
[
1048,
1052
]
],
[
[
1080,
1091
],
[
1564,
1575
],
[
9596,
9607
],
[
10525,
10536
],
[
16222,
16233
],
[
17016,
17027
]
],
[
[
1284,
1314
],
[
5962,
5992
]
],
[
[
1543,
1563
],
[
8823,
8843
],
[
9046,
9066
]
],
[
[
8791,
8822
]
],
[
[
9019,
9045
]
],
[
[
9579,
9595
]
],
[
[
10505,
10524
]
],
[
[
16192,
16221
]
],
[
[
16988,
17015
]
]
] |
"""
Django settings for Gallery project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from decouple import config,Csv
MODE = config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
INSTALLED_APPS = [
'bootstrap4',
'images.apps.ImagesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'Gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'Gallery.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
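# Illustrative only: the config()/Csv() lookups above expect an .env file (or
# real environment variables) along these lines; the key names match this
# module, the values are placeholders.
#
#   MODE=dev
#   SECRET_KEY=replace-me
#   DEBUG=True
#   DB_NAME=gallery
#   DB_USER=gallery_user
#   DB_PASSWORD=replace-me
#   DB_HOST=127.0.0.1
#   DATABASE_URL=postgres://gallery_user:replace-me@localhost:5432/gallery
#   ALLOWED_HOSTS=localhost,127.0.0.1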
| [
[
[
316,
318
],
[
1200,
1202
],
[
1216,
1218
],
[
1232,
1234
],
[
3469,
3471
],
[
3555,
3557
],
[
3707,
3709
]
],
[
[
327,
342
],
[
885,
900
],
[
980,
995
]
],
[
[
364,
370
],
[
381,
387
],
[
424,
430
],
[
453,
459
],
[
512,
518
],
[
645,
651
],
[
683,
689
],
[
725,
731
],
[
767,
773
],
[
928,
934
],
[
1079,
1085
]
],
[
[
371,
374
],
[
1108,
1111
]
],
[
[
376,
380
]
],
[
[
411,
421
]
],
[
[
445,
450
]
],
[
[
538,
547
],
[
1021,
1030
]
],
[
[
853,
862
],
[
1021,
1030
]
],
[
[
966,
977
],
[
1049,
1060
]
],
[
[
1063,
1076
]
],
[
[
1189,
1197
],
[
3482,
3490
],
[
3568,
3576
],
[
3720,
3728
]
],
[
[
1291,
1305
]
],
[
[
1549,
1559
]
],
[
[
2013,
2025
]
],
[
[
2044,
2053
]
],
[
[
2589,
2605
]
],
[
[
2744,
2768
]
],
[
[
3248,
3261
]
],
[
[
3273,
3282
]
],
[
[
3303,
3311
]
],
[
[
3320,
3328
]
],
[
[
3337,
3343
]
],
[
[
3455,
3466
]
],
[
[
3506,
3516
]
],
[
[
3530,
3546
]
],
[
[
3591,
3610
]
],
[
[
3672,
3681
]
],
[
[
3694,
3704
]
]
] |
"""SwaggerToSdk core tools.
"""
from enum import Enum, unique
import json
import logging
import os
import re
import tempfile
from pathlib import Path
import requests
from github import Github, UnknownObjectException
from .autorest_tools import (
autorest_latest_version_finder,
autorest_bootstrap_version_finder,
autorest_swagger_to_sdk_conf,
)
from azure_devtools.ci_tools.github_tools import get_files, GithubLink
_LOGGER = logging.getLogger(__name__)
CONFIG_FILE = "swagger_to_sdk_config_autorest.json"
CONFIG_FILE_DPG = "swagger_to_sdk_config_dpg.json"
DEFAULT_COMMIT_MESSAGE = "Generated from {hexsha}"
def build_file_content():
autorest_version = autorest_latest_version_finder()
autorest_bootstrap_version = autorest_bootstrap_version_finder()
return {
"autorest": autorest_version,
"autorest_bootstrap": autorest_bootstrap_version,
}
def get_repo_tag_meta(meta_conf):
repotag = meta_conf.get("repotag")
if repotag:
return repotag
# Guess for now, "repotag" should be added everywhere
if "go" in meta_conf["autorest_options"]:
return "azure-sdk-for-go"
if "ruby" in meta_conf["autorest_options"]:
return "azure-sdk-for-ruby"
if "java" in meta_conf["autorest_options"]:
return "azure-sdk-for-java"
if "nodejs" in meta_conf["autorest_options"]:
return "azure-sdk-for-node"
if "typescript" in meta_conf["autorest_options"]:
return "azure-sdk-for-js"
    raise ValueError("No repotag found or inferred")
@unique
class Language(str, Enum):
GOLANG = "go"
RUBY = "ruby"
JAVA = "java"
NODEJS = "nodejs"
CSHARP = "csharp"
PYTHON = "python"
TYPESCRIPT = "typescript"
def get_language_from_conf(meta_conf):
"""Detect the language based on the default Autorest options.
Assuming all language use --mylanguage in the config file.
If I don't find anything, well just say I don't know...
This is based on autorest language flags.
:rtype: Language
"""
autorest_options_lang = set(meta_conf["autorest_options"].keys())
languages = set()
for value in Language:
if value in autorest_options_lang:
languages.add(value)
if not languages:
_LOGGER.warning("No detected language from this conf")
return None # I don't what this conf is about?
language = languages.pop()
if languages:
_LOGGER.warning("This SwaggerToSdk conf seems to generate too much language in one call, assume we don't know")
return None
return language
def get_context_tag_from_git_object(git_object):
files_list = [file.filename for file in get_files(git_object)]
return get_context_tag_from_file_list(files_list)
def get_context_tag_from_file_list(files_list):
context_tags = set()
for filename in files_list:
filepath = Path(filename)
filename = filepath.as_posix()
if "/examples/" in filename:
# Do not compute context for example that are not used in SDK
continue
# Match if RP name
match = re.match(r"specification/(.*)/Microsoft.\w*/(stable|preview)/", filename, re.I)
if match:
context_tags.add(match.groups()[0])
continue
# Match if stable/preview but not RP like ARM (i.e. Cognitive Services)
match = re.match(r"specification/(.*)/(stable|preview)/", filename, re.I)
if match:
context_tags.add(match.groups()[0])
continue
# Match Readme
# Do it last step, because if some weird Readme for ServiceFabric...
match = re.match(r"specification/(.*)/readme.\w*.?md", filename, re.I)
if match:
context_tags.add(match.groups()[0])
continue
# No context-tags
return context_tags
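# Illustrative example (the path is hypothetical): a changed file such as
#   "specification/storage/resource-manager/Microsoft.Storage/stable/2019-06-01/storage.json"
# is matched by the first pattern in get_context_tag_from_file_list above and
# yields the context tag "storage/resource-manager".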
def this_conf_will_generate_for_this_pr(git_object, config):
"""Try to guess if this PR has a chance to generate something for this conf.
Right now, just match the language in the conf with the presence
of ONLY "readme.language.md" files.
"""
lang = get_language_from_conf(config)
filenames = [file.filename.lower() for file in get_files(git_object)]
readme_lang = [name for name in filenames if re.match(r"(.*)readme.\w+.md", name)]
if len(readme_lang) != len(filenames):
return True # This means there is files that are not language specific readme
return bool([name for name in readme_lang if name.endswith("readme.{}.md".format(lang))])
def get_readme_files_from_git_object(git_object, base_dir=Path(".")):
files_list = [file.filename for file in get_files(git_object)]
return get_readme_files_from_file_list(files_list, base_dir)
def get_readme_files_from_file_list(files_list, base_dir=Path(".")):
"""Get readme files from this PR.
Algo is to look for context, and then search for Readme inside this context.
"""
readme_files = set()
context_tags = get_context_tag_from_file_list(files_list)
for context_tag in context_tags:
expected_folder = Path(base_dir) / Path("specification/{}".format(context_tag))
if not expected_folder.is_dir():
_LOGGER.warning("From context {} I didn't find folder {}".format(context_tag, expected_folder))
continue
for expected_readme in [l for l in expected_folder.iterdir() if l.is_file()]:
# Need to do a case-insensitive test.
match = re.match(r"readme.\w*.?md", expected_readme.name, re.I)
if match:
readme_files.add(expected_readme.relative_to(Path(base_dir)))
return readme_files
def read_config(sdk_git_folder, config_file):
"""Read the configuration file and return JSON"""
config_path = os.path.join(sdk_git_folder, config_file)
with open(config_path, "r") as config_fd:
return json.loads(config_fd.read())
def read_config_from_github(sdk_id, branch="main", gh_token=None):
raw_link = str(get_configuration_github_path(sdk_id, branch))
_LOGGER.debug("Will try to download: %s", raw_link)
_LOGGER.debug("Token is defined: %s", gh_token is not None)
headers = {"Authorization": "token {}".format(gh_token)} if gh_token else {}
response = requests.get(raw_link, headers=headers)
if response.status_code != 200:
raise ValueError(
"Unable to download conf file for SDK {} branch {}: status code {}".format(
sdk_id, branch, response.status_code
)
)
return json.loads(response.text)
def extract_conf_from_readmes(swagger_files_in_pr, restapi_git_folder, sdk_git_id, config, force_generation=False):
readme_files_in_pr = {
readme for readme in swagger_files_in_pr if getattr(readme, "name", readme).lower().endswith("readme.md")
}
for readme_file in readme_files_in_pr:
build_swaggertosdk_conf_from_json_readme(
readme_file, sdk_git_id, config, base_folder=restapi_git_folder, force_generation=force_generation
)
def get_readme_path(readme_file, base_folder="."):
"""Get a readable Readme path.
If start with http, assume online, ignore base_folder and convert to raw link if necessary.
If base_folder is not None, assume relative to base_folder.
"""
if not isinstance(readme_file, Path) and readme_file.startswith("http"):
return GithubLink.from_string(readme_file).as_raw_link()
else:
if base_folder is None:
base_folder = "."
return str(Path(base_folder) / Path(readme_file))
def build_swaggertosdk_conf_from_json_readme(readme_file, sdk_git_id, config, base_folder=".", force_generation=False):
"""Get the JSON conf of this README, and create SwaggerToSdk conf.
Readme path can be any readme syntax accepted by autorest.
readme_file will be project key as-is.
:param str readme_file: A path that Autorest accepts. Raw GH link or absolute path.
:param str sdk_dit_id: Repo ID. IF org/login is provided, will be stripped.
:param dict config: Config where to update the "projects" key.
:param bool force_generation: If no Swagger to SDK section is found, force once with the Readme as input
"""
readme_full_path = get_readme_path(readme_file, base_folder)
with tempfile.TemporaryDirectory() as temp_dir:
readme_as_conf = autorest_swagger_to_sdk_conf(readme_full_path, temp_dir, config)
generated_config = {
"markdown": readme_full_path,
}
sdk_git_short_id = sdk_git_id.split("/")[-1].lower()
_LOGGER.info("Looking for tag {} in readme {}".format(sdk_git_short_id, readme_file))
for swagger_to_sdk_conf in readme_as_conf:
if not isinstance(swagger_to_sdk_conf, dict):
continue
repo = swagger_to_sdk_conf.get("repo", "")
repo = repo.split("/")[-1].lower() # Be sure there is no org/login part
if repo == sdk_git_short_id:
_LOGGER.info("This Readme contains a swagger-to-sdk section for repo {}".format(repo))
generated_config.update(
{
"autorest_options": swagger_to_sdk_conf.get("autorest_options", {}),
"after_scripts": swagger_to_sdk_conf.get("after_scripts", []),
}
)
config.setdefault("projects", {})[str(readme_file)] = generated_config
return generated_config
else:
_LOGGER.info("Skip mismatch {} from {}".format(repo, sdk_git_short_id))
if not force_generation:
_LOGGER.info(
"Didn't find tag {} in readme {}. Did you forget to update the SwaggerToSdk section?".format(
sdk_git_short_id, readme_file
)
)
else:
_LOGGER.info("Didn't find tag {} in readme {}. Forcing it.".format(sdk_git_short_id, readme_file))
config.setdefault("projects", {})[str(readme_file)] = generated_config
def get_input_paths(global_conf, local_conf):
"""Returns a 2-tuple:
- Markdown Path or None
- Input-file Paths or empty list
"""
del global_conf # Unused
relative_markdown_path = None # Markdown is optional
input_files = [] # Input file could be empty
if "markdown" in local_conf:
relative_markdown_path = Path(local_conf["markdown"])
input_files = local_conf.get("autorest_options", {}).get("input-file", [])
if input_files and not isinstance(input_files, list):
input_files = [input_files]
input_files = [Path(input_file) for input_file in input_files]
if not relative_markdown_path and not input_files:
raise ValueError("No input file found")
return (relative_markdown_path, input_files)
def solve_relative_path(autorest_options, sdk_root):
"""Solve relative path in conf.
If a key is prefixed by "sdkrel:", it's solved against SDK root.
"""
SDKRELKEY = "sdkrel:"
solved_autorest_options = {}
for key, value in autorest_options.items():
if key.startswith(SDKRELKEY):
_LOGGER.debug("Found a sdkrel pair: %s/%s", key, value)
subkey = key[len(SDKRELKEY) :]
solved_value = Path(sdk_root, value).resolve()
solved_autorest_options[subkey] = str(solved_value)
else:
solved_autorest_options[key] = value
return solved_autorest_options
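# Worked example (paths illustrative): with sdk_root="/git/azure-sdk-for-python",
# solve_relative_path({"sdkrel:output-folder": "sdk/storage", "namespace": "azure.mgmt.storage"}, sdk_root)
# returns {"output-folder": "/git/azure-sdk-for-python/sdk/storage",
#          "namespace": "azure.mgmt.storage"}.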
def get_configuration_github_path(sdk_id, branch="master"):
return GithubLink(sdk_id, "raw", branch, CONFIG_FILE)
| [
[
[
49,
53
],
[
1570,
1574
]
],
[
[
55,
61
],
[
1543,
1549
]
],
[
[
69,
73
],
[
5874,
5878
],
[
6532,
6536
]
],
[
[
81,
88
],
[
442,
449
]
],
[
[
96,
98
],
[
5771,
5773
]
],
[
[
106,
108
],
[
3108,
3110
],
[
3182,
3184
],
[
3371,
3373
],
[
3431,
3433
],
[
3640,
3642
],
[
3697,
3699
],
[
4267,
4269
],
[
5471,
5473
],
[
5521,
5523
]
],
[
[
116,
124
],
[
8293,
8301
]
],
[
[
145,
149
],
[
4591,
4595
],
[
4794,
4798
],
[
2879,
2883
],
[
5083,
5087
],
[
5100,
5104
],
[
5610,
5614
],
[
7329,
7333
],
[
7527,
7531
],
[
7547,
7551
],
[
10283,
10287
],
[
10504,
10508
],
[
11156,
11160
]
],
[
[
158,
166
],
[
6254,
6262
]
],
[
[
187,
193
]
],
[
[
195,
217
]
],
[
[
253,
283
],
[
677,
707
]
],
[
[
289,
322
],
[
743,
776
]
],
[
[
328,
356
],
[
8361,
8389
]
],
[
[
409,
418
],
[
2676,
2685
],
[
4195,
4204
],
[
4647,
4656
]
],
[
[
420,
430
],
[
7386,
7396
],
[
11423,
11433
]
],
[
[
432,
439
],
[
2259,
2266
],
[
2428,
2435
],
[
5198,
5205
],
[
6042,
6049
],
[
6098,
6105
],
[
8556,
8563
],
[
8945,
8952
],
[
9436,
9443
],
[
9545,
9552
],
[
9753,
9760
],
[
11030,
11037
]
],
[
[
471,
482
],
[
11457,
11468
]
],
[
[
523,
538
]
],
[
[
575,
597
]
],
[
[
632,
650
]
],
[
[
900,
917
]
],
[
[
1556,
1564
],
[
2142,
2150
]
],
[
[
1733,
1755
],
[
4113,
4135
]
],
[
[
2587,
2618
]
],
[
[
2759,
2789
],
[
2710,
2740
],
[
4977,
5007
]
],
[
[
3846,
3881
]
],
[
[
4537,
4569
]
],
[
[
4741,
4772
],
[
4681,
4712
]
],
[
[
5657,
5668
]
],
[
[
5909,
5932
]
],
[
[
6564,
6589
]
],
[
[
7043,
7058
],
[
8242,
8257
]
],
[
[
7572,
7612
],
[
6874,
6914
]
],
[
[
9937,
9952
]
],
[
[
10710,
10729
]
],
[
[
11356,
11385
],
[
5991,
6020
]
]
] |
import torch
from ptstat.core import RandomVariable, _to_v
class Categorical(RandomVariable):
"""
Categorical over 0,...,N-1 with arbitrary probabilities, 1-dimensional rv, long type.
"""
def __init__(self, p=None, p_min=1E-6, size=None, cuda=False):
super(Categorical, self).__init__()
if size:
assert len(size) == 2, str(size)
p = _to_v(1 / size[1], size, cuda)
else:
assert len(p.size()) == 2, str(p.size())
assert torch.min(p.data) >= 0, str(torch.min(p.data))
assert torch.max(torch.abs(torch.sum(p.data, 1) - 1)) <= 1E-5
self._p = torch.clamp(p, p_min)
def _size(self):
return self._p.size()[0], 1 # Type is Long.
def _log_pdf(self, x):
return torch.log(self._p.gather(1, x)).squeeze()
def _sample(self):
return self._p.multinomial(1, True)
def _entropy(self):
return - torch.sum(self._p * torch.log(self._p), 1).squeeze()
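# Hedged usage sketch: exercises only the private hooks defined above; the
# public sample()/log_pdf() wrappers are assumed to live on RandomVariable in
# ptstat.core. Written against the old Variable-based PyTorch API this library targets.
if __name__ == "__main__":
    from torch.autograd import Variable
    p = Variable(torch.Tensor([[0.2, 0.8], [0.7, 0.3]]))  # two rvs over two classes
    rv = Categorical(p)
    x = rv._sample()          # LongTensor of shape (2, 1): one class index per rv
    print(rv._log_pdf(x))     # log-probability of each sampled index
    print(rv._entropy())      # per-rv entropy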
| [
[
[
7,
12
],
[
504,
509
],
[
532,
537
],
[
566,
571
],
[
576,
581
],
[
586,
591
],
[
639,
644
],
[
779,
784
],
[
931,
936
],
[
951,
956
]
],
[
[
37,
51
],
[
79,
93
]
],
[
[
53,
58
],
[
391,
396
]
],
[
[
67,
78
],
[
283,
294
]
]
] |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .http_check import HTTPCheck
__all__ = ['__version__', 'HTTPCheck']
| [
[
[
139,
150
]
],
[
[
175,
184
]
],
[
[
186,
193
]
]
] |
import unittest
from localstack.utils.aws import aws_stack
class SSMTest(unittest.TestCase):
def test_describe_parameters(self):
ssm_client = aws_stack.connect_to_service("ssm")
response = ssm_client.describe_parameters()
self.assertIn("Parameters", response)
self.assertIsInstance(response["Parameters"], list)
def test_put_parameters(self):
ssm_client = aws_stack.connect_to_service("ssm")
ssm_client.put_parameter(
Name="test_put",
Description="test",
Value="123",
Type="String",
)
self._assert("test_put", "test_put")
self._assert("/test_put", "test_put")
def test_hierarchical_parameter(self):
ssm_client = aws_stack.connect_to_service("ssm")
ssm_client.put_parameter(
Name="/a/b/c",
Value="123",
Type="String",
)
self._assert("/a/b/c", "/a/b/c")
self._assert("/a//b//c", "/a/b/c")
self._assert("a/b//c", "/a/b/c")
def test_get_secret_parameter(self):
ssm_client = aws_stack.connect_to_service("ssm")
sec_client = aws_stack.connect_to_service("secretsmanager")
secret_name = "test_secret"
sec_client.create_secret(
Name=secret_name,
SecretString="my_secret",
Description="testing creation of secrets",
)
result = ssm_client.get_parameter(
Name="/aws/reference/secretsmanager/{0}".format(secret_name)
)
self.assertEqual(
"/aws/reference/secretsmanager/{0}".format(secret_name),
result.get("Parameter").get("Name"),
)
self.assertEqual("my_secret", result.get("Parameter").get("Value"))
source_result = result.get("Parameter").get("SourceResult")
self.assertTrue(source_result is not None, "SourceResult should be present")
self.assertTrue(type(source_result) is str, "SourceResult should be a string")
def test_get_inexistent_secret(self):
ssm_client = aws_stack.connect_to_service("ssm")
self.assertRaises(
ssm_client.exceptions.ParameterNotFound,
ssm_client.get_parameter,
Name="/aws/reference/secretsmanager/inexistent",
)
def test_get_parameters_and_secrets(self):
ssm_client = aws_stack.connect_to_service("ssm")
sec_client = aws_stack.connect_to_service("secretsmanager")
secret_path = "/aws/reference/secretsmanager/"
param_name = "test_param"
ssm_client.put_parameter(
Name=param_name,
Description="test",
Value="123",
Type="String",
)
secret_name = "test_secret_params"
sec_client.create_secret(
Name=secret_name,
SecretString="my_secret",
Description="testing creation of secrets",
)
complete_secret = secret_path + secret_name
response = ssm_client.get_parameters(
Names=[
param_name,
complete_secret,
"inexistent_param",
secret_path + "inexistent_secret",
]
)
found = response.get("Parameters")
not_found = response.get("InvalidParameters")
for param in found:
self.assertIn(param["Name"], [param_name, complete_secret])
for param in not_found:
self.assertIn(param, ["inexistent_param", secret_path + "inexistent_secret"])
def _assert(self, search_name, param_name):
ssm_client = aws_stack.connect_to_service("ssm")
def do_assert(result):
self.assertGreater(len(result), 0)
self.assertEqual(param_name, result[0]["Name"])
self.assertEqual("123", result[0]["Value"])
response = ssm_client.get_parameter(Name=search_name)
do_assert([response["Parameter"]])
response = ssm_client.get_parameters(Names=[search_name])
do_assert(response["Parameters"])
def test_get_parameters_by_path_and_filter_by_labels(self):
ssm_client = aws_stack.connect_to_service("ssm")
path = "/my/path"
value = "value"
param = ssm_client.put_parameter(Name=path, Value=value, Type="String")
ssm_client.label_parameter_version(
Name=path, ParameterVersion=param["Version"], Labels=["latest"]
)
list_of_params = ssm_client.get_parameters_by_path(
Path="/my", ParameterFilters=[{"Key": "Label", "Values": ["latest"]}]
)
self.assertEqual("/my/path", list_of_params["Parameters"][0]["Name"])
| [
[
[
7,
15
],
[
76,
84
]
],
[
[
50,
59
],
[
157,
166
],
[
409,
418
],
[
760,
769
],
[
1109,
1118
],
[
1166,
1175
],
[
2080,
2089
],
[
2374,
2383
],
[
2431,
2440
],
[
3617,
3626
],
[
4149,
4158
]
],
[
[
68,
75
]
]
] |
import json
import os
import sys
from . import uploader
from . import processing
from . import exif_read
def verify_mapillary_tag(filepath):
filepath_keep_original = processing.processed_images_rootpath(filepath)
if os.path.isfile(filepath_keep_original):
filepath = filepath_keep_original
"""
Check that image file has the required Mapillary tag
"""
return exif_read.ExifRead(filepath).mapillary_tag_exists()
def upload(
import_path,
skip_subfolders=False,
number_threads=None,
max_attempts=None,
video_import_path=None,
dry_run=False,
):
"""
Upload local images to Mapillary
Args:
import_path: Directory path to where the images are stored.
verbose: Print extra warnings and errors.
skip_subfolders: Skip images stored in subdirectories.
Returns:
Images are uploaded to Mapillary and flagged locally as uploaded.
"""
# in case of video processing, adjust the import path
if video_import_path:
# sanity check if video file is passed
if not os.path.isdir(video_import_path) and not os.path.isfile(
video_import_path
):
print(
"Error, video path " + video_import_path + " does not exist, exiting..."
)
sys.exit(1)
# set sampling path
video_sampling_path = "mapillary_sampled_video_frames"
video_dirname = (
video_import_path
if os.path.isdir(video_import_path)
else os.path.dirname(video_import_path)
)
import_path = (
os.path.join(os.path.abspath(import_path), video_sampling_path)
if import_path
else os.path.join(os.path.abspath(video_dirname), video_sampling_path)
)
# basic check for all
if not import_path or not os.path.isdir(import_path):
print(f"Error, import directory {import_path} does not exist, exiting...")
sys.exit(1)
# get list of file to process
total_file_list = uploader.get_total_file_list(import_path, skip_subfolders)
upload_file_list = uploader.get_upload_file_list(import_path, skip_subfolders)
success_file_list = uploader.get_success_upload_file_list(
import_path, skip_subfolders
)
to_finalize_file_list = uploader.get_finalize_file_list(
import_path, skip_subfolders
)
if len(success_file_list) == len(total_file_list):
print("All images have already been uploaded")
else:
# verify the images in the upload list, they need to have the image
# description and certain MAP properties
upload_file_list = [f for f in upload_file_list if verify_mapillary_tag(f)]
if not len(upload_file_list) and not len(to_finalize_file_list):
print("No images to upload.")
print(
'Please check if all images contain the required Mapillary metadata. If not, you can use "mapillary_tools process" to add them'
)
sys.exit(1)
if upload_file_list:
# get upload params for the manual upload images, group them per sequence
# and separate direct upload images
params = {}
list_per_sequence_mapping = {}
direct_upload_file_list = []
for image in upload_file_list:
log_root = uploader.log_rootpath(image)
# read upload params
upload_params_path = os.path.join(
log_root, "upload_params_process.json"
)
if os.path.isfile(upload_params_path):
with open(upload_params_path, "r") as fp:
params[image] = json.load(fp)
sequence = params[image]["key"]
list_per_sequence_mapping.setdefault(sequence, []).append(image)
else:
direct_upload_file_list.append(image)
# read image descriptions
description_path = os.path.join(
log_root, "mapillary_image_description.json"
)
if not os.path.isfile(description_path):
raise RuntimeError(
f"Please run process first because {description_path} is not generated"
)
with open(description_path, "r") as fp:
description = json.load(fp)
assert not set(description).intersection(
params.get(image, {})
), f"Parameter conflicting {description} and {params.get(image, {})}"
params.setdefault(image, {}).update(description)
# inform how many images are to be uploaded and how many are being skipped
# from upload
print(
f"Uploading {len(upload_file_list)} images with valid mapillary tags (Skipping {len(total_file_list) - len(upload_file_list)})"
)
if direct_upload_file_list:
raise RuntimeError(
f"Found {len(direct_upload_file_list)} files for direct upload which is not supported in v4"
)
total_sequences = len(list_per_sequence_mapping)
for idx, sequence_uuid in enumerate(list_per_sequence_mapping):
metadata = {
"total_sequences": total_sequences,
"sequence_idx": idx,
}
uploader.upload_sequence_v4(
list_per_sequence_mapping[sequence_uuid],
sequence_uuid,
params,
metadata=metadata,
dry_run=dry_run,
)
if to_finalize_file_list:
params = {}
sequences = []
for image in to_finalize_file_list:
log_root = uploader.log_rootpath(image)
upload_params_path = os.path.join(
log_root, "upload_params_process.json"
)
if os.path.isfile(upload_params_path):
with open(upload_params_path, "rb") as jf:
image_params = json.load(jf)
sequence = image_params["key"]
if sequence not in sequences:
params[image] = image_params
sequences.append(sequence)
uploader.flag_finalization(to_finalize_file_list)
uploader.print_summary(upload_file_list)
| [
[
[
7,
11
],
[
3740,
3744
],
[
4451,
4455
],
[
6237,
6241
]
],
[
[
19,
21
],
[
227,
229
],
[
1080,
1082
],
[
1121,
1123
],
[
1487,
1489
],
[
1537,
1539
],
[
1618,
1620
],
[
1631,
1633
],
[
1726,
1728
],
[
1739,
1741
],
[
1859,
1861
],
[
3492,
3494
],
[
3602,
3604
],
[
4049,
4051
],
[
4169,
4171
],
[
5989,
5991
],
[
6099,
6101
]
],
[
[
29,
32
],
[
1312,
1315
],
[
1978,
1981
],
[
3034,
3037
]
],
[
[
48,
56
],
[
2047,
2055
],
[
2129,
2137
],
[
2213,
2221
],
[
2323,
2331
],
[
3388,
3396
],
[
5514,
5522
],
[
5923,
5931
],
[
6485,
6493
],
[
6540,
6548
]
],
[
[
71,
81
],
[
173,
183
]
],
[
[
96,
105
],
[
393,
402
]
],
[
[
112,
132
],
[
2704,
2724
]
],
[
[
451,
457
]
]
] |
import re
import click
from cloup import option, option_group
from ... import logger
def validate_scene_range(ctx, param, value):
try:
start = int(value)
return (start,)
except Exception:
pass
if value:
try:
start, end = map(int, re.split(r"[;,\-]", value))
return start, end
except Exception:
logger.error("Couldn't determine a range for -n option.")
exit()
def validate_resolution(ctx, param, value):
if value:
try:
start, end = map(int, re.split(r"[;,\-]", value))
return (start, end)
except Exception:
logger.error("Resolution option is invalid.")
exit()
render_options = option_group(
"Render Options",
option(
"-n",
"--from_animation_number",
callback=validate_scene_range,
help="Start rendering from n_0 until n_1. If n_1 is left unspecified, "
"renders all scenes after n_0.",
default=None,
),
option(
"-a",
"--write_all",
is_flag=True,
help="Render all scenes in the input file.",
default=None,
),
option(
"--format",
type=click.Choice(["png", "gif", "mp4", "webm", "mov"], case_sensitive=False),
default=None,
),
option("-s", "--save_last_frame", is_flag=True, default=None),
option(
"-q",
"--quality",
default=None,
type=click.Choice(["l", "m", "h", "p", "k"], case_sensitive=False),
help="""
            Render quality at the following resolutions and frame rates, respectively:
854x480 30FPS,
1280x720 30FPS,
1920x1080 60FPS,
2560x1440 60FPS,
3840x2160 60FPS
""",
),
option(
"-r",
"--resolution",
callback=validate_resolution,
default=None,
help="Resolution in (W,H) for when 16:9 aspect ratio isn't possible.",
),
option(
"--fps",
"--frame_rate",
"frame_rate",
type=float,
default=None,
help="Render at this frame rate.",
),
option(
"--renderer",
type=click.Choice(["cairo", "opengl", "webgl"], case_sensitive=False),
help="Select a renderer for your Scene.",
default=None,
),
option(
"--use_opengl_renderer",
is_flag=True,
help="Render scenes using OpenGL (Deprecated).",
default=None,
),
option(
"--use_webgl_renderer",
is_flag=True,
help="Render scenes using the WebGL frontend (Deprecated).",
default=None,
),
option(
"--webgl_renderer_path",
default=None,
type=click.Path(),
help="The path to the WebGL frontend.",
),
option(
"-g",
"--save_pngs",
is_flag=True,
default=None,
help="Save each frame as png (Deprecated).",
),
option(
"-i",
"--save_as_gif",
default=None,
is_flag=True,
help="Save as a gif (Deprecated).",
),
option(
"-s",
"--save_last_frame",
default=None,
is_flag=True,
help="Save last frame as png (Deprecated).",
),
option(
"-t",
"--transparent",
is_flag=True,
help="Render scenes with alpha channel.",
),
option(
"--use_projection_fill_shaders",
is_flag=True,
help="Use shaders for OpenGLVMobject fill which are compatible with transformation matrices.",
default=None,
),
option(
"--use_projection_stroke_shaders",
is_flag=True,
help="Use shaders for OpenGLVMobject stroke which are compatible with transformation matrices.",
default=None,
),
)
| [
[
[
7,
9
],
[
291,
293
],
[
571,
573
]
],
[
[
18,
23
],
[
1237,
1242
],
[
1489,
1494
],
[
2222,
2227
],
[
2764,
2769
]
],
[
[
42,
48
],
[
793,
799
],
[
1043,
1049
],
[
1196,
1202
],
[
1344,
1350
],
[
1411,
1417
],
[
1816,
1822
],
[
2012,
2018
],
[
2179,
2185
],
[
2371,
2377
],
[
2524,
2530
],
[
2688,
2694
],
[
2837,
2843
],
[
2990,
2996
],
[
3136,
3142
],
[
3295,
3301
],
[
3425,
3431
],
[
3632,
3638
]
],
[
[
50,
62
],
[
753,
765
]
],
[
[
80,
86
],
[
387,
393
],
[
669,
675
]
],
[
[
93,
113
],
[
867,
887
]
],
[
[
470,
489
],
[
1879,
1898
]
],
[
[
736,
750
]
]
] |
from setuptools import setup, find_packages
setup(
name="intent_classifier",
version="0.2.0",
packages=find_packages(),
include_package_data=True,
install_requires=["numpy", "scipy", "PyMySQL", "scikit-learn==0.20.3"]
)
| [
[
[
23,
28
],
[
45,
50
]
],
[
[
30,
43
],
[
116,
129
]
]
] |
from django.contrib import admin
from application.models import Profile
# Register your models here.
admin.site.register(Profile)
| [
[
[
27,
32
],
[
102,
107
]
],
[
[
64,
71
],
[
122,
129
]
]
] |
#!/usr/local/bin/python
'''
pyAero_geometry
Holds the Python Aerodynamic Analysis Classes (base and inherited).
Copyright (c) 2008 by Dr. Ruben E. Perez
All rights reserved. Not to be used for commercial purposes.
Revision: 1.1 $Date: 21/05/2008 21:00$
Developers:
-----------
- Dr. Ruben E. Perez (RP)
History
-------
v. 1.0 - Initial Class Creation (RP, 2008)
'''
__version__ = '$Revision: $'
'''
To Do:
-
'''
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys
import pdb
# =============================================================================
# External Python modules
# =============================================================================
import numpy
# =============================================================================
# Extension modules
# =============================================================================
# =============================================================================
# Misc Definitions
# =============================================================================
# =============================================================================
# Geometry Class
# =============================================================================
class Geometry(object):
'''
Abstract Class for Geometry Object
'''
def __init__(self, name={},CGPercent = 0.25,ForeSparPercent = 0.25,
RearSparPercent = 0.75,StaticMarginPercent=0.05,
ForeThickCon = 0.01, RearThickCon = 0.99,
rootOffset = 0.01, tipOffset=0.01,
xRootec=0.0, yRootec=0.0, zRootec=0.0,
*args, **kwargs):
'''
Flow Class Initialization
Keyword Arguments:
------------------
name -> STRING: Geometry Instance Name
Attributes:
-----------
Documentation last updated: May. 21, 2008 - Ruben E. Perez
'''
#
self.name = name
self.CGPercent = CGPercent
self.ForeSparPercent = ForeSparPercent
self.RearSparPercent = RearSparPercent
self.StaticMarginPercent = StaticMarginPercent
self.ForeThickCon = ForeThickCon
self.RearThickCon = RearThickCon
self.tipOffset = tipOffset
self.rootOffset = rootOffset
self.xRootec = xRootec
self.yRootec = yRootec
self.zRootec = zRootec
def ListAttributes(self):
'''
Print Structured Attributes List
Documentation last updated: May. 21, 2008 - Ruben E. Perez
'''
ListAttributes(self)
def __str__(self):
'''
Print Structured List of Variable
Documentation last updated: May. 21, 2008 - Ruben E. Perez
'''
return ('name \n'+' '+str(self.name).center(9) )
#==============================================================================
#
#==============================================================================
def ListAttributes(self):
'''
Print Structured Attributes List
Documentation last updated: March. 24, 2008 - Ruben E. Perez
'''
print('\n')
print('Attributes List of: ' + repr(self.__dict__['name']) + ' - ' + self.__class__.__name__ + ' Instance\n')
    self_keys = sorted(self.__dict__.keys())  # sorted() works on both Python 2 and 3 dict keys
for key in self_keys:
if key != 'name':
print(str(key) + ' : ' + repr(self.__dict__[key]))
#end
#end
print('\n')
#==============================================================================
# Flow Test
#==============================================================================
if __name__ == '__main__':
print('Testing ...')
# Test Variable
geo = Geometry(name = 'test')
geo.ListAttributes()
print(geo)
| [
[
[
381,
392
]
],
[
[
627,
629
]
],
[
[
631,
634
]
],
[
[
642,
645
]
],
[
[
840,
845
]
],
[
[
1394,
1402
],
[
4090,
4098
]
],
[
[
3254,
3268
],
[
2793,
2807
]
],
[
[
4084,
4087
],
[
4118,
4121
],
[
4149,
4152
]
]
] |
from orchespy import device
from orchespy.devicetype import CUDAGPU, Host, VE
import sys
import pytest
import numpy as np
if "cupy" in sys.modules:
import cupy as cp
if "nlcpy" in sys.modules:
import nlcpy as vp
no_nlcpy = pytest.mark.skipif(
"nlcpy" not in sys.modules, reason=' test require nlcpy. ')
no_cupy = pytest.mark.skipif(
"cupy" not in sys.modules, reason=' test require cupy. ')
# for tests with an argument
@device(Host, numpy_module_arg='xp')
def create_array_init_5_at_host(shape, dtype, order, xp):
return xp.full(shape, 5, dtype=dtype, order=order)
@device(CUDAGPU, numpy_module_arg='xp')
def create_array_init_5_at_gpu(shape, dtype, order, xp):
return xp.full(shape, 5, dtype=dtype, order=order)
@device(VE, numpy_module_arg='xp')
def create_array_init_5_at_ve(shape, dtype, order, xp):
return xp.full(shape, 5, dtype=dtype, order=order)
@pytest.mark.parametrize('shape', [(2), (2, 2), (2, 2, 2), (2, 3), (2, 3, 4)])
@pytest.mark.parametrize('dtype', [
'i4', 'i8', 'u4', 'u8', 'f4', 'f8', 'c8', 'c16'
])
@pytest.mark.parametrize('order', ['C', 'F'])
class TestDeviceArgs:
def test_device_args_host(self, shape, dtype, order):
y = create_array_init_5_at_host(shape, dtype, order)
assert(isinstance(y, np.ndarray))
expected = np.full(shape, 5, dtype=dtype, order=order)
assert((y == expected).all())
@no_cupy
def test_device_args_gpu(self, shape, dtype, order):
y = create_array_init_5_at_gpu(shape, dtype, order)
assert(isinstance(y, cp.ndarray))
expected = cp.full(shape, 5, dtype=dtype, order=order)
assert((y == expected).all())
@no_nlcpy
def test_device_args_ve(self, shape, dtype, order):
y = create_array_init_5_at_ve(shape, dtype, order)
assert(isinstance(y, vp.ndarray))
expected = vp.full(shape, 5, dtype=dtype, order=order)
assert((y == expected).all())
| [
[
[
21,
27
],
[
450,
456
],
[
602,
608
],
[
756,
762
]
],
[
[
60,
67
],
[
609,
616
]
],
[
[
69,
73
],
[
457,
461
]
],
[
[
75,
77
],
[
763,
765
]
],
[
[
85,
88
],
[
137,
140
],
[
186,
189
],
[
277,
280
],
[
374,
377
]
],
[
[
96,
102
],
[
234,
240
],
[
332,
338
],
[
904,
910
],
[
983,
989
],
[
1078,
1084
]
],
[
[
111,
122
],
[
1293,
1295
],
[
1325,
1327
]
],
[
[
161,
171
],
[
1567,
1569
],
[
1599,
1601
]
],
[
[
210,
221
],
[
1840,
1842
],
[
1872,
1874
]
],
[
[
223,
231
],
[
1687,
1695
]
],
[
[
322,
329
],
[
1413,
1420
]
],
[
[
490,
517
],
[
1215,
1242
]
],
[
[
645,
671
],
[
1490,
1516
]
],
[
[
794,
819
],
[
1764,
1789
]
],
[
[
1129,
1143
]
]
] |
# !/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2021
# All rights reserved
# @Author: 'Wu Dong <[email protected]>'
# @Time: '6/29/21 10:49 AM'
# sys
import typing as t
from threading import Lock
from threading import get_ident
# 3p
import sqlalchemy
from sqlalchemy import (
orm,
schema,
)
from sqlalchemy.engine import make_url
from sqlalchemy.orm import (
declarative_base,
DeclarativeMeta,
Session as SessionBase
)
# project
from mask.globals import current_app
from .model import (
DefaultMeta,
Model
)
if t.TYPE_CHECKING:
from mask import Mask
__version__ = "1.0.0a1"
class BindSession(SessionBase):
def __init__(self, db, autocommit=False, autoflush=True, **options):
""" 此Session可以根据binds映射关系,自动找到响应的engine
"""
self.db = db
self.app = db.get_app()
bind = options.pop("bind", None) or db.engine
binds = options.pop("binds", db.get_binds(self.app))
SessionBase.__init__(
self,
autocommit=autocommit,
autoflush=autoflush,
bind=bind,
binds=binds,
**options
)
def get_bind(self, mapper=None, **kwargs):
""" 根据mapper映射信息找出合适的engine
:param mapper: Model -> table 映射
"""
if mapper is not None:
            # Only available in SQLAlchemy >= 1.3
persist_selectable = mapper.persist_selectable
            # Read the bind_key
info = getattr(persist_selectable, "info", {})
bind_key = info.get("bind_key")
if bind_key is not None:
                # Fetch the pre-configured engine and create an _EngineConnector instance
return self.db.get_engine(self.app, bind=bind_key)
        # By default, fall back to the parent class's get_bind method
return super().get_bind(mapper, **kwargs)
class _EngineConnector:
def __init__(self, sa, app, bind=None):
""" 初始化engine连接器,一个数据库对应一个Connector
"""
self._sa = sa
self._app = app
self._engine = None
self._bind = bind
self._connect_for = None
self._lock = Lock()
def get_uri(self):
""" 获取当前bind的uri
"""
        # By default, skip the single-database connection style
if self._bind is None:
return None
        # Handling for multiple database binds
binds = self._app.config.get("SQLALCHEMY_BINDS") or ()
if self._bind not in binds:
raise RuntimeError(f"Bind {self._bind!r} is not configure in 'SQLALCHEMY_BINDS'.")
return binds[self._bind]
def get_engine(self):
with self._lock:
            # Read the database connection URI
uri = self.get_uri()
if uri == self._connect_for:
return self._engine
            # Parse and format the options in the connection URL, then create the real engine
sa_url, options = self.get_options(make_url(uri))
self._engine = self._sa.create_engine(sa_url, options)
self._connect_for = uri
return self._engine
def dispose(self):
""" 销毁Engine
"""
if not self._engine:
return
self._engine.dispose()
        # Reset resources
self._engine = None
self._connect_for = None
def get_options(self, sa_url):
""" 获取所有可选项目
"""
options = {}
options.update(self._app.config["SQLALCHEMY_ENGINE_OPTIONS"])
options.update(self._sa._engine_options)
return sa_url, options
class _QueryProperty:
def __init__(self, sa):
self.sa = sa
def __get__(self, obj, cls): # pylint: disable=inconsistent-return-statements
try:
mapper = orm.class_mapper(cls)
if mapper:
return cls.query_class(mapper, session=self.sa.session())
except orm.exc.UnmappedClassError:
return None
def _include_sqlalchemy(obj, _):
""" 将原生SQLAlchemy的模块注册到Glib SQLAlchemy 中
"""
for module in sqlalchemy, sqlalchemy.orm:
for key in module.__all__:
if not hasattr(obj, key):
setattr(obj, key, getattr(module, key))
class SQLAlchemy:
Query = None
def __init__(
self,
app: t.Optional["Mask"] = None,
session_options: t.Optional[dict] = None,
metadata: t.Optional["schema.MetaData"] = None,
query_class: t.Optional["orm.Query"] = orm.Query,
model_class: t.Optional["Model"] = Model,
engine_options: t.Optional[dict] = None,
) -> None:
""" 创建一个SQLAlchemy用于替代原始的类型
"""
self.app = app
self.Query = query_class
self.session = self.create_scoped_session(session_options)
self.Model = self.make_declarative_base(model_class, metadata)
self._engine_lock = Lock()
self._engine_options = engine_options or {}
self.connectors = {}
_include_sqlalchemy(self, query_class)
if app is not None:
self.init_app(app)
@property
def engine(self):
""" 构造属性,创建engine
"""
return self.get_engine()
def get_engine(self, app: t.Optional["Mask"] = None, bind: str = None):
""" 依据bind创建一个engine
"""
app = self.get_app(app)
with self._engine_lock:
connector = self.connectors.get(bind)
if connector is None:
connector = _EngineConnector(self, self.get_app(app), bind)
self.connectors[bind] = connector
return connector.get_engine()
def _dispose_all_engine(self):
""" 运行时更新配置时,账号密码有可能会发生变化,所以需要销毁所有数据库连接
TIPS: 此操作会导致正在运行的请求失败
"""
with self._engine_lock:
for _, connector in self.connectors.items():
connector.dispose()
self.connectors.clear()
def create_engine(self, sa_url, engine_opts):
""" 创建engine
:param sa_url: 格式化后的url
:param engine_opts: 连接参数
"""
return sqlalchemy.create_engine(sa_url, **engine_opts)
def create_scoped_session(self, options=None):
""" 创建session
"""
options = options or {}
scope_func = options.pop("scopefunc", get_ident)
options.setdefault("query_cls", self.Query)
return orm.scoped_session(self.create_session(options), scopefunc=scope_func)
def create_session(self, options):
""" 创建session
"""
return orm.sessionmaker(class_=BindSession, db=self, **options)
def make_declarative_base(self, model, matadata=None):
""" 利用 SQAlchemy 工厂函数,创建模型基类
:param model: 用户定义模型基类,或者实例
:param matadata: 元数据,状态所有表结构
"""
if not isinstance(model, DeclarativeMeta):
model = declarative_base(cls=model, name="Model", metadata=matadata, metaclass=DefaultMeta)
if not getattr(model, "query_class", None):
model.query_class = self.Query
model.query = _QueryProperty(self)
return model
def get_binds(self, app=None):
""" 获取当前的所有binds
"""
app = self.get_app(app)
binds = [None] + list(app.config.get("SQLALCHEMY_BINDS") or ())
ret_val = {}
for bind in binds:
engine = self.get_engine(app, bind)
tables = self.get_tables_for_bind(bind)
ret_val.update({table: engine for table in tables})
return ret_val
def init_app(self, app):
""" glib扩展形式,初始化SQLAlchemy扩展
"""
        # TODO: pull the app from the thread pool
self.app = app
app.config.setdefault("SQLALCHEMY_BINDS", None)
app.config.setdefault("SQLALCHEMY_ENGINE_OPTIONS", {})
        # If the configuration changed, all old connections need to be released again
        # This covers configurations that are updated dynamically at runtime
self._dispose_all_engine()
app.extensions["SQLAlchemy"] = self
@app.teardown_appcontext
def shutdown_session(exc): # pylint: disable=unused-variable
""" Shutdown session when error
"""
self.session.remove()
return exc
def get_app(self, reference_app=None):
""" 获取当前的Application
"""
if reference_app is not None:
return reference_app
        # Look up the current app
if current_app:
return current_app._get_current_object()
if self.app is not None:
return self.app
raise RuntimeError(
"No application fund."
)
def get_tables_for_bind(self, bind=None):
""" 查询绑定的数据库下面的所有表
"""
result = []
for table in self.Model.metadata.tables.values():
if table.info.get("bind_key") == bind:
result.append(table)
return result
| [
[
[
179,
190
],
[
561,
562
],
[
4089,
4090
],
[
4145,
4146
],
[
4192,
4193
],
[
4255,
4256
],
[
4317,
4318
],
[
4374,
4375
],
[
5020,
5021
]
],
[
[
213,
217
],
[
2080,
2084
],
[
4685,
4689
]
],
[
[
240,
249
],
[
6091,
6100
]
],
[
[
262,
272
],
[
3840,
3850
],
[
3852,
3862
],
[
5878,
5888
]
],
[
[
302,
305
],
[
4281,
4284
],
[
3548,
3551
],
[
3682,
3685
],
[
6169,
6172
],
[
6329,
6332
]
],
[
[
311,
317
]
],
[
[
351,
359
],
[
2756,
2764
]
],
[
[
393,
409
],
[
6640,
6656
]
],
[
[
415,
430
],
[
6602,
6617
]
],
[
[
436,
458
],
[
650,
661
],
[
976,
987
]
],
[
[
496,
507
],
[
8091,
8102
],
[
8123,
8134
]
],
[
[
533,
544
],
[
6711,
6722
]
],
[
[
550,
555
],
[
4339,
4344
]
],
[
[
599,
603
]
],
[
[
606,
617
]
],
[
[
638,
649
],
[
6353,
6364
]
],
[
[
1807,
1823
],
[
5284,
5300
]
],
[
[
3364,
3378
],
[
6843,
6857
]
],
[
[
3740,
3759
],
[
4782,
4801
]
],
[
[
4005,
4015
]
]
] |
# Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
meta = MetaData()
cluster = Table(
'clusters', meta,
Column('id', Integer, primary_key=True),
Column('deleted', Integer, default=None),
Column('name', String(255), default=None),
Column('enabled', Boolean, default=False),
Column('status', String(36), default=1),
Column('updated_at', DateTime, default=None),
Column('created_at', DateTime, default=None),
Column('deleted_at', DateTime, default=None)
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
cluster.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
cluster.drop()
| [
[
[
615,
622
],
[
1026,
1033
]
],
[
[
646,
652
],
[
870,
876
],
[
915,
921
],
[
961,
967
],
[
1008,
1014
],
[
1055,
1061
],
[
1100,
1106
],
[
1150,
1156
],
[
1200,
1206
]
],
[
[
676,
684
],
[
1121,
1129
],
[
1171,
1179
],
[
1221,
1229
]
],
[
[
708,
715
],
[
883,
890
],
[
933,
940
]
],
[
[
739,
747
],
[
815,
823
]
],
[
[
771,
777
],
[
976,
982
],
[
1072,
1078
]
],
[
[
801,
806
],
[
837,
842
]
],
[
[
808,
812
],
[
860,
864
],
[
1282,
1286
],
[
1367,
1371
]
],
[
[
827,
834
],
[
1313,
1320
],
[
1398,
1405
]
],
[
[
1253,
1260
]
],
[
[
1336,
1345
]
]
] |
"""SCons.Tool.sunf77
Tool-specific initialization for sunf77, the Sun Studio F77 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf77.py 2014/08/24 12:12:31 garyo"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf77', 'f77']
def generate(env):
"""Add Builders and construction variables for sunf77 to an Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f77'
env['FORTRAN'] = fcomp
env['F77'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF77'] = '$F77'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| [
[
[
1365,
1377
]
],
[
[
1449,
1459
],
[
1853,
1858
],
[
1917,
1922
]
],
[
[
1487,
1501
],
[
1637,
1651
]
],
[
[
1503,
1512
],
[
1681,
1690
],
[
1993,
2002
]
],
[
[
1538,
1546
]
],
[
[
1958,
1964
]
]
] |
# Package: values
# Date: 11th April 2010
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Values
This defines the Value object used by components and events.
"""
from types import ListType
from itertools import imap
from events import Event
class ValueChanged(Event):
"""Value Changed Event
This Event is triggered when the return Value of an Event Handler has
    changed its value.
"""
def __init__(self, value):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
super(ValueChanged, self).__init__(value)
class Value(object):
"""Create a new future Value Object
Creates a new future Value Object which is used by Event Objects and the
    Manager to store the result(s) of an Event Handler's execution of some
Event in the system.
:param event: The Event this Value is associated with.
:type event: Event instance
:param manager: The Manager/Component used to trigger notifications.
:type manager: A Manager/Component instance.
    :param onSet: The channel used when triggering ValueChanged events.
:type onSet: A (channel, target) tuple.
:ivar result: True if this value has been changed.
    :ivar errors: True if while setting this value an exception occurred.
This is a Future/Promise implementation.
"""
def __init__(self, event=None, manager=None, onSet=None):
"x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
self.event = event
self.manager = manager
self.onSet = onSet
self.result = False
self.errors = False
self._parent = self
self._value = None
def __getstate__(self):
keys = ("event", "onSet", "result", "errors", "_value")
return dict([(k, getattr(self, k, None)) for k in keys])
def __contains__(self, y):
value = self.value
return y in value if type(value) is ListType else y == value
def __getitem__(self, y):
v = self.value[y]
if isinstance(v, Value):
return v.value
else:
return v
def __iter__(self):
return imap(lambda v: v.value if isinstance(v, Value) else v,
self.value)
def __repr__(self):
"x.__repr__() <==> repr(x)"
value = ""
if self.result:
value = repr(self.value)
format = "<Value (%s) result: %r errors: %r for %r"
return format % (value, self.result, self.errors, self.event)
def __str__(self):
"x.__str__() <==> str(x)"
return str(self.value)
def getValue(self):
value = self._value
while isinstance(value, Value):
value = value._value
return value
def setValue(self, value):
if isinstance(value, Value):
value._parent = self
if self.result and type(self._value) is ListType:
self._value.append(value)
elif self.result:
self._value = [self._value]
self._value.append(value)
else:
self._value = value
def notify(o, v):
if not isinstance(v, Value) and v is not None:
o.result = True
if o.manager is not None and o.onSet is not None:
o.manager.fireEvent(ValueChanged(o), *o.onSet)
elif isinstance(v, Value):
o.errors = v.errors
o.result = v.result
if not o._parent == o:
notify(o._parent, v)
notify(self, value)
value = property(getValue, setValue, None, "Value of this Value")
| [
[
[
208,
216
],
[
1955,
1963
],
[
2916,
2924
]
],
[
[
239,
243
],
[
2172,
2176
]
],
[
[
264,
269
],
[
290,
295
]
],
[
[
277,
289
],
[
558,
570
],
[
3338,
3350
]
],
[
[
602,
607
],
[
2062,
2067
],
[
2703,
2708
],
[
2826,
2831
],
[
2212,
2217
],
[
3174,
3179
],
[
3396,
3401
]
]
] |
"""
Deploy semi-supervised PU machine learning models.
This module provides classes for training, testing, and deploying a PU
learning model for predicting material synthesizability. Utility functions
for plotting aid in visualizing and analyzing results.
References:
[1] DOI: 10.1021/acsnano.8b08014
[2] DOI: 10.1145/1401890.1401920
[3] DOI: 10.1016/j.patrec.2013.06.010
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_recall_fscore_support
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import RepeatedKFold
from sklearn.utils import resample
from mpl_toolkits.mplot3d import Axes3D
from monty.serialization import dumpfn
import pandas as pd
import seaborn as sns
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
__author__ = "Nathan C. Frey, Jin Wang"
__copyright__ = "MIT License"
__version__ = "0.0.1"
__maintainer__ = "Nathan C. Frey"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Aug 2017"
class PULearner:
def __init__(self):
"""A machine learning model that predicts material synthesizability.
Positive samples are experimentally synthesized materials. Unlabeled
samples are not-yet synthesized materials.
Features for training data might be generated by first-principles
(density functional theory) calculations, or structural or chemical
data looked up from a table.
Hyperparameters are initialized with sensible defaults, but any newly
trained model should have hyperparams carefully converged.
Attributes:
pu_stats (dict): Outputs of cv_baggingDT
df_U (DataFrame): Unlabeled data.
df_P (DataFrame): Positive data.
synth_scores (list): Synthesizability scores (between 0 and 1) of
unlabeled samples.
labels (list): Likely synthesizable (1) or not (0)
feat_importances (DataFrame): Feature importances from trained
decision tree classifiers. Index corresponds to feature index
in original data.
"""
self.pu_stats = None
self.df_U = None
self.df_P = None
self.synth_scores = None
self.labels = None
self.feat_importances = None
def cv_baggingDT(self, pu_data, splits=10, repeats=10, bags=100, filename=""):
"""
Train bagged decision tree base classifiers and do repeated
k-fold CV.
Synthesizability scores (0 = not synthesizable, 1 = already
synthesized) are generated for an unlabeled sample by averaging
the scores from the ensemble of decision tree classifiers that
have not been trained on that sample.
Args:
pu_data (json): A file where each row describes a material.
There MUST be a column called "PU_label" where a 1 value
indicates a synthesized (positive) compound and a 0 value
indicates an unlabeled compound.
splits (int): Number of splits in k-fold CV.
repeats (int): Number of repeated k-fold CV.
bags (int): Number of bags in bootstrap aggregation.
filename (string): Save model training results to file with
filename ending in .json or .pkl.
Returns:
pu_stats (dict): Metrics and outputs of PU learning model
training.
"""
print("Start PU Learning.")
# Preprocess data and set attributes
df = pd.read_json(pu_data)
df_P, df_U, X_P, X_U = self._process_pu_data(df)
self.df_P = df_P
self.df_U = df_U
# Split data into training and test splits for k-fold CV
kfold = RepeatedKFold(n_splits=splits, n_repeats=repeats, random_state=42)
# Scores for PU learning (tpr = True Positive Rate)
scores = []
tprs = []
# Predicted synthesis probability of CVed P and U sets
prob_P = np.ones(shape=(X_P.shape[0], splits * repeats))
prob_U = -np.ones(shape=(X_U.shape[0], splits * repeats))
# Feature importance
feat_rank = np.zeros(shape=(X_P.shape[1], splits * repeats))
idsp = 0 # index of repeated k splits
# Loop over P and U training/test samples
for (ptrain, ptest), (utrain, utest) in zip(kfold.split(X_P), kfold.split(X_U)):
# Number of P and U training samples
N_ptrain = X_P[ptrain].shape[0]
N_utrain = X_U[utrain].shape[0]
d = X_P.shape[1]
K = N_ptrain
train_label = np.zeros(shape=(N_ptrain + K,))
train_label[:N_ptrain] = 1.0 # Synthesized (positive)
# Out of bag samples
n_oob = np.zeros(shape=(N_utrain,))
f_oob = np.zeros(shape=(N_utrain, 2))
# Sums of probabilities of test sets
f_ptest = np.zeros(shape=(X_P[ptest].shape[0], 2))
f_utest = np.zeros(shape=(X_U[utest].shape[0], 2))
# Bootstrap resampling for each bag
for i in range(bags):
bootstrap_sample = np.random.choice(
np.arange(N_utrain), replace=True, size=K
)
# Positive samples and bootstrapped unlabeled samples
data_bootstrap = np.concatenate(
(X_P[ptrain], X_U[bootstrap_sample, :]), axis=0
)
# Train decision tree classifier
model = DecisionTreeClassifier(
max_depth=None,
max_features=None,
criterion="gini",
class_weight="balanced",
)
model.fit(data_bootstrap, train_label)
# Index for the oob samples
idx_oob = sorted(
set(range(N_utrain)) - set(np.unique(bootstrap_sample))
)
# Transductive learning on oob samples
f_oob[idx_oob] += model.predict_proba(X_U[utrain][idx_oob])
n_oob[idx_oob] += 1
f_ptest += model.predict_proba(X_P[ptest])
f_utest += model.predict_proba(X_U[utest])
feat_rank[:, idsp] = model.feature_importances_
# Predicted synthesis probabilities of unlabeled samples
predict_utrain = f_oob[:, 1] / n_oob
# Predicted probabilities for P and U test sets
predict_ptest = f_ptest[:, 1] / bags
predict_utest = f_utest[:, 1] / bags
# Find predicted positives
true_pos = predict_ptest[np.where(predict_ptest > 0.5)].shape[0]
u_pos = predict_utest[np.where(predict_utest > 0.5)].shape[0]
N_ptest = X_P[ptest].shape[0]
N_utest = X_U[utest].shape[0]
# Predicted positive ratio in test set
p_pred_pos = (true_pos + u_pos) / (N_ptest + N_utest) + 0.0001
# Compute PU recall (TPR) and score metrics
recall = true_pos / N_ptest
score = recall ** 2 / p_pred_pos
scores.append(score)
tprs.append(recall)
# Predicted probabilities
prob_P[ptest, idsp] = predict_ptest
prob_U[utrain, idsp] = predict_utrain
prob_U[utest, idsp] = predict_utest
idsp += 1
# Progress update
if (idsp + 1) % splits == 0:
tpr_tmp = np.asarray(tprs[-splits - 1 : -1])
print(
"Performed Repeated "
+ str(splits)
+ "-fold: "
+ str(idsp // splits + 1)
+ " out of "
+ str(repeats)
)
print(
"True Positive Rate: %0.2f (+/- %0.2f)"
% (tpr_tmp.mean(), tpr_tmp.std() * 2)
)
# Predicted labels from k-fold CV
label_U = np.zeros(shape=(X_U.shape[0], splits * repeats + 1), dtype=int)
label_U[:, : splits * repeats][np.where(prob_U > 0.5)] = 1
label_U[:, splits * repeats] = np.sum(
label_U[:, : splits * repeats + 1], axis=1
)
tprs = np.asarray(tprs)
scores = np.asarray(scores)
# Metrics for each model in the k-folds
label_U_rp = np.zeros(shape=(X_U.shape[0], repeats), dtype=int)
prob_U_rp = np.zeros(shape=(X_U.shape[0], repeats))
feat_rank_rp = np.zeros(shape=(X_U.shape[1], repeats))
tpr_rp = np.zeros(shape=(repeats,))
scores_rp = np.zeros(shape=(repeats,))
labels = np.zeros(shape=(X_U.shape[0],))
for i in range(repeats):
prob_U_rp[:, i] = prob_U[:, i * splits : (i + 1) * splits].mean(axis=1)
feat_rank_rp[:, i] = feat_rank[:, i * splits : (i + 1) * splits].mean(
axis=1
)
tpr_rp[i] = tprs[i * splits : (i + 1) * splits].mean()
scores_rp[i] = scores[i * splits : (i + 1) * splits].mean()
label_U_rp[np.where(prob_U_rp > 0.5)] = 1
prob = prob_U_rp.mean(axis=1)
labels[np.where(prob > 0.5)] = 1
# Get confidence interval of TPR for each kfold
tpr_low, tpr_up = self.bootstrapCI(tpr_rp)
scores_low, scores_up = self.bootstrapCI(scores_rp)
# PU learning metrics
metrics = np.asarray(
[tpr_rp.mean(), tpr_low, tpr_up, scores_rp.mean(), scores_low, scores_up]
)
print("Accuracy: %0.2f" % (tpr_rp.mean()))
print("95%% confidence interval: [%0.2f, %0.2f]" % (tpr_low, tpr_up))
# Metrics and results from training / testing
pu_stats = {
"prob": prob,
"labels": labels,
"metrics": metrics,
"prob_rp": prob_U_rp,
"label_rp": label_U_rp,
"tpr_rp": tpr_rp,
"scores_rp": scores_rp,
"feat_rank_rp": feat_rank_rp,
}
# Save results
if filename:
if filename.endswith(".json"):
dumpfn(pu_stats, filename)
if filename.endswith(".pkl"):
with open(filename, "wb") as file:
pickle.dump(pu_stats, file, protocol=pickle.HIGHEST_PROTOCOL)
self.pu_stats = pu_stats
return pu_stats
def bootstrapCI(self, data, ci=95, ns=10000):
"""Compute confidence interval of the TPR.
Args:
data (array): Array of TPRs for each kfold.
ci (int): Confidence interval.
ns (int): Number of bootstrap resamplings.
Returns:
lower (float): Lower endpoint of CI.
upper (float): Upper endpoint of CI.
"""
bs_rsample = []
for _ in range(ns):
rsample = resample(data, n_samples=len(data))
bs_rsample.append(np.mean(rsample))
bs_rsample = np.asarray(bs_rsample)
lower = np.percentile(bs_rsample, (100 - ci) / 2)
upper = np.percentile(bs_rsample, ci + (100 - ci) / 2)
return lower, upper
def corr_heatmap(self, num_feats=10, fname=""):
"""Plot correlation matrix between synthesizability and features.
cv_baggingDT must be run first.
Args:
num_feats (int): How many features to consider.
fname (str): Filename if correlation plot should be saved.
Returns:
None (generates plots)
"""
pu_stats = self.pu_stats
df_U = self.df_U
df_U_copy = df_U.drop(columns=["PU_label"])
# Get normalized, sorted & ranked list of most important features
synth_scores = pu_stats["prob"]
df_U_copy["synth_score"] = synth_scores
# Make correlation matrix of top "num_feats" features
corrmat = df_U_copy.corr()
cols = corrmat.nlargest(num_feats, "synth_score")["synth_score"].index
cm = np.corrcoef(df_U_copy[cols].values.T)
sns.set(style='ticks')
rcParams['figure.dpi'] = 300
fig, ax = plt.subplots(1, 1)
hm = sns.heatmap(
cm,
ax=ax,
cbar=True,
annot=True,
square=True,
fmt=".2f",
annot_kws={"size": 7},
yticklabels=cols.values,
xticklabels=cols.values,
)
if fname:
self.save_plot(fname + ".png", fig, ax)
def get_feat_importances(self, plot_format=""):
"""Process output from PU learning k-fold cross validation.
cv_baggingDT must be run first.
If plot_format is specified, a feature importance plot will
be saved.
Args:
plot_format (str): svg, png, or pdf file format for saving simple
visualizations of feature importance and correlation.
"""
pu_stats = self.pu_stats
# Feature importances for individual repetitions of kfold CV
feat_rank_rp = pu_stats["feat_rank_rp"]
feat_importances = np.sum(feat_rank_rp, axis=1)
df_U = self.df_U
df_U = df_U._get_numeric_data()
df_U_copy = df_U.drop(columns=["PU_label"])
feat_names = df_U_copy.columns
# Index corresponds to feature in original data
df_feat = pd.DataFrame(columns=["feature", "importance"])
df_feat["feature"] = feat_names
df_feat["importance"] = feat_importances
# Sort by importance
df_feat_sort = df_feat.sort_values(by="importance", ascending=False)
max_value = df_feat["importance"].max()
# Normalize to 1
df_feat_sort["importance"] = df_feat_sort["importance"] / max_value
# Set feature importance attribute
self.feat_importances = df_feat
if plot_format in ["svg", "pdf", "png"]:
# Feature importance plot
fig, ax = plt.subplots(figsize=(10, 4))
with sns.axes_style(style="ticks"):
sns.barplot(x="feature", y="importance", data=df_feat_sort)
ax.set_xticklabels(
ax.get_xticklabels(), rotation=45, ha="right", fontsize=7
)
filename = "feat_importance." + plot_format
self.save_plot(filename, fig, ax)
@staticmethod
def _process_pu_data(data):
"""Utility method for processing input data.
Args:
data (DataFrame): Data with positive and unlabeled samples.
Returns:
X_P (array): Positive sample set.
X_U (array): Unlabeled sample set.
"""
df_P = data.query("PU_label == 1") # Positive value is 1
df_U = data.query("PU_label == 0") # Unlabeled value is 0
# Chop off PU label and drop non-numeric columns for sklearn
X_P = np.asarray(df_P.drop(columns=["PU_label"])._get_numeric_data())
X_U = np.asarray(df_U.drop(columns=["PU_label"])._get_numeric_data())
return df_P, df_U, X_P, X_U
@staticmethod
def save_plot(filename, fig, ax):
"""Utility method for saving simple visualizations.
Args:
filename (str): Name ending in .svg, .png, or .pdf
fig, ax (objects): Matplotlib objects.
Returns:
None
"""
sns.set_style("ticks")
fig.tight_layout()
fig.savefig(filename)
class PUInteract:
def __init__(self, df_parent, pu_parent, df_child, pu_child, merge_on=(), feats=()):
"""Consider parent and child phase PU learning scores.
This class looks at PU learning scores for parent bulk
compounds (e.g. layered h-BN) and scores of the child phases
along with descriptors like exfoliation energy and changes
in structural/electronic properties to predict (parent, child)
pairs that can be synthesized.
Parent and child must be linked by a column that allows the
dataframes to be merged. There should also be additional features
that characterize the structural and chemical differences between
parents and children, e.g. changes in bond lengths, etc.
Unsupervised clustering models are used to identify synthesizable
(parent/child) pairs.
Args:
df_parent (str): Parent data filename.
pu_parent (dict): Output from PULearner.cv_baggingDT.
df_child (str): Child data filename.
pu_child (dict): Output from PULearner.cv_baggingDT.
merge_on (tuple): Column name(s) on which to merge.
feats (tuple): Column names to use as features. If empty, use all
possible columns.
Attributes:
merged_df (DataFrame): (Parent, child) pair data.
X (array): Array representation of merged_df.
Returns:
None
"""
df_parent = pd.read_json(df_parent)
df_child = pd.read_json(df_child)
# Set scores from PULearner
df_parent["synth_score"] = 1
df_child["synth_score"] = 1
df_parent.loc[df_parent.eval("PU_label == 0"), "synth_score"] = pu_parent[
"prob"
]
df_child.loc[df_child.eval("PU_label == 0"), "synth_score"] = pu_child["prob"]
# Merge parent and child dfs
merge_on = list(merge_on)
df = pd.merge(
df_parent, df_child, on=merge_on, how="outer", suffixes=["_p", "_c"]
)
df.drop(columns=["PU_label_p", "PU_label_c"], inplace=True, axis=1)
if feats:
feat_names = [f + "_p" for f in feats] + [f + "_c" for f in feats]
df = df[feat_names]
self.merged_df = df
self.X = np.array(df)
def do_kmeans(self, n_clusters=2, seed=42):
"""Do k-means clustering on (parent, child) pairs.
Args:
n_clusters (int): Number of clusters.
seed (int): Fix random seed for kmeans reproducibility.
Returns:
kmeans_output (dict): kmeans cluster centers, cluster labels for
each (parent, child)
"""
np.random.seed(seed)
km = KMeans(n_clusters=n_clusters, random_state=seed)
km.fit(self.X)
kmeans_output = {
"cluster_centers": km.cluster_centers_,
"cluster_labels": km.labels_,
}
return kmeans_output
def do_gmixture(self, n_components=2, seed=42):
"""
Estimate parameters of a Gaussian mixture distribution of (parent,
child) data.
Args:
n_components (int): Number of components in GMM.
seed (int): Random seed.
Returns:
gmm_output (dict): Predicted labels of (parent, child) pairs and
predicted posterior probabilities of each component.
"""
np.random.seed(seed)
gmm = GaussianMixture(
n_components=n_components, random_state=seed, covariance_type="full"
)
gmm.fit(self.X)
gmm_labels = gmm.predict(self.X)
gmm_prob = gmm.predict_proba(self.X)[:, 0]
gmm_output = {"gmm_labels": gmm_labels, "gmm_prob": gmm_prob}
return gmm_output
def do_bgm(self, n_components=6, seed=42):
"""Bayesian Gaussian Mixture.
Infer the effective number of components in a Gaussian Mixture Model
via variational Bayesian estimation.
        n_effective_components < n_components if the model sets some
weights close to 0.
Args:
n_components (int): Number of components in GMM.
seed (int): Random seed.
Returns:
bgm_output (dict): Labels and probabilities.
"""
np.random.seed(seed)
bgm = BayesianGaussianMixture(
n_components=n_components,
covariance_type="full",
weight_concentration_prior=1e-2,
weight_concentration_prior_type="dirichlet_process",
mean_precision_prior=1e-2,
init_params="random",
max_iter=100,
random_state=seed,
)
bgm.fit(self.X)
bgm_labels = bgm.predict(self.X)
bgm_prob = bgm.predict_proba(self.X)[:, 0]
bgm_output = {"bgm_labels": bgm_labels, "bgm_prob": bgm_prob}
return bgm_output
| [
[
[
420,
442
]
],
[
[
471,
502
]
],
[
[
531,
537
],
[
18424,
18430
]
],
[
[
566,
581
],
[
19156,
19171
]
],
[
[
583,
606
],
[
20040,
20063
]
],
[
[
632,
654
],
[
5774,
5796
]
],
[
[
683,
705
]
],
[
[
742,
755
],
[
4004,
4017
]
],
[
[
782,
790
],
[
11125,
11133
]
],
[
[
825,
831
]
],
[
[
865,
871
],
[
10378,
10384
]
],
[
[
880,
892
],
[
3793,
3795
],
[
13605,
13607
],
[
17168,
17170
],
[
17211,
17213
],
[
17629,
17631
]
],
[
[
900,
914
],
[
12295,
12298
],
[
12406,
12409
],
[
14241,
14244
],
[
14288,
14291
],
[
15585,
15588
]
],
[
[
922,
924
]
],
[
[
932,
938
],
[
10518,
10524
],
[
10555,
10561
]
],
[
[
947,
958
],
[
4251,
4253
],
[
4317,
4319
],
[
4415,
4417
],
[
4871,
4873
],
[
5024,
5026
],
[
5072,
5074
],
[
5174,
5176
],
[
5237,
5239
],
[
5396,
5398
],
[
5434,
5436
],
[
5598,
5600
],
[
6156,
6158
],
[
6908,
6910
],
[
6982,
6984
],
[
7746,
7748
],
[
8264,
8266
],
[
8367,
8369
],
[
8434,
8436
],
[
8523,
8525
],
[
8557,
8559
],
[
8646,
8648
],
[
8717,
8719
],
[
8780,
8782
],
[
8837,
8839
],
[
8884,
8886
],
[
8928,
8930
],
[
9357,
9359
],
[
9441,
9443
],
[
9684,
9686
],
[
11191,
11193
],
[
11231,
11233
],
[
11270,
11272
],
[
11328,
11330
],
[
12248,
12250
],
[
13344,
13346
],
[
15103,
15105
],
[
15181,
15183
],
[
17982,
17984
],
[
18390,
18392
],
[
19121,
19123
],
[
20005,
20007
]
],
[
[
966,
990
],
[
12374,
12377
],
[
14194,
14197
]
],
[
[
1009,
1017
],
[
12326,
12334
]
],
[
[
1019,
1029
]
],
[
[
1059,
1072
]
],
[
[
1089,
1100
]
],
[
[
1111,
1125
]
],
[
[
1145,
1154
]
],
[
[
1181,
1191
]
],
[
[
1208,
1216
]
],
[
[
1238,
1247
]
],
[
[
15673,
15683
]
]
] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for generate_universe.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from absl.testing import absltest
from tests import test_constants
from validate import generate_universe
_DEFAULT_ONTOLOGY_LOCATION = test_constants.ONTOLOGY_ROOT
_BAD_MODIFIED_ONTOLOGY = path.join(test_constants.TEST_RESOURCES, 'BAD',
'BAD_FORMAT')
_NONEXISTENT_LOCATION = path.join(test_constants.TEST_ROOT, 'nonexistent')
_EMPTY_FOLDER = path.join(test_constants.TEST_RESOURCES, 'BAD', 'BAD_EMPTY')
class GenerateUniverseTest(absltest.TestCase):
def testCanGenerateUniverse(self):
universe = generate_universe.BuildUniverse(_DEFAULT_ONTOLOGY_LOCATION)
self.assertTrue(universe)
def testCatchInvalidModifiedOntology(self):
with self.assertRaises(Exception) as context:
generate_universe.BuildUniverse(_BAD_MODIFIED_ONTOLOGY)
self.assertIn('no longer valid', str(context.exception))
def testModifiedTypesCatchesNonexistent(self):
self.assertRaises(Exception,
generate_universe.BuildUniverse(_NONEXISTENT_LOCATION))
def testModifiedTypesCatchesEmpty(self):
self.assertRaises(Exception, generate_universe.BuildUniverse(_EMPTY_FOLDER))
if __name__ == '__main__':
absltest.main()
| [
[
[
632,
647
]
],
[
[
671,
679
]
],
[
[
703,
717
]
],
[
[
734,
738
],
[
931,
935
],
[
1052,
1056
],
[
1119,
1123
]
],
[
[
765,
773
],
[
1209,
1217
],
[
1909,
1917
]
],
[
[
793,
807
],
[
877,
891
],
[
941,
955
],
[
1062,
1076
],
[
1129,
1143
]
],
[
[
829,
846
],
[
1282,
1299
],
[
1475,
1492
],
[
1697,
1714
],
[
1830,
1847
]
],
[
[
848,
874
],
[
1314,
1340
]
],
[
[
906,
928
],
[
1507,
1529
]
],
[
[
1028,
1049
],
[
1729,
1750
]
],
[
[
1103,
1116
],
[
1862,
1875
]
],
[
[
1188,
1208
]
]
] |
"""GaussianProcessRegressionSklearn tests.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
"""
import pytest
import numpy as np
skl = pytest.importorskip("sklearn")
import smlb
from smlb.learners.scikit_learn.gaussian_process_regression_sklearn import GaussianProcessRegressionSklearn
def test_GaussianProcessRegressionSklearn_1():
"""Simple examples."""
# linear function with linear kernel
kernel = skl.gaussian_process.kernels.DotProduct(sigma_0=0, sigma_0_bounds="fixed")
gpr = GaussianProcessRegressionSklearn(kernel=kernel, optimizer=None, rng=1)
train_data = smlb.TabularData(data=np.array([[-1], [1]]), labels=np.array([-1, 1]))
valid_data = smlb.TabularData(data=np.array([[-2], [-1], [0], [1], [2]]))
preds = gpr.fit(train_data).apply(valid_data)
mean, stddev = preds.mean, preds.stddev
assert np.allclose(mean, [-2, -1, 0, 1, 2])
assert stddev[0] > stddev[1] > stddev[2] < stddev[3] < stddev[4]
def test_GaussianProcessRegressionSklearn_2():
"""All predictive distributions.
Linear noise-free function, linear kernel + white noise kernel.
The optimized noise level is expected to go to its lower bound.
"""
kernel = skl.gaussian_process.kernels.DotProduct(
sigma_0=0, sigma_0_bounds="fixed"
) + skl.gaussian_process.kernels.WhiteKernel(noise_level=0.1, noise_level_bounds=(1e-5, 1e-5))
gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1)
n = 100
train_data = smlb.TabularData(
data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3
)
valid_data = smlb.TabularData(data=train_data.samples())
preds = gpr.fit(train_data).apply(valid_data)
assert preds.has_signal_part and preds.has_noise_part
conf, noise = preds.signal_part, preds.noise_part
assert np.allclose(conf.mean, train_data.labels())
assert np.allclose(conf.stddev, np.ones(n) * np.sqrt(1e-5), atol=1e-3)
assert (preds.mean == conf.mean).all()
assert np.allclose(preds.stddev, np.ones(n) * np.sqrt(np.square(conf.stddev) + 1e-5))
assert np.allclose(noise.mean, np.zeros(shape=n))
assert np.allclose(noise.stddev, np.sqrt(1e-5))
def test_GaussianProcessRegressionSklearn_3():
"""All predictive distributions.
Linear noisy function, linear kernel + white noise kernel.
The optimized noise level is expected to go to its true value.
"""
kernel = skl.gaussian_process.kernels.DotProduct(
sigma_0=0, sigma_0_bounds="fixed"
) + skl.gaussian_process.kernels.WhiteKernel(noise_level=1, noise_level_bounds=(1e-5, 1e5))
gpr = GaussianProcessRegressionSklearn(kernel=kernel, rng=1)
n, nlsd = 100, 0.5
data = smlb.TabularData(data=np.ones(shape=(n, 1)) * 2, labels=np.ones(shape=n) * 3)
data = smlb.LabelNoise(noise=smlb.NormalNoise(stddev=nlsd, rng=1)).fit(data).apply(data)
preds = gpr.fit(data).apply(data)
assert preds.has_signal_part and preds.has_noise_part
conf, noise = preds.signal_part, preds.noise_part
assert np.allclose(conf.mean, np.ones(n) * 3, atol=1e-1)
assert np.allclose(conf.stddev, np.ones(n) * nlsd, atol=1e-1)
assert (preds.mean == conf.mean).all()
assert np.allclose(preds.stddev, np.sqrt(np.square(conf.stddev) + np.square(nlsd)), atol=1e-1)
assert np.allclose(noise.mean, np.zeros(shape=n))
assert np.allclose(noise.stddev, nlsd, atol=1e-1)
| [
[
[
164,
170
],
[
198,
204
]
],
[
[
179,
190
],
[
676,
678
],
[
706,
708
],
[
764,
766
],
[
909,
911
],
[
1567,
1569
],
[
1601,
1603
],
[
1864,
1866
],
[
1919,
1921
],
[
1944,
1946
],
[
1957,
1959
],
[
2038,
2040
],
[
2064,
2066
],
[
2077,
2079
],
[
2085,
2087
],
[
2129,
2131
],
[
2153,
2155
],
[
2183,
2185
],
[
2209,
2211
],
[
2763,
2765
],
[
2797,
2799
],
[
3075,
3077
],
[
3098,
3100
],
[
3136,
3138
],
[
3161,
3163
],
[
3246,
3248
],
[
3272,
3274
],
[
3280,
3282
],
[
3305,
3307
],
[
3346,
3348
],
[
3370,
3372
],
[
3400,
3402
]
],
[
[
192,
195
],
[
481,
484
],
[
1260,
1263
],
[
1351,
1354
],
[
2463,
2466
],
[
2554,
2557
]
],
[
[
237,
241
],
[
654,
658
],
[
742,
746
],
[
1536,
1540
],
[
1645,
1649
],
[
2741,
2745
],
[
2830,
2834
],
[
2852,
2856
]
],
[
[
317,
349
],
[
566,
598
],
[
1452,
1484
],
[
2652,
2684
]
],
[
[
356,
395
]
],
[
[
1021,
1060
]
],
[
[
2230,
2269
]
]
] |
# Picking Numbers
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/picking-numbers/problem
def picking_number(n, arr):
max_combinations = 0
for i in range(n):
combination = arr.count(arr[i]) + arr.count(arr[i] + 1)
if combination > max_combinations:
max_combinations = combination
return max_combinations
n = int(input().strip())
a = [int(a_temp) for a_temp in input().strip().split(' ')]
print (picking_number(n, a)) | [
[
[
122,
136
],
[
465,
479
]
],
[
[
374,
375
],
[
480,
481
]
],
[
[
399,
400
],
[
483,
484
]
]
] |
#!/usr/bin/env python3
import os
import sys
from vmaf.core.quality_runner import QualityRunner
from vmaf.core.result_store import FileSystemResultStore
from vmaf.routine import run_remove_results_for_dataset
from vmaf.tools.misc import import_python_file
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
def print_usage():
quality_runner_types = ['VMAF', 'PSNR', 'SSIM', 'MS_SSIM']
print("usage: " + os.path.basename(sys.argv[0]) + \
" quality_type dataset_filepath\n")
print("quality_type:\n\t" + "\n\t".join(quality_runner_types) +"\n")
def main():
if len(sys.argv) < 3:
print_usage()
return 2
try:
quality_type = sys.argv[1]
dataset_filepath = sys.argv[2]
except ValueError:
print_usage()
return 2
try:
dataset = import_python_file(dataset_filepath)
except Exception as e:
print("Error: " + str(e))
return 1
try:
runner_class = QualityRunner.find_subclass(quality_type)
except:
print_usage()
return 2
result_store = FileSystemResultStore()
run_remove_results_for_dataset(result_store, dataset, runner_class)
return 0
if __name__ == '__main__':
ret = main()
exit(ret)
| [
[
[
30,
32
],
[
442,
444
]
],
[
[
40,
43
],
[
459,
462
],
[
620,
623
],
[
707,
710
],
[
746,
749
]
],
[
[
81,
94
],
[
996,
1009
]
],
[
[
130,
151
],
[
1109,
1130
]
],
[
[
177,
207
],
[
1138,
1168
]
],
[
[
236,
254
],
[
848,
866
]
],
[
[
256,
269
]
],
[
[
309,
320
]
],
[
[
342,
353
],
[
643,
654
],
[
789,
800
],
[
1058,
1069
]
],
[
[
601,
605
],
[
1259,
1263
]
],
[
[
1253,
1256
],
[
1275,
1278
]
]
] |
""" Dataframe functions """
import logging
import os
from tempfile import mkstemp
import pandas as pd
from box import Box
# pylint: disable=too-many-arguments
logger = logging.getLogger(__name__) # pylint: disable=C0103
def pd_export(
dataframe: pd.DataFrame,
export_type: str,
df_name: str,
temp_name: bool = False,
df_name_prefix: str = "",
df_name_suffix: str = "",
dir_name: str = ".",
config_box: Box = None,
index=True,
header=True,
) -> str:
"""
Exports dataframe to file formats using various options
    Returns the filepath of the exported DataFrame
"""
if temp_name and dir_name != "":
filepath = mkstemp(suffix=df_name_suffix, prefix=df_name_prefix, dir=dir_name)[
1
]
elif config_box and dir_name == "":
filepath = os.path.join(
config_box.extracttempdir,
f"{df_name_prefix}{df_name}{df_name_suffix}.{export_type}",
)
else:
filename = f"{df_name_prefix}{df_name}{df_name_suffix}.{export_type}"
filepath = os.path.join(dir_name, filename)
logger.info("Creating %s file %s from dataframe.", export_type, filepath)
if export_type == "parquet":
dataframe.to_parquet(path=filepath, index=index)
elif export_type == "csv":
dataframe.to_csv(filepath, index=index, header=header)
return filepath
def pd_colupdate(dataframe: pd.DataFrame, coldict: dict) -> pd.DataFrame:
"""
    Rename and filter pandas DataFrame columns using a Python dictionary.
Column names provided in coldict follow the same format as expected by
pd.DataFrame.rename(columns=dict). For example: {"current":"new", "current2":"new2"}
Columns in returned dataframe are filtered by those provided to be renamed.
    Returns a modified pd.DataFrame copy
"""
logger.info("Renaming and filtering dataframe columns using coldict key:values.")
# Remap column names
dataframe = dataframe.rename(columns=coldict)
# Filter columns based on the new names
dataframe = dataframe[[val for key, val in coldict.items()]].copy()
return dataframe
| [
[
[
35,
42
],
[
171,
178
]
],
[
[
50,
52
],
[
832,
834
],
[
1074,
1076
]
],
[
[
74,
81
],
[
679,
686
]
],
[
[
90,
102
],
[
256,
258
],
[
1454,
1456
],
[
1422,
1424
]
],
[
[
119,
122
],
[
440,
443
]
],
[
[
162,
168
],
[
1112,
1118
],
[
1849,
1855
]
],
[
[
230,
239
]
],
[
[
1398,
1410
]
]
] |
from .MidiInfo import * | [
[
[
22,
23
]
]
] |
#!/usr/bin/env python3
import argparse
import json
import urllib.request
if __name__ == '__main__':
parser = argparse.ArgumentParser ()
parser.add_argument ('-v', '--verbose', help = 'Enable Verbose Mode', action = 'store_true')
parser.add_argument ('-ip', help = 'IP Address to Test')
args = parser.parse_args ()
if args.ip:
location_url = 'http://ipinfo.io/{:}/json'.format(args.ip)
else:
location_url = 'http://ipinfo.io/json'
if args.verbose:
print ('Retrieving location information ...')
location_facts = json.loads ((urllib.request.urlopen (location_url).read ())
.decode ("utf-8"))
print ('This IP is in {:}, {:}, {:}.'.format (location_facts ['city'],
location_facts ['region'],
location_facts ['country']))
if args.verbose:
print ('All done.')
| [
[
[
31,
39
],
[
114,
122
]
],
[
[
47,
51
],
[
544,
548
]
],
[
[
59,
73
],
[
557,
563
]
],
[
[
105,
111
],
[
143,
149
],
[
238,
244
],
[
304,
310
]
],
[
[
297,
301
],
[
331,
335
],
[
394,
398
],
[
460,
464
],
[
910,
914
]
],
[
[
344,
356
],
[
581,
593
]
],
[
[
415,
427
],
[
581,
593
]
],
[
[
527,
541
],
[
727,
741
],
[
800,
814
],
[
875,
889
]
]
] |
import os
import json
from pathlib import Path
import jimi
# Initialize
dbCollectionName = "model"
class _model(jimi.db._document):
name = str()
className = str()
classType = str()
location = str()
hidden = bool()
manifest = dict()
_dbCollection = jimi.db.db[dbCollectionName]
def new(self,name,className,classType,location,hidden):
self.name = name
self.className = className
self.classType = classType
self.location = location
self.hidden = hidden
self.acl = { "ids":[ { "accessID":"0","delete": True,"read": True,"write": True } ] }
return super(_model, self).new()
def classObject(self):
        # ClassID won't exist if the className is model
try:
mod = __import__("{0}".format(self.location), fromlist=["{0}".format(self.className)])
except ModuleNotFoundError:
jimi.logging.debug("Error unable to find class='{0}', className='{1}', classType='{2}', location='{3}'".format(self.classID,self.className,self.classType,self.location),-1)
if self.classType == "_action":
return jimi.action._action
elif self.classType == "_trigger":
return jimi.trigger._trigger
else:
return jimi.db._document
class_ = getattr(mod, "{0}".format(self.className))
# Injecting manifest from model into the loaded class - this is only held in memory and never committed to the database
class_.manifest__ = self.manifest
return class_
def registerModel(name,className,classType,location,hidden=False):
    # Checking that a model with the same name does not already exist ( this is due to identification within the GUI; future changes could be made to allow this )
results = _model(False).query(query={ "name" : name })["results"]
if len(results) == 0:
return _model().new(name,className,classType,location,hidden)
else:
if jimi.logging.debugEnabled:
jimi.logging.debug("Register model failed as it already exists modelName='{0}', className='{1}', classType='{2}', location='{3}'".format(name,className,classType,location),4)
def deregisterModel(name,className,classType,location):
loadModels = _model(False).query(query={ "name" : name})["results"]
if loadModels:
loadModels = loadModels[0]
        # This really does need to clean up the model objects that are left behind
#from core.models import trigger, action
#trigger._action().api_delete(query={"classID" : ObjectId(loadModels["_id"]) })
#action._action().api_delete(query={"classID" : ObjectId(loadModels["_id"]) })
results = _model().api_delete(query={ "name" : name, "classType" : classType })
if results["result"]:
return True
if jimi.logging.debugEnabled:
jimi.logging.debug("deregister model failed modelName='{0}', className='{1}', classType='{2}', location='{3}'".format(name,className,classType,location),4)
def getClassID(name):
loadModels = _model(False).query(query={ "name" : name})["results"]
if loadModels:
loadModels = loadModels[0]
return loadModels["_id"]
return None
def loadModel(modelName):
results = _model(False).query(query={ "name" : modelName })["results"]
if len(results) == 1:
results = results[0]
_class = _model().get(results["_id"])
return _class
return None
def getClassObject(classID,sessionData):
return _model().getAsClass(id=classID)
######### --------- API --------- #########
if jimi.api.webServer:
if not jimi.api.webServer.got_first_request:
if jimi.api.webServer.name == "jimi_web":
@jimi.api.webServer.route(jimi.api.base+"models/", methods=["GET"])
def getModels():
result = []
jimi.api.g.sessionData
models = _model(False).query(jimi.api.g.sessionData,query={ "_id" : { "$exists": True } })["results"]
for model in models:
result.append(model["name"])
return { "models" : result }, 200
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/", methods=["GET"])
def getModel(modelName):
class_ = loadModel(modelName).classObject()
if class_:
results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"]
if len(results) == 1:
results = results[0]
return class_().query(jimi.api.g.sessionData,query={ "classID" : results["_id"] },fields=["_id","name","classType"]), 200
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/extra/", methods=["GET"])
def getModelExtra(modelName):
class_ = loadModel(modelName).classObject()
if class_:
results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"]
if len(results) == 1:
results = results[0]
results = class_(False).query(jimi.api.g.sessionData,query={ "classID" : results["_id"] },fields=["_id","name","classType","lastUpdateTime"])["results"]
ids = [ x["_id"] for x in results ]
                        # Possible for trigger and action IDs to be the same ( although unlikely; keep in mind this could be an issue in future )
ConductsCache = jimi.conduct._conduct().query(query={ "$or" : [ { "flow.triggerID" : { "$in" : ids } }, { "flow.actionID" : { "$in" : ids } } ] },fields=["_id","name","flow"])["results"]
for result in results:
usedIn = []
for ConductCache in ConductsCache:
for flow in ConductCache["flow"]:
if "triggerID" in flow:
if flow["triggerID"] == result["_id"]:
usedIn.append({ "conductID" : ConductCache["_id"], "conductName" : ConductCache["name"] })
if "actionID" in flow:
if flow["actionID"] == result["_id"]:
usedIn.append({ "conductID" : ConductCache["_id"], "conductName" : ConductCache["name"] })
result["whereUsed"] = usedIn
return { "results" : results }, 200
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/all/", methods=["GET"])
def getModelAndChildren(modelName):
class_ = loadModel(modelName).classObject()
classIDs = []
if class_:
results = _model(False).query(jimi.api.g.sessionData,query={ "className" : class_.__name__ })["results"]
if len(results) == 1:
results = results[0]
classIDs.append(results["_id"])
results = _model(False).query(jimi.api.g.sessionData,query={ "classType" : results["className"] })["results"]
for result in results:
classIDs.append(result["_id"])
result = []
for classID in classIDs:
for foundObject in class_(False).query(jimi.api.g.sessionData,query={ "classID" : classID })["results"]:
result.append(foundObject)
return { "results" : result}, 200
else:
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/schema/", methods=["GET"])
def getModelSchema(modelName):
class_ = loadModel(modelName)
if class_:
access = jimi.db.ACLAccess(jimi.api.g.sessionData,class_.acl,"read")
if access:
return class_.classObject()(False).api_getSchema(), 200
else:
return {}, 403
else:
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["GET"])
def getModelObject(modelName,objectID):
class_ = loadModel(modelName).classObject()
if class_:
classObject = class_(False).getAsClass(jimi.api.g.sessionData,id=objectID)
if classObject:
classObject = classObject[0]
members = jimi.helpers.classToJson(classObject)
return members, 200
else:
return {}, 404
else:
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["DELETE"])
def deleteModelObject(modelName,objectID):
class_ = loadModel(modelName)
if class_:
_class = class_.classObject()(False).getAsClass(jimi.api.g.sessionData,id=objectID)
if len(_class) == 1:
_class = _class[0]
access = jimi.db.ACLAccess(jimi.api.g.sessionData,_class.acl,"delete")
if access:
if "_id" in jimi.api.g.sessionData:
jimi.audit._audit().add("model","delete",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "modelName" : modelName, "objectID" : objectID })
else:
jimi.audit._audit().add("model","delete",{ "user" : "system", "objectID" : objectID })
result = class_.classObject()(False).api_delete(id=objectID)
if result["result"]:
return result, 200
else:
return {}, 403
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/", methods=["PUT"])
def newModelObject(modelName):
class_ = loadModel(modelName)
if class_:
access = jimi.db.ACLAccess(jimi.api.g.sessionData,class_.acl,"read")
if access:
class_ = class_.classObject()(False)
if jimi.api.g.sessionData:
class_.acl = { "ids" : [ { "accessID" : jimi.api.g.sessionData["primaryGroup"], "read" : True, "write" : True, "delete" : True } ] }
newObjectID = super(type(class_), class_).new().inserted_id
if "_id" in jimi.api.g.sessionData:
jimi.audit._audit().add("model","create",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "modelName" : modelName, "objectID" : str(newObjectID) })
else:
jimi.audit._audit().add("model","create",{ "user" : "system", "objectID" : str(newObjectID) })
return { "_id" : str(newObjectID) }, 200
return {}, 404
@jimi.api.webServer.route(jimi.api.base+"models/<modelName>/<objectID>/", methods=["POST"])
def updateModelObject(modelName,objectID):
class_ = loadModel(modelName)
if class_:
data = json.loads(jimi.api.request.data)
updateItemsList = []
changeLog = {}
_class = class_.classObject()(False).getAsClass(jimi.api.g.sessionData,id=objectID)
if len(_class) == 1:
_class = _class[0]
# Builds list of permitted ACL
access = jimi.db.ACLAccess(jimi.api.g.sessionData,_class.acl,"write")
adminBypass = False
if "admin" in jimi.api.g.sessionData:
if jimi.api.g.sessionData["admin"]:
adminBypass = True
if access:
for dataKey, dataValue in data.items():
fieldAccessPermitted = True
                                # Checking whether sessionData is permitted field-level access
if _class.acl != {} and not adminBypass:
fieldAccessPermitted = jimi.db.fieldACLAccess(jimi.api.g.sessionData,_class.acl,dataKey,"write")
if fieldAccessPermitted:
                                    # _id is a protected MongoDB field and can't be updated
if dataKey != "_id":
if hasattr(_class, dataKey):
changeLog[dataKey] = {}
changeLog[dataKey]["currentValue"] = getattr(_class, dataKey)
if type(getattr(_class, dataKey)) is str:
if _class.setAttribute(dataKey, str(dataValue),sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
elif type(getattr(_class, dataKey)) is int:
try:
if _class.setAttribute(dataKey, int(dataValue),sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
except ValueError:
if _class.setAttribute(dataKey, 0,sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
elif type(getattr(_class, dataKey)) is float:
try:
if _class.setAttribute(dataKey, float(dataValue),sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
except ValueError:
if _class.setAttribute(dataKey, 0,sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
elif type(getattr(_class, dataKey)) is bool:
# Convert string object to bool
if type(dataValue) is str:
if dataValue.lower() == "true":
dataValue = True
else:
dataValue = False
if _class.setAttribute(dataKey, dataValue,sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
elif type(getattr(_class, dataKey)) is dict or type(getattr(_class, dataKey)) is list:
if dataValue:
if _class.setAttribute(dataKey, json.loads(dataValue),sessionData=jimi.api.g.sessionData):
updateItemsList.append(dataKey)
changeLog[dataKey]["newValue"] = getattr(_class, dataKey)
# Commit back to database
if updateItemsList:
# Adding audit record
if "_id" in jimi.api.g.sessionData:
jimi.audit._audit().add("model","update",{ "_id" : jimi.api.g.sessionData["_id"], "user" : jimi.api.g.sessionData["user"], "objects" : changeLog, "modelName" : modelName, "objectID" : objectID })
else:
jimi.audit._audit().add("model","update",{ "user" : "system", "objects" : changeLog, "modelName" : modelName, "objectID" : objectID })
_class.update(updateItemsList,sessionData=jimi.api.g.sessionData,revisioning=True)
return {}, 200
else:
return {}, 403
return {}, 404
| [
[
[
7,
9
]
],
[
[
17,
21
],
[
11808,
11812
],
[
16707,
16711
]
],
[
[
42,
46
]
],
[
[
55,
59
],
[
115,
119
],
[
280,
284
],
[
3607,
3611
],
[
3638,
3642
],
[
3687,
3691
],
[
3739,
3743
],
[
3764,
3768
],
[
4170,
4174
],
[
4195,
4199
],
[
4776,
4780
],
[
4801,
4805
],
[
6723,
6727
],
[
6748,
6752
],
[
7887,
7891
],
[
7912,
7916
],
[
8425,
8429
],
[
8450,
8454
],
[
9090,
9094
],
[
9115,
9119
],
[
10358,
10362
],
[
10383,
10387
],
[
11562,
11566
],
[
11587,
11591
],
[
905,
909
],
[
1145,
1149
],
[
1235,
1239
],
[
1298,
1302
],
[
1997,
2001
],
[
2036,
2040
],
[
2844,
2848
],
[
2879,
2883
],
[
3879,
3883
],
[
3947,
3951
],
[
4423,
4427
],
[
4631,
4635
],
[
5040,
5044
],
[
5256,
5260
],
[
5626,
5630
],
[
7021,
7025
],
[
7293,
7297
],
[
7632,
7636
],
[
8118,
8122
],
[
8136,
8140
],
[
8713,
8717
],
[
8872,
8876
],
[
9379,
9383
],
[
9532,
9536
],
[
9550,
9554
],
[
9669,
9673
],
[
9725,
9729
],
[
9776,
9780
],
[
9816,
9820
],
[
9964,
9968
],
[
10582,
10586
],
[
10600,
10604
],
[
10761,
10765
],
[
10853,
10857
],
[
11066,
11070
],
[
11118,
11122
],
[
11169,
11173
],
[
11209,
11213
],
[
11357,
11361
],
[
11819,
11823
],
[
11986,
11990
],
[
12194,
12198
],
[
12212,
12216
],
[
12337,
12341
],
[
12392,
12396
],
[
12894,
12898
],
[
12917,
12921
],
[
13611,
13615
],
[
14082,
14086
],
[
14474,
14478
],
[
14957,
14961
],
[
15349,
15353
],
[
16211,
16215
],
[
16741,
16745
],
[
17168,
17172
],
[
17228,
17232
],
[
17279,
17283
],
[
17319,
17323
],
[
17498,
17502
],
[
17707,
17711
]
],
[
[
74,
90
],
[
291,
307
]
],
[
[
108,
114
],
[
642,
648
],
[
1824,
1830
],
[
1921,
1927
],
[
2285,
2291
],
[
2713,
2719
],
[
3075,
3081
],
[
3274,
3280
],
[
3407,
3413
],
[
3527,
3533
],
[
3927,
3933
],
[
4403,
4409
],
[
5020,
5026
],
[
7001,
7007
],
[
7273,
7279
]
],
[
[
1586,
1599
]
],
[
[
2216,
2231
]
],
[
[
3040,
3050
]
],
[
[
3238,
3247
],
[
4311,
4320
],
[
4928,
4937
],
[
6879,
6888
],
[
8041,
8050
],
[
8592,
8601
],
[
9263,
9272
],
[
10505,
10514
],
[
11733,
11742
]
],
[
[
3479,
3493
]
],
[
[
3822,
3831
]
],
[
[
4265,
4273
]
],
[
[
4877,
4890
]
],
[
[
6822,
6841
]
],
[
[
7989,
8003
]
],
[
[
8531,
8545
]
],
[
[
9199,
9216
]
],
[
[
10453,
10467
]
],
[
[
11669,
11686
]
]
] |