repo_name | path | copies | size | content | license
---|---|---|---|---|---
dinghino/ecommerce_api | tests/test_pictures.py | 2 | 7336 | """
Test suite for PictureHandler and ItemPictureHandler
"""
from tests.test_case import TestCase
import json
from io import BytesIO
import os
import uuid
import http.client as client
from models import Item, Picture
from tests import test_utils
import utils
EXPECTED_RESULTS = test_utils.RESULTS['pictures']
TEST_IMAGE_FOLDER = 'test_images'
TEST_ITEM = {
'uuid': '429994bf-784e-47cc-a823-e0c394b823e8',
'name': 'mario',
'price': 20.20,
'description': 'svariati mariii',
'availability': 1,
'category': 'scarpe',
}
TEST_ITEM2 = {
'uuid': 'd46b13a1-f4bb-4cfb-8076-6953358145f3',
'name': 'GINO',
'price': 30.20,
'description': 'svariati GINIIIII',
'availability': 1,
'category': 'accessori',
}
TEST_PICTURE = {
'uuid': 'df690434-a488-419f-899e-8853cba1a22b',
'extension': 'jpg'
}
TEST_PICTURE2 = {
'uuid': 'c0001a48-10a3-43c1-b87b-eabac0b2d42f',
'extension': 'png'
}
WRONG_UUID = 'e8e42371-46de-4f5e-8927-e2cc34826269'
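# A well-formed UUID that never identifies a stored record; used to exercise 404 paths.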
class TestPictures(TestCase):
@classmethod
def setup_class(cls):
super(TestPictures, cls).setup_class()
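# Redirect image storage to a dedicated test folder so tests never touch real images,
# and make sure the test helpers use the same patched path.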
utils.get_image_folder = lambda: os.path.join(utils.get_project_root(),
TEST_IMAGE_FOLDER)
test_utils.get_image_folder = utils.get_image_folder
def test_get_picture__success(self):
test_utils.setup_images()
item = Item.create(**TEST_ITEM)
picture = Picture.create(item=item, **TEST_PICTURE)
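# Create an empty placeholder image file on disk so the GET handler has a file to serve.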
open("{path}/{picture_uuid}.jpg".format(
path=utils.get_image_folder(),
picture_uuid=picture.uuid), "wb")
resp = self.app.get('/pictures/{picture_uuid}'.format(
picture_uuid=picture.uuid))
assert resp.status_code == client.OK
test_picture = TEST_PICTURE.copy()
test_picture['item_uuid'] = item.uuid
assert resp.data == b''
assert resp.headers['Content-Type'] == 'image/jpeg'
test_utils.clean_images()
def test_get_picture__missing(self):
resp = self.app.get('/pictures/{picture_uuid}'.format(
picture_uuid=WRONG_UUID))
assert resp.status_code == client.NOT_FOUND
def test_get_item_pictures__success(self):
item = Item.create(**TEST_ITEM)
Picture.create(item=item, **TEST_PICTURE)
Picture.create(item=item, **TEST_PICTURE2)
resp = self.app.get('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid))
assert resp.status_code == client.OK
test_utils.assert_valid_response(
resp.data, EXPECTED_RESULTS['get_item_pictures__success'])
def test_get_item_pictures__empty(self):
item = Item.create(**TEST_ITEM)
resp = self.app.get('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid))
pictures = json.loads(resp.data)
assert not pictures
def test_get_item_pictures__wrong_item_uuid(self):
resp = self.app.get('/items/{item_uuid}/pictures/'.format(
item_uuid=WRONG_UUID))
assert resp.status_code == client.NOT_FOUND
def test_post_picture__success(self):
item = Item.create(**TEST_ITEM)
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid),
data={'image': (BytesIO(b'my file contents'), 'testimage.jpg')},
content_type='multipart/form-data')
assert resp.status_code == client.CREATED
assert len(Picture.select()) == 1
picture = Picture.get()
assert picture.item == item
assert picture.extension == 'jpg'
assert type(picture.uuid) == uuid.UUID
def test_post_item_pictures__wrong_item_uuid(self):
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=WRONG_UUID),
data={'image': (BytesIO(b'my file contents'), 'testimage.jpg')},
content_type='multipart/form-data')
assert resp.status_code == client.NOT_FOUND
assert Picture.select().count() == 0
def test_post_item_pictures__wrong_extension(self):
item = Item.create(**TEST_ITEM)
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid),
data={'image': (BytesIO(b'my file contents'), 'testimage.txt')},
content_type='multipart/form-data')
assert resp.status_code == client.BAD_REQUEST
assert Picture.select().count() == 0
def test_post_picture__no_image(self):
item = Item.create(**TEST_ITEM)
resp = self.app.post('/items/{item_uuid}/pictures/'.format(
item_uuid=item.uuid),
data={},
content_type='multipart/form-data')
assert resp.status_code == client.BAD_REQUEST
assert Picture.select().count() == 0
def test_delete_picture__success(self):
test_utils.setup_images()
item = Item.create(**TEST_ITEM)
picture = Picture.create(item=item, **TEST_PICTURE)
picture2 = Picture.create(item=item, **TEST_PICTURE2)
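# Create empty placeholder files for both pictures, plus an unrelated WRONG_UUID file
# that must survive the delete.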
open("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture.uuid,
extension=picture.extension), "wb")
open("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=WRONG_UUID,
extension='jpg'), "wb")
open("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture2.uuid,
extension=picture2.extension), "wb")
resp = self.app.delete('/pictures/{picture_uuid}'.format(
picture_uuid=picture.uuid))
assert resp.status_code == client.NO_CONTENT
assert Picture.select().count() == 1
assert Item.select().count() == 1
item2 = Item.get()
assert str(item2.uuid) == TEST_ITEM['uuid']
assert item2.name == TEST_ITEM['name']
assert float(item2.price) == TEST_ITEM['price']
assert item2.description == TEST_ITEM['description']
assert os.path.isfile("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=WRONG_UUID,
extension='jpg'))
assert not os.path.isfile("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture.uuid,
extension=picture.extension))
assert os.path.isfile("{path}/{picture_uuid}.{extension}".format(
path=utils.get_image_folder(),
picture_uuid=picture2.uuid,
extension=picture2.extension))
test_utils.clean_images()
def test_delete_picture__wrong_uuid(self):
resp = self.app.delete('/pictures/{picture_uuid}'.format(
picture_uuid=WRONG_UUID))
assert resp.status_code == client.NOT_FOUND
def test_delete_pictures__missing_file(self):
item = Item.create(**TEST_ITEM)
picture = Picture.create(item=item, **TEST_PICTURE)
resp = self.app.delete('/pictures/{picture_uuid}'.format(
picture_uuid=picture.uuid))
assert resp.status_code == client.NO_CONTENT
assert not Picture.select().exists()
assert Item.select().exists()
| gpl-3.0 |
amagdas/eve | eve/tests/methods/delete.py | 10 | 29272 | from eve.tests import TestBase
from eve.tests.utils import DummyEvent
from eve.tests.test_settings import MONGO_DBNAME
from eve import ETAG
from bson import ObjectId
from eve.utils import ParsedRequest
import simplejson as json
import copy
from eve.methods.delete import deleteitem_internal
class TestDelete(TestBase):
def setUp(self):
super(TestDelete, self).setUp()
# Etag used to delete an item (a contact)
self.etag_headers = [('If-Match', self.item_etag)]
def test_unknown_resource(self):
url = '%s%s/' % (self.unknown_resource_url, self.item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_from_resource_endpoint(self):
r, status = self.delete(self.known_resource_url)
self.assert204(status)
r, status = self.parse_response(self.test_client.get(
self.known_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
def test_delete_from_resource_endpoint_write_concern(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
_, status = self.delete(self.known_resource_url)
self.assert500(status)
def test_delete_from_resource_endpoint_different_resource(self):
r, status = self.delete(self.different_resource_url)
self.assert204(status)
r, status = self.parse_response(self.test_client.get(
self.different_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 0)
# deletion of 'users' will still leave 'contacts' untouched (same db
# collection)
r, status = self.parse_response(self.test_client.get(
self.known_resource_url))
self.assert200(status)
self.assertEqual(len(r['_items']), 25)
def test_delete_empty_resource(self):
url = '%s%s/' % (self.empty_resource_url, self.item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_readonly_resource(self):
_, status = self.delete(self.readonly_id_url)
self.assert405(status)
def test_delete_unknown_item(self):
url = '%s%s/' % (self.known_resource_url, self.unknown_item_id)
_, status = self.delete(url)
self.assert404(status)
def test_delete_ifmatch_missing(self):
_, status = self.delete(self.item_id_url)
self.assert403(status)
def test_delete_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
_, status = self.delete(self.item_id_url)
self.assert204(status)
def test_delete_ifmatch_bad_etag(self):
_, status = self.delete(self.item_id_url,
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_delete(self):
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
r = self.test_client.get(self.item_id_url)
self.assert404(r.status_code)
def test_delete_non_existant(self):
url = self.item_id_url[:-5] + "00000"
r, status = self.delete(url, headers=self.etag_headers)
self.assert404(status)
def test_delete_write_concern(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
_, status = self.delete(self.item_id_url,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_delete_different_resource(self):
r, status = self.delete(self.user_id_url,
headers=[('If-Match', self.user_etag)])
self.assert204(status)
r = self.test_client.get(self.user_id_url)
self.assert404(r.status_code)
def test_delete_with_post_override(self):
# POST request with DELETE override turns into a DELETE
headers = [('X-HTTP-Method-Override', 'DELETE'),
('If-Match', self.item_etag)]
r = self.test_client.post(self.item_id_url, data={}, headers=headers)
self.assert204(r.status_code)
def test_delete_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# grab parent collection count; we will use this later to make sure we
# didn't delete all the users in the database. We add one extra invoice
# to make sure that the actual count will never be 1 (which would
# invalidate the test)
_db.invoices.insert({'inv_number': 1})
response, status = self.get('invoices')
invoices = len(response[self.app.config['ITEMS']])
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# verify that the only document retrieved is referencing the correct
# parent document
response, status = self.get('users/%s/invoices' % fake_contact_id)
person_id = ObjectId(response[self.app.config['ITEMS']][0]['person'])
self.assertEqual(person_id, fake_contact_id)
# delete all documents at the sub-resource endpoint
response, status = self.delete('users/%s/invoices' % fake_contact_id)
self.assert204(status)
# verify that no documents are left at the sub-resource endpoint
response, status = self.get('users/%s/invoices' % fake_contact_id)
self.assertEqual(len(response['_items']), 0)
# verify that other documents in the invoices collection have not been
# deleted
response, status = self.get('invoices')
self.assertEqual(len(response['_items']), invoices - 1)
def test_delete_subresource_item(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
headers = [('If-Match', etag)]
response, status = self.delete('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
headers=headers)
self.assert204(status)
def test_deleteitem_internal(self):
# test that deleteitem_internal is available and working properly.
with self.app.test_request_context(self.item_id_url):
r, _, _, status = deleteitem_internal(
self.known_resource, concurrency_check=False,
**{'_id': self.item_id})
self.assert204(status)
r = self.test_client.get(self.item_id_url)
self.assert404(r.status_code)
def delete(self, url, headers=None):
r = self.test_client.delete(url, headers=headers)
return self.parse_response(r)
class TestSoftDelete(TestDelete):
def setUp(self):
super(TestSoftDelete, self).setUp()
# Enable soft delete
self.app.config['SOFT_DELETE'] = True
domain = copy.copy(self.domain)
for resource, settings in domain.items():
# rebuild resource settings for soft delete
del settings['soft_delete']
self.app.register_resource(resource, settings)
# alias for the configured DELETED field name
self.deleted_field = self.app.config['DELETED']
# TestDelete overrides
def test_delete(self):
"""Soft delete should mark an item as deleted and cause subsequent
requests to return 404 Not Found responses. 404s in response to GET
requests should include the document in their body with the _deleted
flag set to True.
"""
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
self.assertNotEqual(data.get('_etag'), self.item_etag)
# 404 should still include a status and an error field
self.assertTrue(self.app.config['ERROR'] in data)
def test_deleteitem_internal(self):
"""Deleteitem internal should honor soft delete settings.
"""
# test that deleteitem_internal is available and working properly.
with self.app.test_request_context(self.item_id_url):
r, _, _, status = deleteitem_internal(
self.known_resource, concurrency_check=False,
**{'_id': self.item_id})
self.assert204(status)
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
def test_delete_different_resource(self):
r, status = self.delete(self.user_id_url,
headers=[('If-Match', self.user_etag)])
self.assert204(status)
r = self.test_client.get(self.user_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
def test_delete_from_resource_endpoint(self):
"""Soft deleting an entire resource should mark each individual item
as deleted, queries to that resource should return no items, and GETs
on any individual items should return 404 responses.
"""
# TestDelete deletes resource at known_resource_url, and confirms
# subsequent queries to the resource return zero items
super(TestSoftDelete, self).test_delete_from_resource_endpoint()
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
# TestSoftDelete specific tests
def test_restore_softdeleted(self):
"""Sending a PUT or PATCH to a soft deleted document should restore the
document.
"""
def soft_delete_item(etag):
r, status = self.delete(
self.item_id_url, headers=[('If-Match', etag)])
self.assert204(status)
# GET soft deleted etag
return self.test_client.get(self.item_id_url)
# Restore via PATCH
deleted_etag = soft_delete_item(self.item_etag).headers['ETag']
r = self.test_client.patch(
self.item_id_url,
data=json.dumps({}),
headers=[('Content-Type', 'application/json'),
('If-Match', deleted_etag)])
self.assert200(r.status_code)
r = self.test_client.get(self.item_id_url)
self.assert200(r.status_code)
new_etag = r.headers['ETag']
# Restore via PUT
r = soft_delete_item(new_etag)
deleted_etag = r.headers['ETag']
restored_doc = {"ref": "1234567890123456789012345"}
r = self.test_client.put(
self.item_id_url,
data=json.dumps(restored_doc),
headers=[('Content-Type', 'application/json'),
('If-Match', deleted_etag)])
self.assert200(r.status_code)
r = self.test_client.get(self.item_id_url)
self.assert200(r.status_code)
def test_multiple_softdelete(self):
"""After an item has been soft deleted, subsequent DELETEs should
return a 404 Not Found response.
"""
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
# GET soft deleted etag
r = self.test_client.get(self.item_id_url)
new_etag = r.headers['ETag']
# Second soft DELETE should return 404 Not Found
r, status = self.delete(
self.item_id_url, headers=[('If-Match', new_etag)])
self.assert404(status)
def test_softdelete_deleted_field(self):
"""The configured 'deleted' field should be added to all documents to indicate
whether that document has been soft deleted or not.
"""
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert200(status)
self.assertEqual(data.get(self.deleted_field), False)
def test_softdelete_show_deleted(self):
"""GETs on resource endpoints should include soft deleted items when
the 'show_deleted' param is included in the query, or when the DELETED
field is explicitly included in the lookup.
"""
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
data, status = self.get(self.known_resource)
after_softdelete_count = data[self.app.config['META']]['total']
self.assertEqual(after_softdelete_count, self.known_resource_count - 1)
data, status = self.get(self.known_resource, query="?show_deleted")
show_deleted_count = data[self.app.config['META']]['total']
self.assertEqual(show_deleted_count, self.known_resource_count)
# Test show_deleted with additional queries
role_query = '?where={"role": "' + self.item['role'] + '"}'
data, status = self.get(self.known_resource, query=role_query)
role_count = data[self.app.config['META']]['total']
data, status = self.get(
self.known_resource, query=role_query + "&show_deleted")
show_deleted_role_count = data[self.app.config['META']]['total']
self.assertEqual(show_deleted_role_count, role_count + 1)
# Test explicit _deleted query
data, status = self.get(
self.known_resource, query='?where={"_deleted": true}')
deleted_query_count = data[self.app.config['META']]['total']
self.assertEqual(deleted_query_count, 1)
def test_softdeleted_embedded_doc(self):
"""Soft deleted documents embedded in other documents should not be
included. They will resolve to None as if the document was actually
deleted.
"""
# Set up and confirm embedded document
_db = self.connection[MONGO_DBNAME]
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
fake_contact_url = self.known_resource_url + "/" + str(fake_contact_id)
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
invoices = self.domain['invoices']
invoices['embedding'] = True
invoices['schema']['person']['data_relation']['embeddable'] = True
embedded = '{"person": 1}'
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
data, status = self.parse_response(r)
self.assert200(status)
self.assertTrue('location' in data['person'])
# Get embedded doc etag so we can delete it
r = self.test_client.get(fake_contact_url)
embedded_contact_etag = r.headers['ETag']
# Delete embedded contact
data, status = self.delete(
fake_contact_url, headers=[('If-Match', embedded_contact_etag)])
self.assert204(status)
# embedded 'person' should now be empty
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
data, status = self.parse_response(r)
self.assert200(status)
self.assertEqual(data['person'], None)
def test_softdeleted_get_response_skips_embedded_expansion(self):
"""Soft deleted documents should not expand their embedded documents when
returned in a 404 Not Found response. The deleted document data should
reflect the state of the document when it was deleted, and should not
change if still-active embedded documents are later updated.
"""
# Confirm embedded document works before delete
_db = self.connection[MONGO_DBNAME]
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
invoices = self.domain['invoices']
invoices['embedding'] = True
invoices['schema']['person']['data_relation']['embeddable'] = True
embedded = '{"person": 1}'
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
invoice_etag = r.headers['ETag']
data, status = self.parse_response(r)
self.assert200(status)
self.assertTrue('location' in data['person'])
# Soft delete document
data, status = self.delete(
self.invoice_id_url, headers=[('If-Match', invoice_etag)])
self.assert204(status)
# Document in 404 should not expand person
r = self.test_client.get(
self.invoice_id_url + '?embedded=%s' % embedded)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data['person'], str(fake_contact_id))
def test_softdelete_caching(self):
"""404 Not Found responses after soft delete should be cacheable
"""
# Soft delete item
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
# delete should have invalidated any previously cached 200 responses
r = self.test_client.get(
self.item_id_url, headers=[('If-None-Match', self.item_etag)])
self.assert404(r.status_code)
post_delete_etag = r.headers['ETag']
# validate cached 404 response data
r = self.test_client.get(
self.item_id_url, headers=[('If-None-Match', post_delete_etag)])
self.assert304(r.status_code)
def test_softdelete_datalayer(self):
"""Soft deleted items should not be returned by find methods in the Eve
data layer unless show_deleted is explicitly configured in the request,
the deleted field is included in the lookup, or the operation is 'raw'.
"""
# Soft delete item
r, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
with self.app.test_request_context():
# find_one should only return item if a request w/ show_deleted ==
# True is passed or if the deleted field is part of the lookup
req = ParsedRequest()
doc = self.app.data.find_one(
self.known_resource, req, _id=self.item_id)
self.assertEqual(doc, None)
req.show_deleted = True
doc = self.app.data.find_one(
self.known_resource, req, _id=self.item_id)
self.assertNotEqual(doc, None)
self.assertEqual(doc.get(self.deleted_field), True)
req.show_deleted = False
doc = self.app.data.find_one(
self.known_resource, req, _id=self.item_id, _deleted=True)
self.assertNotEqual(doc, None)
self.assertEqual(doc.get(self.deleted_field), True)
# find_one_raw should always return a document, soft deleted or not
doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(self.item_id))
self.assertNotEqual(doc, None)
self.assertEqual(doc.get(self.deleted_field), True)
# find should only return deleted items if a request with
# show_deleted == True is passed or if the deleted field is part of
# the lookup
req.show_deleted = False
docs = self.app.data.find(self.known_resource, req, None)
undeleted_count = docs.count()
req.show_deleted = True
docs = self.app.data.find(self.known_resource, req, None)
with_deleted_count = docs.count()
self.assertEqual(undeleted_count, with_deleted_count - 1)
req.show_deleted = False
docs = self.app.data.find(
self.known_resource, req, {self.deleted_field: True})
deleted_count = docs.count()
self.assertEqual(deleted_count, 1)
# find_list_of_ids will return deleted documents if given their id
docs = self.app.data.find_list_of_ids(
self.known_resource, [ObjectId(self.item_id)])
self.assertEqual(docs.count(), 1)
def test_softdelete_db_fields(self):
"""Documents created when soft delete is enabled should include and
maintain the DELETED field in the db.
"""
r = self.test_client.post(self.known_resource_url, data={
'ref': "1234567890123456789054321"
})
data, status = self.parse_response(r)
self.assert201(status)
new_item_id = data[self.app.config['ID_FIELD']]
new_item_etag = data[self.app.config['ETAG']]
with self.app.test_request_context():
db_stored_doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(new_item_id))
self.assertTrue(self.deleted_field in db_stored_doc)
# PUT updates to the document should maintain the DELETED field
r = self.test_client.put(
self.known_resource_url + "/" + new_item_id,
data={'ref': '5432109876543210987654321'},
headers=[('If-Match', new_item_etag)]
)
data, status = self.parse_response(r)
self.assert200(status)
new_item_etag = data[self.app.config['ETAG']]
with self.app.test_request_context():
db_stored_doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(new_item_id))
self.assertTrue(self.deleted_field in db_stored_doc)
# PATCH updates to the document should maintain the DELETED field
r = self.test_client.patch(
self.known_resource_url + "/" + new_item_id,
data={'ref': '5555544444333332222211111'},
headers=[('If-Match', new_item_etag)]
)
self.assert200(r.status_code)
with self.app.test_request_context():
db_stored_doc = self.app.data.find_one_raw(
self.known_resource, _id=ObjectId(new_item_id))
self.assertTrue(self.deleted_field in db_stored_doc)
class TestResourceSpecificSoftDelete(TestBase):
def setUp(self):
super(TestResourceSpecificSoftDelete, self).setUp()
# Enable soft delete for one resource
domain = copy.copy(self.domain)
resource_settings = domain[self.known_resource]
resource_settings['soft_delete'] = True
self.app.register_resource(self.known_resource, resource_settings)
self.deleted_field = self.app.config['DELETED']
# Etag used to delete an item (a contact)
self.etag_headers = [('If-Match', self.item_etag)]
def test_resource_specific_softdelete(self):
""" Resource level soft delete configuration should override
application configuration.
"""
# Confirm soft delete is enabled for known resource.
data, status = self.delete(self.item_id_url, headers=self.etag_headers)
self.assert204(status)
r = self.test_client.get(self.item_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertEqual(data.get(self.deleted_field), True)
# DELETE on other resources should be hard deletes
data, status = self.delete(
self.invoice_id_url, headers=[('If-Match', self.invoice_etag)])
self.assert204(status)
r = self.test_client.get(self.invoice_id_url)
data, status = self.parse_response(r)
self.assert404(status)
self.assertTrue(self.deleted_field not in data)
class TestDeleteEvents(TestBase):
def test_on_pre_DELETE_for_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertFalse(devent.called[1] is None)
def test_on_pre_DELETE_resource_for_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE_contacts += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_for_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_resource_for_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_pre_DELETE_contacts += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_pre_DELETE_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_DELETE += filter_this
# Would normally delete the known document; will return 404 instead.
r, s = self.parse_response(self.delete_item())
self.assert404(s)
def test_on_post_DELETE_for_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_resource_for_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE_contacts += devent
self.delete_item()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_for_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_post_DELETE_resource_for_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_post_DELETE_contacts += devent
self.delete_resource()
self.assertFalse(devent.called is None)
def test_on_delete_resource(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_resource += devent
self.delete_resource()
self.assertEqual(('contacts',), devent.called)
def test_on_delete_resource_contacts(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_resource_contacts += devent
self.delete_resource()
self.assertEqual(tuple(), devent.called)
def test_on_deleted_resource(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_resource += devent
self.delete_resource()
self.assertEqual(('contacts',), devent.called)
def test_on_deleted_resource_contacts(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_resource_contacts += devent
self.delete_resource()
self.assertEqual(tuple(), devent.called)
def test_on_delete_item(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_item += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertEqual(
self.item_id, str(devent.called[1][self.app.config['ID_FIELD']]))
def test_on_delete_item_contacts(self):
devent = DummyEvent(self.before_delete)
self.app.on_delete_item_contacts += devent
self.delete_item()
self.assertEqual(
self.item_id, str(devent.called[0][self.app.config['ID_FIELD']]))
def test_on_deleted_item(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_item += devent
self.delete_item()
self.assertEqual('contacts', devent.called[0])
self.assertEqual(
self.item_id, str(devent.called[1][self.app.config['ID_FIELD']]))
def test_on_deleted_item_contacts(self):
devent = DummyEvent(self.after_delete)
self.app.on_deleted_item_contacts += devent
self.delete_item()
self.assertEqual(
self.item_id, str(devent.called[0][self.app.config['ID_FIELD']]))
def delete_resource(self):
self.test_client.delete(self.known_resource_url)
def delete_item(self):
return self.test_client.delete(
self.item_id_url, headers=[('If-Match', self.item_etag)])
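# Helpers below are handed to DummyEvent as status callbacks: before_delete is
# truthy while the target contact still exists in the db, after_delete is its negation.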
def before_delete(self):
db = self.connection[MONGO_DBNAME]
return db.contacts.find_one(ObjectId(self.item_id)) is not None
def after_delete(self):
return not self.before_delete()
| bsd-3-clause |
mrucci/moto | moto/cloudwatch/models.py | 3 | 3720 | from moto.core import BaseBackend
import boto.ec2.cloudwatch
import datetime
class Dimension(object):
def __init__(self, name, value):
self.name = name
self.value = value
class FakeAlarm(object):
def __init__(self, name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit):
self.name = name
self.comparison_operator = comparison_operator
self.evaluation_periods = evaluation_periods
self.period = period
self.threshold = threshold
self.statistic = statistic
self.description = description
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
self.alarm_actions = alarm_actions
self.ok_actions = ok_actions
self.insufficient_data_actions = insufficient_data_actions
self.unit = unit
self.state_updated_timestamp = datetime.datetime.now()
self.configuration_updated_timestamp = datetime.datetime.now()
class MetricDatum(object):
def __init__(self, namespace, name, value, dimensions):
self.namespace = namespace
self.name = name
self.value = value
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
class CloudWatchBackend(BaseBackend):
def __init__(self):
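# In-memory state for the fake backend: alarms keyed by alarm name,
# metric data kept as a flat list of MetricDatum objects.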
self.alarms = {}
self.metric_data = []
def put_metric_alarm(self, name, comparison_operator, evaluation_periods,
period, threshold, statistic, description, dimensions,
alarm_actions, ok_actions, insufficient_data_actions, unit):
alarm = FakeAlarm(name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit)
self.alarms[name] = alarm
return alarm
def get_all_alarms(self):
return self.alarms.values()
@staticmethod
def _list_element_starts_with(items, needle):
"""True of any of the list elements starts with needle"""
for item in items:
if item.startswith(needle):
return True
return False
def get_alarms_by_action_prefix(self, action_prefix):
return [
alarm
for alarm in self.alarms.values()
if CloudWatchBackend._list_element_starts_with(
alarm.alarm_actions, action_prefix
)
]
def get_alarms_by_alarm_name_prefix(self, name_prefix):
return [
alarm
for alarm in self.alarms.values()
if alarm.name.startswith(name_prefix)
]
def get_alarms_by_alarm_names(self, alarm_names):
return [
alarm
for alarm in self.alarms.values()
if alarm.name in alarm_names
]
def get_alarms_by_state_value(self, state):
raise NotImplementedError(
"DescribeAlarm by state is not implemented in moto."
)
def delete_alarms(self, alarm_names):
for alarm_name in alarm_names:
self.alarms.pop(alarm_name, None)
def put_metric_data(self, namespace, metric_data):
for name, value, dimensions in metric_data:
self.metric_data.append(MetricDatum(namespace, name, value, dimensions))
def get_all_metrics(self):
return self.metric_data
cloudwatch_backends = {}
for region in boto.ec2.cloudwatch.regions():
cloudwatch_backends[region.name] = CloudWatchBackend()
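# Illustrative usage sketch (added for clarity, not part of the original module):
# a test could exercise the fake backend directly, e.g.
#   backend = cloudwatch_backends['us-east-1']
#   backend.put_metric_data('MyApp', [('requests', 1, [])])
#   assert len(backend.get_all_metrics()) == 1
# assuming 'us-east-1' is among the regions returned by boto.ec2.cloudwatch.regions().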
| apache-2.0 |
robk5uj/invenio | modules/websubmit/lib/functions/Ask_For_Record_Details_Confirmation.py | 35 | 5952 | ## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Display the details of a record on which some operation is to be carried
out and prompt for the user's confirmation that it is the correct record.
Upon the clicking of the confirmation button, augment step by one.
"""
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_ADMIN_EMAIL
from invenio.websubmit_config import \
InvenioWebSubmitFunctionStop, \
InvenioWebSubmitFunctionError
from invenio.search_engine import print_record, record_exists
## Details of record to display to the user for confirmation:
CFG_DOCUMENT_DETAILS_MESSAGE = """
<div>
We're about to process your request for the following document:<br /><br />
<table border="0">
<tr>
<td>Report Number(s):</td><td>%(report-numbers)s</td>
</tr>
<tr>
<td>Title:</td><td>%(title)s</td>
</tr>
<tr>
<td>Author(s):</td><td>%(author)s</td>
</tr>
</table>
<br />
If this is correct, please CONFIRM it:<br />
<br />
<input type="submit" width="350" height="50"
name="CONFIRM" value="CONFIRM"
onClick="document.forms[0].step.value=%(newstep)s;">
<br />
If you think that there is a problem, please contact
<a href="mailto:%(admin-email)s">%(admin-email)s</a>.<br />
</div>
"""
def Ask_For_Record_Details_Confirmation(parameters, \
curdir, \
form, \
user_info=None):
"""
Display the details of a record on which some operation is to be carried
out and prompt for the user's confirmation that it is the correct record.
Upon the clicking of the confirmation button, augment step by one.
Given the "recid" (001) of a record, retrieve the basic metadata
(title, report-number(s) and author(s)) and display them in the
user's browser along with a prompt asking them to confirm that
it is indeed the record that they expected to see.
The function depends upon the presence of the "sysno" global and the
presence of the "step" field in the "form" parameter.
When the user clicks on the "confirm" button, step will be augmented by
1 and the form will be submitted.
@parameters: None.
@return: None.
@Exceptions raise: InvenioWebSubmitFunctionError if problems are
encountered;
InvenioWebSubmitFunctionStop in order to display the details of the
record and the confirmation message.
"""
global sysno
## Make sure that we know the current step:
try:
current_step = int(form['step'])
except (TypeError, ValueError):
## Can't determine step.
msg = "Unable to determine submission step. Cannot continue."
raise InvenioWebSubmitFunctionError(msg)
else:
newstep = current_step + 1
## Make sure that the sysno is valid:
try:
working_recid = int(sysno)
except (TypeError, ValueError):
## Unable to find the details of this record - cannot query the database
msg = "Unable to retrieve details of record - record id was invalid."
raise InvenioWebSubmitFunctionError(msg)
if not record_exists(working_recid):
## Record doesn't exist.
msg = "Unable to retrieve details of record [%s] - record does not " \
"exist." % working_recid
raise InvenioWebSubmitFunctionError(msg)
## Retrieve the details to be displayed:
##
## Author(s):
rec_authors = ""
rec_first_author = print_record(int(sysno), 'tm', "100__a")
rec_other_authors = print_record(int(sysno), 'tm', "700__a")
if rec_first_author != "":
rec_authors += "".join(["%s<br />\n" % cgi.escape(author.strip()) for \
author in rec_first_author.split("\n")])
if rec_other_authors != "":
rec_authors += "".join(["%s<br />\n" % cgi.escape(author.strip()) for \
author in rec_other_authors.split("\n")])
## Title:
rec_title = "".join(["%s<br />\n" % cgi.escape(title.strip()) for title in \
print_record(int(sysno), 'tm', "245__a").split("\n")])
## Report numbers:
rec_reportnums = ""
rec_reportnum = print_record(int(sysno), 'tm', "037__a")
rec_other_reportnums = print_record(int(sysno), 'tm', "088__a")
if rec_reportnum != "":
rec_reportnums += "".join(["%s<br />\n" % cgi.escape(repnum.strip()) \
for repnum in rec_reportnum.split("\n")])
if rec_other_reportnums != "":
rec_reportnums += "".join(["%s<br />\n" % cgi.escape(repnum.strip()) \
for repnum in \
rec_other_reportnums.split("\n")])
raise InvenioWebSubmitFunctionStop(CFG_DOCUMENT_DETAILS_MESSAGE % \
{ 'report-numbers' : rec_reportnums, \
'title' : rec_title, \
'author' : rec_authors, \
'newstep' : newstep, \
'admin-email' : CFG_SITE_ADMIN_EMAIL, \
} )
| gpl-2.0 |
olhoneles/politicos | settings.py | 1 | 1327 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Marcelo Jorge Vieira <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tornado.options import define, options
define('debug', default=True, help='debug mode')
define('port', default=8888, help='port to listen on', type=int)
define('redis_port', default=6379, help='redis port')
define('redis_host', default='localhost', help='redis hostname or IP')
define('es_hosts', default='localhost', help='elasticsearch hosts')
define('es_index', default='politicians', help='elasticsearch index')
options.parse_command_line()
define('per_page', default=10, help='items per page')
define('max_per_page', default=50, help='max items per page')
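# Note (added for clarity): other modules read these settings through the shared
# tornado options object, e.g. options.port, options.es_index or options.per_page.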
| agpl-3.0 |
GheRivero/ansible | lib/ansible/modules/cloud/azure/azure_rm_acs.py | 15 | 29357 | #!/usr/bin/python
#
# Copyright (c) 2017 Julien Stroheker, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_acs
version_added: "2.4"
short_description: Manage an Azure Container Service Instance (ACS).
description:
- Create, update and delete an Azure Container Service Instance.
options:
resource_group:
description:
- Name of a resource group where the Container Services exists or will be created.
required: true
name:
description:
- Name of the Container Services instance.
required: true
state:
description:
- Assert the state of the ACS. Use 'present' to create or update an ACS and 'absent' to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
orchestration_platform:
description:
- Specifies the Container Orchestration Platform to use. Currently can be either DCOS, Kubernetes or Swarm.
choices:
- 'DCOS'
- 'Kubernetes'
- 'Swarm'
required: true
master_profile:
description:
- Master profile suboptions.
required: true
suboptions:
count:
description:
- Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5.
required: true
choices:
- 1
- 3
- 5
vm_size:
description:
- The VM size of each of the master VMs (e.g. Standard_F1 / Standard_D2v2).
required: true
version_added: 2.5
dns_prefix:
description:
- The DNS Prefix to use for the Container Service master nodes.
required: true
linux_profile:
description:
- The linux profile suboptions.
required: true
suboptions:
admin_username:
description:
- The Admin Username for the Cluster.
required: true
ssh_key:
description:
- The Public SSH Key used to access the cluster.
required: true
agent_pool_profiles:
description:
- The agent pool profile suboptions.
required: true
suboptions:
name:
description:
- Unique name of the agent pool profile in the context of the subscription and resource group.
required: true
count:
description:
- Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive).
required: true
dns_prefix:
description:
- The DNS Prefix given to Agents in this Agent Pool.
required: true
vm_size:
description:
- The VM Size of each of the Agent Pool VM's (e.g. Standard_F1 / Standard_D2v2).
required: true
service_principal:
description:
- The service principal suboptions.
suboptions:
client_id:
description:
- The ID for the Service Principal.
required: false
client_secret:
description:
- The secret password associated with the service principal.
required: false
diagnostics_profile:
description:
- Should VM Diagnostics be enabled for the Container Service VM's.
required: true
type: bool
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Julien Stroheker (@julienstroheker)"
'''
EXAMPLES = '''
- name: Create an azure container services instance running Kubernetes
azure_rm_acs:
name: acctestcontservice1
location: eastus
resource_group: Testing
orchestration_platform: Kubernetes
master_profile:
- count: 3
dns_prefix: acsk8smasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
service_principal:
- client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
client_secret: "mySPNp@ssw0rd!"
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acsk8sagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
- name: Create an azure container services instance running DCOS
azure_rm_acs:
name: acctestcontservice2
location: eastus
resource_group: Testing
orchestration_platform: DCOS
master_profile:
- count: 3
dns_prefix: acsdcosmasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acscdcosagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
- name: Create an azure container services instance running Swarm
azure_rm_acs:
name: acctestcontservice3
location: eastus
resource_group: Testing
orchestration_platform: Swarm
master_profile:
- count: 3
dns_prefix: acsswarmmasterdns
vm_size: Standard_D2_v2
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 5
dns_prefix: acsswarmagent
vm_size: Standard_D2_v2
diagnostics_profile: false
tags:
Environment: Production
# Deletes the specified container service in the specified subscription and resource group.
# The operation does not delete other resources created as part of creating a container service,
# including storage accounts, VMs, and availability sets. All the other resources created with the container
# service are part of the same resource group and can be deleted individually.
- name: Remove an azure container services instance
azure_rm_acs:
name: acctestcontservice3
location: eastus
resource_group: Testing
state: absent
orchestration_platform: Swarm
master_profile:
- count: 1
vm_size: Standard_A0
dns_prefix: acstestingmasterdns5
linux_profile:
- admin_username: azureuser
ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
agent_pool_profiles:
- name: default
count: 4
dns_prefix: acctestagent15
vm_size: Standard_A0
diagnostics_profile: false
tags:
Ansible: azure_rm_acs
'''
RETURN = '''
state:
description: Current state of the azure container service
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.containerservice.models import (
ContainerService, ContainerServiceOrchestratorProfile, ContainerServiceCustomProfile,
ContainerServiceServicePrincipalProfile, ContainerServiceMasterProfile,
ContainerServiceAgentPoolProfile, ContainerServiceWindowsProfile,
ContainerServiceLinuxProfile, ContainerServiceSshConfiguration,
ContainerServiceDiagnosticsProfile, ContainerServiceSshPublicKey,
ContainerServiceVMDiagnostics
)
except ImportError:
# This is handled in azure_rm_common
pass
def create_agent_pool_profile_instance(agentpoolprofile):
'''
Helper method to serialize a dict to a ContainerServiceAgentPoolProfile
:param: agentpoolprofile: dict with the parameters to setup the ContainerServiceAgentPoolProfile
:return: ContainerServiceAgentPoolProfile
'''
return ContainerServiceAgentPoolProfile(
name=agentpoolprofile['name'],
count=agentpoolprofile['count'],
dns_prefix=agentpoolprofile['dns_prefix'],
vm_size=agentpoolprofile['vm_size']
)
def create_orch_platform_instance(orchestrator):
'''
Helper method to serialize a dict to a ContainerServiceOrchestratorProfile
:param: orchestrator: dict with the parameters to setup the ContainerServiceOrchestratorProfile
:return: ContainerServiceOrchestratorProfile
'''
return ContainerServiceOrchestratorProfile(
orchestrator_type=orchestrator,
)
def create_service_principal_profile_instance(spnprofile):
'''
Helper method to serialize a dict to a ContainerServiceServicePrincipalProfile
:param: spnprofile: dict with the parameters to setup the ContainerServiceServicePrincipalProfile
:return: ContainerServiceServicePrincipalProfile
'''
return ContainerServiceServicePrincipalProfile(
client_id=spnprofile[0]['client_id'],
secret=spnprofile[0]['client_secret']
)
def create_linux_profile_instance(linuxprofile):
'''
Helper method to serialize a dict to a ContainerServiceLinuxProfile
:param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
:return: ContainerServiceLinuxProfile
'''
return ContainerServiceLinuxProfile(
admin_username=linuxprofile[0]['admin_username'],
ssh=create_ssh_configuration_instance(linuxprofile[0]['ssh_key'])
)
def create_ssh_configuration_instance(sshconf):
'''
Helper method to serialize a dict to a ContainerServiceSshConfiguration
:param: sshconf: dict with the parameters to setup the ContainerServiceSshConfiguration
:return: ContainerServiceSshConfiguration
'''
listssh = []
key = ContainerServiceSshPublicKey(key_data=str(sshconf))
listssh.append(key)
return ContainerServiceSshConfiguration(
public_keys=listssh
)
def create_master_profile_instance(masterprofile):
'''
Helper method to serialize a dict to a ContainerServiceMasterProfile
Note: first_consecutive_static_ip is specifically set to None because the Azure server doesn't
accept a request body containing this property. This appears to be an inconsistency between the
Azure client SDK and the Azure server.
:param: masterprofile: dict with the parameters to setup the ContainerServiceMasterProfile
:return: ContainerServiceMasterProfile
'''
return ContainerServiceMasterProfile(
count=masterprofile[0]['count'],
dns_prefix=masterprofile[0]['dns_prefix'],
vm_size=masterprofile[0]['vm_size'],
first_consecutive_static_ip=None
)
def create_diagnostics_profile_instance(diagprofile):
'''
Helper method to serialize a dict to a ContainerServiceDiagnosticsProfile
:param: diagprofile: dict with the parameters to setup the ContainerServiceDiagnosticsProfile
:return: ContainerServiceDiagnosticsProfile
'''
return ContainerServiceDiagnosticsProfile(
vm_diagnostics=create_vm_diagnostics_instance(diagprofile)
)
def create_vm_diagnostics_instance(vmdiag):
'''
Helper method to serialize a dict to a ContainerServiceVMDiagnostics
:param: vmdiag: dict with the parameters to setup the ContainerServiceVMDiagnostics
:return: ContainerServiceVMDiagnostics
'''
return ContainerServiceVMDiagnostics(
enabled=vmdiag
)
def create_acs_dict(acs):
'''
Helper method to deserialize a ContainerService to a dict
:param: acs: ContainerService or AzureOperationPoller with the Azure callback object
:return: dict with the state on Azure
'''
service_principal_profile_dict = None
if acs.orchestrator_profile.orchestrator_type == 'Kubernetes':
service_principal_profile_dict = create_service_principal_profile_dict(acs.service_principal_profile)
return dict(
id=acs.id,
name=acs.name,
location=acs.location,
tags=acs.tags,
orchestrator_profile=create_orchestrator_profile_dict(acs.orchestrator_profile),
master_profile=create_master_profile_dict(acs.master_profile),
linux_profile=create_linux_profile_dict(acs.linux_profile),
service_principal_profile=service_principal_profile_dict,
diagnostics_profile=create_diagnotstics_profile_dict(acs.diagnostics_profile),
provisioning_state=acs.provisioning_state,
agent_pool_profiles=create_agent_pool_profiles_dict(acs.agent_pool_profiles),
type=acs.type
)
def create_linux_profile_dict(linuxprofile):
'''
Helper method to deserialize a ContainerServiceLinuxProfile to a dict
:param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
ssh_key=linuxprofile.ssh.public_keys[0].key_data,
admin_username=linuxprofile.admin_username
)
def create_master_profile_dict(masterprofile):
'''
Helper method to deserialize a ContainerServiceMasterProfile to a dict
:param: masterprofile: ContainerServiceMasterProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
count=masterprofile.count,
fqdn=masterprofile.fqdn,
vm_size=masterprofile.vm_size,
dns_prefix=masterprofile.dns_prefix
)
def create_service_principal_profile_dict(serviceprincipalprofile):
'''
Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
Note: For security reasons, the service principal secret is skipped on purpose.
:param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
client_id=serviceprincipalprofile.client_id
)
def create_diagnotstics_profile_dict(diagnosticsprofile):
'''
Helper method to deserialize a ContainerServiceVMDiagnostics to a dict
:param: diagnosticsprofile: ContainerServiceVMDiagnostics with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
vm_diagnostics=diagnosticsprofile.vm_diagnostics.enabled
)
def create_orchestrator_profile_dict(orchestratorprofile):
'''
Helper method to deserialize a ContainerServiceOrchestratorProfile to a dict
:param: orchestratorprofile: ContainerServiceOrchestratorProfile with the Azure callback object
:return: dict with the state on Azure
'''
return dict(
orchestrator_type=str(orchestratorprofile.orchestrator_type)
)
def create_agent_pool_profiles_dict(agentpoolprofiles):
'''
Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
:param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
:return: dict with the state on Azure
'''
return [dict(
count=profile.count,
vm_size=profile.vm_size,
name=profile.name,
dns_prefix=profile.dns_prefix,
fqdn=profile.fqdn
) for profile in agentpoolprofiles]
class AzureRMContainerService(AzureRMModuleBase):
"""Configuration class for an Azure RM container service resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
required=False,
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
required=False
),
orchestration_platform=dict(
type='str',
required=True,
choices=['DCOS', 'Kubernetes', 'Swarm']
),
master_profile=dict(
type='list',
required=True
),
linux_profile=dict(
type='list',
required=True
),
agent_pool_profiles=dict(
type='list',
required=True
),
service_principal=dict(
type='list',
required=False
),
diagnostics_profile=dict(
type='bool',
required=True
)
)
self.resource_group = None
self.name = None
self.location = None
self.tags = None
self.state = None
self.orchestration_platform = None
self.master_profile = None
self.linux_profile = None
self.agent_pool_profiles = None
self.service_principal = None
self.diagnostics_profile = None
self.results = dict(changed=False, state=dict())
super(AzureRMContainerService, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
resource_group = None
response = None
results = dict()
to_be_updated = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
# Check if the ACS instance already present in the RG
if self.state == 'present':
if self.orchestration_platform == 'Kubernetes':
if not self.service_principal:
self.fail('service_principal should be specified when using Kubernetes')
if not self.service_principal[0].get('client_id'):
self.fail('service_principal.client_id should be specified when using Kubernetes')
if not self.service_principal[0].get('client_secret'):
self.fail('service_principal.client_secret should be specified when using Kubernetes')
mastercount = self.master_profile[0].get('count')
if mastercount != 1 and mastercount != 3 and mastercount != 5:
self.fail('Invalid master count: {0} (must be 1, 3 or 5)'.format(mastercount))
# For now Agent Pool cannot be more than 1, just remove this part in the future if it change
agentpoolcount = len(self.agent_pool_profiles)
if agentpoolcount > 1:
self.fail('You cannot specify more than one agent_pool_profile')
response = self.get_acs()
self.results['state'] = response
if not response:
to_be_updated = True
else:
self.log('Results : {0}'.format(response))
update_tags, response['tags'] = self.update_tags(response['tags'])
if response['provisioning_state'] == "Succeeded":
if update_tags:
to_be_updated = True
def is_property_changed(profile, property, ignore_case=False):
base = response[profile].get(property)
new = getattr(self, profile)[0].get(property)
if ignore_case:
return base.lower() != new.lower()
else:
return base != new
# Cannot Update the master count for now // Uncomment this block in the future to support it
if is_property_changed('master_profile', 'count'):
# self.log(("Master Profile Count Diff, Was {0} / Now {1}"
# .format(response['master_profile'].count,
# self.master_profile[0].get('count'))))
# to_be_updated = True
self.module.warn("master_profile.count cannot be updated")
# Cannot Update the master vm_size for now. Could be a client SDK bug
# Uncomment this block in the future to support it
if is_property_changed('master_profile', 'vm_size', True):
# self.log(("Master Profile VM Size Diff, Was {0} / Now {1}"
# .format(response['master_profile'].get('vm_size'),
# self.master_profile[0].get('vm_size'))))
# to_be_updated = True
self.module.warn("master_profile.vm_size cannot be updated")
# Cannot Update the SSH Key for now // Uncomment this block in the future to support it
if is_property_changed('linux_profile', 'ssh_key'):
# self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
# .format(response['linux_profile'].ssh.public_keys[0].key_data,
# self.linux_profile[0].get('ssh_key'))))
# to_be_updated = True
self.module.warn("linux_profile.ssh_key cannot be updated")
# self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
# self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
# Cannot Update the Username for now // Uncomment this block in the future to support it
if is_property_changed('linux_profile', 'admin_username'):
# self.log(("Linux Profile Diff User, Was {0} / Now {1}"
# .format(response['linux_profile'].admin_username,
# self.linux_profile[0].get('admin_username'))))
# to_be_updated = True
self.module.warn("linux_profile.admin_username cannot be updated")
                    # Cannot have more than one agent pool profile for now // Uncomment this block in the future to support it
# if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
# self.log("Agent Pool count is diff, need to updated")
# to_be_updated = True
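                    # Compare each existing agent pool against the requested profiles by name;
                    # a difference in count or vm_size, or a pool with no matching name,
                    # triggers an update.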
for profile_result in response['agent_pool_profiles']:
matched = False
for profile_self in self.agent_pool_profiles:
if profile_result['name'] == profile_self['name']:
matched = True
if profile_result['count'] != profile_self['count'] or profile_result['vm_size'] != \
profile_self['vm_size']:
self.log(("Agent Profile Diff - Count was {0} / Now {1} - Vm_size was {2} / Now {3}"
.format(profile_result['count'], profile_self['count'],
profile_result['vm_size'], profile_self['vm_size'])))
to_be_updated = True
if not matched:
self.log("Agent Pool not found")
to_be_updated = True
if to_be_updated:
self.log("Need to Create / Update the ACS instance")
if self.check_mode:
return self.results
self.results['state'] = self.create_update_acs()
self.results['changed'] = True
self.log("Creation / Update done")
elif self.state == 'absent':
if self.check_mode:
return self.results
self.delete_acs()
self.log("ACS instance deleted")
return self.results
def create_update_acs(self):
'''
Creates or updates a container service with the specified configuration of orchestrator, masters, and agents.
:return: deserialized ACS instance state dictionary
'''
self.log("Creating / Updating the ACS instance {0}".format(self.name))
service_principal_profile = None
agentpools = []
if self.agent_pool_profiles:
for profile in self.agent_pool_profiles:
self.log("Trying to push the following Profile {0}".format(profile))
agentpools.append(create_agent_pool_profile_instance(profile))
if self.orchestration_platform == 'Kubernetes':
service_principal_profile = create_service_principal_profile_instance(self.service_principal)
parameters = ContainerService(
location=self.location,
tags=self.tags,
orchestrator_profile=create_orch_platform_instance(self.orchestration_platform),
service_principal_profile=service_principal_profile,
linux_profile=create_linux_profile_instance(self.linux_profile),
master_profile=create_master_profile_instance(self.master_profile),
agent_pool_profiles=agentpools,
diagnostics_profile=create_diagnostics_profile_instance(self.diagnostics_profile)
)
# self.log("orchestrator_profile : {0}".format(parameters.orchestrator_profile))
# self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
# self.log("linux_profile : {0}".format(parameters.linux_profile))
# self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
# self.log("ssh : {0}".format(parameters.linux_profile.ssh))
# self.log("master_profile : {0}".format(parameters.master_profile))
# self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
# self.log("vm_diagnostics : {0}".format(parameters.diagnostics_profile.vm_diagnostics))
try:
poller = self.containerservice_client.container_services.create_or_update(self.resource_group, self.name,
parameters)
response = self.get_poller_result(poller)
except CloudError as exc:
self.log('Error attempting to create the ACS instance.')
self.fail("Error creating the ACS instance: {0}".format(str(exc)))
return create_acs_dict(response)
def delete_acs(self):
'''
Deletes the specified container service in the specified subscription and resource group.
The operation does not delete other resources created as part of creating a container service,
including storage accounts, VMs, and availability sets.
All the other resources created with the container service are part of the same resource group and can be deleted individually.
:return: True
'''
self.log("Deleting the ACS instance {0}".format(self.name))
try:
poller = self.containerservice_client.container_services.delete(self.resource_group, self.name)
self.get_poller_result(poller)
except CloudError as e:
self.log('Error attempting to delete the ACS instance.')
self.fail("Error deleting the ACS instance: {0}".format(str(e)))
return True
def get_acs(self):
'''
Gets the properties of the specified container service.
:return: deserialized ACS instance state dictionary
'''
self.log("Checking if the ACS instance {0} is present".format(self.name))
found = False
try:
response = self.containerservice_client.container_services.get(self.resource_group, self.name)
found = True
self.log("Response : {0}".format(response))
self.log("ACS instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the ACS instance.')
if found is True:
return create_acs_dict(response)
else:
return False
def main():
"""Main execution"""
AzureRMContainerService()
if __name__ == '__main__':
main()
| gpl-3.0 |
drmateo/ecto | test/benchmark/metrics.py | 4 | 4501 | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto_test
import sys
def test_nodelay():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
metrics = ecto_test.Metrics("Metrics", queue_size=10)
plasm.connect(ping[:] >> metrics[:])
sched = ecto.Scheduler(plasm)
sched.execute(niter=10000)
print "Hz:", metrics.outputs.hz, " Latency in seconds: %f" % metrics.outputs.latency_seconds
# these are kinda loose
assert metrics.outputs.hz > 5000
assert metrics.outputs.latency_seconds < 0.0001
def test_20hz():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
throttle = ecto_test.Throttle("Throttle", rate=20)
metrics = ecto_test.Metrics("Metrics", queue_size=10)
plasm.connect(ping[:] >> throttle[:],
throttle[:] >> metrics[:])
sched = ecto.Scheduler(plasm)
sched.execute(niter=100)
print "Hz:", metrics.outputs.hz, " Latency in seconds: %f" % metrics.outputs.latency_seconds
# these are kinda loose
assert 19 < metrics.outputs.hz < 21
assert 0.04 < metrics.outputs.latency_seconds < 0.06
def makeplasm(n_nodes):
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
throttle = ecto_test.Sleep("Sleep_0", seconds=1.0/n_nodes)
plasm.connect(ping[:] >> throttle[:])
for j in range(n_nodes-1): # one has already been added
throttle_next = ecto_test.Sleep("Sleep_%u" % (j+1), seconds=1.0/n_nodes)
plasm.connect(throttle, "out", throttle_next, "in")
throttle = throttle_next
metrics = ecto_test.Metrics("Metrics", queue_size=4)
plasm.connect(throttle[:] >> metrics[:])
# o = open('graph.dot', 'w')
# print >>o, plasm.viz()
# o.close()
# print "\n", plasm.viz(), "\n"
return (plasm, metrics)
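# makeplasm() chains n_nodes Sleep cells, each sleeping 1/n_nodes seconds, so one message
# needs about one second to traverse the whole graph. The single-threaded test below
# therefore expects ~1 Hz, while the threadpool tests can pipeline messages and expect
# ~n_nodes Hz with the same ~1 second latency.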
def test_st(niter, n_nodes):
(plasm, metrics) = makeplasm(n_nodes)
#sched = ecto.Scheduler(plasm)
#sched.execute(niter)
sched = ecto.Scheduler(plasm)
sched.execute(niter)
print "Hz:", metrics.outputs.hz, " Latency in seconds:", metrics.outputs.latency_seconds
assert 0.95 < metrics.outputs.hz < 1.05
assert 0.95 < metrics.outputs.latency_seconds < 1.05
#
# It is hard to test the middle cases, i.e. if you have one thread
# per node, things should run at n_nodes hz and 1 second latency but
# if there are less than that, things are somewhere in the middle.
# Also your latency tends to be worse as you have to wait for the
# graph to "fill up"
#
def test_tp(niter, n_nodes):
(plasm, metrics) = makeplasm(n_nodes)
sched = ecto.Scheduler(plasm)
sched.execute(niter=niter)
print "Hz:", metrics.outputs.hz, " Latency in seconds:", metrics.outputs.latency_seconds
assert n_nodes * 0.95 < metrics.outputs.hz < n_nodes * 1.05
assert 0.9 < metrics.outputs.latency_seconds < 1.1
test_nodelay()
test_20hz()
test_st(5, 5)
test_st(5, 12)
test_tp(20, 15)
test_tp(20, 10)
test_tp(20, 5)
| bsd-3-clause |
hpcugent/hanythingondemand | hod/subcommands/relabel.py | 2 | 2618 | #!/usr/bin/env python
# #
# Copyright 2009-2016 Ghent University
#
# This file is part of hanythingondemand
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/hanythingondemand
#
# hanythingondemand is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# hanythingondemand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with hanythingondemand. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Relabel a cluster.
@author: Ewan Higgs (Universiteit Gent)
@author: Kenneth Hoste (Universiteit Gent)
"""
import sys
from vsc.utils.generaloption import GeneralOption
from hod import VERSION as HOD_VERSION
from hod.subcommands.subcommand import SubCommand
import hod.cluster as hc
class RelabelOptions(GeneralOption):
"""Option parser for 'relabel' subcommand."""
VERSION = HOD_VERSION
ALLOPTSMANDATORY = False # let us use optionless arguments.
class RelabelSubCommand(SubCommand):
"""Implementation of HOD 'relabel' subcommand."""
CMD = 'relabel'
EXAMPLE = "<source-cluster-label> <dest-cluster-label>"
HELP = "Change the label of an existing job."
def run(self, args):
"""Run 'relabel' subcommand."""
optparser = RelabelOptions(go_args=args, envvar_prefix=self.envvar_prefix, usage=self.usage_txt)
try:
if len(optparser.args) != 3:
self.report_error(self.usage())
labels = hc.known_cluster_labels()
if optparser.args[1] not in labels:
self.report_error("Cluster with label '%s' not found", optparser.args[1])
try:
hc.mv_cluster_info(optparser.args[1], optparser.args[2])
except (IOError, OSError) as err:
self.report_error("Could not change label '%s' to '%s': %s", optparser.args[1], optparser.args[2], err)
except StandardError as err:
self._log_and_raise(err)
return 0
| gpl-2.0 |
aferr/TemporalPartitioningMemCtl | src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py | 90 | 2440 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PUSHF {
.adjust_env oszIn64Override
rflags t1
st t1, ss, [1, t0, rsp], "-env.stackSize", dataSize=ssz
subi rsp, rsp, ssz
};
def macroop POPF {
.adjust_env oszIn64Override
ld t1, ss, [1, t0, rsp], dataSize=ssz
addi rsp, rsp, ssz
wrflags t1, t0
};
'''
| bsd-3-clause |
aaltinisik/OCBAltinkaya | addons/fetchmail/fetchmail.py | 6 | 15874 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import poplib
import time
from imaplib import IMAP4
from imaplib import IMAP4_SSL
from poplib import POP3
from poplib import POP3_SSL
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import zipfile
import base64
from openerp import addons
from openerp.osv import fields, osv
from openerp import tools, api
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_POP_MESSAGES = 50
MAIL_TIMEOUT = 60
# Workaround for Python 2.7.8 bug https://bugs.python.org/issue23906
poplib._MAXLINE = 65536
class fetchmail_server(osv.osv):
"""Incoming POP/IMAP mail server account"""
_name = 'fetchmail.server'
_description = "POP/IMAP Server"
_order = 'priority'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'active':fields.boolean('Active', required=False),
'state':fields.selection([
('draft', 'Not Confirmed'),
('done', 'Confirmed'),
], 'Status', select=True, readonly=True, copy=False),
'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}),
'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}),
'type':fields.selection([
('pop', 'POP Server'),
('imap', 'IMAP Server'),
('local', 'Local Server'),
], 'Server Type', select=True, required=True, readonly=False),
'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"),
'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. "
"If not enabled, incoming emails will be stripped of any attachments before being processed"),
'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference"
"and attached to each processed message. This will usually double the size of your message database."),
'date': fields.datetime('Last Fetch Date', readonly=True),
'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}),
'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}),
'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, "
"on the record that was created or updated by this mail"),
'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation "
"corresponding to this document type. This will create "
"new documents for new conversations, or attach follow-up "
"emails to the existing conversations (documents)."),
'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, "
"lower values mean higher priority"),
'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True),
'configuration' : fields.text('Configuration', readonly=True),
'script' : fields.char('Script', readonly=True),
}
_defaults = {
'state': "draft",
'type': "pop",
'active': True,
'priority': 5,
'attach': True,
'script': '/mail/static/scripts/openerp_mailgate.py',
}
def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False):
port = 0
values = {}
if server_type == 'pop':
port = ssl and 995 or 110
elif server_type == 'imap':
port = ssl and 993 or 143
else:
values['server'] = ''
values['port'] = port
conf = {
'dbname' : cr.dbname,
'uid' : uid,
'model' : 'MODELNAME',
}
if object_id:
m = self.pool.get('ir.model')
r = m.read(cr,uid,[object_id],['model'])
conf['model']=r[0]['model']
        values['configuration'] = """Use the script below with the following command line options in your Mail Transport Agent (MTA)
openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s
Example configuration for the postfix mta running locally:
/etc/postfix/virtual_aliases:
@youdomain openerp_mailgate@localhost
/etc/aliases:
openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s"
""" % conf
return {'value':values}
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids , {'state':'draft'})
return True
@api.cr_uid_ids_context
def connect(self, cr, uid, server_id, context=None):
if isinstance(server_id, (list,tuple)):
server_id = server_id[0]
server = self.browse(cr, uid, server_id, context)
if server.type == 'imap':
if server.is_ssl:
connection = IMAP4_SSL(server.server, int(server.port))
else:
connection = IMAP4(server.server, int(server.port))
connection.login(server.user, server.password)
elif server.type == 'pop':
if server.is_ssl:
connection = POP3_SSL(server.server, int(server.port))
else:
connection = POP3(server.server, int(server.port))
#TODO: use this to remove only unread messages
#connection.user("recent:"+server.user)
connection.user(server.user)
connection.pass_(server.password)
# Add timeout on socket
connection.sock.settimeout(MAIL_TIMEOUT)
return connection
def button_confirm_login(self, cr, uid, ids, context=None):
if context is None:
context = {}
for server in self.browse(cr, uid, ids, context=context):
try:
connection = server.connect()
server.write({'state':'done'})
except Exception, e:
_logger.exception("Failed to connect to %s server %s.", server.type, server.name)
raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s.") % tools.ustr(e))
finally:
try:
if connection:
if server.type == 'imap':
connection.close()
elif server.type == 'pop':
connection.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
return True
def _fetch_mails(self, cr, uid, ids=False, context=None):
if not ids:
ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])])
return self.fetch_mail(cr, uid, ids, context=context)
def fetch_mail(self, cr, uid, ids, context=None):
"""WARNING: meant for cron usage only - will commit() after each email!"""
context = dict(context or {})
context['fetchmail_cron_running'] = True
mail_thread = self.pool.get('mail.thread')
action_pool = self.pool.get('ir.actions.server')
for server in self.browse(cr, uid, ids, context=context):
_logger.info('start checking for new emails on %s server %s', server.type, server.name)
context.update({'fetchmail_server_id': server.id, 'server_type': server.type})
count, failed = 0, 0
imap_server = False
pop_server = False
if server.type == 'imap':
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.search(None, '(UNSEEN)')
for num in data[0].split():
res_id = None
result, data = imap_server.fetch(num, '(RFC822)')
imap_server.store(num, '-FLAGS', '\\Seen')
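                        # The FETCH above implicitly marks the message as \Seen; clear the flag
                        # here so a processing failure leaves the mail unread, and only re-set
                        # it once message_process() has succeeded.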
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
data[0][1],
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
imap_server.store(num, '+FLAGS', '\\Seen')
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
count += 1
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
elif server.type == 'pop':
try:
while True:
pop_server = server.connect()
(numMsgs, totalSize) = pop_server.stat()
pop_server.list()
for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1):
(header, msges, octets) = pop_server.retr(num)
msg = '\n'.join(msges)
res_id = None
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
msg,
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
pop_server.dele(num)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
if numMsgs < MAX_POP_MESSAGES:
break
pop_server.quit()
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if pop_server:
pop_server.quit()
server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)})
return True
def _update_cron(self, cr, uid, context=None):
if context and context.get('fetchmail_cron_running'):
return
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context)
except ValueError:
            # Never mind if the default cron cannot be found
return
# Enabled/Disable cron based on the number of 'done' server of type pop or imap
cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])])
def create(self, cr, uid, values, context=None):
res = super(fetchmail_server, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(fetchmail_server, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class mail_mail(osv.osv):
_inherit = "mail.mail"
_columns = {
'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server",
readonly=True,
select=True,
oldname='server_id'),
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).create(cr, uid, values, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).write(cr, uid, ids, values, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gnuhub/intellij-community | python/lib/Lib/site-packages/django/utils/autoreload.py | 135 | 4239 | # Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time
try:
import thread
except ImportError:
import dummy_thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading
except ImportError:
pass
RUN_RELOADER = True
_mtimes = {}
_win = (sys.platform == "win32")
def code_changed():
global _mtimes, _win
for filename in filter(lambda v: v, map(lambda m: getattr(m, "__file__", None), sys.modules.values())):
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if not os.path.exists(filename):
continue # File might be in an egg, so it can't be reloaded.
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
return True
return False
def reloader_thread():
while RUN_RELOADER:
if code_changed():
sys.exit(3) # force reload
time.sleep(1)
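# restart_with_reloader() is the parent process: it keeps re-executing this script with
# RUN_MAIN=true in the environment; the child runs reloader_thread(), which exits with
# code 3 when a source file changes, and any other exit code ends the loop.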
def restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
reloader(main_func, args, kwargs)
| apache-2.0 |
anomitra/articleScraper | PyQt-gpl-5.4.1/examples/widgets/stylesheet/stylesheeteditor.py | 3 | 4557 | #############################################################################
##
## Copyright (C) 2010 Hans-Peter Jansen <[email protected]>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
###########################################################################
from PyQt5.QtCore import pyqtSlot, QFile, QRegExp, Qt, QTextStream
from PyQt5.QtWidgets import (QApplication, QDialog, QFileDialog, QMessageBox,
QStyleFactory)
from ui_stylesheeteditor import Ui_StyleSheetEditor
class StyleSheetEditor(QDialog):
def __init__(self, parent=None):
super(StyleSheetEditor, self).__init__(parent)
self.ui = Ui_StyleSheetEditor()
self.ui.setupUi(self)
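        # Derive a plain style name from the QStyle class name, e.g. 'QWindowsStyle' -> 'Windows',
        # so it can be matched against the entries returned by QStyleFactory.keys().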
regExp = QRegExp(r'.(.*)\+?Style')
defaultStyle = QApplication.style().metaObject().className()
if regExp.exactMatch(defaultStyle):
defaultStyle = regExp.cap(1)
self.ui.styleCombo.addItems(QStyleFactory.keys())
self.ui.styleCombo.setCurrentIndex(
self.ui.styleCombo.findText(defaultStyle, Qt.MatchContains))
self.ui.styleSheetCombo.setCurrentIndex(
self.ui.styleSheetCombo.findText('Coffee'))
self.loadStyleSheet('Coffee')
@pyqtSlot(str)
def on_styleCombo_activated(self, styleName):
QApplication.setStyle(styleName)
self.ui.applyButton.setEnabled(False)
@pyqtSlot(str)
def on_styleSheetCombo_activated(self, sheetName):
self.loadStyleSheet(sheetName)
def on_styleTextEdit_textChanged(self):
self.ui.applyButton.setEnabled(True)
def on_applyButton_clicked(self):
QApplication.instance().setStyleSheet(
self.ui.styleTextEdit.toPlainText())
self.ui.applyButton.setEnabled(False)
def on_saveButton_clicked(self):
fileName, _ = QFileDialog.getSaveFileName(self)
if fileName:
self.saveStyleSheet(fileName)
def loadStyleSheet(self, sheetName):
file = QFile(':/qss/%s.qss' % sheetName.lower())
file.open(QFile.ReadOnly)
styleSheet = file.readAll()
try:
# Python v2.
styleSheet = unicode(styleSheet, encoding='utf8')
except NameError:
# Python v3.
styleSheet = str(styleSheet, encoding='utf8')
self.ui.styleTextEdit.setPlainText(styleSheet)
QApplication.instance().setStyleSheet(styleSheet)
self.ui.applyButton.setEnabled(False)
def saveStyleSheet(self, fileName):
styleSheet = self.ui.styleTextEdit.toPlainText()
file = QFile(fileName)
if file.open(QFile.WriteOnly):
QTextStream(file) << styleSheet
else:
QMessageBox.information(self, "Unable to open file",
file.errorString())
| gpl-2.0 |
free-z4u/android_kernel_htc_z4u | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
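	# Conversion helpers between trace timestamps (microseconds) and screen pixels:
	# with the current zoom factor, one millisecond of trace time maps to `zoom` pixels.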
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
nebril/fuel-web | nailgun/nailgun/openstack/common/timeutils.py | 16 | 5967 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(six.text_type(e))
except TypeError as e:
raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
if utcnow.override_time is None:
# NOTE(kgriffs): This is several times faster
# than going through calendar.timegm(...)
return int(time.time())
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=None):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
:param override_time: datetime instance or list thereof. If not
given, defaults to the current UTC time.
"""
utcnow.override_time = override_time or datetime.datetime.utcnow()
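# Illustrative usage (not part of the original module): freeze time in a test and
# restore it afterwards; utcnow() then returns the overridden value.
#
#     set_time_override(datetime.datetime(2015, 1, 1))
#     assert utcnow() == datetime.datetime(2015, 1, 1)
#     advance_time_seconds(30)
#     clear_time_override()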
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:params dt: the time
:params window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon
| apache-2.0 |
molobrakos/home-assistant | homeassistant/components/fints/sensor.py | 7 | 9289 | """Read the balance of your bank accounts via FinTS."""
from collections import namedtuple
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_USERNAME, CONF_PIN, CONF_URL, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=4)
ICON = 'mdi:currency-eur'
BankCredentials = namedtuple('BankCredentials', 'blz login pin url')
CONF_BIN = 'bank_identification_number'
CONF_ACCOUNTS = 'accounts'
CONF_HOLDINGS = 'holdings'
CONF_ACCOUNT = 'account'
ATTR_ACCOUNT = CONF_ACCOUNT
ATTR_BANK = 'bank'
ATTR_ACCOUNT_TYPE = 'account_type'
SCHEMA_ACCOUNTS = vol.Schema({
vol.Required(CONF_ACCOUNT): cv.string,
vol.Optional(CONF_NAME, default=None): vol.Any(None, cv.string),
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_BIN): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACCOUNTS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
vol.Optional(CONF_HOLDINGS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensors.
Login to the bank and get a list of existing accounts. Create a
sensor for each account.
"""
credentials = BankCredentials(config[CONF_BIN], config[CONF_USERNAME],
config[CONF_PIN], config[CONF_URL])
fints_name = config.get(CONF_NAME, config[CONF_BIN])
account_config = {acc[CONF_ACCOUNT]: acc[CONF_NAME]
for acc in config[CONF_ACCOUNTS]}
holdings_config = {acc[CONF_ACCOUNT]: acc[CONF_NAME]
for acc in config[CONF_HOLDINGS]}
client = FinTsClient(credentials, fints_name)
balance_accounts, holdings_accounts = client.detect_accounts()
accounts = []
for account in balance_accounts:
if config[CONF_ACCOUNTS] and account.iban not in account_config:
_LOGGER.info('skipping account %s for bank %s',
account.iban, fints_name)
continue
account_name = account_config.get(account.iban)
if not account_name:
account_name = '{} - {}'.format(fints_name, account.iban)
accounts.append(FinTsAccount(client, account, account_name))
_LOGGER.debug('Creating account %s for bank %s',
account.iban, fints_name)
for account in holdings_accounts:
if config[CONF_HOLDINGS] and \
account.accountnumber not in holdings_config:
_LOGGER.info('skipping holdings %s for bank %s',
account.accountnumber, fints_name)
continue
account_name = holdings_config.get(account.accountnumber)
if not account_name:
account_name = '{} - {}'.format(
fints_name, account.accountnumber)
accounts.append(FinTsHoldingsAccount(client, account, account_name))
_LOGGER.debug('Creating holdings %s for bank %s',
account.accountnumber, fints_name)
add_entities(accounts, True)
class FinTsClient:
"""Wrapper around the FinTS3PinTanClient.
    Use the ``client`` property to obtain a fresh FinTS3Client object.
"""
def __init__(self, credentials: BankCredentials, name: str):
"""Initialize a FinTsClient."""
self._credentials = credentials
self.name = name
@property
def client(self):
"""Get the client object.
        As the fints library is stateless, there is no benefit in caching
the client objects. If that ever changes, consider caching the client
object and also think about potential concurrency problems.
"""
from fints.client import FinTS3PinTanClient
return FinTS3PinTanClient(
self._credentials.blz, self._credentials.login,
self._credentials.pin, self._credentials.url)
def detect_accounts(self):
"""Identify the accounts of the bank."""
from fints.dialog import FinTSDialogError
balance_accounts = []
holdings_accounts = []
for account in self.client.get_sepa_accounts():
try:
self.client.get_balance(account)
balance_accounts.append(account)
except IndexError:
# account is not a balance account.
pass
except FinTSDialogError:
# account is not a balance account.
pass
try:
self.client.get_holdings(account)
holdings_accounts.append(account)
except FinTSDialogError:
# account is not a holdings account.
pass
return balance_accounts, holdings_accounts
class FinTsAccount(Entity):
"""Sensor for a FinTS balance account.
A balance account contains an amount of money (=balance). The amount may
also be negative.
"""
def __init__(self, client: FinTsClient, account, name: str) -> None:
"""Initialize a FinTs balance account."""
self._client = client # type: FinTsClient
self._account = account
self._name = name # type: str
self._balance = None # type: float
self._currency = None # type: str
@property
def should_poll(self) -> bool:
"""Return True.
Data needs to be polled from the bank servers.
"""
return True
def update(self) -> None:
"""Get the current balance and currency for the account."""
bank = self._client.client
balance = bank.get_balance(self._account)
self._balance = balance.amount.amount
self._currency = balance.amount.currency
_LOGGER.debug('updated balance of account %s', self.name)
@property
def name(self) -> str:
"""Friendly name of the sensor."""
return self._name
@property
def state(self) -> float:
"""Return the balance of the account as state."""
return self._balance
@property
def unit_of_measurement(self) -> str:
"""Use the currency as unit of measurement."""
return self._currency
@property
def device_state_attributes(self) -> dict:
"""Additional attributes of the sensor."""
attributes = {
ATTR_ACCOUNT: self._account.iban,
ATTR_ACCOUNT_TYPE: 'balance',
}
if self._client.name:
attributes[ATTR_BANK] = self._client.name
return attributes
@property
def icon(self) -> str:
"""Set the icon for the sensor."""
return ICON
class FinTsHoldingsAccount(Entity):
"""Sensor for a FinTS holdings account.
A holdings account does not contain money but rather some financial
instruments, e.g. stocks.
"""
def __init__(self, client: FinTsClient, account, name: str) -> None:
"""Initialize a FinTs holdings account."""
self._client = client # type: FinTsClient
self._name = name # type: str
self._account = account
self._holdings = []
self._total = None # type: float
@property
def should_poll(self) -> bool:
"""Return True.
Data needs to be polled from the bank servers.
"""
return True
def update(self) -> None:
"""Get the current holdings for the account."""
bank = self._client.client
self._holdings = bank.get_holdings(self._account)
self._total = sum(h.total_value for h in self._holdings)
@property
def state(self) -> float:
"""Return total market value as state."""
return self._total
@property
def icon(self) -> str:
"""Set the icon for the sensor."""
return ICON
@property
def device_state_attributes(self) -> dict:
"""Additional attributes of the sensor.
Lists each holding of the account with the current value.
"""
attributes = {
ATTR_ACCOUNT: self._account.accountnumber,
ATTR_ACCOUNT_TYPE: 'holdings',
}
if self._client.name:
attributes[ATTR_BANK] = self._client.name
for holding in self._holdings:
total_name = '{} total'.format(holding.name)
attributes[total_name] = holding.total_value
pieces_name = '{} pieces'.format(holding.name)
attributes[pieces_name] = holding.pieces
price_name = '{} price'.format(holding.name)
attributes[price_name] = holding.market_value
return attributes
@property
def name(self) -> str:
"""Friendly name of the sensor."""
return self._name
@property
def unit_of_measurement(self) -> str:
"""Get the unit of measurement.
Hardcoded to EUR, as the library does not provide the currency for the
holdings. And as FinTS is only used in Germany, most accounts will be
in EUR anyways.
"""
return "EUR"
| apache-2.0 |
irwinlove/django | django/template/__init__.py | 198 | 2022 | """
Django's support for templates.
The django.template namespace contains two independent subsystems:
1. Multiple Template Engines: support for pluggable template backends,
built-in backends and backend-independent APIs
2. Django Template Language: Django's own template engine, including its
built-in loaders, context processors, tags and filters.
Ideally these subsystems would be implemented in distinct packages. However
keeping them together made the implementation of Multiple Template Engines
less disruptive.
Here's a breakdown of which modules belong to which subsystem.
Multiple Template Engines:
- django.template.backends.*
- django.template.loader
- django.template.response
Django Template Language:
- django.template.base
- django.template.context
- django.template.context_processors
- django.template.loaders.*
- django.template.debug
- django.template.defaultfilters
- django.template.defaulttags
- django.template.engine
- django.template.loader_tags
- django.template.smartif
Shared:
- django.template.utils
"""
# Multiple Template Engines
from .engine import Engine
from .utils import EngineHandler
engines = EngineHandler()
__all__ = ('Engine', 'engines')
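# Illustrative usage of the backend-independent API (not part of the original module):
#
#     from django.template import engines
#     template = engines['django'].from_string('Hello {{ name }}!')
#     template.render({'name': 'world'})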
# Django Template Language
# Public exceptions
from .base import VariableDoesNotExist # NOQA isort:skip
from .context import ContextPopException # NOQA isort:skip
from .exceptions import TemplateDoesNotExist, TemplateSyntaxError # NOQA isort:skip
# Template parts
from .base import ( # NOQA isort:skip
Context, Node, NodeList, Origin, RequestContext, Template, Variable,
)
# Deprecated in Django 1.8, will be removed in Django 1.10.
from .base import resolve_variable # NOQA isort:skip
# Library management
from .library import Library # NOQA isort:skip
__all__ += ('Template', 'Context', 'RequestContext')
| bsd-3-clause |
lawl/pmbootstrap | pmb/aportgen/linux.py | 2 | 4781 | """
Copyright 2017 Oliver Smith
This file is part of pmbootstrap.
pmbootstrap is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pmbootstrap is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pmbootstrap. If not, see <http://www.gnu.org/licenses/>.
"""
import pmb.helpers.run
import pmb.aportgen.core
import pmb.parse.apkindex
import pmb.parse.arch
def generate_apkbuild(args, pkgname, manufacturer, name, arch):
device = "-".join(pkgname.split("-")[1:])
carch = pmb.parse.arch.alpine_to_kernel(arch)
content = """\
# Kernel config based on: arch/""" + carch + """/configs/(CHANGEME!)
pkgname=\"""" + pkgname + """\"
pkgver=3.x.x
pkgrel=0
pkgdesc=\"""" + manufacturer + " " + name + """ kernel fork\"
arch=\"""" + arch + """\"
_carch=\"""" + carch + """\"
_flavor=\"""" + device + """\"
url="https://kernel.org"
license="GPL2"
options="!strip !check !tracedeps"
makedepends="perl sed installkernel bash gmp-dev bc linux-headers elfutils-dev"
HOSTCC="${CC:-gcc}"
HOSTCC="${HOSTCC#${CROSS_COMPILE}}"
# Source
_repository="(CHANGEME!)"
_commit="ffffffffffffffffffffffffffffffffffffffff"
_config="config-${_flavor}.${arch}"
source="
$pkgname-$_commit.tar.gz::https://github.com/LineageOS/${_repository}/archive/${_commit}.tar.gz
$_config
compiler-gcc6.h
01_msm-fix-perf_trace_counters.patch
02_gpu-msm-fix-gcc5-compile.patch
"
builddir="$srcdir/${_repository}-${_commit}"
prepare() {
default_prepare
# gcc6 support
cp -v "$srcdir/compiler-gcc6.h" "$builddir/include/linux/"
# Remove -Werror from all makefiles
find . -type f -name Makefile -print0 | \\
xargs -0 sed -i 's/-Werror-/-W/g'
find . -type f -name Makefile -print0 | \\
xargs -0 sed -i 's/-Werror//g'
# Prepare kernel config ('yes ""' for kernels lacking olddefconfig)
cp "$srcdir"/$_config "$builddir"/.config
yes "" | make ARCH="$_carch" HOSTCC="$HOSTCC" oldconfig
}
menuconfig() {
cd "$builddir"
make ARCH="$_carch" menuconfig
cp .config "$startdir"/$_config
}
build() {
unset LDFLAGS
make ARCH="$_carch" CC="${CC:-gcc}" \\
KBUILD_BUILD_VERSION="$((pkgrel + 1 ))-postmarketOS"
}
package() {
# kernel.release
install -D "$builddir/include/config/kernel.release" \\
"$pkgdir/usr/share/kernel/$_flavor/kernel.release"
# zImage (find the right one)
cd "$builddir/arch/$_carch/boot"
_target="$pkgdir/boot/vmlinuz-$_flavor"
for _zimg in zImage-dtb Image.gz-dtb *zImage Image; do
[ -e "$_zimg" ] || continue
msg "zImage found: $_zimg"
install -Dm644 "$_zimg" "$_target"
break
done
if ! [ -e "$_target" ]; then
error "Could not find zImage in $PWD!"
return 1
fi
}
sha512sums="(run 'pmbootstrap checksum """ + pkgname + """' to fill)"
"""
# Write the file
with open(args.work + "/aportgen/APKBUILD", "w", encoding="utf-8") as handle:
for line in content.split("\n"):
handle.write(line[8:].replace(" " * 4, "\t") + "\n")
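# Note on the write loop above: the APKBUILD template literal is indented by
# eight spaces inside this Python source, so line[8:] strips that source
# indentation and the replace() turns each remaining group of four spaces into
# a tab (the usual APKBUILD convention). A small illustration with a
# hypothetical template line:
#
#     line = '            default_prepare'   # 8 template spaces + 4 for the body
#     line[8:].replace(" " * 4, "\t")        # -> '\tdefault_prepare'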
def generate(args, pkgname):
device = "-".join(pkgname.split("-")[1:])
deviceinfo = pmb.parse.deviceinfo(args, device)
# Copy gcc6 support header and the patches from lg-mako for now
# (automatically finding the right patches is planned in #688)
pmb.helpers.run.user(args, ["mkdir", "-p", args.work + "/aportgen"])
for file in ["compiler-gcc6.h", "01_msm-fix-perf_trace_counters.patch",
"02_gpu-msm-fix-gcc5-compile.patch"]:
pmb.helpers.run.user(args, ["cp", args.aports +
"/device/linux-lg-mako/" + file,
args.work + "/aportgen/"])
generate_apkbuild(args, pkgname, deviceinfo["manufacturer"],
deviceinfo["name"], deviceinfo["arch"])
| gpl-3.0 |
a-b/PopClip-Extensions | source/InstantTranslate/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| mit |
stevenmizuno/QGIS | python/user.py | 7 | 4676 | # -*- coding: utf-8 -*-
"""
***************************************************************************
user.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Nathan Woodrow
Email : woodrow dot nathan at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nathan Woodrow'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Nathan Woodrow'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import glob
import traceback
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import Qgis, QgsApplication, QgsMessageLog
def load_user_expressions(path):
"""
Load all user expressions from the given paths
"""
    #Loop over all .py files in the given path and import them
modules = glob.glob(path + "/*.py")
names = [os.path.basename(f)[:-3] for f in modules]
for name in names:
if name == "__init__":
continue
# As user expression functions should be registered with qgsfunction
# just importing the file is enough to get it to load the functions into QGIS
try:
__import__("expressions.{0}".format(name), locals(), globals())
except:
error = traceback.format_exc()
msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
msg = QCoreApplication.translate("UserExpressions", "The user expression {0} is not valid").format(name)
QgsMessageLog.logMessage(msg + "\n" + error, msgtitle, Qgis.Warning)
userpythonhome = os.path.join(QgsApplication.qgisSettingsDirPath(), "python")
expressionspath = os.path.join(userpythonhome, "expressions")
sys.path.append(userpythonhome)
if not os.path.exists(expressionspath):
os.makedirs(expressionspath)
initfile = os.path.join(expressionspath, "__init__.py")
if not os.path.exists(initfile):
open(initfile, "w").close()
template = """\"\"\"
Define a new function using the @qgsfunction decorator.
The function accept the following parameters
:param [any]: Define any parameters you want to pass to your function before
the following arguments.
:param feature: The current feature
:param parent: The QgsExpression object
:param context: If there is an argument called ``context`` found at the last
position, this variable will contain a ``QgsExpressionContext``
object, that gives access to various additional information like
expression variables. E.g. ``context.variable('layer_id')``
:returns: The result of the expression.
The @qgsfunction decorator accepts the following arguments:
:param args: Defines the number of arguments. With ``args='auto'`` the number
arguments will automatically be extracted from the signature.
:param group: The name of the group under which this expression function will
be listed.
:param usesgeometry: Set this to False if your function does not access
feature.geometry(). Defaults to True.
:param referenced_columns: An array of attribute names that are required to run
this function. Defaults to
[QgsFeatureRequest.ALL_ATTRIBUTES].
\"\"\"
from qgis.core import *
from qgis.gui import *
@qgsfunction(args='auto', group='Custom')
def my_sum(value1, value2, feature, parent):
\"\"\"
Calculates the sum of the two parameters value1 and value2.
<h2>Example usage:</h2>
<ul>
<li>my_sum(5, 8) -> 13</li>
<li>my_sum(\"fiel1\", \"field2\") -> 42</li>
</ul>
\"\"\"
return value1 + value2
"""
try:
import expressions
expressions.load = load_user_expressions
expressions.load(expressionspath)
expressions.template = template
except ImportError:
# We get a import error and crash for some reason even if we make the expressions package
# TODO Fix the crash on first load with no expressions folder
# But for now it's not the end of the world if it doesn't load the first time
pass
| gpl-2.0 |
heidtn/PyDataLearn | PyDataLearn/NeuralNet.py | 1 | 6181 | from math import tanh
from pysqlite2 import dbapi2 as sqlite
def dtanh(y):
    #derivative of tanh written in terms of the output y (y = tanh(x)):
    #d/dx tanh(x) = 1 - y*y, so the weight update is largest where tanh is
    #steepest (output near 0) and shrinks as the neuron saturates toward +/-1
return 1.0-y*y
class SearchNet:
def __init__(self, dbname):
self.con = sqlite.connect(dbname)
def __del__(self):
self.con.close()
def maketables(self):
self.con.execute('create table hiddennode(create_key)')
self.con.execute('create table wordhidden(fromid, toid, strength)')
self.con.execute('create table hiddenurl(fromid, toid, strength)')
self.con.commit()
def getstrength(self, fromid, toid, layer):
#returns strength of connection from fromid to toid
#layer specifies the table, whether dendrites connecting input to hidden or hidden to output
if layer == 0: table = 'wordhidden'
else: table = 'hiddenurl'
res = self.con.execute('select strength from %s where fromid=%d and toid=%d' % (table, fromid, toid)).fetchone()
if res == None:
if layer == 0: return -0.2 #if extra word, we want negative effects
if layer == 1: return 0
return res[0]
def setstrength(self, fromid, toid, layer, strength):
if layer == 0: table = 'wordhidden'
else: table = 'hiddenurl'
res = self.con.execute('select rowid from %s where fromid=%d and toid=%d' % (table, fromid, toid)).fetchone()
if res == None:
            #connection rows are created lazily, the first time they are needed
self.con.execute('insert into %s (fromid,toid,strength) values (%d,%d,%f)' % (table, fromid, toid, strength))
else:
rowid = res[0]
self.con.execute('update %s set strength=%f where rowid=%d' % (table, strength, rowid))
def generatehiddennode(self, wordids, urls):
#generates new nodes for searches we haven't done yet
if len(wordids) > 3: return None
#check to see if we've created a node for this set of words
createkey = '_'.join(sorted([str(wi) for wi in wordids])) #sorting ensures any combination of these words
res = self.con.execute("select rowid from hiddennode where create_key='%s'" % createkey).fetchone()
#if we haven't seen this set of words
if res == None:
cur = self.con.execute("insert into hiddennode (create_key) values ('%s')" % createkey)
hiddenid = cur.lastrowid
for wordid in wordids:
self.setstrength(wordid, hiddenid, 0, 1.0/len(wordids))
for urlid in urls:
self.setstrength(hiddenid, urlid, 1, 0.1)
self.con.commit()
def getallhiddenids(self, wordids, urlids):
l1 = {}
for wordid in wordids:
cur = self.con.execute('select toid from wordhidden where fromid=%d' % wordid)
for row in cur: l1[row[0]] = 1
for urlid in urlids:
cur = self.con.execute('select fromid from hiddenurl where toid=%d' % urlid)
for row in cur: l1[row[0]] = 1
return l1.keys()
#load weights into memory for speeeed
def setupnetwork(self, wordids, urlids):
#values lists
self.wordids = wordids #current list of words we're searching for
self.hiddenids = self.getallhiddenids(wordids, urlids) #current list of hidden ids relevant to our input wordids and urlids
self.urlids = urlids
#node outputs
self.ai = [1.0]*len(self.wordids) #input layer outputs for each word
self.ah = [1.0]*len(self.hiddenids) #hidden layer outputs
self.ao = [1.0]*len(self.urlids) #output layer outputs
#create weights matrix
self.wi = [[self.getstrength(wordid, hiddenid, 0) #2d array of weights between input array and hidden array
for hiddenid in self.hiddenids] #for each word what are the weights of all relevant hidden neurons
for wordid in self.wordids]
self.wo = [[self.getstrength(hiddenid, urlid, 1) #same as wi, but from hidden layer to output layer
for urlid in self.urlids]
for hiddenid in self.hiddenids]
def feedforward(self):
#only query words for inputs
for i in xrange(len(self.wordids)): #reset input layer values to 1
self.ai[i] = 1.0
#hidden activations
for j in xrange(len(self.hiddenids)):
tot = 0.0
for i in xrange(len(self.wordids)): #iterate through weights 2d array and apply to input layer strength
tot += self.ai[i]*self.wi[i][j]
self.ah[j] = tanh(tot) #set hidden layer outputs to tanh of sum of input weights axon=tanh(sum(dendrites))
#output activations (feed forward from hidden layer)
for k in xrange(len(self.urlids)):
tot = 0.0
for j in xrange(len(self.hiddenids)):
tot += self.ah[j]*self.wo[j][k]
self.ao[k] = tanh(tot)
#return the outputs of the output layer
return self.ao[:]
def backpropagate(self, targets, N=0.5):
        #calculate the error deltas for the output layer
output_deltas = [0.0] * len(self.urlids)
for k in xrange(len(self.urlids)):
error = targets[k] - self.ao[k]
output_deltas[k] = dtanh(self.ao[k]) * error
        #do the same for the hidden layer
hidden_deltas = [0.0] * len(self.hiddenids)
for j in xrange(len(self.hiddenids)):
error = 0.0
for k in xrange(len(self.urlids)):
error += output_deltas[k]*self.wo[j][k]
hidden_deltas[j] = dtanh(self.ah[j])*error
#update the weights
for j in xrange(len(self.hiddenids)):
for k in xrange(len(self.urlids)):
change = output_deltas[k]*self.ah[j]
self.wo[j][k] = self.wo[j][k] + N*change
#update input weights
for j in xrange(len(self.wordids)):
for k in xrange(len(self.hiddenids)):
change = hidden_deltas[k]*self.ai[j]
self.wi[j][k] = self.wi[j][k] + N*change
def trainquery(self, wordids, urlids, selectedurl):
#generate the hidden nodes if we have new words
self.generatehiddennode(wordids, urlids)
self.setupnetwork(wordids, urlids)
self.feedforward()
targets = [0.0]*len(urlids)
targets[urlids.index(selectedurl)] = 1.0
self.backpropagate(targets)
self.updatedatabase()
def updatedatabase(self):
#save our instance variables into the database
for i in xrange(len(self.wordids)):
for j in xrange(len(self.hiddenids)):
self.setstrength(self.wordids[i], self.hiddenids[j], 0, self.wi[i][j])
for i in xrange(len(self.hiddenids)):
for j in xrange(len(self.urlids)):
self.setstrength(self.hiddenids[i],self.urlids[j], 1, self.wo[i][j])
self.con.commit()
def getresult(self, wordids, urlids):
self.setupnetwork(wordids, urlids)
return self.feedforward()
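if __name__ == '__main__':
    # A minimal usage sketch (hypothetical word/url ids and database name).
    # trainquery() runs one feedforward/backpropagation pass for the clicked
    # url and persists the updated weights back to the sqlite database.
    net = SearchNet('nn_test.db')
    net.maketables()                       # only needed the first time
    wordids, urlids = [101, 102], [201, 202, 203]
    net.generatehiddennode(wordids, urlids)
    net.trainquery(wordids, urlids, 201)   # pretend the user picked url 201
    print(net.getresult(wordids, urlids))  # one score per url; 201 should rise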
| mit |
dfdx2/ancfinder | scripts/update_311.py | 1 | 2533 | import datetime, json, urllib2, os, errno, requests
# Open/create file, deleting info already in it so that we can make fresh info
file_name = open('data/311.json', 'w')
issues = []
working = {'issues':issues}
data = {}
# Get date in the past to start
start_date = (datetime.datetime.today() + datetime.timedelta(-180)).isoformat()
# Request info from SeeClickFix API
url = 'https://seeclickfix.com/api/v2/issues?place_url=district-of-columbia&&after='+start_date+'&page=1&per_page=100'
response = urllib2.urlopen(url)
info = json.load(response)
endpoint = info['metadata']['pagination']['pages']
page = 1
while page < endpoint:
url = 'https://seeclickfix.com/api/v2/issues?place_url=district-of-columbia&&after='+start_date+'&page='+str(page)+'&per_page=100'
response = urllib2.urlopen(url)
info = json.load(response)
working['issues'] += info['issues']
page +=1
#Locate each issue's ANC and SMD from its lat/long coordinates, then tally the totals
for issue in working['issues']:
url = 'http://gis.govtrack.us/boundaries/dc-smd-2013/?contains='+str(issue['lat'])+','+str(issue['lng'])
request = requests.get(url)
info = json.loads(request.text)
try:
smd = info['objects'][0]['external_id']
anc = info['objects'][0]['external_id'][:2]
variety = issue['summary']
print smd, issue['lng'], issue['lat'], variety
if anc in data:
if smd in data[anc]['smds']:
data[anc]['smds'][smd]['total'] += 1
else:
data[anc]['smds'][smd] = {}
data[anc]['smds'][smd]['total'] = 1
data[anc]['smds'][smd]['types'] = {}
data[anc]['total'] += 1
else:
data[anc] = {}
data[anc]['total'] = 1
data[anc]['types'] = {}
data[anc]['smds'] = {}
data[anc]['smds'][smd] = {}
data[anc]['smds'][smd]['total'] = 1
data[anc]['smds'][smd]['types'] = {}
if variety in data[anc]['types']:
data[anc]['types'][variety] += 1
if variety in data[anc]['smds'][smd]['types']:
data[anc]['smds'][smd]['types'][variety] += 1
else:
data[anc]['smds'][smd]['types'][variety] = 1
else:
data[anc]['types'][variety] = 1
data[anc]['smds'][smd]['types'][variety] = 1
except IndexError:
continue
# Save the JSON file
with open('data/311.json', 'w') as output:
json.dump(data, output, sort_keys=True, indent=4)
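# For reference, the JSON written above has this nested shape (values are
# hypothetical), aggregating 311 issue counts per ANC and per SMD:
#
#     {
#         "1A": {
#             "total": 42,
#             "types": {"Pothole": 10, "Graffiti": 3},
#             "smds": {
#                 "1A01": {"total": 7, "types": {"Pothole": 3}}
#             }
#         }
#     }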
| cc0-1.0 |
aveshagarwal/openshift-ansible | roles/lib_openshift/src/lib/storageclass.py | 18 | 3122 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class StorageClassConfig(object):
    ''' Handle storageclass options '''
# pylint: disable=too-many-arguments
def __init__(self,
name,
provisioner,
parameters=None,
annotations=None,
default_storage_class="false",
api_version='v1',
kubeconfig='/etc/origin/master/admin.kubeconfig',
mount_options=None,
reclaim_policy=None):
''' constructor for handling storageclass options '''
self.name = name
self.parameters = parameters
self.annotations = annotations
self.provisioner = provisioner
self.api_version = api_version
self.default_storage_class = str(default_storage_class).lower()
self.kubeconfig = kubeconfig
self.mount_options = mount_options
self.reclaim_policy = reclaim_policy
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a storageclass dict '''
self.data['apiVersion'] = self.api_version
self.data['kind'] = 'StorageClass'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['annotations'] = {}
if self.annotations is not None:
self.data['metadata']['annotations'] = self.annotations
self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
self.default_storage_class
self.data['provisioner'] = self.provisioner
self.data['parameters'] = {}
if self.parameters is not None:
self.data['parameters'].update(self.parameters)
# default to aws if no params were passed
else:
self.data['parameters']['type'] = 'gp2'
self.data['mountOptions'] = self.mount_options or []
if self.reclaim_policy is not None:
self.data['reclaimPolicy'] = self.reclaim_policy
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class StorageClass(Yedit):
''' Class to model the oc storageclass object '''
annotations_path = "metadata.annotations"
provisioner_path = "provisioner"
parameters_path = "parameters"
mount_options_path = "mountOptions"
reclaim_policy_path = "reclaimPolicy"
kind = 'StorageClass'
def __init__(self, content):
'''StorageClass constructor'''
super(StorageClass, self).__init__(content=content)
def get_annotations(self):
        ''' get the annotations dict '''
return self.get(StorageClass.annotations_path) or {}
def get_parameters(self):
        ''' get the storageclass parameters '''
return self.get(StorageClass.parameters_path) or {}
def get_mount_options(self):
''' get mount options'''
return self.get(StorageClass.mount_options_path) or []
def get_reclaim_policy(self):
''' get reclaim policy'''
return self.get(StorageClass.reclaim_policy_path)
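# A minimal usage sketch for StorageClassConfig (values are hypothetical):
#
#     config = StorageClassConfig(name='gp2-default',
#                                 provisioner='kubernetes.io/aws-ebs',
#                                 parameters={'type': 'gp2'},
#                                 default_storage_class='true',
#                                 reclaim_policy='Delete')
#     config.data['metadata']['name']   # -> 'gp2-default'
#     config.data['parameters']         # -> {'type': 'gp2'}
#     config.data['reclaimPolicy']      # -> 'Delete'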
| apache-2.0 |
izpack/izpack | izpack-wrapper/src/main/resources/utils/wrappers/izpack2jnlp/setup.py | 26 | 1070 | #!/usr/bin/env python
# ........................................................................... #
#
# IzPack - Copyright 2008 Julien Ponge, All Rights Reserved.
#
# http://izpack.org/
# http://izpack.codehaus.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ........................................................................... #
from distutils.core import setup
import py2exe
setup(
console = [{
'script': 'izpack2jnlp.py',
'icon_resources': [(0, 'app.ico')]
}],
script_args=['py2exe', '--bundle-files', '1']
)
| apache-2.0 |
jejimenez/django | tests/m2m_through_regress/tests.py | 182 | 9847 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "[email protected]", "password")
cls.jane = User.objects.create_user("jane", "[email protected]", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.bob, "group_set", [])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.roll, "members", [])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.rock.members.create, name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml",
indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107"
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""")
| bsd-3-clause |
bbc/kamaelia | Code/Python/Kamaelia/Kamaelia/Apps/Compose/GUI/ArgumentsPanel.py | 6 | 6027 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Kamaelia.Support.Tk.Scrolling import ScrollingMenu
from Axon.Ipc import producerFinished, shutdownMicroprocess
import Tkinter
import pprint
class ArgumentsPanel(Tkinter.Frame):
def __init__(self, parent, theclass):
Tkinter.Frame.__init__(self, parent)
self.theclass = theclass
# pprint.pprint(theclass)
# build widgets
row=0
if self.theclass['classdoc']:
self.classdoclabel = Tkinter.Label(self, text = self.theclass['classdoc'], justify="left")
self.classdoclabel['font'] = " ".join(self.classdoclabel['font'].split(" ")[0:2])
self.classdoclabel.grid(row=row, column=0,columnspan=2,
sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
if self.theclass['initdoc']:
self.initdoclabel = Tkinter.Label(self, text = self.theclass['initdoc'], justify="left")
self.initdoclabel['font'] = " ".join(self.initdoclabel['font'].split(" ")[0:2])
self.initdoclabel.grid(row=row, column=0, columnspan=2,
sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
self.label = Tkinter.Label(self, text="ARGUMENTS:")
self.label.grid(row=row, column=0, columnspan=2,sticky=Tkinter.W+Tkinter.S, padx=4, pady=4)
row+=1
# enumerate std args
self.args = []
for arg in self.theclass['args']['std']:
arglabel = Tkinter.Label(self, text=arg[0])
arglabel.grid(row=row,column=0, sticky=Tkinter.E)
svar = Tkinter.StringVar()
argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
default=""
if len(arg)>=2:
default = arg[1]
svar.set(default)
argfield.grid(row=row,column=1, sticky=Tkinter.W)
self.args.append( (arg[0], svar, default) )
row+=1
# now do * and ** args
for argname in ["*","**"]:
if self.theclass['args'][argname]:
arglabel = Tkinter.Label(self, text=argname)
arglabel.grid(row=row,column=0, sticky=Tkinter.E)
arglabel = None
svar = Tkinter.StringVar()
argfield = Tkinter.Entry(self, bg="white", textvariable=svar, takefocus=1)
argfield.grid(row=row,column=1, sticky=Tkinter.W)
self.args.append( (argname, svar, "") )
row+=1
# self.rowconfigure(row, weight=1)
# self.grid()
def getDef(self):
return { "name" : self.theclass['class'],
"module" : self.theclass['module'],
"instantiation" : self.getInstantiation(),
"configuration" : self.getConfiguration()
}
def getConfiguration(self):
"""Return the instantiation string"""
argstr = ""
prefix = ""
SEQUENTIALARGS = []
TUPLEARGS = None
DICTARGS = None
for (argname, svar, default) in self.args:
unspecified = False
value = None
text = svar.get().strip()
default = default.strip()
if argname != "*" and argname != "**":
if default=="" or text != default:
if not text:
unspecified = True
value = text
SEQUENTIALARGS.append( [argname, unspecified,value, default ] )
else:
if text:
if argname == "*":
TUPLEARGS = text
if argname == "**":
DICTARGS = text
return { "args" : SEQUENTIALARGS,
"tupleargs" : TUPLEARGS ,
"dictargs" : DICTARGS,
"theclass" : self.theclass["theclass"], # FIXME: Is this a mistake, should we pass everything out?
}
def getInstantiation(self):
"""Return the instantiation string"""
argstr = ""
prefix = ""
for (argname, svar, default) in self.args:
text = svar.get().strip()
default = default.strip()
if argname != "*" and argname != "**":
if argname[0]=="[" and argname[-1]=="]":
if text:
argname=argname[1:-1]
argstr = argstr + prefix + argname + " = " + text
prefix=", "
elif (default=="" or text != default):
if not text:
text = "<<unspecified>>"
argstr = argstr + prefix + argname + " = " + text
prefix=", "
else:
if text:
argstr = argstr + prefix + text
prefix=", "
return argstr
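    # A small illustration (hypothetical widget values): with std args
    # ("size", text "640", default "") and ("fps", text "25", default "25"),
    # getInstantiation() returns the string 'size = 640' -- an argument still
    # equal to its non-empty default is skipped, while a blank argument with
    # no default would appear as 'name = <<unspecified>>'. getDef() bundles
    # this string with the class and module names for the code generator.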
| apache-2.0 |
NoahFlowa/glowing-spoon | forms.py | 2 | 1139 | from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, Length
class SignupForm(Form):
first_name = StringField('First name', validators=[DataRequired("Please enter your first name.")])
last_name = StringField('Last name', validators=[DataRequired("Please enter your last name.")])
email = StringField('Email', validators=[DataRequired("Please enter your email address."), Email("Please enter your email address.")])
password = PasswordField('Password', validators=[DataRequired("Please enter a password."), Length(min=6, message="Passwords must be 6 characters or more.")])
submit = SubmitField('Sign up')
class LoginForm(Form):
email = StringField('Email', validators=[DataRequired("Please enter your email address."), Email("Please enter your email address.")])
password = PasswordField('Password', validators=[DataRequired("Please enter a password.")])
submit = SubmitField("Sign in")
class AddressForm(Form):
address = StringField('Address', validators=[DataRequired("Please enter an address.")])
submit = SubmitField("Search") | apache-2.0 |
tangfeng1/flask | flask/helpers.py | 133 | 36499 | # -*- coding: utf-8 -*-
"""
flask.helpers
~~~~~~~~~~~~~
Implements various helpers.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper
try:
from werkzeug.urls import url_quote
except ImportError:
from urlparse import quote as url_quote
from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound
# this was moved in 0.7
try:
from werkzeug.wsgi import wrap_file
except ImportError:
from werkzeug.utils import wrap_file
from jinja2 import FileSystemLoader
from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
current_app, request
from ._compat import string_types, text_type
# sentinel
_missing = object()
# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def stream_with_context(generator_or_function):
"""Request contexts disappear when the response is started on the server.
This is done for efficiency reasons and to make it less likely to encounter
memory leaks with badly written WSGI middlewares. The downside is that if
you are using streamed responses, the generator cannot access request bound
information any more.
This function however can help you keep the context around for longer::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
@stream_with_context
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(generate())
Alternatively it can also be used around a specific generator::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(stream_with_context(generate()))
.. versionadded:: 0.9
"""
try:
gen = iter(generator_or_function)
except TypeError:
def decorator(*args, **kwargs):
gen = generator_or_function()
return stream_with_context(gen)
return update_wrapper(decorator, generator_or_function)
def generator():
ctx = _request_ctx_stack.top
if ctx is None:
raise RuntimeError('Attempted to stream with context but '
'there was no context in the first place to keep around.')
with ctx:
# Dummy sentinel. Has to be inside the context block or we're
# not actually keeping the context around.
yield None
# The try/finally is here so that if someone passes a WSGI level
# iterator in we're still running the cleanup logic. Generators
# don't need that because they are closed on their destruction
# automatically.
try:
for item in gen:
yield item
finally:
if hasattr(gen, 'close'):
gen.close()
# The trick is to start the generator. Then the code execution runs until
# the first dummy None is yielded at which point the context was already
# pushed. This item is discarded. Then when the iteration continues the
# real generator is executed.
wrapped_g = generator()
next(wrapped_g)
return wrapped_g
def make_response(*args):
"""Sometimes it is necessary to set additional headers in a view. Because
views do not have to return response objects but can return a value that
is converted into a response object by Flask itself, it becomes tricky to
add headers to it. This function can be called instead of using a return
and you will get a response object which you can use to attach headers.
If view looked like this and you want to add a new header::
def index():
return render_template('index.html', foo=42)
You can now do something like this::
def index():
response = make_response(render_template('index.html', foo=42))
response.headers['X-Parachutes'] = 'parachutes are cool'
return response
This function accepts the very same arguments you can return from a
view function. This for example creates a response with a 404 error
code::
response = make_response(render_template('not_found.html'), 404)
The other use case of this function is to force the return value of a
view function into a response which is helpful with view
decorators::
response = make_response(view_function())
response.headers['X-Parachutes'] = 'parachutes are cool'
Internally this function does the following things:
- if no arguments are passed, it creates a new response argument
- if one argument is passed, :meth:`flask.Flask.make_response`
is invoked with it.
- if more than one argument is passed, the arguments are passed
to the :meth:`flask.Flask.make_response` function as tuple.
.. versionadded:: 0.6
"""
if not args:
return current_app.response_class()
if len(args) == 1:
args = args[0]
return current_app.make_response(args)
def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is ``None``, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
function results in a :exc:`~werkzeug.routing.BuildError` when the current
app does not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
it is not ``None``, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.url_build_error_handlers.append(external_url_handler)
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to ``True``, an absolute URL is generated. Server
address can be changed via ``SERVER_NAME`` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
behavior uses the same scheme as the current request, or
``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
request context is available. As of Werkzeug 0.10, this also can be set
to an empty string to build protocol-relative URLs.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to generate a URL without the '
'application context being pushed. This has to be '
'executed when application context is available.')
# If request specific information is available we have some extra
# features that support "relative" URLs.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if not reqctx.request._is_old_module:
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint
else:
endpoint = endpoint[1:]
else:
# TODO: get rid of this deprecated functionality in 1.0
if '.' not in endpoint:
if blueprint_name is not None:
endpoint = blueprint_name + '.' + endpoint
elif endpoint.startswith('.'):
endpoint = endpoint[1:]
external = values.pop('_external', False)
# Otherwise go with the url adapter from the appctx and make
# the URLs external by default.
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL '
'adapter for request independent URL generation. '
'You might be able to fix this by setting '
'the SERVER_NAME config variable.')
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True')
url_adapter.url_scheme = scheme
try:
rv = url_adapter.build(endpoint, values, method=method,
force_external=external)
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
return appctx.app.handle_url_build_error(error, endpoint, values)
if anchor is not None:
rv += '#' + url_quote(anchor)
return rv
def get_template_attribute(template_name, attribute):
"""Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named :file:`_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable of macro to access
"""
return getattr(current_app.jinja_env.get_template(template_name).module,
attribute)
def flash(message, category='message'):
"""Flashes a message to the next request. In order to remove the
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
:param category: the category for the message. The following values
are recommended: ``'message'`` for any kind of message,
``'error'`` for errors, ``'info'`` for information
messages and ``'warning'`` for warnings. However any
kind of string can be used as category.
"""
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
#
# This assumed that changes made to mutable structures in the session are
# are always in sync with the session object, which is not true for session
# implementations that use external storage for keeping their keys/values.
flashes = session.get('_flashes', [])
flashes.append((category, message))
session['_flashes'] = flashes
message_flashed.send(current_app._get_current_object(),
message=message, category=category)
def get_flashed_messages(with_categories=False, category_filter=[]):
"""Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to ``True``, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
* `with_categories` controls whether categories are returned with message
text (``True`` gives a tuple, where ``False`` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
See :ref:`message-flashing-pattern` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to ``True`` to also receive categories.
:param category_filter: whitelist of categories to limit return values
"""
flashes = _request_ctx_stack.top.flashes
if flashes is None:
_request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
if '_flashes' in session else []
if category_filter:
flashes = list(filter(lambda f: f[0] in category_filter, flashes))
if not with_categories:
return [x[1] for x in flashes]
return flashes
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
attachment_filename=None, add_etags=True,
cache_timeout=None, conditional=False):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an ``X-Sendfile`` header. This however
requires support of the underlying webserver for ``X-Sendfile``.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
Please never pass filenames to this function from user sources;
you should use :func:`send_from_directory` instead.
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
:param filename_or_fp: the filename of the file to send in `latin-1`.
This is relative to the :attr:`~Flask.root_path`
if a relative path is specified.
Alternatively a file object might be provided in
which case ``X-Sendfile`` might not work and fall
back to the traditional method. Make sure that the
file pointer is positioned at the start of data to
send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to ``True`` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to ``False`` to disable attaching of etags.
:param conditional: set to ``True`` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When ``None``
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
"""
mtime = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
file = None
else:
from warnings import warn
file = filename_or_fp
filename = getattr(file, 'name', None)
# XXX: this behavior is now deprecated because it was unreliable.
# removed in Flask 1.0
if not attachment_filename and not mimetype \
and isinstance(filename, string_types):
warn(DeprecationWarning('The filename support for file objects '
'passed to send_file is now deprecated. Pass an '
'attach_filename if you want mimetypes to be guessed.'),
stacklevel=2)
if add_etags:
warn(DeprecationWarning('In future flask releases etags will no '
'longer be generated for file objects passed to the send_file '
'function because this behavior was unreliable. Pass '
'filenames instead if possible, otherwise attach an etag '
'yourself based on another value'), stacklevel=2)
if filename is not None:
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if mimetype is None and (filename or attachment_filename):
mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = Headers()
if as_attachment:
if attachment_filename is None:
if filename is None:
raise TypeError('filename unavailable, required for '
'sending as attachment')
attachment_filename = os.path.basename(filename)
headers.add('Content-Disposition', 'attachment',
filename=attachment_filename)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers['X-Sendfile'] = filename
headers['Content-Length'] = os.path.getsize(filename)
data = None
else:
if file is None:
file = open(filename, 'rb')
mtime = os.path.getmtime(filename)
headers['Content-Length'] = os.path.getsize(filename)
data = wrap_file(request.environ, file)
rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
# if we know the file modification date, we can store it as
# the time of the last modification.
if mtime is not None:
rv.last_modified = int(mtime)
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
try:
rv.set_etag('flask-%s-%s-%s' % (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode('utf-8') if isinstance(filename, text_type)
else filename
) & 0xffffffff
))
except OSError:
warn('Access %s failed, maybe it does not exist, so ignore etags in '
'headers' % filename, stacklevel=2)
if conditional:
rv = rv.make_conditional(request)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
def safe_join(directory, filename):
"""Safely join `directory` and `filename`.
Example usage::
@app.route('/wiki/<path:filename>')
def wiki_page(filename):
filename = safe_join(app.config['WIKI_FOLDER'], filename)
with open(filename, 'rb') as fd:
content = fd.read() # Read and process the file content...
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
:raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
would fall out of `directory`.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
raise NotFound()
if os.path.isabs(filename) or \
filename == '..' or \
filename.startswith('../'):
raise NotFound()
return os.path.join(directory, filename)
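# A quick illustration of safe_join (hypothetical paths): given the base
# directory '/srv/wiki', safe_join('/srv/wiki', 'pages/home.rst') returns
# '/srv/wiki/pages/home.rst', while safe_join('/srv/wiki', '../etc/passwd')
# raises NotFound because the normalized filename starts with '../'.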
def send_from_directory(directory, filename, **options):
"""Send a file from a given directory with :func:`send_file`. This
is a secure way to quickly expose static files from an upload folder
or something similar.
Example usage::
@app.route('/uploads/<path:filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
.. admonition:: Sending files and Performance
It is strongly recommended to activate either ``X-Sendfile`` support in
your webserver or (if no authentication happens) to tell the webserver
to serve files for the given path on its own without calling into the
web application for improved performance.
.. versionadded:: 0.5
:param directory: the directory where all the files are stored.
:param filename: the filename relative to that directory to
download.
:param options: optional keyword arguments that are directly
forwarded to :func:`send_file`.
"""
filename = safe_join(directory, filename)
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if not os.path.isfile(filename):
raise NotFound()
options.setdefault('conditional', True)
return send_file(filename, **options)
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
mod = sys.modules[import_name]
filepath = getattr(mod, '__file__', None)
# If we don't have a filepath it might be because we are a
# namespace package. In this case we pick the root path from the
# first module that is contained in our package.
if filepath is None:
raise RuntimeError('No root path can be found for the provided '
'module "%s". This can happen because the '
'module came from an import hook that does '
'not provide file name information or because '
'it\'s a namespace package. In this case '
'the root path needs to be explicitly '
'provided.' % import_name)
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
def _matching_loader_thinks_module_is_package(loader, mod_name):
"""Given the loader that loaded a module and the module this function
attempts to figure out if the given module is actually a package.
"""
# If the loader can tell us if something is a package, we can
# directly ask the loader.
if hasattr(loader, 'is_package'):
return loader.is_package(mod_name)
# importlib's namespace loaders do not have this functionality but
# all the modules it loads are packages, so we can take advantage of
# this information.
elif (loader.__class__.__module__ == '_frozen_importlib' and
loader.__class__.__name__ == 'NamespaceLoader'):
return True
# Otherwise we need to fail with an error that explains what went
# wrong.
raise AttributeError(
('%s.is_package() method is missing but is required by Flask of '
'PEP 302 import hooks. If you do not use import hooks and '
'you encounter this error please file a bug against Flask.') %
loader.__class__.__name__)
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
            # zipimporter's loader.archive points to the .egg or .zip archive;
            # the filename part is dropped in the call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
        # In case the root module is a package we need to chop off the
# rightmost part. This needs to go through a helper function
# because of python 3.3 namespace packages.
if _matching_loader_thinks_module_is_package(
loader, root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
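# Illustrative return values (the paths below are assumptions, not real
# output from any particular system):
#
#   find_package('flask')     # e.g. ('/usr', '/usr/lib/python2.7/site-packages')
#   find_package('myscript')  # e.g. (None, '/home/user/project') when not installed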
class locked_cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
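# Minimal usage sketch (the class and helper below are hypothetical); the
# wrapped function runs once per instance, guarded by the RLock, and the
# result is cached in the instance __dict__ afterwards:
#
#   class Service(object):
#       @locked_cached_property
#       def connection(self):
#           return create_expensive_connection()  # evaluated at most once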
class _PackageBoundObject(object):
def __init__(self, import_name, template_folder=None, root_path=None):
        #: The name of the package or module. Do not change this once
        #: it has been set by the constructor.
self.import_name = import_name
#: location of the templates. ``None`` if templates should not be
#: exposed.
self.template_folder = template_folder
if root_path is None:
root_path = get_root_path(self.import_name)
#: Where is the app root located?
self.root_path = root_path
self._static_folder = None
self._static_url_path = None
def _get_static_folder(self):
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
def _set_static_folder(self, value):
self._static_folder = value
static_folder = property(_get_static_folder, _set_static_folder, doc='''
The absolute path to the configured static folder.
''')
del _get_static_folder, _set_static_folder
def _get_static_url_path(self):
if self._static_url_path is not None:
return self._static_url_path
if self.static_folder is not None:
return '/' + os.path.basename(self.static_folder)
def _set_static_url_path(self, value):
self._static_url_path = value
static_url_path = property(_get_static_url_path, _set_static_url_path)
del _get_static_url_path, _set_static_url_path
@property
def has_static_folder(self):
"""This is ``True`` if the package bound object's container has a
folder for static files.
.. versionadded:: 0.5
"""
return self.static_folder is not None
@locked_cached_property
def jinja_loader(self):
"""The Jinja loader for this package bound object.
.. versionadded:: 0.5
"""
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path,
self.template_folder))
def get_send_file_max_age(self, filename):
"""Provides default cache_timeout for the :func:`send_file` functions.
By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
the configuration of :data:`~flask.current_app`.
Static file functions such as :func:`send_from_directory` use this
function, and :func:`send_file` calls this function on
:data:`~flask.current_app` when the given cache_timeout is ``None``. If a
cache_timeout is given in :func:`send_file`, that timeout is used;
otherwise, this method is called.
This allows subclasses to change the behavior when sending files based
on the filename. For example, to set the cache timeout for .js files
to 60 seconds::
class MyFlask(flask.Flask):
def get_send_file_max_age(self, name):
if name.lower().endswith('.js'):
return 60
return flask.Flask.get_send_file_max_age(self, name)
.. versionadded:: 0.9
"""
return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']
def send_static_file(self, filename):
"""Function used internally to send static files from the static
folder to the browser.
.. versionadded:: 0.5
"""
if not self.has_static_folder:
raise RuntimeError('No static folder for this object')
# Ensure get_send_file_max_age is called in all cases.
# Here, we ensure get_send_file_max_age is called for Blueprints.
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.static_folder, filename,
cache_timeout=cache_timeout)
def open_resource(self, resource, mode='rb'):
"""Opens a resource from the application's resource folder. To see
how this works, consider the following folder structure::
/myapplication.py
/schema.sql
/static
/style.css
/templates
/layout.html
/index.html
If you want to open the :file:`schema.sql` file you would do the
following::
with app.open_resource('schema.sql') as f:
contents = f.read()
do_something_with(contents)
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
if mode not in ('r', 'rb'):
raise ValueError('Resources can only be opened for reading')
return open(os.path.join(self.root_path, resource), mode)
| bsd-3-clause |
AlphaX2/FotoShareN9 | 1.6.1/fotoshare/opt/FotoShareN9/plugins/flickr/libs/flickrapi/reportinghttp.py | 10 | 2712 | # -*- encoding: utf-8 -*-
'''HTTPHandler that supports a callback method for progress reports.
'''
import urllib2
import httplib
import logging
__all__ = ['urlopen']
logging.basicConfig()
LOG = logging.getLogger(__name__)
progress_callback = None
class ReportingSocket(object):
'''Wrapper around a socket. Gives progress report through a
callback function.
'''
min_chunksize = 10240
def __init__(self, socket):
self.socket = socket
def sendall(self, bits):
'''Sends all data, calling the callback function for every
sent chunk.
'''
LOG.debug("SENDING: %s..." % bits[0:30])
total = len(bits)
sent = 0
chunksize = max(self.min_chunksize, total // 100)
while len(bits) > 0:
send = bits[0:chunksize]
self.socket.sendall(send)
sent += len(send)
if progress_callback:
progress = float(sent) / total * 100
progress_callback(progress, sent == total)
bits = bits[chunksize:]
def makefile(self, mode, bufsize):
'''Returns a file-like object for the socket.'''
return self.socket.makefile(mode, bufsize)
def close(self):
'''Closes the socket.'''
return self.socket.close()
class ProgressHTTPConnection(httplib.HTTPConnection):
'''HTTPConnection that gives regular progress reports during
sending of data.
'''
def connect(self):
'''Connects to a HTTP server.'''
httplib.HTTPConnection.connect(self)
self.sock = ReportingSocket(self.sock)
class ProgressHTTPHandler(urllib2.HTTPHandler):
'''HTTPHandler that gives regular progress reports during sending
of data.
'''
def http_open(self, req):
return self.do_open(ProgressHTTPConnection, req)
def set_callback(method):
'''Sets the callback function to use for progress reports.'''
global progress_callback # IGNORE:W0603
if not hasattr(method, '__call__'):
raise ValueError('Callback method must be callable')
progress_callback = method
def urlopen(url_or_request, callback, body=None):
'''Opens an URL using the ProgressHTTPHandler.'''
set_callback(callback)
opener = urllib2.build_opener(ProgressHTTPHandler)
return opener.open(url_or_request, body)
if __name__ == '__main__':
def upload(progress, finished):
'''Upload progress demo'''
LOG.info("%3.0f - %s" % (progress, finished))
    conn = urlopen("http://www.flickr.com/", upload, 'x' * 10245)
data = conn.read()
LOG.info("Read data")
print data[:100].split('\n')[0]
| gpl-3.0 |
CyrilPeponnet/Archipel | ArchipelAgent/archipel-agent-vmparking/setup.py | 4 | 3362 | #
# setup.py
#
# Copyright (C) 2010 Antoine Mercadal <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
VERSION = '0.6.0'
AUTHOR = 'Antoine Mercadal'
MAIL = '[email protected]'
URL = 'http://archipelproject.org'
LICENSE = 'AGPL'
NAME = 'archipel-agent-vmparking'
SHORTDESCRIPTION = "Handle the virtual machine parking"
LONGDESCRIPTION = ""
ENTRY_POINTS = { 'archipel.plugin.hypervisor' : [
'factory=archipelagentvmparking:make_archipel_plugin'],
'archipel.plugin.virtualmachine' : [
'factory=archipelagentvmparking:make_archipel_plugin'],
'archipel.plugin' : [
'version=archipelagentvmparking:version']}
RPM_REQUIRED_DEPS = "archipel-core"
RPM_POST_INSTALL = "%post\narchipel-initinstall -m {0}\n".format(NAME)
## HACK FOR DEPS IN RPMS
from setuptools.command.bdist_rpm import bdist_rpm
def custom_make_spec_file(self):
spec = self._original_make_spec_file()
lineDescription = "%description"
spec.insert(spec.index(lineDescription) - 1, "requires: %s" % RPM_REQUIRED_DEPS)
spec.append(RPM_POST_INSTALL)
return spec
bdist_rpm._original_make_spec_file = bdist_rpm._make_spec_file
bdist_rpm._make_spec_file = custom_make_spec_file
## END OF HACK
setup(name=NAME,
version=VERSION,
description=SHORTDESCRIPTION,
long_description=LONGDESCRIPTION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Internet',
'Topic :: System :: Emulators',
'Topic :: System :: Operating System'],
keywords='archipel, virtualization, libvirt, orchestration',
author=AUTHOR,
author_email=MAIL,
url=URL,
license=LICENSE,
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
"archipel-core>=0.6.0beta"
],
entry_points=ENTRY_POINTS
)
| agpl-3.0 |
pantaloons/4charm | libvpx/tools/intersect-diffs.py | 98 | 2364 | #!/usr/bin/env python
## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
"""Calculates the "intersection" of two unified diffs.
Given two diffs, A and B, it finds all hunks in B that had non-context lines
in A and prints them to stdout. This is useful to determine the hunks in B that
are relevant to A. The resulting file can be applied with patch(1) on top of A.
"""
__author__ = "[email protected]"
import sys
import diff
def FormatDiffHunks(hunks):
"""Re-serialize a list of DiffHunks."""
r = []
last_header = None
for hunk in hunks:
this_header = hunk.header[0:2]
if last_header != this_header:
r.extend(hunk.header)
last_header = this_header
else:
r.extend(hunk.header[2])
r.extend(hunk.lines)
r.append("\n")
return "".join(r)
def ZipHunks(rhs_hunks, lhs_hunks):
"""Join two hunk lists on filename."""
for rhs_hunk in rhs_hunks:
rhs_file = rhs_hunk.right.filename.split("/")[1:]
for lhs_hunk in lhs_hunks:
lhs_file = lhs_hunk.left.filename.split("/")[1:]
if lhs_file != rhs_file:
continue
yield (rhs_hunk, lhs_hunk)
def main():
old_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[1], "r"))]
new_hunks = [x for x in diff.ParseDiffHunks(open(sys.argv[2], "r"))]
out_hunks = []
# Join the right hand side of the older diff with the left hand side of the
# newer diff.
for old_hunk, new_hunk in ZipHunks(old_hunks, new_hunks):
if new_hunk in out_hunks:
continue
old_lines = old_hunk.right
new_lines = new_hunk.left
# Determine if this hunk overlaps any non-context line from the other
for i in old_lines.delta_line_nums:
if i in new_lines:
out_hunks.append(new_hunk)
break
if out_hunks:
print FormatDiffHunks(out_hunks)
sys.exit(1)
if __name__ == "__main__":
main()
| mit |
SatelliteQE/robottelo | tests/foreman/api/test_hostcollection.py | 1 | 14844 | """Unit tests for host collections.
:Requirement: Hostcollection
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: HostCollections
:Assignee: swadeley
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from random import choice
from random import randint
import pytest
from broker import VMBroker
from nailgun import entities
from requests.exceptions import HTTPError
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import parametrized
from robottelo.datafactory import valid_data_list
from robottelo.hosts import ContentHost
@pytest.fixture(scope='module')
def fake_hosts(module_org):
"""Create content hosts that can be shared by tests."""
hosts = [entities.Host(organization=module_org).create() for _ in range(2)]
return hosts
@pytest.mark.parametrize('name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_create_with_name(module_org, name):
"""Create host collections with different names.
:id: 8f2b9223-f5be-4cb1-8316-01ea747cae14
:parametrized: yes
:expectedresults: The host collection was successfully created and has
appropriate name.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(name=name, organization=module_org).create()
assert host_collection.name == name
@pytest.mark.tier1
def test_positive_list(module_org):
"""Create new host collection and then retrieve list of all existing
host collections
:id: 6ae32df2-b917-4830-8709-15fb272b76c1
:BZ: 1331875
:expectedresults: Returned list of host collections for the system
contains at least one collection
:CaseImportance: Critical
"""
entities.HostCollection(organization=module_org).create()
hc_list = entities.HostCollection().search()
assert len(hc_list) >= 1
@pytest.mark.tier1
def test_positive_list_for_organization():
"""Create host collection for specific organization. Retrieve list of
host collections for that organization
:id: 5f9de8ab-2c53-401b-add3-57d86c97563a
:expectedresults: The host collection was successfully created and
present in the list of collections for specific organization
:CaseImportance: Critical
"""
org = entities.Organization().create()
hc = entities.HostCollection(organization=org).create()
hc_list = entities.HostCollection(organization=org).search()
assert len(hc_list) == 1
assert hc_list[0].id == hc.id
@pytest.mark.parametrize('desc', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_create_with_description(module_org, desc):
"""Create host collections with different descriptions.
:id: 9d13392f-8d9d-4ff1-8909-4233e4691055
:parametrized: yes
:expectedresults: The host collection was successfully created and has
appropriate description.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(description=desc, organization=module_org).create()
assert host_collection.description == desc
@pytest.mark.tier1
def test_positive_create_with_limit(module_org):
"""Create host collections with different limits.
:id: 86d9387b-7036-4794-96fd-5a3472dd9160
:expectedresults: The host collection was successfully created and has
appropriate limit.
:CaseImportance: Critical
"""
for _ in range(5):
limit = randint(1, 30)
host_collection = entities.HostCollection(max_hosts=limit, organization=module_org).create()
assert host_collection.max_hosts == limit
@pytest.mark.parametrize("unlimited", [False, True])
@pytest.mark.tier1
def test_positive_create_with_unlimited_hosts(module_org, unlimited):
"""Create host collection with different values of 'unlimited hosts'
parameter.
:id: d385574e-5794-4442-b6cd-e5ded001d877
:parametrized: yes
:expectedresults: The host collection was successfully created and has
appropriate 'unlimited hosts' parameter value.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(
max_hosts=None if unlimited else 1,
organization=module_org,
unlimited_hosts=unlimited,
).create()
assert host_collection.unlimited_hosts == unlimited
@pytest.mark.tier1
def test_positive_create_with_host(module_org, fake_hosts):
"""Create a host collection that contains a host.
:id: 9dc0ad72-58c2-4079-b1ca-2c4373472f0f
:expectedresults: The host collection can be read back, and it includes
one host.
:CaseImportance: Critical
:BZ: 1325989
"""
host_collection = entities.HostCollection(
host=[fake_hosts[0]], organization=module_org
).create()
assert len(host_collection.host) == 1
@pytest.mark.tier1
def test_positive_create_with_hosts(module_org, fake_hosts):
"""Create a host collection that contains hosts.
:id: bb8d2b42-9a8b-4c4f-ba0c-c56ae5a7eb1d
:expectedresults: The host collection can be read back, and it
references two hosts.
:CaseImportance: Critical
:BZ: 1325989
"""
host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create()
assert len(host_collection.host) == len(fake_hosts)
@pytest.mark.tier2
def test_positive_add_host(module_org, fake_hosts):
"""Add a host to host collection.
:id: da8bc901-7ac8-4029-bb62-af21aa4d3a88
:expectedresults: Host was added to the host collection.
:CaseLevel: Integration
    :BZ: 1325989
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.host_ids = [fake_hosts[0].id]
host_collection = host_collection.update(['host_ids'])
assert len(host_collection.host) == 1
@pytest.mark.upgrade
@pytest.mark.tier2
def test_positive_add_hosts(module_org, fake_hosts):
"""Add hosts to host collection.
:id: f76b4db1-ccd5-47ab-be15-8c7d91d03b22
:expectedresults: Hosts were added to the host collection.
:CaseLevel: Integration
:BZ: 1325989
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_ids = [str(host.id) for host in fake_hosts]
host_collection.host_ids = host_ids
host_collection = host_collection.update(['host_ids'])
assert len(host_collection.host) == len(fake_hosts)
@pytest.mark.tier1
def test_positive_read_host_ids(module_org, fake_hosts):
"""Read a host collection and look at the ``host_ids`` field.
:id: 444a1528-64c8-41b6-ba2b-6c49799d5980
:expectedresults: The ``host_ids`` field matches the host IDs passed in
when creating the host collection.
:CaseImportance: Critical
    :BZ: 1325989
"""
host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create()
assert frozenset(host.id for host in host_collection.host) == frozenset(
host.id for host in fake_hosts
)
@pytest.mark.parametrize('new_name', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_update_name(module_org, new_name):
"""Check if host collection name can be updated
:id: b2dedb99-6dd7-41be-8aaa-74065c820ac6
:parametrized: yes
:expectedresults: Host collection name was successfully updated
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.name = new_name
assert host_collection.update().name == new_name
@pytest.mark.parametrize('new_desc', **parametrized(valid_data_list()))
@pytest.mark.tier1
def test_positive_update_description(module_org, new_desc):
"""Check if host collection description can be updated
:id: f8e9bd1c-1525-4b5f-a07c-eb6b6e7aa628
:parametrized: yes
:expectedresults: Host collection description was updated
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.description = new_desc
assert host_collection.update().description == new_desc
@pytest.mark.tier1
def test_positive_update_limit(module_org):
"""Check if host collection limit can be updated
:id: 4eda7796-cd81-453b-9b72-4ef84b2c1d8c
:expectedresults: Host collection limit was updated
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(
max_hosts=1, organization=module_org, unlimited_hosts=False
).create()
for limit in (1, 3, 5, 10, 20):
host_collection.max_hosts = limit
assert host_collection.update().max_hosts == limit
@pytest.mark.tier1
def test_positive_update_unlimited_hosts(module_org):
"""Check if host collection 'unlimited hosts' parameter can be updated
:id: 09a3973d-9832-4255-87bf-f9eaeab4aee8
:expectedresults: Host collection 'unlimited hosts' parameter was
updated
:CaseImportance: Critical
"""
random_unlimited = choice([True, False])
host_collection = entities.HostCollection(
max_hosts=1 if not random_unlimited else None,
organization=module_org,
unlimited_hosts=random_unlimited,
).create()
for unlimited in (not random_unlimited, random_unlimited):
host_collection.max_hosts = 1 if not unlimited else None
host_collection.unlimited_hosts = unlimited
host_collection = host_collection.update(['max_hosts', 'unlimited_hosts'])
assert host_collection.unlimited_hosts == unlimited
@pytest.mark.tier1
def test_positive_update_host(module_org, fake_hosts):
"""Update host collection's host.
:id: 23082854-abcf-4085-be9c-a5d155446acb
:expectedresults: The host collection was updated with a new host.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(
host=[fake_hosts[0]], organization=module_org
).create()
host_collection.host_ids = [fake_hosts[1].id]
host_collection = host_collection.update(['host_ids'])
assert host_collection.host[0].id == fake_hosts[1].id
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_update_hosts(module_org, fake_hosts):
"""Update host collection's hosts.
:id: 0433b37d-ae16-456f-a51d-c7b800334861
:expectedresults: The host collection was updated with new hosts.
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(host=fake_hosts, organization=module_org).create()
new_hosts = [entities.Host(organization=module_org).create() for _ in range(2)]
host_ids = [str(host.id) for host in new_hosts]
host_collection.host_ids = host_ids
host_collection = host_collection.update(['host_ids'])
assert {host.id for host in host_collection.host} == {host.id for host in new_hosts}
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_delete(module_org):
"""Check if host collection can be deleted
:id: 13a16cd2-16ce-4966-8c03-5d821edf963b
:expectedresults: Host collection was successfully deleted
:CaseImportance: Critical
"""
host_collection = entities.HostCollection(organization=module_org).create()
host_collection.delete()
with pytest.raises(HTTPError):
host_collection.read()
@pytest.mark.parametrize('name', **parametrized(invalid_values_list()))
@pytest.mark.tier1
def test_negative_create_with_invalid_name(module_org, name):
"""Try to create host collections with different invalid names
:id: 38f67d04-a19d-4eab-a577-21b8d62c7389
:parametrized: yes
:expectedresults: The host collection was not created
:CaseImportance: Critical
"""
with pytest.raises(HTTPError):
entities.HostCollection(name=name, organization=module_org).create()
@pytest.mark.tier1
def test_positive_add_remove_subscription(module_org, module_ak_cv_lce):
"""Try to bulk add and remove a subscription to members of a host collection.
:id: c4ec5727-eb25-452e-a91f-87cafb16666b
:steps:
1. Create HC, add AK to HC
        2. Create a product so we can use its subscription
3. Create some VMs and register them with AK so they are in HC
4. Add the subscription to the members of the Host Collection
5. Assert subscription is added
6. Bulk remove subscription
7. Assert it is removed
:expectedresults: subscription added to, and removed from, members of host collection
:CaseImportance: Critical
"""
# this command creates a host collection and "appends", makes available, to the AK
module_ak_cv_lce.host_collection.append(
entities.HostCollection(organization=module_org).create()
)
# Move HC from Add tab to List tab on AK view
module_ak_cv_lce = module_ak_cv_lce.update(['host_collection'])
# Create a product so we have a subscription to use
product = entities.Product(organization=module_org).create()
prod_name = product.name
product_subscription = entities.Subscription(organization=module_org).search(
query={'search': f'name={prod_name}'}
)[0]
# Create and register VMs as members of Host Collection
with VMBroker(nick='rhel7', host_classes={'host': ContentHost}, _count=2) as hosts:
for client in hosts:
client.install_katello_ca()
client.register_contenthost(module_org.label, module_ak_cv_lce.name)
# Read host_collection back from Satellite to get host_ids
host_collection = module_ak_cv_lce.host_collection[0].read()
host_ids = [host.id for host in host_collection.host]
# Add subscription
# Call nailgun to make the API PUT to members of Host Collection
entities.Host().bulk_add_subscriptions(
data={
"organization_id": module_org.id,
"included": {"ids": host_ids},
"subscriptions": [{"id": product_subscription.id, "quantity": 1}],
}
)
# GET the subscriptions from hosts and assert they are there
for host_id in host_ids:
req = entities.HostSubscription(host=host_id).subscriptions()
assert (
prod_name in req['results'][0]['product_name']
), 'Subscription not applied to HC members'
# Remove the subscription
# Call nailgun to make the API PUT to members of Host Collection
entities.Host().bulk_remove_subscriptions(
data={
"organization_id": module_org.id,
"included": {"ids": host_ids},
"subscriptions": [{"id": product_subscription.id, "quantity": 1}],
}
)
# GET the subscriptions from hosts and assert they are gone
for host_id in host_ids:
req = entities.HostSubscription(host=host_id).subscriptions()
assert not req['results'], 'Subscription not removed from HC members'
| gpl-3.0 |
druids/django-chamber | setup.py | 1 | 1138 | from setuptools import setup, find_packages
from chamber.version import get_version
setup(
name='django-chamber',
version=get_version(),
description='Utilities library meant as a complement to django-is-core.',
author='Lubos Matl, Oskar Hollmann',
author_email='[email protected], [email protected]',
url='http://github.com/druids/django-chamber',
packages=find_packages(include=['chamber']),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
],
install_requires=[
'Django>=2.2',
'Unidecode>=1.1.1',
'pyprind>=2.11.2',
'filemagic>=1.6',
],
extras_require={
'boto3storage': ['django-storages<2.0', 'boto3'],
},
)
| bsd-3-clause |
rgom/Pydev | plugins/org.python.pydev.jython/Lib/cmd.py | 145 | 15026 | """A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is string we are matching
against, all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = 1
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
import sys
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
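    # Illustrative behaviour (hypothetical input): parseline('greet Alice')
    # returns ('greet', 'Alice', 'greet Alice'); a leading '?' is rewritten to
    # the 'help' command and a leading '!' to 'shell' when do_shell exists.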
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF' :
self.lastcmd = ''
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s\n'%line)
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
import readline
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
# This method used to pull in base class attributes
# at a time dir() didn't do it yet.
return dir(self.__class__)
def complete_help(self, *args):
commands = set(self.completenames(*args))
topics = set(a[5:] for a in self.get_names()
if a.startswith('help_' + args[0]))
return list(commands | topics)
def do_help(self, arg):
'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, help.keys(),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError, ("list[i] not a string for i in %s" %
", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))
| epl-1.0 |
Jhaefner/PressureDrop | master_example.py | 1 | 2607 | """
@author: Jonah Haefner and Lane Carasik
Title: master_example.py
The purpose of this script is to ensure that the four functions included in this package are functioning properly and to serve as an example of use for the user.
It currently only provides checks for the inline geometry with the fluid at a Reynolds number of 22000.
The expected output is:
Zhukauskas: dP_1 = 21.94 kPa
Gaddis-Gnielinski: dP 2 = 25.67 kPa
Zhukauskas: Nu1 = 142.52
Gaddis-Gnielinski: Nu2 = 147.31
"""
import TORCHE as TE
# Geometric parameters
d = 0.0254 # Outside diameter of tube or cylinder (m)
a = 1.25 # Transverse pitch to diameter ratio
b = 1.25 # Longitudinal pitch to diameter ratio
geom = 'inline' # Tube geometry (inline or staggered)
N_rows = 10 # Number of tube rows
'''
# Fluid thermo-physical properties
rho = 1940 # Density of the working fluid - FLiBe salt (kg/m^3)
mu = 0.0056 # Dynamic visocity of the working fluid - FLiBe salt (Pa-s)
Pr = 1 # Prandtl number of the working fluid
Pr_w = 1 # Prandtl number of the working fluid based on the wall film temperature
'''
# Fluid thermo-physical properties - H2O
rho = 998.6 # Density of the working fluid - water at 20 C (kg/m^3)
mu = 0.00100124 # Dynamic visocity of the working fluid - water 20 C (Pa-s)
Pr = 6.99 # Prandtl number of the working fluid
Pr_w = 6.99 # Prandtl number of the working fluid based on the wall film temperature
# Flow behavior
vel = 0.5 # Free-stream velocity before interacting with the tube bank (m/s)
v_max = vel*(a/(a-1)) # Maximum velocity, based on the minimum area between the tubes (m/s)
Re = rho*v_max*d/mu # Reynolds number of the flow based on the maximum velocity in the minimum area between tubes
# Expected Results
dP_Zu_Ex = 21.94 # Expected Zukauskas results for Pressure drop (kPa)
dP_GG_Ex = 25.67 # Expected Gaddis-Gnielinski results for Pressure drop (kPa)
Nu_Zu_Ex = 142.52 # Expected Zukauskas results for Nusselt Number
Nu_GG_Ex = 147.31 # Expected Gaddis-Gnielinski results for Nusselt Number
dP_1 = TE.dP_Zu(rho,a,b,geom,N_rows,vel,Re)
print('The Pressure Drop calculated by Zukauskas is',round(dP_1/1000,2),'kPa')
dP_2 = TE.dP_GG(rho,a,b,geom,N_rows,vel,Re,Return="")
print('The Pressure Drop calculated by Gaddis-Gnielinski is',round(dP_2/1000,2),'kPa')
Nu_1 = TE.HT_Zu(rho,Pr,Pr_w,a,b,d,geom,N_rows,vel,Re)
print('The Nusselt Number calculated by Zukauskas is', round(Nu_1,2))
Nu_2 = TE.HT_GG(rho,Pr,a,b,d,geom,N_rows,vel,Re)
print('The Nusselt Number calculated by Gaddis-Gnielinski is', round(Nu_2,2))
| mit |
krkhan/azure-linux-extensions | OSPatching/test/FakePatching3.py | 8 | 1623 | #!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from AbstractPatching import AbstractPatching
sys.path.append('../patch')
class FakePatching(AbstractPatching):
def __init__(self, hutil=None):
super(FakePatching,self).__init__(hutil)
self.pkg_query_cmd = 'dpkg-query -L'
self.gap_between_stage = 20
self.download_duration = 60
self.security_download_list = ['a', 'b', 'c', 'd', 'e']
self.all_download_list = ['1', '2', '3', '4', 'a', 'b', 'c', 'd', 'e']
def install(self):
"""
Install for dependencies.
"""
pass
def check(self, category):
"""
Check valid upgrades,
Return the package list to download & upgrade
"""
if category == 'important':
return 0, self.security_download_list
else:
return 0, self.all_download_list
def download_package(self, package):
return 0
def patch_package(self, package):
return 0
def check_reboot(self):
return False
| apache-2.0 |
hehongliang/tensorflow | tensorflow/contrib/specs/python/summaries_test.py | 25 | 3070 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for specs-related summarization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.specs.python import specs
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SummariesTest(test.TestCase):
def testStructure(self):
with self.cached_session():
inputs_shape = (1, 18, 19, 5)
inputs = constant_op.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(
spec, input_shape=inputs_shape),
"_ variablev2 conv variablev2 biasadd relu")
def testStructureFromTensor(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(
summaries.tf_spec_structure(spec, inputs),
"_ variablev2 conv variablev2 biasadd relu")
def testPrint(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
with self.cached_session():
inputs = constant_op.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_summary(spec, inputs)
if __name__ == "__main__":
test.main()
| apache-2.0 |
markYoungH/chromium.src | third_party/closure_linter/closure_linter/not_strict_test.py | 129 | 2318 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| bsd-3-clause |
gertingold/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Y.py | 47 | 2514 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, cos, pi
from .go_benchmark import Benchmark
class YaoLiu04(Benchmark):
r"""
Yao-Liu 4 objective function.
This class defines the Yao-Liu function 4 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{YaoLiu04}}(x) = {max}_i \left\{ \left | x_i \right | ,
1 \leq i \leq n \right\}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO line 1201. Gavana code and documentation differ.
max(abs(x)) != abs(max(x))
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return abs(x).max()
class YaoLiu09(Benchmark):
r"""
Yao-Liu 9 objective function.
This class defines the Yao-Liu [1]_ function 9 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{YaoLiu09}}(x) = \sum_{i=1}^n \left [ x_i^2
- 10 \cos(2 \pi x_i ) + 10 \right ]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO Yao-Liu Fast Evolutionary programming is the the original ref.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return sum(x ** 2.0 - 10.0 * cos(2 * pi * x) + 10)
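# Quick sanity check (assumes ``import numpy as np``), consistent with the
# stated global optimum f(0, ..., 0) = 0 for both benchmarks above:
#
#   b = YaoLiu09()
#   b.fun(np.zeros(b.N))   # -> 0.0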
| bsd-3-clause |
achals/servo | tests/wpt/web-platform-tests/tools/py/py/_path/local.py | 171 | 32118 | """
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
        return S_ISLNK(st.mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
elif isinstance(path, common.PathBase):
self.strpath = path.strpath
elif isinstance(path, py.builtin._basestring):
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
else:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = self.strpath
s2 = getattr(other, "strpath", other)
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.strpath < getattr(other, "strpath", other)
def __gt__(self, other):
return self.strpath > getattr(other, "strpath", other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = getattr(other, "strpath", other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
                    self.chmod(448, rec=1) # octal 0700
py.error.checked_call(py.std.shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
                self.chmod(448) # octal 0700
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
        components. If abs=1 is used, restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [getattr(arg, "strpath", arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + sep + arg
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False):
""" copy path to target."""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
def rename(self, target):
""" rename this path to target. """
target = getattr(target, "strpath", target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
try:
py.error.checked_call(py.std.pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, getattr(p, "strpath", p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
                           # be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
            cannot be found, None is returned.
            Note: This probably does not work on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = py.std.os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [re.sub('%SystemRoot%', systemroot, path)
for path in paths]
else:
paths = py.std.os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
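    # Note: sysfind() honours PATHEXT on windows and skips unreadable
    # candidates (EACCES) instead of raising.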
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
return py.path.local(py.std.tempfile.gettempdir())
get_temproot = classmethod(get_temproot)
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
            (which we created ourselves).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
mkdtemp = classmethod(mkdtemp)
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed.
"""
if rootdir is None:
rootdir = cls.get_temproot()
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
bn = path.basename
if bn.startswith(prefix):
try:
return int(bn[len(prefix):])
except ValueError:
pass
# compute the maximum number currently in use with the
# prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
except py.error.EEXIST:
# race condition: another thread/process created the dir
# in the meantime. Try counting again
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
# put a .lock file in the new directory that will be removed at
# process exit
if lock_timeout:
lockfile = udir.join('.lock')
mypid = os.getpid()
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
lockfile.write(str(mypid))
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# prune old directories
if keep:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
lf = path.join('.lock')
try:
t1 = lf.lstat().mtime
t2 = lockfile.lstat().mtime
if not lock_timeout or abs(t2-t1) < lock_timeout:
continue # skip directories still locked
except py.error.Error:
pass # assume that it means that there is no 'lf'
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
def copymode(src, dest):
py.std.shutil.copymode(src, dest)
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
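# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the library): a minimal, illustrative demo
# of the LocalPath API defined above. File names and contents are made up; the
# guard keeps it from running on import.
if __name__ == "__main__":
    demo_dir = LocalPath.mkdtemp()               # fresh temporary directory
    demo_file = demo_dir.join("demo.txt")        # path arithmetic only, no I/O
    demo_file.write("hello world", ensure=True)  # creates parents if missing
    print(demo_file.computehash("md5"))          # hexdigest of the contents
    print(demo_file.new(ext="rst").basename)     # -> "demo.rst"
    demo_dir.remove(rec=1)                       # remove the whole tree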
| mpl-2.0 |
carolinux/QGIS | scripts/mkuidefaults.py | 23 | 1400 | from PyQt4.QtCore import QCoreApplication, QSettings
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
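# chunks() example (illustrative): list(chunks("abcdef", 4)) -> ["abcd", "ef"]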
QCoreApplication.setOrganizationName( "QGIS" )
QCoreApplication.setOrganizationDomain( "qgis.org" )
QCoreApplication.setApplicationName( "QGIS2" )
s = QSettings()
ba = s.value("/UI/geometry").toByteArray()
f = open("src/app/ui_defaults.h", "w")
f.write( "#ifndef UI_DEFAULTS_H\n#define UI_DEFAULTS_H\n\nstatic const unsigned char defaultUIgeometry[] =\n{\n" )
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
f.write( "};\n\nstatic const unsigned char defaultUIstate[] =\n{\n" )
ba = s.value("/UI/state").toByteArray()
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
ba = s.value("/Composer/geometry").toByteArray()
f.write( "};\n\nstatic const unsigned char defaultComposerUIgeometry[] =\n{\n" )
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
f.write( "};\n\nstatic const unsigned char defaultComposerUIstate[] =\n{\n" )
ba = s.value("/ComposerUI/state").toByteArray()
for chunk in chunks(ba,16):
f.write( " %s,\n" % ", ".join( map( lambda x : "0x%02x" % ord(x), chunk ) ) )
f.write( "};\n\n#endif // UI_DEFAULTS_H\n" )
f.close()
| gpl-2.0 |
pinterest/pinball | tests/pinball/master/master_handler_test.py | 6 | 3216 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for master handler."""
import sys
import unittest
from pinball.master.master_handler import MasterHandler
from pinball.master.thrift_lib.ttypes import ArchiveRequest
from pinball.master.thrift_lib.ttypes import GroupRequest
from pinball.master.thrift_lib.ttypes import ModifyRequest
from pinball.master.thrift_lib.ttypes import Query
from pinball.master.thrift_lib.ttypes import QueryAndOwnRequest
from pinball.master.thrift_lib.ttypes import QueryRequest
from pinball.master.thrift_lib.ttypes import Token
from tests.pinball.persistence.ephemeral_store import EphemeralStore
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class MasterHandlerTestCase(unittest.TestCase):
def _insert_token(self, handler):
request = ModifyRequest()
token = Token(name='/some_other_dir/some_token', data='some data')
request.updates = [token]
response = handler.modify(request)
self.assertEqual(1, len(response.updates))
return response.updates[0]
def test_archive(self):
handler = MasterHandler(EphemeralStore())
token = self._insert_token(handler)
request = ArchiveRequest()
request.tokens = [token]
handler.archive(request)
# The logic handling the request is tested thoroughly in
# transaction tests. Here we only make sure that the plumbing is in
# place.
def test_group(self):
request = GroupRequest()
request.namePrefix = '/'
handler = MasterHandler(EphemeralStore())
response = handler.group(request)
self.assertEqual(1, len(response.counts))
self.assertEqual(1, response.counts.values()[0])
def test_modify(self):
handler = MasterHandler(EphemeralStore())
self._insert_token(handler)
def test_query(self):
query = Query()
query.namePrefix = ''
query.maxTokens = 10
request = QueryRequest()
request.queries = [query]
handler = MasterHandler(EphemeralStore())
response = handler.query(request)
self.assertEqual(1, len(response.tokens))
def test_query_and_own(self):
query = Query()
query.namePrefix = ''
query.maxTokens = 10
request = QueryAndOwnRequest()
request.owner = 'some_owner'
request.expirationTime = sys.maxint
request.query = query
handler = MasterHandler(EphemeralStore())
response = handler.query_and_own(request)
self.assertEqual(0, len(response.tokens))
| apache-2.0 |
isandlaTech/cohorte-runtime | python/src/lib/python/unidecode/x057.py | 252 | 4631 | data = (
'Guo ', # 0x00
'Yin ', # 0x01
'Hun ', # 0x02
'Pu ', # 0x03
'Yu ', # 0x04
'Han ', # 0x05
'Yuan ', # 0x06
'Lun ', # 0x07
'Quan ', # 0x08
'Yu ', # 0x09
'Qing ', # 0x0a
'Guo ', # 0x0b
'Chuan ', # 0x0c
'Wei ', # 0x0d
'Yuan ', # 0x0e
'Quan ', # 0x0f
'Ku ', # 0x10
'Fu ', # 0x11
'Yuan ', # 0x12
'Yuan ', # 0x13
'E ', # 0x14
'Tu ', # 0x15
'Tu ', # 0x16
'Tu ', # 0x17
'Tuan ', # 0x18
'Lue ', # 0x19
'Hui ', # 0x1a
'Yi ', # 0x1b
'Yuan ', # 0x1c
'Luan ', # 0x1d
'Luan ', # 0x1e
'Tu ', # 0x1f
'Ya ', # 0x20
'Tu ', # 0x21
'Ting ', # 0x22
'Sheng ', # 0x23
'Pu ', # 0x24
'Lu ', # 0x25
'Iri ', # 0x26
'Ya ', # 0x27
'Zai ', # 0x28
'Wei ', # 0x29
'Ge ', # 0x2a
'Yu ', # 0x2b
'Wu ', # 0x2c
'Gui ', # 0x2d
'Pi ', # 0x2e
'Yi ', # 0x2f
'Di ', # 0x30
'Qian ', # 0x31
'Qian ', # 0x32
'Zhen ', # 0x33
'Zhuo ', # 0x34
'Dang ', # 0x35
'Qia ', # 0x36
'Akutsu ', # 0x37
'Yama ', # 0x38
'Kuang ', # 0x39
'Chang ', # 0x3a
'Qi ', # 0x3b
'Nie ', # 0x3c
'Mo ', # 0x3d
'Ji ', # 0x3e
'Jia ', # 0x3f
'Zhi ', # 0x40
'Zhi ', # 0x41
'Ban ', # 0x42
'Xun ', # 0x43
'Tou ', # 0x44
'Qin ', # 0x45
'Fen ', # 0x46
'Jun ', # 0x47
'Keng ', # 0x48
'Tun ', # 0x49
'Fang ', # 0x4a
'Fen ', # 0x4b
'Ben ', # 0x4c
'Tan ', # 0x4d
'Kan ', # 0x4e
'Pi ', # 0x4f
'Zuo ', # 0x50
'Keng ', # 0x51
'Bi ', # 0x52
'Xing ', # 0x53
'Di ', # 0x54
'Jing ', # 0x55
'Ji ', # 0x56
'Kuai ', # 0x57
'Di ', # 0x58
'Jing ', # 0x59
'Jian ', # 0x5a
'Tan ', # 0x5b
'Li ', # 0x5c
'Ba ', # 0x5d
'Wu ', # 0x5e
'Fen ', # 0x5f
'Zhui ', # 0x60
'Po ', # 0x61
'Pan ', # 0x62
'Tang ', # 0x63
'Kun ', # 0x64
'Qu ', # 0x65
'Tan ', # 0x66
'Zhi ', # 0x67
'Tuo ', # 0x68
'Gan ', # 0x69
'Ping ', # 0x6a
'Dian ', # 0x6b
'Gua ', # 0x6c
'Ni ', # 0x6d
'Tai ', # 0x6e
'Pi ', # 0x6f
'Jiong ', # 0x70
'Yang ', # 0x71
'Fo ', # 0x72
'Ao ', # 0x73
'Liu ', # 0x74
'Qiu ', # 0x75
'Mu ', # 0x76
'Ke ', # 0x77
'Gou ', # 0x78
'Xue ', # 0x79
'Ba ', # 0x7a
'Chi ', # 0x7b
'Che ', # 0x7c
'Ling ', # 0x7d
'Zhu ', # 0x7e
'Fu ', # 0x7f
'Hu ', # 0x80
'Zhi ', # 0x81
'Chui ', # 0x82
'La ', # 0x83
'Long ', # 0x84
'Long ', # 0x85
'Lu ', # 0x86
'Ao ', # 0x87
'Tay ', # 0x88
'Pao ', # 0x89
'[?] ', # 0x8a
'Xing ', # 0x8b
'Dong ', # 0x8c
'Ji ', # 0x8d
'Ke ', # 0x8e
'Lu ', # 0x8f
'Ci ', # 0x90
'Chi ', # 0x91
'Lei ', # 0x92
'Gai ', # 0x93
'Yin ', # 0x94
'Hou ', # 0x95
'Dui ', # 0x96
'Zhao ', # 0x97
'Fu ', # 0x98
'Guang ', # 0x99
'Yao ', # 0x9a
'Duo ', # 0x9b
'Duo ', # 0x9c
'Gui ', # 0x9d
'Cha ', # 0x9e
'Yang ', # 0x9f
'Yin ', # 0xa0
'Fa ', # 0xa1
'Gou ', # 0xa2
'Yuan ', # 0xa3
'Die ', # 0xa4
'Xie ', # 0xa5
'Ken ', # 0xa6
'Jiong ', # 0xa7
'Shou ', # 0xa8
'E ', # 0xa9
'Ha ', # 0xaa
'Dian ', # 0xab
'Hong ', # 0xac
'Wu ', # 0xad
'Kua ', # 0xae
'[?] ', # 0xaf
'Tao ', # 0xb0
'Dang ', # 0xb1
'Kai ', # 0xb2
'Gake ', # 0xb3
'Nao ', # 0xb4
'An ', # 0xb5
'Xing ', # 0xb6
'Xian ', # 0xb7
'Huan ', # 0xb8
'Bang ', # 0xb9
'Pei ', # 0xba
'Ba ', # 0xbb
'Yi ', # 0xbc
'Yin ', # 0xbd
'Han ', # 0xbe
'Xu ', # 0xbf
'Chui ', # 0xc0
'Cen ', # 0xc1
'Geng ', # 0xc2
'Ai ', # 0xc3
'Peng ', # 0xc4
'Fang ', # 0xc5
'Que ', # 0xc6
'Yong ', # 0xc7
'Xun ', # 0xc8
'Jia ', # 0xc9
'Di ', # 0xca
'Mai ', # 0xcb
'Lang ', # 0xcc
'Xuan ', # 0xcd
'Cheng ', # 0xce
'Yan ', # 0xcf
'Jin ', # 0xd0
'Zhe ', # 0xd1
'Lei ', # 0xd2
'Lie ', # 0xd3
'Bu ', # 0xd4
'Cheng ', # 0xd5
'Gomi ', # 0xd6
'Bu ', # 0xd7
'Shi ', # 0xd8
'Xun ', # 0xd9
'Guo ', # 0xda
'Jiong ', # 0xdb
'Ye ', # 0xdc
'Nian ', # 0xdd
'Di ', # 0xde
'Yu ', # 0xdf
'Bu ', # 0xe0
'Ya ', # 0xe1
'Juan ', # 0xe2
'Sui ', # 0xe3
'Pi ', # 0xe4
'Cheng ', # 0xe5
'Wan ', # 0xe6
'Ju ', # 0xe7
'Lun ', # 0xe8
'Zheng ', # 0xe9
'Kong ', # 0xea
'Chong ', # 0xeb
'Dong ', # 0xec
'Dai ', # 0xed
'Tan ', # 0xee
'An ', # 0xef
'Cai ', # 0xf0
'Shu ', # 0xf1
'Beng ', # 0xf2
'Kan ', # 0xf3
'Zhi ', # 0xf4
'Duo ', # 0xf5
'Yi ', # 0xf6
'Zhi ', # 0xf7
'Yi ', # 0xf8
'Pei ', # 0xf9
'Ji ', # 0xfa
'Zhun ', # 0xfb
'Qi ', # 0xfc
'Sao ', # 0xfd
'Ju ', # 0xfe
'Ni ', # 0xff
)
| apache-2.0 |
akashsinghal/Speech-Memorization-App | Python_Backend/env/lib/python3.6/site-packages/pip/utils/packaging.py | 343 | 2080 | from __future__ import absolute_import
from email.parser import FeedParser
import logging
import sys
from pip._vendor.packaging import specifiers
from pip._vendor.packaging import version
from pip._vendor import pkg_resources
from pip import exceptions
logger = logging.getLogger(__name__)
def check_requires_python(requires_python):
"""
    Check if the python version in use matches the `requires_python` specifier.
    Returns `True` if the version of python in use matches the requirement.
    Returns `False` if the version of python in use does not match the
    requirement.
    Raises an InvalidSpecifier if `requires_python` has an invalid format.
"""
if requires_python is None:
# The package provides no information
return True
requires_python_specifier = specifiers.SpecifierSet(requires_python)
# We only use major.minor.micro
python_version = version.parse('.'.join(map(str, sys.version_info[:3])))
return python_version in requires_python_specifier
def get_metadata(dist):
if (isinstance(dist, pkg_resources.DistInfoDistribution) and
dist.has_metadata('METADATA')):
return dist.get_metadata('METADATA')
elif dist.has_metadata('PKG-INFO'):
return dist.get_metadata('PKG-INFO')
def check_dist_requires_python(dist):
metadata = get_metadata(dist)
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
requires_python = pkg_info_dict.get('Requires-Python')
try:
if not check_requires_python(requires_python):
raise exceptions.UnsupportedPythonVersion(
"%s requires Python '%s' but the running Python is %s" % (
dist.project_name,
requires_python,
'.'.join(map(str, sys.version_info[:3])),)
)
except specifiers.InvalidSpecifier as e:
logger.warning(
"Package %s has an invalid Requires-Python entry %s - %s" % (
dist.project_name, requires_python, e))
return
| apache-2.0 |
nickanderson/ansible | lib/ansible/inventory/ini.py | 25 | 7628 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors
from ansible import utils
import shlex
import re
import ast
class InventoryParser(object):
"""
Host inventory for ansible.
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
with open(filename) as fh:
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._add_allgroup_children()
self._parse_group_variables()
return self.groups
@staticmethod
def _parse_value(v):
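        # Illustrative behaviour:
        #   "True" -> True, "123" -> 123, "1.5" -> 1.5
        #   "plain" -> "plain" (literal_eval rejects it, value kept as string)
        #   values containing "#" are returned unchanged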
if "#" not in v:
try:
return ast.literal_eval(v)
# Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return v
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
def _add_allgroup_children(self):
for group in self.groups.values():
if group.depth == 0 and group.name != 'all':
self.groups['all'].add_child_group(group)
def _parse_base_groups(self):
# FIXME: refactor
ungrouped = Group(name='ungrouped')
all = Group(name='all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
for line in self.lines:
line = utils.before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
active_group_name = active_group_name.rsplit(":", 1)[0]
if active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
active_group_name = None
elif active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
elif line.startswith(";") or line == '':
pass
elif active_group_name:
tokens = shlex.split(line)
if len(tokens) == 0:
continue
hostname = tokens[0]
port = C.DEFAULT_REMOTE_PORT
                # Two cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
                # 1. A hostname that contains just a port
if hostname.count(":") > 1:
# Possible an IPv6 address, or maybe a host line with multiple ranges
# IPv6 with Port XXX:XXX::XXX.port
# FQDN foo.example.com
if hostname.count(".") == 1:
(hostname, port) = hostname.rsplit(".", 1)
elif ("[" in hostname and
"]" in hostname and
":" in hostname and
(hostname.rindex("]") < hostname.rindex(":")) or
("]" not in hostname and ":" in hostname)):
(hostname, port) = hostname.rsplit(":", 1)
hostnames = []
if detect_range(hostname):
hostnames = expand_hostname_range(hostname)
else:
hostnames = [hostname]
for hn in hostnames:
host = None
if hn in self.hosts:
host = self.hosts[hn]
else:
host = Host(name=hn, port=port)
self.hosts[hn] = host
if len(tokens) > 1:
for t in tokens[1:]:
if t.startswith('#'):
break
try:
(k,v) = t.split("=", 1)
except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
# atlanta
# raleigh
def _parse_group_children(self):
group = None
for line in self.lines:
line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
line = line.replace("[","").replace(":children]","")
group = self.groups.get(line, None)
if group is None:
group = self.groups[line] = Group(name=line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
raise errors.AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
# [webservers:vars]
# http_port=1234
# maxRequestsPerChild=200
def _parse_group_variables(self):
group = None
for line in self.lines:
line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
raise errors.AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif line == '':
pass
elif group:
if "=" not in line:
raise errors.AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
| gpl-3.0 |
matteoalessiocarrara/HTML-Facebook-API | src/lib/fbwrapper/src/lib/bot_virtualbrowser/src/lib/human/src/requests2.py | 6 | 2370 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2015 - 2016 Matteo Alessio Carrara <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Estensione della libreria requests """
import logging
import os
import requests
import version
# Logging system configuration
logger = logging.getLogger(version.lib_name)
logger.addHandler(logging.NullHandler())
class Session(requests.Session):
"""Versione modificata di requests.Session"""
def __init__(self):
super(Session, self).__init__()
self.__set_owner_pid()
def __set_owner_pid(self):
"""Imposta il pid del processo creatore, ovvero quello attuale"""
self.__owner_pid = os.getpid()
logger.debug("Owner pid: %s", self.__owner_pid)
def get_owner_pid(self):
"""Restituisce il pid del processo creatore"""
return self.__owner_pid
def get2(self, url, **kwargs):
"""
Versione modificata di get
* Controlla che questo oggetto non sia condiviso fra più processi
* Crea un eccezione HTTPError quando necessario
* Stampa informazioni di debug
"""
if os.getpid() != self.owner_pid:
# STACCAAAA STACCAAAAAAAAAAH
w = "Sembra che l'oggetto requests.Session sia utilizzato da più processi. Questo è sconsigliato e potrebbe creare dei problemi"
logger.warning(w)
if (url[:8] == "https://") and (os.getpid() != self.owner_pid):
logger.info("Casini in arrivo... io ti avevo avvertito, auguri :)")
ret = self.get(url, **kwargs)
try:
ret.raise_for_status()
except requests.HTTPError as e:
logger.error("url %s: %s ", url, e.message)
logger.debug("<!-- ret.text -->\n%s", ret.text)
raise
return ret
owner_pid = property(get_owner_pid)
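# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the library): illustrative only, the URL is
# an example and network access is assumed; get2() raises HTTPError on 4xx/5xx.
if __name__ == "__main__":
	session = Session()
	print("owner pid: %s" % session.owner_pid)
	response = session.get2("https://example.org")
	print("status: %s" % response.status_code)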
| gpl-2.0 |
UTSA-ICS/keystone-kerberos | keystone/credential/backends/sql.py | 15 | 3846 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import sql
from keystone import credential
from keystone import exception
class CredentialModel(sql.ModelBase, sql.DictBase):
__tablename__ = 'credential'
attributes = ['id', 'user_id', 'project_id', 'blob', 'type']
id = sql.Column(sql.String(64), primary_key=True)
user_id = sql.Column(sql.String(64),
nullable=False)
project_id = sql.Column(sql.String(64))
blob = sql.Column(sql.JsonBlob(), nullable=False)
type = sql.Column(sql.String(255), nullable=False)
extra = sql.Column(sql.JsonBlob())
class Credential(credential.Driver):
# credential crud
@sql.handle_conflicts(conflict_type='credential')
def create_credential(self, credential_id, credential):
session = sql.get_session()
with session.begin():
ref = CredentialModel.from_dict(credential)
session.add(ref)
return ref.to_dict()
@sql.truncated
def list_credentials(self, hints):
session = sql.get_session()
credentials = session.query(CredentialModel)
credentials = sql.filter_limit_query(CredentialModel,
credentials, hints)
return [s.to_dict() for s in credentials]
def list_credentials_for_user(self, user_id):
session = sql.get_session()
query = session.query(CredentialModel)
refs = query.filter_by(user_id=user_id).all()
return [ref.to_dict() for ref in refs]
def _get_credential(self, session, credential_id):
ref = session.query(CredentialModel).get(credential_id)
if ref is None:
raise exception.CredentialNotFound(credential_id=credential_id)
return ref
def get_credential(self, credential_id):
session = sql.get_session()
return self._get_credential(session, credential_id).to_dict()
@sql.handle_conflicts(conflict_type='credential')
def update_credential(self, credential_id, credential):
session = sql.get_session()
with session.begin():
ref = self._get_credential(session, credential_id)
old_dict = ref.to_dict()
for k in credential:
old_dict[k] = credential[k]
new_credential = CredentialModel.from_dict(old_dict)
for attr in CredentialModel.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_credential, attr))
ref.extra = new_credential.extra
return ref.to_dict()
def delete_credential(self, credential_id):
session = sql.get_session()
with session.begin():
ref = self._get_credential(session, credential_id)
session.delete(ref)
def delete_credentials_for_project(self, project_id):
session = sql.get_session()
with session.begin():
query = session.query(CredentialModel)
query = query.filter_by(project_id=project_id)
query.delete()
def delete_credentials_for_user(self, user_id):
session = sql.get_session()
with session.begin():
query = session.query(CredentialModel)
query = query.filter_by(user_id=user_id)
query.delete()
| apache-2.0 |
topic2k/EventGhost | _build/builder/__init__.py | 1 | 7152 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import os
import sys
import tempfile
import threading
from os.path import abspath, dirname, exists, join
# Local imports
import builder
from builder import VirtualEnv
from builder.Logging import LogToFile
from builder.Utils import (
GetGitHubConfig, GetVersion, Is64bitInterpreter, IsCIBuild
)
logger = logging.getLogger()
class Task(object):
value = None
visible = True
enabled = True
activated = True
def __init__(self, buildSetup):
self.buildSetup = buildSetup
def Setup(self):
pass
def DoTask(self):
raise NotImplementedError
@classmethod
def GetId(cls):
return cls.__module__ + "." + cls.__name__
def Print(self, *args):
logger.log(22, " ".join(args))
class Builder(object):
def __init__(self):
if not VirtualEnv.Running() and VirtualEnv.Exists():
VirtualEnv.Activate()
global buildSetup
Task.buildSetup = self
buildSetup = self
self.pyVersionStr = "%d%d" % sys.version_info[:2]
self.buildDir = abspath(join(dirname(__file__), ".."))
self.sourceDir = abspath(join(self.buildDir, ".."))
self.libraryName = "lib%s" % self.pyVersionStr
self.libraryDir = join(self.sourceDir, self.libraryName)
self.dataDir = join(self.buildDir, "data")
self.docsDir = join(self.dataDir, "docs")
self.pyVersionDir = join(self.dataDir, "Python%s" % self.pyVersionStr)
self.outputDir = join(self.buildDir, "output")
self.websiteDir = join(self.outputDir, "website")
if Is64bitInterpreter():
print(
"ERROR: Sorry, EventGhost can't be built with the 64-bit "
"version of Python!"
)
sys.exit(1)
elif not exists(self.pyVersionDir):
print(
"ERROR: Sorry, EventGhost can't be built with Python %d.%d!"
% sys.version_info[:2]
)
sys.exit(1)
sys.path.append(self.sourceDir)
sys.path.append(join(self.libraryDir, "site-packages"))
self.args = self.ParseArgs()
self.showGui = not (
self.args.build or
self.args.check or
self.args.package or
self.args.release or
self.args.sync
)
if os.environ.get(
"APPVEYOR_REPO_COMMIT_MESSAGE", ""
).upper().startswith("VERBOSE:"):
self.args.verbose = True
os.chdir(self.buildDir)
if not exists(self.outputDir):
os.mkdir(self.outputDir)
LogToFile(join(self.outputDir, "Build.log"), self.args.verbose)
from CheckDependencies import CheckDependencies
if not CheckDependencies(self):
sys.exit(1)
try:
self.gitConfig = GetGitHubConfig()
except Exception as e:
msg = (
"WARNING: To change version or release to GitHub, you must:\n"
" $ git config --global github.user <your github username>\n"
" $ git config --global github.token <your github token>\n"
"To create a token, go to: https://github.com/settings/tokens\n"
)
if type(e) is ValueError:
msg = "WARNING: Specified `github.token` is invalid!\n" + msg
if not IsCIBuild():
token = ""
print msg
else:
token = os.environ["GITHUB_TOKEN"]
self.gitConfig = {
"all_repos": {
"EventGhost/EventGhost": {
"all_branches": ["master"],
"def_branch": "master",
"name": "EventGhost",
},
},
"branch": "master",
"repo": "EventGhost",
"repo_full": "EventGhost/EventGhost",
"token": token,
"user": "EventGhost",
}
self.appVersion = None
self.appVersionInfo = None
self.tmpDir = tempfile.mkdtemp()
self.appName = self.name
def ParseArgs(self):
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", "--build",
action="store_true",
help="build imports, lib%s, and interpreters" % self.pyVersionStr,
)
parser.add_argument(
"-c", "--check",
action="store_true",
help="check source code for issues",
)
parser.add_argument(
"-m", "--make-env",
action="store_true",
help="auto-install dependencies into a virtualenv",
)
parser.add_argument(
"-p", "--package",
action="store_true",
help="build changelog, docs, and setup.exe",
)
parser.add_argument(
"-r", "--release",
action="store_true",
help="release to github and web if credentials available",
)
parser.add_argument(
"-s", "--sync",
action="store_true",
help="build and synchronize website",
)
parser.add_argument(
"-d", "--docs",
action="store_true",
help="build and synchronize usr and dev docs",
)
parser.add_argument(
"-u", "--url",
dest="websiteUrl",
default='',
type=str,
help="sftp url for doc synchronizing",
)
parser.add_argument(
"-vv", "--verbose",
action="store_true",
help="give a more verbose output",
)
parser.add_argument(
"-v", "--version",
action="store",
help="package as the specified version",
)
return parser.parse_args()
def Start(self):
from Tasks import TASKS
self.tasks = [task(self) for task in TASKS]
from Config import Config
self.config = Config(self, join(self.outputDir, "Build.ini"))
for task in self.tasks:
task.Setup()
(self.appVersion, self.appVersionInfo) = GetVersion(self)
if self.showGui:
import Gui
Gui.Main(self)
else:
builder.Tasks.Main(self)
| gpl-2.0 |
wolverineav/neutron | neutron/tests/unit/agent/common/test_ovs_lib.py | 3 | 39120 | # Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.common import utils
from neutron.common import exceptions
from neutron.plugins.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants as p_const
from neutron.tests import base
from neutron.tests import tools
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
# some test data for get_vif_port_to_ofport_map that exhibited bug 1444269
OVSLIST_WITH_UNSET_PORT = (
'{"data":[["patch-tun",["map",[]],1],["tap2ab72a72-44",["map",[["attached-'
'mac","fa:16:3e:b0:f8:38"],["iface-id","2ab72a72-4407-4ef3-806a-b2172f3e4d'
'c7"],["iface-status","active"]]],2],["tap6b108774-15",["map",[["attached-'
'mac","fa:16:3e:02:f5:91"],["iface-id","6b108774-1559-45e9-a7c3-b714f11722'
'cf"],["iface-status","active"]]],["set",[]]]],"headings":["name","externa'
'l_ids","ofport"]}')
class OFCTLParamListMatcher(object):
def _parse(self, params):
actions_pos = params.find('actions')
return set(params[:actions_pos].split(',')), params[actions_pos:]
def __init__(self, params):
self.expected = self._parse(params)
def __eq__(self, other):
return self.expected == self._parse(other)
def __str__(self):
return 'ovs-ofctl parameters: %s, "%s"' % self.expected
__repr__ = __str__
class OVS_Lib_Test(base.BaseTestCase):
"""A test suite to exercise the OVS libraries shared by Neutron agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.br = ovs_lib.OVSBridge(self.BR_NAME)
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
@property
def TO(self):
return "--timeout=%s" % self.br.vsctl_timeout
def _vsctl_args(self, *args):
cmd = ['ovs-vsctl', self.TO, '--oneline', '--format=json', '--']
cmd += args
return cmd
def _vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
return mock.call(cmd, run_as_root=True, log_fail_as_error=False)
def _verify_vsctl_mock(self, *args):
cmd = self._vsctl_args(*args)
self.execute.assert_called_once_with(cmd, run_as_root=True,
log_fail_as_error=False)
def test_vifport(self):
"""Create and stringify vif port, confirm no exceptions."""
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
str(port)
def _build_timeout_opt(self, exp_timeout):
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
cidr = '192.168.1.0/24'
flow_dict_1 = collections.OrderedDict([
('cookie', 1234),
('priority', 2),
('dl_src', 'ca:fe:de:ad:be:ef'),
('actions', 'strip_vlan,output:0')])
flow_dict_2 = collections.OrderedDict([
('cookie', 1254),
('priority', 1),
('actions', 'normal')])
flow_dict_3 = collections.OrderedDict([
('cookie', 1257),
('priority', 2),
('actions', 'drop')])
flow_dict_4 = collections.OrderedDict([
('cookie', 1274),
('priority', 2),
('in_port', ofport),
('actions', 'drop')])
flow_dict_5 = collections.OrderedDict([
('cookie', 1284),
('priority', 4),
('in_port', ofport),
('dl_vlan', vid),
('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
flow_dict_6 = collections.OrderedDict([
('cookie', 1754),
('priority', 3),
('tun_id', lsw_id),
('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
flow_dict_7 = collections.OrderedDict([
('cookie', 1256),
('priority', 4),
('nw_src', cidr),
('proto', 'arp'),
('actions', 'drop')])
self.br.add_flow(**flow_dict_1)
self.br.add_flow(**flow_dict_2)
self.br.add_flow(**flow_dict_3)
self.br.add_flow(**flow_dict_4)
self.br.add_flow(**flow_dict_5)
self.br.add_flow(**flow_dict_6)
self.br.add_flow(**flow_dict_7)
expected_calls = [
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1234,"
"priority=2,dl_src=ca:fe:de:ad:be:ef,"
"actions=strip_vlan,output:0")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1254,"
"priority=1,actions=normal")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1257,"
"priority=2,actions=drop")),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1274,"
"priority=2,in_port=%s,actions=drop" % ofport
)),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1284,"
"priority=4,dl_vlan=%s,in_port=%s,"
"actions=strip_vlan,set_tunnel:%s,normal" %
(vid, ofport, lsw_id))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1754,"
"priority=3,"
"tun_id=%s,actions=mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport))),
self._ofctl_mock("add-flows", self.BR_NAME, '-',
process_input=OFCTLParamListMatcher(
"hard_timeout=0,idle_timeout=0,cookie=1256,"
"priority=4,nw_src=%s,arp,actions=drop"
% cidr)),
]
self.execute.assert_has_calls(expected_calls)
def _ofctl_args(self, cmd, *args):
cmd = ['ovs-ofctl', cmd]
cmd += args
return cmd
def _ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return mock.call(cmd, run_as_root=True, **kwargs)
def _verify_ofctl_mock(self, cmd, *args, **kwargs):
cmd = self._ofctl_args(cmd, *args)
return self.execute.assert_called_once_with(cmd, run_as_root=True,
**kwargs)
def test_add_flow_timeout_set(self):
flow_dict = collections.OrderedDict([
('cookie', 1234),
('priority', 1),
('hard_timeout', 1000),
('idle_timeout', 2000),
('actions', 'normal')])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=1000,idle_timeout=2000,"
"priority=1,cookie=1234,actions=normal")
def test_add_flow_default_priority(self):
flow_dict = collections.OrderedDict([('actions', 'normal'),
('cookie', 1234)])
self.br.add_flow(**flow_dict)
self._verify_ofctl_mock(
"add-flows", self.BR_NAME, '-',
process_input="hard_timeout=0,idle_timeout=0,priority=1,"
"cookie=1234,actions=normal")
def _test_get_port_ofport(self, ofport, expected_result):
pname = "tap99"
self.br.vsctl_timeout = 0 # Don't waste precious time retrying
self.execute.return_value = self._encode_ovs_json(
['ofport'], [[ofport]])
self.assertEqual(self.br.get_port_ofport(pname), expected_result)
self._verify_vsctl_mock("--columns=ofport", "list", "Interface", pname)
def test_get_port_ofport_succeeds_for_valid_ofport(self):
self._test_get_port_ofport(6, 6)
def test_get_port_ofport_returns_invalid_ofport_for_non_int(self):
self._test_get_port_ofport([], ovs_lib.INVALID_OFPORT)
def test_get_port_ofport_returns_invalid_for_invalid(self):
self._test_get_port_ofport(ovs_lib.INVALID_OFPORT,
ovs_lib.INVALID_OFPORT)
def test_default_datapath(self):
# verify kernel datapath is default
expected = p_const.OVS_DATAPATH_SYSTEM
self.assertEqual(expected, self.br.datapath_type)
def test_non_default_datapath(self):
expected = p_const.OVS_DATAPATH_NETDEV
self.br = ovs_lib.OVSBridge(self.BR_NAME, datapath_type=expected)
self.assertEqual(expected, self.br.datapath_type)
def test_count_flows(self):
self.execute.return_value = 'ignore\nflow-1\n'
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self._verify_ofctl_mock("dump-flows", self.BR_NAME, process_input=None)
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
expected_calls = [
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="in_port=" + ofport),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="tun_id=%s" % lsw_id),
self._ofctl_mock("del-flows", self.BR_NAME, '-',
process_input="dl_vlan=%s" % vid),
]
self.execute.assert_has_calls(expected_calls)
def test_delete_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.delete_flows,
**params)
def test_dump_flows(self):
table = 23
nxst_flow = "NXST_FLOW reply (xid=0x4):"
flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
"n_packets=6, n_bytes=468, "
"priority=2,in_port=1 actions=drop",
" cookie=0x0, duration=18027.562s, table=0, "
"n_packets=0, n_bytes=0, "
"priority=3,in_port=1,dl_vlan=100 "
"actions=mod_vlan_vid:1,NORMAL",
" cookie=0x0, duration=18044.351s, table=0, "
"n_packets=9, n_bytes=594, priority=1 "
"actions=NORMAL", " cookie=0x0, "
"duration=18044.211s, table=23, n_packets=0, "
"n_bytes=0, priority=0 actions=drop"])
flow_args = '\n'.join([nxst_flow, flows])
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = [flow_args]
retflows = self.br.dump_flows_for_table(table)
self.assertEqual(flows, retflows)
def test_dump_flows_ovs_dead(self):
table = 23
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
run_ofctl.side_effect = ['']
retflows = self.br.dump_flows_for_table(table)
self.assertIsNone(retflows)
def test_mod_flow_with_priority_set(self):
params = {'in_port': '1',
'priority': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_mod_flow_no_actions_set(self):
params = {'in_port': '1'}
self.assertRaises(exceptions.InvalidInput,
self.br.mod_flow,
**params)
def test_run_ofctl_retry_on_socket_error(self):
err = RuntimeError('failed to connect to socket')
self.execute.side_effect = [err] * 5
with mock.patch('time.sleep') as sleep:
self.br.run_ofctl('add-flows', [])
self.assertEqual(5, sleep.call_count)
self.assertEqual(6, self.execute.call_count)
# a regular exception fails right away
self.execute.side_effect = RuntimeError('garbage')
self.execute.reset_mock()
with mock.patch('time.sleep') as sleep:
self.br.run_ofctl('add-flows', [])
self.assertEqual(0, sleep.call_count)
self.assertEqual(1, self.execute.call_count)
def test_add_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
command = ["--may-exist", "add-port",
self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=gre", "options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_fragmented_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
vxlan_udp_port = "9999"
dont_fragment = False
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=false",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_add_vxlan_csum_tunnel_port(self):
pname = "tap99"
local_ip = "1.1.1.1"
remote_ip = "9.9.9.9"
ofport = 6
vxlan_udp_port = "9999"
dont_fragment = True
tunnel_csum = True
command = ["--may-exist", "add-port", self.BR_NAME, pname]
command.extend(["--", "set", "Interface", pname])
command.extend(["type=" + constants.TYPE_VXLAN,
"options:dst_port=" + vxlan_udp_port,
"options:df_default=true",
"options:remote_ip=" + remote_ip,
"options:local_ip=" + local_ip,
"options:in_key=flow",
"options:out_key=flow",
"options:csum=true"])
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock(*command), None),
(self._vsctl_mock("--columns=ofport", "list", "Interface", pname),
self._encode_ovs_json(['ofport'], [[ofport]])),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertEqual(
self.br.add_tunnel_port(pname, remote_ip, local_ip,
constants.TYPE_VXLAN, vxlan_udp_port,
dont_fragment, tunnel_csum),
ofport)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = 6
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
id_field = 'xs-vif-uuid' if is_xen else 'iface-id'
external_ids = {"attached-mac": mac, id_field: vif_id}
self.br.get_ports_attributes = mock.Mock(return_value=[{
'name': pname, 'ofport': ofport, 'external_ids': external_ids}])
self.br.get_xapi_iface_id = mock.Mock(return_value=vif_id)
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
self.br.get_ports_attributes.assert_called_once_with(
'Interface',
columns=['name', 'external_ids', 'ofport'],
if_exists=True)
def _encode_ovs_json(self, headings, data):
# See man ovs-vsctl(8) for the encoding details.
r = {"data": [],
"headings": headings}
for row in data:
ovs_row = []
r["data"].append(ovs_row)
for cell in row:
if isinstance(cell, (str, int, list)):
ovs_row.append(cell)
elif isinstance(cell, dict):
ovs_row.append(["map", cell.items()])
elif isinstance(cell, set):
ovs_row.append(["set", cell])
else:
raise TypeError('%r not int, str, list, set or dict' %
type(cell))
return jsonutils.dumps(r)
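    # For reference, the helper above mirrors the table format produced by
    # `ovs-vsctl --format=json`: e.g. _encode_ovs_json(['name', 'tag'],
    # [['tap1', 5]]) yields a JSON string equivalent to
    # {"data": [["tap1", 5]], "headings": ["name", "tag"]}.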
def _test_get_vif_port_set(self, is_xen):
if is_xen:
id_key = 'xs-vif-uuid'
else:
id_key = 'iface-id'
headings = ['name', 'external_ids', 'ofport']
data = [
# A vif port on this bridge:
['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
# A vif port on this bridge not yet configured
['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
# Another vif port on this bridge not yet configured
['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
['set', []]],
# Non-vif port on this bridge:
['bogus', {}, 2],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\\ntun22'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", 'tap99', 'tun22'),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id = mock.patch.object(self.br,
'get_xapi_iface_id').start()
get_xapi_iface_id.return_value = 'tap99id'
port_set = self.br.get_vif_port_set()
self.assertEqual(set(['tap99id']), port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
if is_xen:
get_xapi_iface_id.assert_called_once_with('tap99id')
def test_get_vif_port_to_ofport_map(self):
self.execute.return_value = OVSLIST_WITH_UNSET_PORT
results = self.br.get_vif_port_to_ofport_map()
expected = {'2ab72a72-4407-4ef3-806a-b2172f3e4dc7': 2, 'patch-tun': 1}
self.assertEqual(expected, results)
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(is_xen=False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(is_xen=True)
def test_get_vif_port_set_nonxen(self):
self._test_get_vif_port_set(False)
def test_get_vif_port_set_xen(self):
self._test_get_vif_port_set(True)
def test_get_vif_ports_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_ports)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_ports_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_vif_port_set_list_interface_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), 'tap99\n'),
(self._vsctl_mock("--if-exists",
"--columns=name,external_ids,ofport",
"list", "Interface", "tap99"), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.get_vif_port_set)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_port_tag_dict(self):
headings = ['name', 'tag']
data = [
['int-br-eth2', set()],
['patch-tun', set()],
['qr-76d9e6b6-21', 1],
['tapce5318ff-78', 1],
['tape1400310-e6', 1],
]
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME),
             '\n'.join((iface for iface, tag in data))),
(self._vsctl_mock("--columns=name,tag", "list", "Port"),
self._encode_ovs_json(headings, data)),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
port_tags = self.br.get_port_tag_dict()
self.assertEqual(
port_tags,
{u'int-br-eth2': [],
u'patch-tun': [],
u'qr-76d9e6b6-21': 1,
u'tapce5318ff-78': 1,
u'tape1400310-e6': 1}
)
def test_clear_db_attribute(self):
pname = "tap77"
self.br.clear_db_attribute("Port", pname, "tag")
self._verify_vsctl_mock("clear", "Port", pname, "tag")
def _test_iface_to_br(self, exp_timeout=None):
iface = 'tap0'
br = 'br-int'
if exp_timeout:
self.br.vsctl_timeout = exp_timeout
self.execute.return_value = 'br-int'
self.assertEqual(self.br.get_bridge_for_iface(iface), br)
self._verify_vsctl_mock("iface-to-br", iface)
def test_iface_to_br(self):
self._test_iface_to_br()
def test_iface_to_br_non_default_timeout(self):
new_timeout = 5
self._test_iface_to_br(new_timeout)
def test_iface_to_br_handles_ovs_vsctl_exception(self):
iface = 'tap0'
self.execute.side_effect = Exception
self.assertIsNone(self.br.get_bridge_for_iface(iface))
self._verify_vsctl_mock("iface-to-br", iface)
def test_delete_all_ports(self):
with mock.patch.object(self.br, 'get_port_name_list',
return_value=['port1']) as get_port:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=True)
get_port.assert_called_once_with()
delete_port.assert_called_once_with('port1')
def test_delete_neutron_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
with mock.patch.object(self.br, 'get_vif_ports',
return_value=[port1, port2]) as get_ports:
with mock.patch.object(self.br, 'delete_port') as delete_port:
self.br.delete_ports(all_ports=False)
get_ports.assert_called_once_with()
delete_port.assert_has_calls([
mock.call('tap1234'),
mock.call('tap5678')
])
def test_delete_neutron_ports_list_error(self):
expected_calls_and_values = [
(self._vsctl_mock("list-ports", self.BR_NAME), RuntimeError()),
]
tools.setup_mock_calls(self.execute, expected_calls_and_values)
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
def test_get_bridges_not_default_timeout(self):
bridges = ['br-int', 'br-ex']
self.br.vsctl_timeout = 5
        self.execute.return_value = 'br-int\nbr-ex\n'
self.assertEqual(self.br.get_bridges(), bridges)
self._verify_vsctl_mock("list-br")
def test_get_local_port_mac_succeeds(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address='foo')):
self.assertEqual('foo', self.br.get_local_port_mac())
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
return_value=mock.Mock(address=None)):
with testtools.ExpectedException(Exception):
self.br.get_local_port_mac()
def test_get_vifs_by_ids(self):
db_list_res = [
{'name': 'qvo1', 'ofport': 1,
'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}},
{'name': 'qvo2', 'ofport': 2,
'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}},
{'name': 'qvo4', 'ofport': -1,
'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}},
]
self.br.get_ports_attributes = mock.Mock(return_value=db_list_res)
self.br.ovsdb = mock.Mock()
self.br.ovsdb.list_ports.return_value.execute.return_value = [
'qvo1', 'qvo2', 'qvo4']
by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', 'pid4'])
# pid3 isn't on bridge and pid4 doesn't have a valid ofport
self.assertIsNone(by_id['pid3'])
self.assertIsNone(by_id['pid4'])
self.assertEqual('pid1', by_id['pid1'].vif_id)
self.assertEqual('qvo1', by_id['pid1'].port_name)
self.assertEqual(1, by_id['pid1'].ofport)
self.assertEqual('pid2', by_id['pid2'].vif_id)
self.assertEqual('qvo2', by_id['pid2'].port_name)
self.assertEqual(2, by_id['pid2'].ofport)
self.br.get_ports_attributes.assert_has_calls(
[mock.call('Interface', columns=['name', 'external_ids', 'ofport'],
if_exists=True)])
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None,
extra_calls_and_values=None):
headings = ['external_ids', 'name', 'ofport']
# Each element is a tuple of (expected mock call, return_value)
expected_calls_and_values = [
(self._vsctl_mock("--columns=external_ids,name,ofport", "find",
"Interface",
'external_ids:iface-id=%s' % iface_id,
'external_ids:attached-mac!=""'),
self._encode_ovs_json(headings, data))]
if data:
if not br_name:
br_name = self.BR_NAME
# Only the last information list in 'data' is used, so if more
# than one vif is described in data, the rest must be declared
# in the argument 'expected_calls_and_values'.
if extra_calls_and_values:
expected_calls_and_values.extend(extra_calls_and_values)
expected_calls_and_values.append(
(self._vsctl_mock("iface-to-br",
data[-1][headings.index('name')]), br_name))
tools.setup_mock_calls(self.execute, expected_calls_and_values)
vif_port = self.br.get_vif_port_by_id(iface_id)
tools.verify_mock_calls(self.execute, expected_calls_and_values)
return vif_port
def _assert_vif_port(self, vif_port, ofport=None, mac=None):
if not ofport or ofport == -1 or not mac:
self.assertIsNone(vif_port, "Got %s" % vif_port)
return
self.assertEqual('tap99id', vif_port.vif_id)
self.assertEqual(mac, vif_port.vif_mac)
self.assertEqual('tap99', vif_port.port_name)
self.assertEqual(ofport, vif_port.ofport)
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", mac]]
data = [[["map", external_ids], "tap99",
ofport if ofport else ["set", []]]]
vif_port = self._test_get_vif_port_by_id('tap99id', data)
self._assert_vif_port(vif_port, ofport, mac)
def test_get_vif_by_port_id_with_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_without_ofport(self):
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_invalid_ofport(self):
self._test_get_vif_port_by_id_with_data(
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
def test_get_vif_by_port_id_with_no_data(self):
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
def test_get_vif_by_port_id_different_bridge(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"]]
data = [[["map", external_ids], "tap99", 1]]
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
"br-ext"))
def test_get_vif_by_port_id_multiple_vifs(self):
external_ids = [["iface-id", "tap99id"],
["iface-status", "active"],
["attached-mac", "de:ad:be:ef:13:37"]]
data = [[["map", external_ids], "dummytap", 1],
[["map", external_ids], "tap99", 1337]]
extra_calls_and_values = [
(self._vsctl_mock("iface-to-br", "dummytap"), "br-ext")]
vif_port = self._test_get_vif_port_by_id(
'tap99id', data, extra_calls_and_values=extra_calls_and_values)
self._assert_vif_port(vif_port, ofport=1337, mac="de:ad:be:ef:13:37")
class TestDeferredOVSBridge(base.BaseTestCase):
def setUp(self):
super(TestDeferredOVSBridge, self).setUp()
self.br = mock.Mock()
self.mocked_do_action_flows = mock.patch.object(
self.br, 'do_action_flows').start()
self.add_flow_dict1 = dict(in_port=11, actions='drop')
self.add_flow_dict2 = dict(in_port=12, actions='drop')
self.mod_flow_dict1 = dict(in_port=21, actions='drop')
self.mod_flow_dict2 = dict(in_port=22, actions='drop')
self.del_flow_dict1 = dict(in_port=31)
self.del_flow_dict2 = dict(in_port=32)
def test_right_allowed_passthroughs(self):
expected_passthroughs = ('add_port', 'add_tunnel_port', 'delete_port')
self.assertEqual(expected_passthroughs,
ovs_lib.DeferredOVSBridge.ALLOWED_PASSTHROUGHS)
def _verify_mock_call(self, expected_calls):
self.mocked_do_action_flows.assert_has_calls(expected_calls)
self.assertEqual(len(expected_calls),
len(self.mocked_do_action_flows.mock_calls))
def test_apply_on_exit(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
self._verify_mock_call(expected_calls)
def test_apply_on_exit_with_errors(self):
try:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
raise Exception()
except Exception:
self._verify_mock_call([])
else:
self.fail('Exception would be reraised')
def test_apply(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1]),
]
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
self._verify_mock_call([])
deferred_br.apply_flows()
self._verify_mock_call(expected_calls)
self._verify_mock_call(expected_calls)
def test_apply_order(self):
expected_calls = [
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('mod', [self.mod_flow_dict1, self.mod_flow_dict2]),
mock.call('add', [self.add_flow_dict1, self.add_flow_dict2]),
]
order = 'del', 'mod', 'add'
with ovs_lib.DeferredOVSBridge(self.br, order=order) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_apply_full_ordered(self):
expected_calls = [
mock.call('add', [self.add_flow_dict1]),
mock.call('mod', [self.mod_flow_dict1]),
mock.call('del', [self.del_flow_dict1, self.del_flow_dict2]),
mock.call('add', [self.add_flow_dict2]),
mock.call('mod', [self.mod_flow_dict2]),
]
with ovs_lib.DeferredOVSBridge(self.br,
full_ordered=True) as deferred_br:
deferred_br.add_flow(**self.add_flow_dict1)
deferred_br.mod_flow(**self.mod_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict1)
deferred_br.delete_flows(**self.del_flow_dict2)
deferred_br.add_flow(**self.add_flow_dict2)
deferred_br.mod_flow(**self.mod_flow_dict2)
self._verify_mock_call(expected_calls)
def test_getattr_unallowed_attr(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertEqual(self.br.add_port, deferred_br.add_port)
def test_getattr_unallowed_attr_failure(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
def test_default_cookie(self):
self.br = ovs_lib.OVSBridge("br-tun")
uuid_stamp1 = self.br.default_cookie
self.assertEqual(uuid_stamp1, self.br.default_cookie)
def test_cookie_passed_to_addmod(self):
self.br = ovs_lib.OVSBridge("br-tun")
stamp = str(self.br.default_cookie)
expected_calls = [
mock.call('add-flows', ['-'],
'hard_timeout=0,idle_timeout=0,priority=1,'
'cookie=' + stamp + ',actions=drop'),
mock.call('mod-flows', ['-'],
'cookie=' + stamp + ',actions=drop')
]
with mock.patch.object(self.br, 'run_ofctl') as f:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(actions='drop')
deferred_br.mod_flow(actions='drop')
f.assert_has_calls(expected_calls)
| apache-2.0 |
Ken69267/config-stuff | .vim/eclim/autoload/eclim/python/rope/base/oi/objectinfo.py | 115 | 8767 | import warnings
from rope.base import exceptions, resourceobserver
from rope.base.oi import objectdb, memorydb, transform
class ObjectInfoManager(object):
"""Stores object information
It uses an instance of `objectdb.ObjectDB` for storing
information.
"""
def __init__(self, project):
self.project = project
self.to_textual = transform.PyObjectToTextual(project)
self.to_pyobject = transform.TextualToPyObject(project)
self.doi_to_pyobject = transform.DOITextualToPyObject(project)
self._init_objectdb()
if project.prefs.get('validate_objectdb', False):
self._init_validation()
def _init_objectdb(self):
dbtype = self.project.get_prefs().get('objectdb_type', None)
persist = None
if dbtype is not None:
warnings.warn(
'"objectdb_type" project config is deprecated;\n'
'Use "save_objectdb" instead in your project '
'config file.\n(".ropeproject/config.py" by default)\n',
DeprecationWarning)
if dbtype != 'memory' and self.project.ropefolder is not None:
persist = True
self.validation = TextualValidation(self.to_pyobject)
db = memorydb.MemoryDB(self.project, persist=persist)
self.objectdb = objectdb.ObjectDB(db, self.validation)
def _init_validation(self):
self.objectdb.validate_files()
observer = resourceobserver.ResourceObserver(
changed=self._resource_changed, moved=self._resource_moved,
removed=self._resource_moved)
files = []
for path in self.objectdb.get_files():
resource = self.to_pyobject.path_to_resource(path)
if resource is not None and resource.project == self.project:
files.append(resource)
self.observer = resourceobserver.FilteredResourceObserver(observer,
files)
self.objectdb.add_file_list_observer(_FileListObserver(self))
self.project.add_observer(self.observer)
def _resource_changed(self, resource):
try:
self.objectdb.validate_file(
self.to_textual.resource_to_path(resource))
except exceptions.ModuleSyntaxError:
pass
def _resource_moved(self, resource, new_resource=None):
self.observer.remove_resource(resource)
if new_resource is not None:
old = self.to_textual.resource_to_path(resource)
new = self.to_textual.resource_to_path(new_resource)
self.objectdb.file_moved(old, new)
self.observer.add_resource(new_resource)
def get_returned(self, pyobject, args):
result = self.get_exact_returned(pyobject, args)
if result is not None:
return result
path, key = self._get_scope(pyobject)
if path is None:
return None
for call_info in self.objectdb.get_callinfos(path, key):
returned = call_info.get_returned()
if returned and returned[0] not in ('unknown', 'none'):
result = returned
break
if result is None:
result = returned
if result is not None:
return self.to_pyobject(result)
def get_exact_returned(self, pyobject, args):
path, key = self._get_scope(pyobject)
if path is not None:
returned = self.objectdb.get_returned(
path, key, self._args_to_textual(pyobject, args))
if returned is not None:
return self.to_pyobject(returned)
def _args_to_textual(self, pyfunction, args):
parameters = list(pyfunction.get_param_names(special_args=False))
arguments = args.get_arguments(parameters)[:len(parameters)]
textual_args = tuple([self.to_textual(arg)
for arg in arguments])
return textual_args
def get_parameter_objects(self, pyobject):
path, key = self._get_scope(pyobject)
if path is None:
return None
arg_count = len(pyobject.get_param_names(special_args=False))
unknowns = arg_count
parameters = [None] * arg_count
for call_info in self.objectdb.get_callinfos(path, key):
args = call_info.get_parameters()
for index, arg in enumerate(args[:arg_count]):
old = parameters[index]
if self.validation.is_more_valid(arg, old):
parameters[index] = arg
if self.validation.is_value_valid(arg):
unknowns -= 1
if unknowns == 0:
break
if unknowns < arg_count:
return [self.to_pyobject(parameter)
for parameter in parameters]
def get_passed_objects(self, pyfunction, parameter_index):
path, key = self._get_scope(pyfunction)
if path is None:
return []
result = []
for call_info in self.objectdb.get_callinfos(path, key):
args = call_info.get_parameters()
if len(args) > parameter_index:
parameter = self.to_pyobject(args[parameter_index])
if parameter is not None:
result.append(parameter)
return result
def doa_data_received(self, data):
def doi_to_normal(textual):
pyobject = self.doi_to_pyobject(textual)
return self.to_textual(pyobject)
function = doi_to_normal(data[0])
args = tuple([doi_to_normal(textual) for textual in data[1]])
returned = doi_to_normal(data[2])
if function[0] == 'defined' and len(function) == 3:
self._save_data(function, args, returned)
def function_called(self, pyfunction, params, returned=None):
function_text = self.to_textual(pyfunction)
params_text = tuple([self.to_textual(param)
for param in params])
returned_text = ('unknown',)
if returned is not None:
returned_text = self.to_textual(returned)
self._save_data(function_text, params_text, returned_text)
def save_per_name(self, scope, name, data):
path, key = self._get_scope(scope.pyobject)
if path is not None:
self.objectdb.add_pername(path, key, name, self.to_textual(data))
def get_per_name(self, scope, name):
path, key = self._get_scope(scope.pyobject)
if path is not None:
result = self.objectdb.get_pername(path, key, name)
if result is not None:
return self.to_pyobject(result)
def _save_data(self, function, args, returned=('unknown',)):
self.objectdb.add_callinfo(function[1], function[2], args, returned)
def _get_scope(self, pyobject):
resource = pyobject.get_module().get_resource()
if resource is None:
return None, None
textual = self.to_textual(pyobject)
if textual[0] == 'defined':
path = textual[1]
if len(textual) == 3:
key = textual[2]
else:
key = ''
return path, key
return None, None
def sync(self):
self.objectdb.sync()
def __str__(self):
return str(self.objectdb)
class TextualValidation(object):
def __init__(self, to_pyobject):
self.to_pyobject = to_pyobject
def is_value_valid(self, value):
# ???: Should none and unknown be considered valid?
if value is None or value[0] in ('none', 'unknown'):
return False
return self.to_pyobject(value) is not None
def is_more_valid(self, new, old):
if old is None:
return True
return new[0] not in ('unknown', 'none')
def is_file_valid(self, path):
return self.to_pyobject.path_to_resource(path) is not None
def is_scope_valid(self, path, key):
if key == '':
textual = ('defined', path)
else:
textual = ('defined', path, key)
return self.to_pyobject(textual) is not None
class _FileListObserver(object):
def __init__(self, object_info):
self.object_info = object_info
self.observer = self.object_info.observer
self.to_pyobject = self.object_info.to_pyobject
def removed(self, path):
resource = self.to_pyobject.path_to_resource(path)
if resource is not None:
self.observer.remove_resource(resource)
def added(self, path):
resource = self.to_pyobject.path_to_resource(path)
if resource is not None:
self.observer.add_resource(resource)
| mit |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/encodings/euc_kr.py | 816 | 1027 | #
# euc_kr.py: Python Unicode Codec for EUC_KR
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('euc_kr')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_kr',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
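# Usage sketch (standard codecs machinery, shown here for illustration): once
# the `encodings` package imports this module, codec lookups by name go through
# getregentry(), e.g.
#   import codecs
#   info = codecs.lookup('euc_kr')          # CodecInfo built above
#   raw = u'\uc548\ub155'.encode('euc_kr')  # round-trips via raw.decode('euc_kr')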
| gpl-2.0 |
AntidoteLabs/Antidote-DM | Antidotes DM/youtube_dl/extractor/footyroom.py | 13 | 1647 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class FootyRoomIE(InfoExtractor):
_VALID_URL = r'http://footyroom\.com/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://footyroom.com/schalke-04-0-2-real-madrid-2015-02/',
'info_dict': {
'id': 'schalke-04-0-2-real-madrid-2015-02',
'title': 'Schalke 04 0 – 2 Real Madrid',
},
'playlist_count': 3,
'skip': 'Video for this match is not available',
}, {
'url': 'http://footyroom.com/georgia-0-2-germany-2015-03/',
'info_dict': {
'id': 'georgia-0-2-germany-2015-03',
'title': 'Georgia 0 – 2 Germany',
},
'playlist_count': 1,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist = self._parse_json(
self._search_regex(
r'VideoSelector\.load\((\[.+?\])\);', webpage, 'video selector'),
playlist_id)
playlist_title = self._og_search_title(webpage)
entries = []
for video in playlist:
payload = video.get('payload')
if not payload:
continue
playwire_url = self._search_regex(
r'data-config="([^"]+)"', payload,
'playwire url', default=None)
if playwire_url:
entries.append(self.url_result(self._proto_relative_url(
playwire_url, 'http:'), 'Playwire'))
return self.playlist_result(entries, playlist_id, playlist_title)
| gpl-2.0 |
FeMTTU/femus | external/jsoncpp/jsoncpp-src-0.5.0/test/rununittests.py | 249 | 2507 | import sys
import os
import os.path
import subprocess
from glob import glob
import optparse
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
def __init__( self, test_exe_path, use_valgrind=False ):
self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
self.use_valgrind = use_valgrind
def run( self, options ):
if self.use_valgrind:
cmd = VALGRIND_CMD.split()
else:
cmd = []
cmd.extend( [self.test_exe_path, '--test-auto'] + options )
process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
stdout = process.communicate()[0]
if process.returncode:
return False, stdout
return True, stdout
def runAllTests( exe_path, use_valgrind=False ):
test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
status, test_names = test_proxy.run( ['--list-tests'] )
if not status:
print >> sys.stderr, "Failed to obtain unit tests list:\n" + test_names
return 1
test_names = [name.strip() for name in test_names.strip().split('\n')]
failures = []
for name in test_names:
print 'TESTING %s:' % name,
succeed, result = test_proxy.run( ['--test', name] )
if succeed:
print 'OK'
else:
failures.append( (name, result) )
print 'FAILED'
failed_count = len(failures)
pass_count = len(test_names) - failed_count
if failed_count:
print
for name, result in failures:
print result
print '%d/%d tests passed (%d failure(s))' % (
pass_count, len(test_names), failed_count)
return 1
else:
print 'All %d tests passed' % len(test_names)
return 0
def main():
from optparse import OptionParser
parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) != 1:
        parser.error( 'Must provide the path to the test_lib_json executable.' )
sys.exit( 1 )
exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
sys.exit( exit_code )
if __name__ == '__main__':
main()
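# Invocation sketch (the executable path below is a placeholder):
#   python rununittests.py path/to/test_lib_json
#   python rununittests.py --valgrind path/to/test_lib_json
# The --valgrind form wraps each test run in VALGRIND_CMD (defined above) to
# surface memory errors and leaks.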
| lgpl-2.1 |
xutian/virt-test | virttest/libvirt_xml/nwfilter_protocols/ah_ipv6.py | 26 | 5826 | """
ah-ipv6 protocol support class(es)
http://libvirt.org/formatnwfilter.html#nwfelemsRulesProtoMiscv6
"""
from virttest.libvirt_xml import accessors, xcepts
from virttest.libvirt_xml.nwfilter_protocols import base
class Ah_ipv6(base.TypedDeviceBase):
"""
Create new Ah_ipv6 xml instances
Properties:
attrs: libvirt_xml.nwfilter_protocols.Ah_ipv6.Attr instance
"""
__slots__ = ('attrs',)
def __init__(self, type_name='file', virsh_instance=base.base.virsh):
accessors.XMLElementNest('attrs', self, parent_xpath='/',
tag_name='ah_ipv6', subclass=self.Attr,
subclass_dargs={
'virsh_instance': virsh_instance})
super(Ah_ipv6, self).__init__(protocol_tag='ah-ipv6',
type_name=type_name,
virsh_instance=virsh_instance)
def new_attr(self, **dargs):
"""
Return a new Attr instance and set properties from dargs
:param dargs: dict of attributes
:return: new Attr instance
"""
new_one = self.Attr(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def get_attr(self):
"""
Return ah-ipv6 attribute dict
:return: None if no ah-ipv6 in xml, dict of ah-ipv6's attributes.
"""
try:
ah_node = self.xmltreefile.reroot('/ah-ipv6')
except KeyError, detail:
raise xcepts.LibvirtXMLError(detail)
node = ah_node.getroot()
ah_attr = dict(node.items())
return ah_attr
class Attr(base.base.LibvirtXMLBase):
"""
Ah_ipv6 attribute XML class
Properties:
srcmacaddr: string, MAC address of sender
srcmacmask: string, Mask applied to MAC address of sender
dstmacaddr: string, MAC address of destination
dstmacmask: string, Mask applied to MAC address of destination
srcipaddr: string, Source IP address
srcipmask: string, Mask applied to source IP address
dstipaddr: string, Destination IP address
dstipmask: string, Mask applied to destination IP address
srcipfrom: string, Start of range of source IP address
srcipto: string, End of range of source IP address
dstipfrom: string, Start of range of destination IP address
dstipto: string, End of range of destination IP address
comment: string, text with max. 256 characters
state: string, comma separated list of NEW,ESTABLISHED,RELATED,INVALID or NONE
ipset: The name of an IPSet managed outside of libvirt
ipsetflags: flags for the IPSet; requires ipset attribute
"""
__slots__ = ('srcmacaddr', 'srcmacmask', 'dstmacaddr', 'dstmacmask',
'srcipaddr', 'srcipmask', 'dstipaddr', 'dstipmask',
'srcipfrom', 'srcipto', 'dstipfrom', 'dstipto',
'dscp', 'comment', 'state', 'ipset', 'ipsetflags')
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute('srcmacaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcmacaddr')
accessors.XMLAttribute('srcmacmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcmacmask')
accessors.XMLAttribute('dstmacaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstmacaddr')
accessors.XMLAttribute('dstmacmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstmacmask')
accessors.XMLAttribute('srcipaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipaddr')
accessors.XMLAttribute('srcipmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipmask')
accessors.XMLAttribute('dstipaddr', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipaddr')
accessors.XMLAttribute('dstipmask', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipmask')
accessors.XMLAttribute('srcipfrom', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipfrom')
accessors.XMLAttribute('srcipto', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='srcipto')
accessors.XMLAttribute('dstipfrom', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipfrom')
accessors.XMLAttribute('dstipto', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dstipto')
accessors.XMLAttribute('dscp', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='dscp')
accessors.XMLAttribute('comment', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='comment')
accessors.XMLAttribute('state', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='state')
accessors.XMLAttribute('ipset', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='ipset')
accessors.XMLAttribute('ipsetflags', self, parent_xpath='/',
tag_name='ah-ipv6', attribute='ipsetflags')
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<ah-ipv6/>'
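# Rough usage sketch (attribute values are invented; exact accessor behavior
# comes from libvirt_xml.accessors, so treat this as illustrative only):
#   ah = Ah_ipv6()
#   attr = ah.new_attr(srcipaddr='2001:db8::1', dstipaddr='2001:db8::2')
#   ah.attrs = attr    # nests the <ah-ipv6 .../> element into the rule XML
#   ah.get_attr()      # -> {'srcipaddr': '2001:db8::1', 'dstipaddr': '2001:db8::2'}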
| gpl-2.0 |
olafhauk/mne-python | mne/datasets/__init__.py | 6 | 1103 | """Functions for fetching remote datasets.
See :ref:`datasets` for more information.
"""
from . import fieldtrip_cmc
from . import brainstorm
from . import visual_92_categories
from . import kiloword
from . import eegbci
from . import hf_sef
from . import misc
from . import mtrf
from . import sample
from . import somato
from . import multimodal
from . import fnirs_motor
from . import opm
from . import spm_face
from . import testing
from . import _fake
from . import phantom_4dbti
from . import sleep_physionet
from . import limo
from . import refmeg_noise
from .utils import (_download_all_example_data, fetch_hcp_mmp_parcellation,
fetch_aparc_sub_parcellation)
from ._fsaverage.base import fetch_fsaverage
__all__ = [
'_download_all_example_data', '_fake', 'brainstorm', 'eegbci',
'fetch_aparc_sub_parcellation', 'fetch_fsaverage',
'fetch_hcp_mmp_parcellation', 'fieldtrip_cmc', 'hf_sef', 'kiloword',
'misc', 'mtrf', 'multimodal', 'opm', 'phantom_4dbti', 'sample',
'sleep_physionet', 'somato', 'spm_face', 'testing', 'visual_92_categories',
'limo',
]
| bsd-3-clause |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyKDE4/kdeui/KButtonGroup.py | 1 | 1093 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python3/dist-packages/PyKDE4/kdeui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KButtonGroup(__PyQt4_QtGui.QGroupBox):
# no doc
def changed(self, *args, **kwargs): # real signature unknown
pass
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def clicked(self, *args, **kwargs): # real signature unknown
pass
def id(self, *args, **kwargs): # real signature unknown
pass
def pressed(self, *args, **kwargs): # real signature unknown
pass
def released(self, *args, **kwargs): # real signature unknown
pass
def selected(self, *args, **kwargs): # real signature unknown
pass
def setSelected(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
| gpl-2.0 |
Intel-tensorflow/tensorflow | tensorflow/python/ops/ctc_ops.py | 6 | 57164 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CTC (Connectionist Temporal Classification) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.python.eager import context
from tensorflow.python.eager import function as function_eager
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import custom_gradient
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_ctc_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.nn_grad import _BroadcastMul
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_DEFUN_API_NAME_ATTRIBUTE = "api_implements"
_DEFUN_DEVICE_ATTRIBUTE = "api_preferred_device"
_CPU_DEVICE_NAME = "CPU"
_GPU_DEVICE_NAME = "GPU"
def _get_context_device_type():
"""Parse the current context and return the device type, eg CPU/GPU."""
current_device = context.context().device_name
if current_device is None:
return None
return device.DeviceSpec.from_string(current_device).device_type
def _generate_defun_backend(unique_api_name, preferred_device, func):
function_attributes = {
_DEFUN_API_NAME_ATTRIBUTE: unique_api_name,
_DEFUN_DEVICE_ATTRIBUTE: preferred_device,
}
return function_eager.defun_with_attributes(
func=func, attributes=function_attributes, autograph=False)
# pylint: disable=protected-access, invalid-name
@tf_export(v1=["nn.ctc_loss"])
@dispatch.add_dispatch_support
def ctc_loss(labels,
inputs=None,
sequence_length=None,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False,
time_major=True,
logits=None):
"""Computes the CTC (Connectionist Temporal Classification) Loss.
This op implements the CTC loss as presented in (Graves et al., 2006).
Input requirements:
```
sequence_length(b) <= time for all b
max(labels.indices(labels.indices[:, 1] == b, 2))
<= sequence_length(b) for all b.
```
Notes:
    This op performs the softmax operation for you, so inputs should
be e.g. linear projections of outputs by an LSTM.
The `inputs` Tensor's innermost dimension size, `num_classes`, represents
`num_labels + 1` classes, where num_labels is the number of true labels, and
the largest value `(num_classes - 1)` is reserved for the blank label.
For example, for a vocabulary containing 3 labels `[a, b, c]`,
`num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`.
Regarding the arguments `preprocess_collapse_repeated` and
`ctc_merge_repeated`:
If `preprocess_collapse_repeated` is True, then a preprocessing step runs
before loss calculation, wherein repeated labels passed to the loss
are merged into single labels. This is useful if the training labels come
from, e.g., forced alignments and therefore have unnecessary repetitions.
If `ctc_merge_repeated` is set False, then deep within the CTC calculation,
repeated non-blank labels will not be merged and are interpreted
as individual labels. This is a simplified (non-standard) version of CTC.
Here is a table of the (roughly) expected first order behavior:
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True`
Classical CTC behavior: Outputs true repeated classes with blanks in
between, and can also output repeated classes with no blanks in
between that need to be collapsed by the decoder.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False`
Never learns to output repeated classes, as they are collapsed
in the input labels before training.
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False`
Outputs repeated classes with blanks in between, but generally does not
require the decoder to collapse/merge repeated classes.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True`
Untested. Very likely will not learn to output repeated classes.
The `ignore_longer_outputs_than_inputs` option allows to specify the behavior
of the CTCLoss when dealing with sequences that have longer outputs than
inputs. If true, the CTCLoss will simply return zero gradient for those
items, otherwise an InvalidArgument error is returned, stopping training.
Args:
labels: An `int32` `SparseTensor`.
`labels.indices[i, :] == [b, t]` means `labels.values[i]` stores the id
for (batch b, time t). `labels.values[i]` must take on values in `[0,
num_labels)`. See `core/ops/ctc_ops.cc` for more details.
inputs: 3-D `float` `Tensor`.
If time_major == False, this will be a `Tensor` shaped: `[batch_size,
max_time, num_classes]`.
If time_major == True (default), this will be a `Tensor` shaped:
`[max_time, batch_size, num_classes]`. The logits.
sequence_length: 1-D `int32` vector, size `[batch_size]`. The sequence
lengths.
preprocess_collapse_repeated: Boolean. Default: False. If True, repeated
labels are collapsed prior to the CTC calculation.
ctc_merge_repeated: Boolean. Default: True.
ignore_longer_outputs_than_inputs: Boolean. Default: False. If True,
sequences with longer outputs than inputs will be ignored.
time_major: The shape format of the `inputs` Tensors. If True, these
`Tensors` must be shaped `[max_time, batch_size, num_classes]`. If False,
these `Tensors` must be shaped `[batch_size, max_time, num_classes]`.
Using `time_major = True` (default) is a bit more efficient because it
avoids transposes at the beginning of the ctc_loss calculation. However,
      most TensorFlow data is batch-major, so this function also accepts
inputs in batch-major form.
logits: Alias for inputs.
Returns:
A 1-D `float` `Tensor`, size `[batch]`, containing the negative log
probabilities.
Raises:
TypeError: if labels is not a `SparseTensor`.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
"""
return _ctc_loss_impl(
labels,
inputs,
sequence_length,
preprocess_collapse_repeated,
ctc_merge_repeated,
ignore_longer_outputs_than_inputs,
time_major,
logits,
use_cudnn=False)
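# Minimal call sketch for the v1 API above (shapes follow the docstring; the
# `max_time`, `batch_size` and `num_classes` values are placeholders):
#   labels = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1]], values=[0, 1],
#                                   dense_shape=[batch_size, 2])
#   logits = tf.zeros([max_time, batch_size, num_classes])  # time-major
#   seq_len = tf.fill([batch_size], max_time)
#   loss = tf.compat.v1.nn.ctc_loss(labels, logits, seq_len)  # shape [batch_size]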
def _ctc_loss_impl(labels,
inputs=None,
sequence_length=None,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False,
time_major=True,
logits=None,
use_cudnn=False):
# Helper function of ctc_loss with one additional param:
# use_cudnn: A bool to enable cuDNN CTC loss operation. If true, the blank
# index has to be 0.
# The second, third, etc output tensors contain the gradients. We use it in
# _CTCLossGrad() below.
if not isinstance(labels, sparse_tensor.SparseTensor):
raise TypeError("Expected labels (first argument) to be a SparseTensor")
# For internal calculations, we transpose to [time, batch, num_classes]
inputs = deprecation.deprecated_argument_lookup("logits", logits, "inputs",
inputs)
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,N) => (T,B,N)
# gen_ctc_ops.ctc_loss_v2 differs from gen_ctc_ops.ctc_loss. v2 assumes the
# blank index to be 0, but v1 views it as the last index.
if use_cudnn:
ctc_loss_func = gen_ctc_ops.ctc_loss_v2
else:
ctc_loss_func = gen_ctc_ops.ctc_loss
loss, _ = ctc_loss_func(
inputs,
labels.indices,
labels.values,
sequence_length,
preprocess_collapse_repeated=preprocess_collapse_repeated,
ctc_merge_repeated=ctc_merge_repeated,
ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
return loss
# pylint: disable=unused-argument
def _CTCLossGradImpl(op, grad_loss, _):
# Outputs are: loss, grad
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1],
message="Currently there is no way to take the second "
" derivative of ctc_loss due to the fused implementation's interaction "
" with tf.gradients()")
# Return gradient for inputs and None for
# labels_indices, labels_values and sequence_length
return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
# pylint: disable=unused-argument
@ops.RegisterGradient("CTCLoss")
def _CTCLossGrad(op, grad_loss, _):
"""The derivative provided by CTC Loss.
Args:
op: the CTCLoss op.
grad_loss: The backprop for cost.
Returns:
The CTC Loss gradient.
"""
return _CTCLossGradImpl(op, grad_loss, _)
# pylint: disable=unused-argument
@ops.RegisterGradient("CTCLossV2")
def _CTCLossV2Grad(op, grad_loss, _):
"""The derivative provided by CTC Loss V2.
Args:
op: the CTCLossV2 op.
grad_loss: The backprop for cost.
Returns:
The CTC Loss V2 gradient.
"""
return _CTCLossGradImpl(op, grad_loss, _)
@tf_export("nn.ctc_greedy_decoder")
@dispatch.add_dispatch_support
def ctc_greedy_decoder(inputs,
sequence_length,
merge_repeated=True,
blank_index=None):
"""Performs greedy decoding on the logits given in input (best path).
Given a tensor as `inputs`, the `blank_index` parameter defines the class
index of the blank symbol.
For example:
If `blank_index` is equal to 1:
>>> inf = float("inf")
>>> logits = tf.constant([[[ 0., -inf, -inf],
... [ -2.3, -inf, -0.1]],
... [[ -inf, -0.5, -inf],
... [ -inf, -inf, -0.1]],
... [[ -inf, -inf, -inf],
... [ -0.1, -inf, -2.3]]])
>>> seq_lens = tf.constant([2, 3])
>>> outputs = tf.nn.ctc_greedy_decoder(
... logits,
... seq_lens,
... blank_index=1)
Notes:
- Regardless of the value of `merge_repeated`, if an index of a
given time and batch corresponds to the `blank_index`, no new
element is emitted.
    - Default `blank_index` is `(num_classes - 1)`, unless overridden.
If `merge_repeated` is `True`, merge repeated classes in output.
This means that if consecutive logits' maximum indices are the same,
only the first of these is emitted. The sequence `A B B * B * B` (where '*'
is the blank label) becomes
* `A B B B` if `merge_repeated=True`.
* `A B B B B` if `merge_repeated=False`.
Args:
inputs: 3-D `float` `Tensor` sized `[max_time, batch_size, num_classes]`.
The logits.
sequence_length: 1-D `int32` vector containing sequence lengths, having size
`[batch_size]`.
merge_repeated: Boolean. Default: True.
blank_index: (Optional). Default: `num_classes - 1`. Define the class index
to use for the blank label. Negative values will start from num_classes,
      i.e., -1 will reproduce the ctc_greedy_decoder behavior of using
num_classes - 1 for the blank symbol, which corresponds to the default.
Returns:
A tuple `(decoded, neg_sum_logits)` where
decoded: A single-element list. `decoded[0]`
is an `SparseTensor` containing the decoded outputs s.t.:
`decoded.indices`: Indices matrix `(total_decoded_outputs, 2)`.
The rows store: `[batch, time]`.
`decoded.values`: Values vector, size `(total_decoded_outputs)`.
The vector stores the decoded classes.
`decoded.dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length]`
neg_sum_logits: A `float` matrix `(batch_size x 1)` containing, for the
sequence found, the negative of the sum of the greatest logit at each
timeframe.
"""
outputs = gen_ctc_ops.ctc_greedy_decoder(
inputs,
sequence_length,
merge_repeated=merge_repeated,
blank_index=blank_index)
(decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs
return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val,
decoded_shape)], log_probabilities)
@tf_export(v1=["nn.ctc_beam_search_decoder"])
@dispatch.add_dispatch_support
def ctc_beam_search_decoder(inputs,
sequence_length,
beam_width=100,
top_paths=1,
merge_repeated=True):
"""Performs beam search decoding on the logits given in input.
**Note** The `ctc_greedy_decoder` is a special case of the
`ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but
that decoder is faster for this special case).
If `merge_repeated` is `True`, merge repeated classes in the output beams.
This means that if consecutive entries in a beam are the same,
only the first of these is emitted. That is, when the sequence is
`A B B * B * B` (where '*' is the blank label), the return value is:
* `A B` if `merge_repeated = True`.
* `A B B B` if `merge_repeated = False`.
Args:
inputs: 3-D `float` `Tensor`, size `[max_time x batch_size x num_classes]`.
The logits.
sequence_length: 1-D `int32` vector containing sequence lengths, having size
`[batch_size]`.
beam_width: An int scalar >= 0 (beam search beam width).
top_paths: An int scalar >= 0, <= beam_width (controls output size).
merge_repeated: Boolean. Default: True.
Returns:
A tuple `(decoded, log_probabilities)` where
decoded: A list of length top_paths, where `decoded[j]`
is a `SparseTensor` containing the decoded outputs:
`decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)`
The rows store: [batch, time].
`decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`.
The vector stores the decoded classes for beam j.
`decoded[j].dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length[j]]`.
log_probability: A `float` matrix `(batch_size x top_paths)` containing
sequence log-probabilities.
"""
decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
gen_ctc_ops.ctc_beam_search_decoder(
inputs,
sequence_length,
beam_width=beam_width,
top_paths=top_paths,
merge_repeated=merge_repeated))
return ([
sparse_tensor.SparseTensor(ix, val, shape)
for (ix, val, shape) in zip(decoded_ixs, decoded_vals, decoded_shapes)
], log_probabilities)
@tf_export("nn.ctc_beam_search_decoder", v1=["nn.ctc_beam_search_decoder_v2"])
@dispatch.add_dispatch_support
def ctc_beam_search_decoder_v2(inputs,
sequence_length,
beam_width=100,
top_paths=1):
"""Performs beam search decoding on the logits given in input.
**Note** The `ctc_greedy_decoder` is a special case of the
`ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but
that decoder is faster for this special case).
Args:
inputs: 3-D `float` `Tensor`, size `[max_time, batch_size, num_classes]`.
The logits.
sequence_length: 1-D `int32` vector containing sequence lengths, having size
`[batch_size]`.
beam_width: An int scalar >= 0 (beam search beam width).
top_paths: An int scalar >= 0, <= beam_width (controls output size).
Returns:
A tuple `(decoded, log_probabilities)` where
decoded: A list of length top_paths, where `decoded[j]`
is a `SparseTensor` containing the decoded outputs:
`decoded[j].indices`: Indices matrix `[total_decoded_outputs[j], 2]`;
The rows store: `[batch, time]`.
`decoded[j].values`: Values vector, size `[total_decoded_outputs[j]]`.
The vector stores the decoded classes for beam `j`.
`decoded[j].dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length[j]]`.
log_probability: A `float` matrix `[batch_size, top_paths]` containing
sequence log-probabilities.
"""
# Note, merge_repeated is an invalid optimization that is removed from the
# public API: it returns low probability paths.
return ctc_beam_search_decoder(
inputs,
sequence_length=sequence_length,
beam_width=beam_width,
top_paths=top_paths,
merge_repeated=False)
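# Decoding sketch using the function above (`logits` and `seq_len` shaped as in
# ctc_loss; the beam settings are arbitrary):
#   decoded, log_probs = tf.nn.ctc_beam_search_decoder(
#       logits, seq_len, beam_width=10, top_paths=3)
#   best = tf.sparse.to_dense(decoded[0])  # [batch_size, max_decoded_length[0]]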
ops.NotDifferentiable("CTCGreedyDecoder")
ops.NotDifferentiable("CTCBeamSearchDecoder")
def _ctc_state_trans(label_seq):
"""Compute CTC alignment model transition matrix.
Args:
label_seq: tensor of shape [batch_size, max_seq_length]
Returns:
tensor of shape [batch_size, states, states] with a state transition matrix
computed for each sequence of the batch.
"""
with ops.name_scope("ctc_state_trans"):
label_seq = ops.convert_to_tensor(label_seq, name="label_seq")
batch_size = _get_dim(label_seq, 0)
num_labels = _get_dim(label_seq, 1)
num_label_states = num_labels + 1
num_states = 2 * num_label_states
label_states = math_ops.range(num_label_states)
blank_states = label_states + num_label_states
# Start state to first label.
start_to_label = [[1, 0]]
# Blank to label transitions.
blank_to_label = array_ops.stack([label_states[1:], blank_states[:-1]], 1)
# Label to blank transitions.
label_to_blank = array_ops.stack([blank_states, label_states], 1)
# Scatter transitions that don't depend on sequence.
indices = array_ops.concat([start_to_label, blank_to_label, label_to_blank],
0)
values = array_ops.ones([_get_dim(indices, 0)])
trans = array_ops.scatter_nd(
indices, values, shape=[num_states, num_states])
trans += linalg_ops.eye(num_states) # Self-loops.
# Label to label transitions. Disallow transitions between repeated labels
# with no blank state in between.
batch_idx = array_ops.zeros_like(label_states[2:])
indices = array_ops.stack([batch_idx, label_states[2:], label_states[1:-1]],
1)
indices = array_ops.tile(
array_ops.expand_dims(indices, 0), [batch_size, 1, 1])
batch_idx = array_ops.expand_dims(math_ops.range(batch_size), 1) * [1, 0, 0]
indices += array_ops.expand_dims(batch_idx, 1)
repeats = math_ops.equal(label_seq[:, :-1], label_seq[:, 1:])
values = 1.0 - math_ops.cast(repeats, dtypes.float32)
batched_shape = [batch_size, num_states, num_states]
label_to_label = array_ops.scatter_nd(indices, values, batched_shape)
return array_ops.expand_dims(trans, 0) + label_to_label
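# Reading of the transition structure built above, for a label sequence of
# length L: there are 2 * (L + 1) states. State 0 is the start state, states
# 1..L mean "last emitted label i", and states L+1..2L+1 are the corresponding
# blank states. Allowed moves are self-loops, label_i -> blank_i,
# blank_i -> label_{i+1}, start -> label_1, and label_i -> label_{i+1} only
# when labels i and i+1 differ (the `repeats` mask zeroes the repeated case).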
def ctc_state_log_probs(seq_lengths, max_seq_length):
"""Computes CTC alignment initial and final state log probabilities.
Create the initial/final state values directly as log values to avoid
having to take a float64 log on tpu (which does not exist).
Args:
seq_lengths: int tensor of shape [batch_size], seq lengths in the batch.
max_seq_length: int, max sequence length possible.
Returns:
initial_state_log_probs, final_state_log_probs
"""
batch_size = _get_dim(seq_lengths, 0)
num_label_states = max_seq_length + 1
num_duration_states = 2
num_states = num_duration_states * num_label_states
log_0 = math_ops.cast(
math_ops.log(math_ops.cast(0, dtypes.float64) + 1e-307), dtypes.float32)
initial_state_log_probs = array_ops.one_hot(
indices=array_ops.zeros([batch_size], dtype=dtypes.int32),
depth=num_states,
on_value=0.0,
off_value=log_0,
axis=1)
label_final_state_mask = array_ops.one_hot(
seq_lengths, depth=num_label_states, axis=0)
duration_final_state_mask = array_ops.ones(
[num_duration_states, 1, batch_size])
final_state_mask = duration_final_state_mask * label_final_state_mask
final_state_log_probs = (1.0 - final_state_mask) * log_0
final_state_log_probs = array_ops.reshape(final_state_log_probs,
[num_states, batch_size])
return initial_state_log_probs, array_ops.transpose(final_state_log_probs)
def _ilabel_to_state(labels, num_labels, ilabel_log_probs):
"""Project ilabel log probs to state log probs."""
num_label_states = _get_dim(labels, 1)
blank = ilabel_log_probs[:, :, :1]
blank = array_ops.tile(blank, [1, 1, num_label_states + 1])
one_hot = array_ops.one_hot(labels, depth=num_labels)
one_hot = array_ops.expand_dims(one_hot, axis=0)
ilabel_log_probs = array_ops.expand_dims(ilabel_log_probs, axis=2)
state_log_probs = math_ops.reduce_sum(ilabel_log_probs * one_hot, axis=3)
state_log_probs = array_ops.concat([state_log_probs, blank], axis=2)
return array_ops.pad(
state_log_probs, [[0, 0], [0, 0], [1, 0]],
constant_values=math_ops.log(0.0))
def _state_to_olabel(labels, num_labels, states):
"""Sum state log probs to ilabel log probs."""
num_label_states = _get_dim(labels, 1) + 1
label_states = states[:, :, 1:num_label_states]
blank_states = states[:, :, num_label_states:]
one_hot = array_ops.one_hot(
labels - 1,
depth=(num_labels - 1),
on_value=0.0,
off_value=math_ops.log(0.0))
one_hot = array_ops.expand_dims(one_hot, axis=0)
label_states = array_ops.expand_dims(label_states, axis=3)
label_olabels = math_ops.reduce_logsumexp(label_states + one_hot, axis=2)
blank_olabels = math_ops.reduce_logsumexp(blank_states, axis=2, keepdims=True)
return array_ops.concat([blank_olabels, label_olabels], axis=-1)
# pylint: disable=redefined-outer-name
def _state_to_olabel_unique(labels, num_labels, states, unique):
"""Sum state log probs to ilabel log probs using unique label indices."""
num_label_states = _get_dim(labels, 1) + 1
label_states = states[:, :, 1:num_label_states]
blank_states = states[:, :, num_label_states:]
unique_y, unique_idx = unique
mul_reduce = _sum_states(unique_idx, label_states)
num_frames = states.shape[0]
batch_size = states.shape[1]
num_states = num_label_states - 1
batch_state_major = array_ops.transpose(mul_reduce, perm=[1, 2, 0])
batch_state_major = array_ops.reshape(batch_state_major,
[batch_size * num_states, num_frames])
batch_offset = math_ops.range(batch_size, dtype=unique_y.dtype) * num_labels
indices = unique_y + array_ops.expand_dims(batch_offset, axis=-1)
indices = array_ops.reshape(indices, [-1, 1])
scatter = array_ops.scatter_nd(
indices=indices,
updates=batch_state_major,
shape=[batch_size * num_labels, num_frames])
scatter = array_ops.reshape(scatter, [batch_size, num_labels, num_frames])
mask = array_ops.ones_like(batch_state_major, dtype=dtypes.bool)
mask = array_ops.scatter_nd(
indices=indices,
updates=mask,
shape=[batch_size * num_labels, num_frames])
mask = array_ops.reshape(mask, [batch_size, num_labels, num_frames])
scatter = array_ops.where(
mask, scatter,
array_ops.fill(array_ops.shape(scatter), math_ops.log(0.0)))
label_olabels = array_ops.transpose(scatter, [2, 0, 1])
label_olabels = label_olabels[:, :, 1:]
blank_olabels = math_ops.reduce_logsumexp(blank_states, axis=2, keepdims=True)
return array_ops.concat([blank_olabels, label_olabels], axis=-1)
def ctc_loss_and_grad(logits, labels, label_length, logit_length, unique=None):
"""Computes the CTC loss and gradients.
  Most users will want fwd_bwd.ctc_loss.
  This function returns the computed gradient; it does not have a gradient
  of its own defined.
Args:
logits: tensor of shape [frames, batch_size, num_labels]
labels: tensor of shape [batch_size, max_label_seq_length]
label_length: tensor of shape [batch_size] Length of reference label
sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
unique: (optional) unique label indices as computed by unique(labels) If
supplied, enables an implementation that is faster and more memory
efficient on TPU.
Returns:
loss: tensor of shape [batch_size]
gradient: tensor of shape [frames, batch_size, num_labels]
"""
num_labels = _get_dim(logits, 2)
max_label_seq_length = _get_dim(labels, 1)
ilabel_log_probs = nn_ops.log_softmax(logits)
state_log_probs = _ilabel_to_state(labels, num_labels, ilabel_log_probs)
state_trans_probs = _ctc_state_trans(labels)
initial_state_log_probs, final_state_log_probs = ctc_state_log_probs(
label_length, max_label_seq_length)
fwd_bwd_log_probs, log_likelihood = _forward_backward_log(
state_trans_log_probs=math_ops.log(state_trans_probs),
initial_state_log_probs=initial_state_log_probs,
final_state_log_probs=final_state_log_probs,
observed_log_probs=state_log_probs,
sequence_length=logit_length)
if unique:
olabel_log_probs = _state_to_olabel_unique(labels, num_labels,
fwd_bwd_log_probs, unique)
else:
olabel_log_probs = _state_to_olabel(labels, num_labels, fwd_bwd_log_probs)
grad = math_ops.exp(ilabel_log_probs) - math_ops.exp(olabel_log_probs)
  # Applies the sequence mask for the gradient. It is enough to apply the mask
  # only to ilabel_log_probs because olabel_log_probs already considers the
  # mask. However, it is safe and clean to apply it to the gradient as well.
max_logit_length = _get_dim(logits, 0)
logit_mask = array_ops.sequence_mask(logit_length, max_logit_length,
dtypes.float32)
logit_mask = array_ops.transpose(logit_mask, perm=[1, 0])
logit_mask = array_ops.expand_dims(logit_mask, axis=2)
grad *= logit_mask
loss = -log_likelihood
return loss, grad
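# Illustrative sketch, not part of the library: calls the internal helper
# above directly on a tiny batch with uniform logits. The helper name and toy
# tensors are assumptions for demonstration; real callers should prefer the
# exported ctc_loss wrappers defined later in this file.
def _example_ctc_loss_and_grad():
  """Loss and gradient for two short label sequences, blank label 0."""
  frames, batch_size, num_labels = 6, 2, 4
  logits = array_ops.zeros([frames, batch_size, num_labels])
  labels = constant_op.constant([[1, 2, 1], [2, 3, 0]], dtype=dtypes.int32)
  label_length = constant_op.constant([3, 2], dtype=dtypes.int32)
  logit_length = constant_op.constant([frames, frames], dtype=dtypes.int32)
  loss, grad = ctc_loss_and_grad(logits, labels, label_length, logit_length)
  # loss: [batch_size] negative log likelihoods; grad has the logits' shape.
  return loss, grad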
def _ctc_loss_grad(op, grad_loss, _):
grad = op.outputs[1]
grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * grad]
grad += [None] * (len(op.inputs) - len(grad))
return grad
def _ctc_loss_op_standard(labels, logits, logit_length, logits_time_major,
blank_index):
part_before = logits[:, :, :blank_index]
part_after = logits[:, :, blank_index + 1:]
part_blank = logits[:, :, blank_index:blank_index + 1]
logits = array_ops.concat([part_before, part_after, part_blank], axis=2)
labels = sparse_tensor.SparseTensor(
labels.indices,
array_ops.where(labels.values < blank_index, labels.values,
labels.values - 1), labels.dense_shape)
return _ctc_loss_impl(
labels=labels,
inputs=logits,
sequence_length=logit_length,
time_major=logits_time_major,
use_cudnn=False)
def _ctc_loss_op_cudnn(labels, logits, logit_length, logits_time_major,
blank_index):
part_before = logits[:, :, :blank_index]
part_after = logits[:, :, blank_index + 1:]
part_blank = logits[:, :, blank_index:blank_index + 1]
logits = array_ops.concat([part_blank, part_before, part_after], axis=2)
labels = sparse_tensor.SparseTensor(
labels.indices,
array_ops.where(labels.values < blank_index, labels.values + 1,
labels.values), labels.dense_shape)
return _ctc_loss_impl(
labels=labels,
inputs=logits,
sequence_length=logit_length,
time_major=logits_time_major,
use_cudnn=True)
def _ctc_loss_shape(op):
return [op.inputs[2].get_shape(), op.inputs[0].get_shape()]
# pylint: disable=protected-access, invalid-name
@tf_export(v1=["nn.ctc_loss_v2"])
@dispatch.add_dispatch_support
def ctc_loss_v2(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=None,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006).
Notes:
- Same as the "Classic CTC" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss
setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True
- Labels may be supplied as either a dense, zero-padded tensor with a
vector of label sequence lengths OR as a SparseTensor.
- On TPU and GPU: Only dense padded labels are supported.
- On CPU: Caller may use SparseTensor or dense padded labels but calling with
a SparseTensor will be significantly faster.
  - Default blank label is 0 rather than num_classes - 1, unless overridden by
    blank_index.
Args:
labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size], None if labels is SparseTensor
Length of reference label sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by
ctc_unique_labels(labels). If supplied, enable a faster, memory efficient
implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
"""
if isinstance(labels, sparse_tensor.SparseTensor):
if blank_index is None:
raise ValueError(
"blank_index must be given when using SparseTensor labels.")
if blank_index < 0:
blank_index += _get_dim(logits, 2)
if blank_index != _get_dim(logits, 2) - 1:
logits = array_ops.concat([
logits[:, :, :blank_index],
logits[:, :, blank_index + 1:],
logits[:, :, blank_index:blank_index + 1],
],
axis=2)
labels = sparse_tensor.SparseTensor(
labels.indices,
array_ops.where(labels.values < blank_index, labels.values,
labels.values - 1), labels.dense_shape)
return ctc_loss(
labels=labels,
inputs=logits,
sequence_length=logit_length,
time_major=logits_time_major)
if blank_index is None:
blank_index = 0
return ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_length,
logit_length=logit_length,
logits_time_major=logits_time_major,
unique=unique,
blank_index=blank_index,
name=name)
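# Illustrative sketch, not part of the library: the SparseTensor-label path of
# ctc_loss_v2 above, which requires an explicit blank_index. The helper name
# and toy tensors are assumptions made only for this example.
def _example_ctc_loss_v2_sparse():
  """Classic CTC behavior (blank = num_classes - 1) via blank_index=-1."""
  frames, batch_size, num_labels = 5, 2, 3
  logits = array_ops.zeros([frames, batch_size, num_labels])
  labels = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=constant_op.constant([0, 1, 1], dtype=dtypes.int32),
      dense_shape=[2, 2])
  logit_length = constant_op.constant([frames, frames], dtype=dtypes.int32)
  return ctc_loss_v2(
      labels=labels,
      logits=logits,
      label_length=None,  # ignored when labels is a SparseTensor
      logit_length=logit_length,
      blank_index=-1)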
@tf_export("nn.ctc_loss", v1=[])
@dispatch.add_dispatch_support
def ctc_loss_v3(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=None,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006).
Notes:
- Same as the "Classic CTC" in TensorFlow 1.x's tf.compat.v1.nn.ctc_loss
setting of preprocess_collapse_repeated=False, ctc_merge_repeated=True
- Labels may be supplied as either a dense, zero-padded tensor with a
vector of label sequence lengths OR as a SparseTensor.
- On TPU and GPU: Only dense padded labels are supported.
- On CPU: Caller may use SparseTensor or dense padded labels but calling with
a SparseTensor will be significantly faster.
  - Default blank label is 0 rather than num_classes - 1, unless overridden by
    blank_index.
Args:
labels: tensor of shape [batch_size, max_label_seq_length] or SparseTensor
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size], None if labels is SparseTensor
Length of reference label sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by
ctc_unique_labels(labels). If supplied, enable a faster, memory efficient
implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
"""
if isinstance(labels, sparse_tensor.SparseTensor):
if blank_index is None:
raise ValueError(
"blank_index must be given when using SparseTensor labels.")
if blank_index < 0:
blank_index += _get_dim(logits, 2)
params = {
"labels": labels,
"logits": logits,
"logit_length": logit_length,
"logits_time_major": logits_time_major,
"blank_index": blank_index
}
if context.executing_eagerly():
device_type = _get_context_device_type()
can_use_gpu = (
# Either user specified GPU or unspecified but GPU is available.
(device_type == _GPU_DEVICE_NAME or
(device_type is None and context.num_gpus() > 0)))
    # Under eager context, check the device placement and prefer the
    # GPU implementation when GPU is available.
if can_use_gpu:
res = _ctc_loss_op_cudnn(**params)
else:
res = _ctc_loss_op_standard(**params)
else:
api_name = "ctc_loss_" + str(uuid.uuid4())
ctc_loss_op_standard = _generate_defun_backend(api_name, _CPU_DEVICE_NAME,
_ctc_loss_op_standard)
ctc_loss_op_cudnn = _generate_defun_backend(api_name, _GPU_DEVICE_NAME,
_ctc_loss_op_cudnn)
res = ctc_loss_op_standard(**params)
function_eager.register(ctc_loss_op_cudnn, **params)
return res
if blank_index is None:
blank_index = 0
return ctc_loss_dense(
labels=labels,
logits=logits,
label_length=label_length,
logit_length=logit_length,
logits_time_major=logits_time_major,
unique=unique,
blank_index=blank_index,
name=name)
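# Illustrative sketch, not part of the library: the dense-label path of the
# exported loss above, with batch-major logits. With the default
# blank_index=0, label id 0 is the blank and real classes start at 1. The
# helper name and toy values are assumptions made only for this example.
def _example_ctc_loss_v3_dense():
  """Dense zero-padded labels with batch-major logits."""
  frames, batch_size, num_labels = 6, 2, 4
  logits = array_ops.zeros([batch_size, frames, num_labels])
  labels = constant_op.constant([[1, 2, 1], [2, 3, 0]], dtype=dtypes.int32)
  label_length = constant_op.constant([3, 2], dtype=dtypes.int32)
  logit_length = constant_op.constant([frames, frames], dtype=dtypes.int32)
  return ctc_loss_v3(
      labels=labels,
      logits=logits,
      label_length=label_length,
      logit_length=logit_length,
      logits_time_major=False)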
def ctc_loss_dense(labels,
logits,
label_length,
logit_length,
logits_time_major=True,
unique=None,
blank_index=0,
name=None):
"""Computes CTC (Connectionist Temporal Classification) loss.
This op implements the CTC loss as presented in (Graves et al., 2006),
using the batched forward backward algorithm described in (Sim et al., 2017).
Notes:
Significant differences from tf.compat.v1.nn.ctc_loss:
Supports GPU and TPU (tf.compat.v1.nn.ctc_loss supports CPU only):
For batched operations, GPU and TPU are significantly faster than using
ctc_loss on CPU.
        This implementation also runs on CPU, but is significantly slower than
        ctc_loss.
      Blank label is 0 rather than num_classes - 1, unless overridden by
      blank_index.
Logits and labels are dense arrays with padding rather than SparseTensor.
The only mode supported is the same as:
preprocess_collapse_repeated=False, ctc_merge_repeated=True
To collapse labels, the caller can preprocess label sequence first.
  The dense implementation supports CPU, GPU and TPU. A fast path is provided
  that significantly improves memory use for large vocabulary if the caller
  preprocesses label sequences to get unique label indices on the CPU (eg. in
  the data input pipeline) using ctc_ops.unique and supplies these in the
  optional "unique" kwarg. This is especially useful for TPU and GPU but also
  works on CPU.
Args:
labels: tensor of shape [batch_size, max_label_seq_length]
logits: tensor of shape [frames, batch_size, num_labels], if
logits_time_major == False, shape is [batch_size, frames, num_labels].
label_length: tensor of shape [batch_size] Length of reference label
sequence in labels.
logit_length: tensor of shape [batch_size] Length of input sequence in
logits.
logits_time_major: (optional) If True (default), logits is shaped [time,
batch, logits]. If False, shape is [batch, time, logits]
unique: (optional) Unique label indices as computed by unique(labels). If
supplied, enable a faster, memory efficient implementation on TPU.
blank_index: (optional) Set the class index to use for the blank label.
Negative values will start from num_classes, ie, -1 will reproduce the
ctc_loss behavior of using num_classes - 1 for the blank symbol. There is
some memory/performance overhead to switching from the default of 0 as an
additional shifted copy of the logits may be created.
name: A name for this `Op`. Defaults to "ctc_loss_dense".
Returns:
loss: tensor of shape [batch_size], negative log probabilities.
References:
Connectionist Temporal Classification - Labeling Unsegmented Sequence Data
with Recurrent Neural Networks:
[Graves et al., 2006](https://dl.acm.org/citation.cfm?id=1143891)
([pdf](http://www.cs.toronto.edu/~graves/icml_2006.pdf))
Improving the efficiency of forward-backward algorithm using batched
computation in TensorFlow:
[Sim et al., 2017](https://ieeexplore.ieee.org/document/8268944)
([pdf](http://bacchiani.net/resume/papers/ASRU2017.pdf))
"""
with ops.name_scope(name, "ctc_loss_dense",
[logits, labels, label_length, logit_length]):
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
label_length = ops.convert_to_tensor(label_length, name="label_length")
logit_length = ops.convert_to_tensor(logit_length, name="logit_length")
if not logits_time_major:
logits = array_ops.transpose(logits, perm=[1, 0, 2])
if blank_index != 0:
if blank_index < 0:
blank_index += _get_dim(logits, 2)
logits = array_ops.concat([
logits[:, :, blank_index:blank_index + 1],
logits[:, :, :blank_index],
logits[:, :, blank_index + 1:],
],
axis=2)
labels = array_ops.where(labels < blank_index, labels + 1, labels)
args = [logits, labels, label_length, logit_length]
if unique:
unique_y, unique_idx = unique
if blank_index != 0:
unique_y = array_ops.where(unique_y < blank_index, unique_y + 1,
unique_y)
label_mask_len = math_ops.reduce_max(unique_idx, axis=1) + 1
max_label_length = _get_dim(unique_y, 1)
label_mask = array_ops.sequence_mask(label_mask_len, max_label_length)
unique_y = array_ops.where(label_mask, unique_y,
array_ops.zeros_like(unique_y))
args.extend([unique_y, unique_idx])
@custom_gradient.custom_gradient
def compute_ctc_loss(logits_t, labels_t, label_length_t, logit_length_t,
*unique_t):
"""Compute CTC loss."""
logits_t.set_shape(logits.shape)
labels_t.set_shape(labels.shape)
label_length_t.set_shape(label_length.shape)
logit_length_t.set_shape(logit_length.shape)
kwargs = dict(
logits=logits_t,
labels=labels_t,
label_length=label_length_t,
logit_length=logit_length_t)
if unique_t:
kwargs["unique"] = unique_t
result = ctc_loss_and_grad(**kwargs)
def grad(grad_loss):
grad = [array_ops.reshape(grad_loss, [1, -1, 1]) * result[1]]
grad += [None] * (len(args) - len(grad))
return grad
return result[0], grad
return compute_ctc_loss(*args)
@tf_export("nn.collapse_repeated")
@dispatch.add_dispatch_support
def collapse_repeated(labels, seq_length, name=None):
"""Merge repeated labels into single labels.
Args:
labels: Tensor of shape [batch, max value in seq_length]
seq_length: Tensor of shape [batch], sequence length of each batch element.
name: A name for this `Op`. Defaults to "collapse_repeated_labels".
Returns:
A tuple `(collapsed_labels, new_seq_length)` where
collapsed_labels: Tensor of shape [batch, max_seq_length] with repeated
labels collapsed and padded to max_seq_length, eg:
`[[A, A, B, B, A], [A, B, C, D, E]] => [[A, B, A, 0, 0], [A, B, C, D, E]]`
new_seq_length: int tensor of shape [batch] with new sequence lengths.
"""
with ops.name_scope(name, "collapse_repeated_labels", [labels, seq_length]):
labels = ops.convert_to_tensor(labels, name="labels")
seq_length = ops.convert_to_tensor(seq_length, name="seq_length")
# Mask labels that don't equal previous label.
label_mask = array_ops.concat([
array_ops.ones_like(labels[:, :1], dtypes.bool),
math_ops.not_equal(labels[:, 1:], labels[:, :-1])
],
axis=1)
# Filter labels that aren't in the original sequence.
maxlen = _get_dim(labels, 1)
seq_mask = array_ops.sequence_mask(seq_length, maxlen=maxlen)
label_mask = math_ops.logical_and(label_mask, seq_mask)
# Count masks for new sequence lengths.
new_seq_len = math_ops.reduce_sum(
math_ops.cast(label_mask, dtypes.int32), axis=1)
# Mask indexes based on sequence length mask.
new_maxlen = math_ops.reduce_max(new_seq_len)
idx_mask = array_ops.sequence_mask(new_seq_len, maxlen=new_maxlen)
# Flatten everything and mask out labels to keep and sparse indices.
flat_labels = array_ops.reshape(labels, [-1])
flat_label_mask = array_ops.reshape(label_mask, [-1])
flat_idx_mask = array_ops.reshape(idx_mask, [-1])
idx = math_ops.range(_get_dim(flat_idx_mask, 0))
# Scatter to flat shape.
flat = array_ops.scatter_nd(
indices=array_ops.expand_dims(
array_ops.boolean_mask(idx, flat_idx_mask), axis=1),
updates=array_ops.boolean_mask(flat_labels, flat_label_mask),
shape=array_ops.shape(flat_idx_mask))
# Reshape back to square batch.
batch_size = _get_dim(labels, 0)
new_shape = [batch_size, new_maxlen]
return (array_ops.reshape(flat, new_shape),
math_ops.cast(new_seq_len, seq_length.dtype))
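# Illustrative sketch, not part of the library: a runnable version of the
# docstring example above. The helper name and values are assumptions made
# only for this example.
def _example_collapse_repeated():
  """Merges adjacent duplicate labels and recomputes sequence lengths."""
  labels = constant_op.constant([[1, 1, 2, 2, 1], [1, 2, 3, 4, 5]])
  seq_length = constant_op.constant([5, 5])
  collapsed, new_seq_length = collapse_repeated(labels, seq_length)
  # collapsed -> [[1, 2, 1, 0, 0], [1, 2, 3, 4, 5]]; new_seq_length -> [3, 5]
  return collapsed, new_seq_length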
def dense_labels_to_sparse(dense, length):
"""Convert dense labels with sequence lengths to sparse tensor.
Args:
dense: tensor of shape [batch, max_length]
length: int tensor of shape [batch] The length of each sequence in dense.
Returns:
tf.sparse.SparseTensor with values only for the valid elements of sequences.
"""
flat_values = array_ops.reshape(dense, [-1])
flat_indices = math_ops.range(
array_ops.shape(flat_values, out_type=dtypes.int64)[0])
mask = array_ops.sequence_mask(length, maxlen=array_ops.shape(dense)[1])
flat_mask = array_ops.reshape(mask, [-1])
indices = array_ops.expand_dims(
array_ops.boolean_mask(flat_indices, flat_mask), 1)
values = array_ops.boolean_mask(flat_values, flat_mask)
sparse = sparse_tensor.SparseTensor(
indices=indices,
values=math_ops.cast(values, dtypes.int32),
dense_shape=array_ops.shape(flat_values, out_type=dtypes.int64))
reshaped = sparse_ops.sparse_reshape(sparse, array_ops.shape(dense))
max_length = math_ops.reduce_max(length)
return sparse_tensor.SparseTensor(
indices=reshaped.indices,
values=reshaped.values,
dense_shape=[
math_ops.cast(reshaped.dense_shape[0], dtypes.int64),
math_ops.cast(max_length, dtypes.int64)
])
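# Illustrative sketch, not part of the library: converting dense padded labels
# plus lengths into the SparseTensor form used by the classic CTC op. The
# helper name and toy values are assumptions made only for this example.
def _example_dense_labels_to_sparse():
  """Keeps only the first `length` entries of each row as sparse values."""
  dense = constant_op.constant([[1, 2, 1], [2, 3, 0]], dtype=dtypes.int32)
  length = constant_op.constant([3, 2], dtype=dtypes.int32)
  sparse = dense_labels_to_sparse(dense, length)
  # sparse.values -> [1, 2, 1, 2, 3]; sparse.dense_shape -> [2, 3]
  return sparse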
@tf_export("nn.ctc_unique_labels")
@dispatch.add_dispatch_support
def ctc_unique_labels(labels, name=None):
"""Get unique labels and indices for batched labels for `tf.nn.ctc_loss`.
For use with `tf.nn.ctc_loss` optional argument `unique`: This op can be
  used to preprocess labels in the input pipeline for better speed/memory use
  when computing the ctc loss on TPU.
Example:
ctc_unique_labels([[3, 4, 4, 3]]) ->
unique labels padded with 0: [[3, 4, 0, 0]]
indices of original labels in unique: [0, 1, 1, 0]
Args:
labels: tensor of shape [batch_size, max_label_length] padded with 0.
name: A name for this `Op`. Defaults to "ctc_unique_labels".
Returns:
tuple of
- unique labels, tensor of shape `[batch_size, max_label_length]`
- indices into unique labels, shape `[batch_size, max_label_length]`
"""
with ops.name_scope(name, "ctc_unique_labels", [labels]):
labels = ops.convert_to_tensor(labels, name="labels")
def _unique(x):
u = array_ops.unique(x)
y = array_ops.pad(u.y, [[0, _get_dim(u.idx, 0) - _get_dim(u.y, 0)]])
y = math_ops.cast(y, dtypes.int64)
return [y, u.idx]
return map_fn.map_fn(_unique, labels, dtype=[dtypes.int64, dtypes.int32])
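# Illustrative sketch, not part of the library: precomputing unique label
# indices and feeding them through the `unique` argument of ctc_loss_dense
# defined above. The helper name and toy values are assumptions made only for
# this example.
def _example_unique_fast_path():
  """Enables the memory-efficient loss path via ctc_unique_labels."""
  frames, batch_size, num_labels = 6, 2, 4
  logits = array_ops.zeros([frames, batch_size, num_labels])
  labels = constant_op.constant([[1, 2, 1], [2, 3, 0]], dtype=dtypes.int32)
  label_length = constant_op.constant([3, 2], dtype=dtypes.int32)
  logit_length = constant_op.constant([frames, frames], dtype=dtypes.int32)
  unique = ctc_unique_labels(labels)  # (unique labels, indices into them)
  return ctc_loss_dense(
      labels=labels,
      logits=logits,
      label_length=label_length,
      logit_length=logit_length,
      unique=unique)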
def _sum_states(idx, states):
"""Take logsumexp for each unique state out of all label states.
Args:
idx: tensor of shape [batch, label_length] For each sequence, indices into a
set of unique labels as computed by calling unique.
states: tensor of shape [frames, batch, label_length] Log probabilities for
each label state.
Returns:
    tensor of shape [frames, batch_size, label_length], log probabilities
      summed for each unique label of the sequence.
"""
with ops.name_scope("sum_states"):
idx = ops.convert_to_tensor(idx, name="idx")
num_states = _get_dim(states, 2)
states = array_ops.expand_dims(states, axis=2)
one_hot = array_ops.one_hot(
idx,
depth=num_states,
on_value=0.0,
off_value=math_ops.log(0.0),
axis=1)
return math_ops.reduce_logsumexp(states + one_hot, axis=-1)
def _forward_backward_log(state_trans_log_probs, initial_state_log_probs,
final_state_log_probs, observed_log_probs,
sequence_length):
"""Forward-backward algorithm computed in log domain.
Args:
state_trans_log_probs: tensor of shape [states, states] or if different
transition matrix per batch [batch_size, states, states]
initial_state_log_probs: tensor of shape [batch_size, states]
final_state_log_probs: tensor of shape [batch_size, states]
observed_log_probs: tensor of shape [frames, batch_size, states]
sequence_length: tensor of shape [batch_size]
Returns:
    forward backward log probabilities: tensor of shape [frames, batch, states]
log_likelihood: tensor of shape [batch_size]
Raises:
ValueError: If state_trans_log_probs has unknown or incorrect rank.
"""
if state_trans_log_probs.shape.ndims == 2:
perm = [1, 0]
elif state_trans_log_probs.shape.ndims == 3:
perm = [0, 2, 1]
else:
raise ValueError(
"state_trans_log_probs rank must be known and == 2 or 3, is: %s" %
state_trans_log_probs.shape.ndims)
bwd_state_trans_log_probs = array_ops.transpose(state_trans_log_probs, perm)
batch_size = _get_dim(observed_log_probs, 1)
def _forward(state_log_prob, obs_log_prob):
state_log_prob = array_ops.expand_dims(state_log_prob, axis=1) # Broadcast.
state_log_prob += state_trans_log_probs
state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
state_log_prob += obs_log_prob
log_prob_sum = math_ops.reduce_logsumexp(
state_log_prob, axis=-1, keepdims=True)
state_log_prob -= log_prob_sum
return state_log_prob
fwd = _scan(
_forward, observed_log_probs, initial_state_log_probs, inclusive=True)
def _backward(accs, elems):
"""Calculate log probs and cumulative sum masked for sequence length."""
state_log_prob, cum_log_sum = accs
obs_log_prob, mask = elems
state_log_prob += obs_log_prob
state_log_prob = array_ops.expand_dims(state_log_prob, axis=1) # Broadcast.
state_log_prob += bwd_state_trans_log_probs
state_log_prob = math_ops.reduce_logsumexp(state_log_prob, axis=-1)
log_prob_sum = math_ops.reduce_logsumexp(
state_log_prob, axis=-1, keepdims=True)
state_log_prob -= log_prob_sum
cum_log_sum += array_ops.squeeze(log_prob_sum) * mask
batched_mask = array_ops.expand_dims(mask, axis=1)
out = state_log_prob * batched_mask
out += final_state_log_probs * (1.0 - batched_mask)
return out, cum_log_sum
zero_log_sum = array_ops.zeros([batch_size])
maxlen = _get_dim(observed_log_probs, 0)
mask = array_ops.sequence_mask(sequence_length, maxlen, dtypes.float32)
mask = array_ops.transpose(mask, perm=[1, 0])
bwd, cum_log_sum = _scan(
_backward, (observed_log_probs, mask),
(final_state_log_probs, zero_log_sum),
reverse=True,
inclusive=True)
fwd_bwd_log_probs = fwd[1:] + bwd[1:]
fwd_bwd_log_probs_sum = math_ops.reduce_logsumexp(
fwd_bwd_log_probs, axis=2, keepdims=True)
fwd_bwd_log_probs -= fwd_bwd_log_probs_sum
fwd_bwd_log_probs += math_ops.log(array_ops.expand_dims(mask, axis=2))
log_likelihood = bwd[0, :, 0] + cum_log_sum[0]
return fwd_bwd_log_probs, log_likelihood
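# Illustrative sketch, not part of the library: runs the generic log-domain
# forward-backward pass above on a tiny two-state chain. The transition
# matrix, observations and helper name are assumptions made only for this
# example.
def _example_forward_backward_log():
  """Two states, three frames, batch of one, uniform transitions."""
  log_half = math_ops.log(constant_op.constant(0.5))
  state_trans = array_ops.fill([2, 2], log_half)
  initial = array_ops.reshape(
      array_ops.stack([0.0, math_ops.log(0.0)]), [1, 2])  # start in state 0
  final = array_ops.zeros([1, 2])  # any state may end the sequence
  observed = array_ops.fill([3, 1, 2], log_half)  # uninformative observations
  seq_len = constant_op.constant([3], dtype=dtypes.int32)
  fwd_bwd, log_likelihood = _forward_backward_log(
      state_trans_log_probs=state_trans,
      initial_state_log_probs=initial,
      final_state_log_probs=final,
      observed_log_probs=observed,
      sequence_length=seq_len)
  # fwd_bwd: [3, 1, 2] posterior state log probs; log_likelihood: [1]
  return fwd_bwd, log_likelihood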
# TODO(tombagby): This is currently faster for the ctc implementation than using
# functional_ops.scan, but could be replaced by that or something similar if
# things change.
def _scan(fn, elems, initial, reverse=False, inclusive=False, final_only=False):
"""Repeatedly applies callable `fn` to a sequence of elements.
Implemented by functional_ops.While, tpu friendly, no gradient.
This is similar to functional_ops.scan but significantly faster on tpu/gpu
for the forward backward use case.
Examples:
scan(lambda a, e: a + e, [1.0, 2.0, 3.0], 1.0) => [2.0, 4.0, 7.0]
Multiple accumulators:
scan(lambda a, e: (a[0] + e, a[1] * e), [1.0, 2.0, 3.0], (0.0, 1.0))
Multiple inputs:
scan(lambda a, e: a + (e[0] * e[1]), (elems1, elems2), 0.0)
Args:
fn: callable, fn(accumulators, element) return new accumulator values. The
(possibly nested) sequence of accumulators is the same as `initial` and
the return value must have the same structure.
elems: A (possibly nested) tensor which will be unpacked along the first
dimension. The resulting slices will be the second argument to fn. The
first dimension of all nested input tensors must be the same.
initial: A tensor or (possibly nested) sequence of tensors with initial
values for the accumulators.
reverse: (optional) True enables scan and output elems in reverse order.
inclusive: (optional) True includes the initial accumulator values in the
output. Length of output will be len(elem sequence) + 1. Not meaningful if
final_only is True.
final_only: (optional) When True, return only the final accumulated values,
not the concatenation of accumulated values for each input.
Returns:
A (possibly nested) sequence of tensors with the results of applying fn
to tensors unpacked from elems and previous accumulator values.
"""
flat_elems = [ops.convert_to_tensor(x) for x in nest.flatten(elems)]
num_elems = array_ops.shape(flat_elems[0])[0]
pack_elems = lambda x: nest.pack_sequence_as(structure=elems, flat_sequence=x)
flat_initial = [ops.convert_to_tensor(x) for x in nest.flatten(initial)]
pack = lambda x: nest.pack_sequence_as(structure=initial, flat_sequence=x)
accum_dtypes = [x.dtype for x in flat_initial]
num_accums = len(flat_initial)
# Types for counter, [outputs], [accumulators] loop arguments.
if final_only:
loop_dtypes = [dtypes.int32, dtypes.int32] + accum_dtypes
else:
loop_dtypes = [dtypes.int32, dtypes.int32] + accum_dtypes + accum_dtypes
# TODO(tombagby): Update to tfe.defun
def cond(i, num_elems, *args):
del args
return i >= 0 if reverse else i < num_elems
# The loop *args are [output tensors] + [accumulator tensors] which must
# be paired. Each output corresponds to one accumulator.
def body(i, num_elems, *args):
"""Loop body."""
i.set_shape([])
if final_only:
accum = args
else:
out, accum = args[:num_accums], args[num_accums:]
slices = [array_ops.gather(e, i) for e in flat_elems]
accum = fn(pack(accum), pack_elems(slices))
flat_accum = nest.flatten(accum)
if final_only:
new_out = []
else:
update_i = i + 1 if inclusive and not reverse else i
new_out = [
inplace_ops.alias_inplace_update(x, update_i, y)
for x, y in zip(out, flat_accum)
]
i = i - 1 if reverse else i + 1
return [i, num_elems] + new_out + flat_accum
init_i = (
array_ops.shape(flat_elems[0])[0] -
1 if reverse else constant_op.constant(0, dtype=dtypes.int32))
outputs = []
if not final_only:
num_outputs = array_ops.shape(flat_elems[0])[0] + (1 if inclusive else 0)
for initial_accum in flat_initial:
out_shape = array_ops.concat(
[[num_outputs], array_ops.shape(initial_accum)], 0)
out = inplace_ops.empty(out_shape, dtype=initial_accum.dtype, init=True)
if inclusive:
out = inplace_ops.alias_inplace_add(out, init_i + (1 if reverse else 0),
initial_accum)
outputs.append(out)
loop_in = [init_i, num_elems] + outputs + flat_initial
hostmem = [
i for i, x in enumerate(loop_in)
if x.dtype.base_dtype in (dtypes.int32, dtypes.int64)
]
if context.executing_eagerly():
loop_results = loop_in
while cond(*loop_results):
loop_results = body(*loop_results)
else:
# TODO(tombagby): Update to while_v2.
cond = function.Defun(*loop_dtypes)(cond)
body = function.Defun(*loop_dtypes)(body)
loop_results = functional_ops.While(loop_in, cond, body, hostmem=hostmem)
out = loop_results[2:num_accums + 2]
return pack(out)
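# Illustrative sketch, not part of the library: the running-sum example from
# the _scan docstring above, written with tensors. The helper name is an
# assumption made only for this example.
def _example_scan_running_sum():
  """_scan(lambda a, e: a + e, [1., 2., 3.], 1.) -> [2., 4., 7.]"""
  elems = constant_op.constant([1.0, 2.0, 3.0])
  initial = constant_op.constant(1.0)
  return _scan(lambda a, e: a + e, elems, initial)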
def _get_dim(tensor, i):
"""Get value of tensor shape[i] preferring static value if available."""
return tensor_shape.dimension_value(
tensor.shape[i]) or array_ops.shape(tensor)[i]
| apache-2.0 |
schmidtc/pysal | pysal/spreg/diagnostics.py | 6 | 35451 | """
Diagnostics for regression estimations.
"""
__author__ = "Luc Anselin [email protected], Nicholas Malizia [email protected] "
import pysal
from pysal.common import *
import scipy.sparse as SP
from math import sqrt
from utils import spmultiply, sphstack, spmin, spmax
__all__ = [
"f_stat", "t_stat", "r2", "ar2", "se_betas", "log_likelihood", "akaike", "schwarz",
"condition_index", "jarque_bera", "breusch_pagan", "white", "koenker_bassett", "vif", "likratiotest"]
def f_stat(reg):
"""
Calculates the f-statistic and associated p-value of the
regression. [Greene2003]_
(For two stage least squares see f_stat_tsls)
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
fs_result : tuple
includes value of F statistic and associated p-value
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the F-statistic for the regression.
>>> testresult = diagnostics.f_stat(reg)
Print the results tuple, including the statistic and its significance.
>>> print("%12.12f"%testresult[0],"%12.12f"%testresult[1])
('28.385629224695', '0.000000009341')
"""
k = reg.k # (scalar) number of ind. vars (includes constant)
n = reg.n # (scalar) number of observations
utu = reg.utu # (scalar) residual sum of squares
predy = reg.predy # (array) vector of predicted values (n x 1)
mean_y = reg.mean_y # (scalar) mean of dependent observations
Q = utu
U = np.sum((predy - mean_y) ** 2)
fStat = (U / (k - 1)) / (Q / (n - k))
pValue = stats.f.sf(fStat, k - 1, n - k)
fs_result = (fStat, pValue)
return fs_result
def t_stat(reg, z_stat=False):
"""
Calculates the t-statistics (or z-statistics) and associated
p-values. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
z_stat : boolean
If True run z-stat instead of t-stat
Returns
-------
ts_result : list of tuples
each tuple includes value of t statistic (or z
statistic) and associated p-value
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate t-statistics for the regression coefficients.
>>> testresult = diagnostics.t_stat(reg)
Print the tuples that contain the t-statistics and their significances.
>>> print("%12.12f"%testresult[0][0], "%12.12f"%testresult[0][1], "%12.12f"%testresult[1][0], "%12.12f"%testresult[1][1], "%12.12f"%testresult[2][0], "%12.12f"%testresult[2][1])
('14.490373143689', '0.000000000000', '-4.780496191297', '0.000018289595', '-2.654408642718', '0.010874504910')
"""
k = reg.k # (scalar) number of ind. vars (includes constant)
n = reg.n # (scalar) number of observations
vm = reg.vm # (array) coefficients of variance matrix (k x k)
betas = reg.betas # (array) coefficients of the regressors (1 x k)
variance = vm.diagonal()
tStat = betas[range(0, len(vm))].reshape(len(vm),) / np.sqrt(variance)
ts_result = []
for t in tStat:
if z_stat:
ts_result.append((t, stats.norm.sf(abs(t)) * 2))
else:
ts_result.append((t, stats.t.sf(abs(t), n - k) * 2))
return ts_result
def r2(reg):
"""
Calculates the R^2 value for the regression. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
r2_result : float
value of the coefficient of determination for the
regression
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the R^2 value for the regression.
>>> testresult = diagnostics.r2(reg)
Print the result.
>>> print("%1.8f"%testresult)
0.55240404
"""
y = reg.y # (array) vector of dep observations (n x 1)
mean_y = reg.mean_y # (scalar) mean of dep observations
utu = reg.utu # (scalar) residual sum of squares
ss_tot = ((y - mean_y) ** 2).sum(0)
r2 = 1 - utu / ss_tot
r2_result = r2[0]
return r2_result
def ar2(reg):
"""
Calculates the adjusted R^2 value for the regression. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
ar2_result : float
value of R^2 adjusted for the number of explanatory
variables.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the adjusted R^2 value for the regression.
>>> testresult = diagnostics.ar2(reg)
Print the result.
>>> print("%1.8f"%testresult)
0.53294335
"""
k = reg.k # (scalar) number of ind. variables (includes constant)
n = reg.n # (scalar) number of observations
ar2_result = 1 - (1 - r2(reg)) * (n - 1) / (n - k)
return ar2_result
def se_betas(reg):
"""
Calculates the standard error of the regression coefficients. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
----------
se_result : array
includes standard errors of each coefficient (1 x k)
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the standard errors of the regression coefficients.
>>> testresult = diagnostics.se_betas(reg)
Print the vector of standard errors.
>>> testresult
array([ 4.73548613, 0.33413076, 0.10319868])
"""
vm = reg.vm # (array) coefficients of variance matrix (k x k)
variance = vm.diagonal()
se_result = np.sqrt(variance)
return se_result
def log_likelihood(reg):
"""
Calculates the log-likelihood value for the regression. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
ll_result : float
value for the log-likelihood of the regression.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the log-likelihood for the regression.
>>> testresult = diagnostics.log_likelihood(reg)
Print the result.
>>> testresult
-187.3772388121491
"""
n = reg.n # (scalar) number of observations
utu = reg.utu # (scalar) residual sum of squares
ll_result = -0.5 * \
(n * (np.log(2 * math.pi)) + n * np.log(utu / n) + (utu / (utu / n)))
return ll_result
def akaike(reg):
"""
Calculates the Akaike Information Criterion. [Akaike1974]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
aic_result : scalar
value for Akaike Information Criterion of the
regression.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Akaike Information Criterion (AIC).
>>> testresult = diagnostics.akaike(reg)
Print the result.
>>> testresult
380.7544776242982
"""
k = reg.k # (scalar) number of explanatory vars (including constant)
try: # ML estimation, logll already exists
# spatial coefficient included in k
aic_result = 2.0 * k - 2.0 * reg.logll
except AttributeError: # OLS case
n = reg.n # (scalar) number of observations
utu = reg.utu # (scalar) residual sum of squares
aic_result = 2 * k + n * (np.log((2 * np.pi * utu) / n) + 1)
return aic_result
def schwarz(reg):
"""
Calculates the Schwarz Information Criterion. [Schwarz1978]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
bic_result : scalar
value for Schwarz (Bayesian) Information Criterion of
the regression.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Schwarz Information Criterion.
>>> testresult = diagnostics.schwarz(reg)
Print the results.
>>> testresult
386.42993851863008
"""
n = reg.n # (scalar) number of observations
k = reg.k # (scalar) number of ind. variables (including constant)
try: # ML case logll already computed
# spatial coeff included in k
sc_result = k * np.log(n) - 2.0 * reg.logll
except AttributeError: # OLS case
utu = reg.utu # (scalar) residual sum of squares
sc_result = k * np.log(n) + n * (np.log((2 * np.pi * utu) / n) + 1)
return sc_result
def condition_index(reg):
"""
    Calculates the multicollinearity condition index according to Belsley,
    Kuh and Welsch (1980) [Belsley1980]_.
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
ci_result : float
scalar value for the multicollinearity condition
index.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the condition index to check for multicollinearity.
>>> testresult = diagnostics.condition_index(reg)
Print the result.
>>> print("%1.3f"%testresult)
6.542
"""
if hasattr(reg, 'xtx'):
xtx = reg.xtx # (array) k x k projection matrix (includes constant)
elif hasattr(reg, 'hth'):
xtx = reg.hth # (array) k x k projection matrix (includes constant)
diag = np.diagonal(xtx)
scale = xtx / diag
eigval = np.linalg.eigvals(scale)
max_eigval = max(eigval)
min_eigval = min(eigval)
ci_result = sqrt(max_eigval / min_eigval)
return ci_result
def jarque_bera(reg):
"""
Jarque-Bera test for normality in the residuals. [Jarque1980]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
jb_result : dictionary
contains the statistic (jb) for the Jarque-Bera test
and the associated p-value (p-value)
df : integer
degrees of freedom for the test (always 2)
jb : float
value of the test statistic
pvalue : float
p-value associated with the statistic (chi^2
distributed with 2 df)
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Jarque-Bera test for normality of residuals.
>>> testresult = diagnostics.jarque_bera(reg)
Print the degrees of freedom for the test.
>>> testresult['df']
2
Print the test statistic.
>>> print("%1.3f"%testresult['jb'])
1.836
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.3994
"""
n = reg.n # (scalar) number of observations
u = reg.u # (array) residuals from regression
u2 = u ** 2
u3 = u ** 3
u4 = u ** 4
mu2 = np.mean(u2)
mu3 = np.mean(u3)
mu4 = np.mean(u4)
S = mu3 / (mu2 ** (1.5)) # skewness measure
K = (mu4 / (mu2 ** 2)) # kurtosis measure
jb = n * (((S ** 2) / 6) + ((K - 3) ** 2) / 24)
pvalue = stats.chisqprob(jb, 2)
jb_result = {"df": 2, "jb": jb, 'pvalue': pvalue}
return jb_result
def breusch_pagan(reg, z=None):
"""
Calculates the Breusch-Pagan test statistic to check for
heteroscedasticity. [Breusch1979]_
Parameters
----------
reg : regression object
output instance from a regression model
z : array
optional input for specifying an alternative set of
variables (Z) to explain the observed variance. By
default this is a matrix of the squared explanatory
variables (X**2) with a constant added to the first
column if not already present. In the default case,
the explanatory variables are squared to eliminate
negative values.
Returns
-------
bp_result : dictionary
contains the statistic (bp) for the test and the
associated p-value (p-value)
bp : float
scalar value for the Breusch-Pagan test statistic
df : integer
degrees of freedom associated with the test (k)
pvalue : float
p-value associated with the statistic (chi^2
distributed with k df)
Notes
-----
    x attribute in the reg object must have a constant term included. This is
    standard for spreg.OLS so no testing is done to confirm the constant.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"), "r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Breusch-Pagan test for heteroscedasticity.
>>> testresult = diagnostics.breusch_pagan(reg)
Print the degrees of freedom for the test.
>>> testresult['df']
2
Print the test statistic.
>>> print("%1.3f"%testresult['bp'])
7.900
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.0193
"""
e2 = reg.u ** 2
e = reg.u
n = reg.n
k = reg.k
ete = reg.utu
den = ete / n
g = e2 / den - 1.0
    if z is None:
x = reg.x
#constant = constant_check(x)
# if constant == False:
# z = np.hstack((np.ones((n,1)),x))**2
# else:
# z = x**2
z = spmultiply(x, x)
else:
#constant = constant_check(z)
# if constant == False:
# z = np.hstack((np.ones((n,1)),z))
pass
n, p = z.shape
# Check to identify any duplicate columns in Z
omitcolumn = []
for i in range(p):
current = z[:, i]
for j in range(p):
check = z[:, j]
if i < j:
test = abs(current - check).sum()
if test == 0:
omitcolumn.append(j)
uniqueomit = set(omitcolumn)
omitcolumn = list(uniqueomit)
# Now the identified columns must be removed (done in reverse to
# prevent renumbering)
omitcolumn.sort()
omitcolumn.reverse()
for c in omitcolumn:
z = np.delete(z, c, 1)
n, p = z.shape
df = p - 1
# Now that the variables are prepared, we calculate the statistic
zt = np.transpose(z)
gt = np.transpose(g)
gtz = np.dot(gt, z)
ztg = np.dot(zt, g)
ztz = np.dot(zt, z)
ztzi = la.inv(ztz)
part1 = np.dot(gtz, ztzi)
part2 = np.dot(part1, ztg)
bp_array = 0.5 * part2
bp = bp_array[0, 0]
pvalue = stats.chisqprob(bp, df)
bp_result = {'df': df, 'bp': bp, 'pvalue': pvalue}
return bp_result
def white(reg):
"""
Calculates the White test to check for heteroscedasticity. [White1980]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
white_result : dictionary
contains the statistic (white), degrees of freedom
(df) and the associated p-value (pvalue) for the
White test.
white : float
scalar value for the White test statistic.
df : integer
degrees of freedom associated with the test
pvalue : float
p-value associated with the statistic (chi^2
distributed with k df)
Notes
-----
    x attribute in the reg object must have a constant term included. This is
    standard for spreg.OLS so no testing is done to confirm the constant.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the White test for heteroscedasticity.
>>> testresult = diagnostics.white(reg)
Print the degrees of freedom for the test.
    >>> print(testresult['df'])
5
Print the test statistic.
>>> print("%1.3f"%testresult['wh'])
19.946
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.0013
"""
e = reg.u ** 2
k = int(reg.k)
n = int(reg.n)
y = reg.y
X = reg.x
#constant = constant_check(X)
# Check for constant, if none add one, see Greene 2003, pg. 222
# if constant == False:
# X = np.hstack((np.ones((n,1)),X))
# Check for multicollinearity in the X matrix
ci = condition_index(reg)
if ci > 30:
white_result = "Not computed due to multicollinearity."
return white_result
# Compute cross-products and squares of the regression variables
if type(X).__name__ == 'ndarray':
A = np.zeros((n, (k * (k + 1)) // 2))
elif type(X).__name__ == 'csc_matrix' or type(X).__name__ == 'csr_matrix':
# this is probably inefficient
A = SP.lil_matrix((n, (k * (k + 1)) // 2))
else:
raise Exception, "unknown X type, %s" % type(X).__name__
counter = 0
for i in range(k):
for j in range(i, k):
v = spmultiply(X[:, i], X[:, j], False)
A[:, counter] = v
counter += 1
# Append the original variables
A = sphstack(X, A) # note: this also converts a LIL to CSR
n, k = A.shape
# Check to identify any duplicate or constant columns in A
omitcolumn = []
for i in range(k):
current = A[:, i]
# remove all constant terms (will add a constant back later)
if spmax(current) == spmin(current):
omitcolumn.append(i)
pass
# do not allow duplicates
for j in range(k):
check = A[:, j]
if i < j:
test = abs(current - check).sum()
if test == 0:
omitcolumn.append(j)
uniqueomit = set(omitcolumn)
omitcolumn = list(uniqueomit)
# Now the identified columns must be removed
if type(A).__name__ == 'ndarray':
A = np.delete(A, omitcolumn, 1)
elif type(A).__name__ == 'csc_matrix' or type(A).__name__ == 'csr_matrix':
# this is probably inefficient
keepcolumn = range(k)
for i in omitcolumn:
keepcolumn.remove(i)
A = A[:, keepcolumn]
else:
raise Exception, "unknown A type, %s" % type(X).__name__
A = sphstack(np.ones((A.shape[0], 1)), A) # add a constant back in
n, k = A.shape
# Conduct the auxiliary regression and calculate the statistic
import ols as OLS
aux_reg = OLS.BaseOLS(e, A)
aux_r2 = r2(aux_reg)
wh = aux_r2 * n
df = k - 1
pvalue = stats.chisqprob(wh, df)
white_result = {'df': df, 'wh': wh, 'pvalue': pvalue}
return white_result
def koenker_bassett(reg, z=None):
"""
Calculates the Koenker-Bassett test statistic to check for
heteroscedasticity. [Koenker1982]_ [Greene2003]_
Parameters
----------
reg : regression output
output from an instance of a regression class
z : array
optional input for specifying an alternative set of
variables (Z) to explain the observed variance. By
default this is a matrix of the squared explanatory
variables (X**2) with a constant added to the first
column if not already present. In the default case,
the explanatory variables are squared to eliminate
negative values.
Returns
-------
kb_result : dictionary
contains the statistic (kb), degrees of freedom (df)
and the associated p-value (pvalue) for the test.
kb : float
scalar value for the Koenker-Bassett test statistic.
df : integer
degrees of freedom associated with the test
pvalue : float
p-value associated with the statistic (chi^2
distributed)
Notes
-----
    x attribute in the reg object must have a constant term included. This is
    standard for spreg.OLS so no testing is done to confirm the constant.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the Koenker-Bassett test for heteroscedasticity.
>>> testresult = diagnostics.koenker_bassett(reg)
Print the degrees of freedom for the test.
>>> testresult['df']
2
Print the test statistic.
>>> print("%1.3f"%testresult['kb'])
5.694
Print the associated p-value.
>>> print("%1.4f"%testresult['pvalue'])
0.0580
"""
# The notation here matches that of Greene (2003).
u = reg.u ** 2
e = reg.u
n = reg.n
k = reg.k
x = reg.x
ete = reg.utu
#constant = constant_check(x)
ubar = ete / n
ubari = ubar * np.ones((n, 1))
g = u - ubari
v = (1.0 / n) * np.sum((u - ubar) ** 2)
    if z is None:
x = reg.x
#constant = constant_check(x)
# if constant == False:
# z = np.hstack((np.ones((n,1)),x))**2
# else:
# z = x**2
z = spmultiply(x, x)
else:
#constant = constant_check(z)
# if constant == False:
# z = np.hstack((np.ones((n,1)),z))
pass
n, p = z.shape
# Check to identify any duplicate columns in Z
omitcolumn = []
for i in range(p):
current = z[:, i]
for j in range(p):
check = z[:, j]
if i < j:
test = abs(current - check).sum()
if test == 0:
omitcolumn.append(j)
uniqueomit = set(omitcolumn)
omitcolumn = list(uniqueomit)
# Now the identified columns must be removed (done in reverse to
# prevent renumbering)
omitcolumn.sort()
omitcolumn.reverse()
for c in omitcolumn:
z = np.delete(z, c, 1)
n, p = z.shape
df = p - 1
# Conduct the auxiliary regression.
zt = np.transpose(z)
gt = np.transpose(g)
gtz = np.dot(gt, z)
ztg = np.dot(zt, g)
ztz = np.dot(zt, z)
ztzi = la.inv(ztz)
part1 = np.dot(gtz, ztzi)
part2 = np.dot(part1, ztg)
kb_array = (1.0 / v) * part2
kb = kb_array[0, 0]
pvalue = stats.chisqprob(kb, df)
kb_result = {'kb': kb, 'df': df, 'pvalue': pvalue}
return kb_result
def vif(reg):
"""
Calculates the variance inflation factor for each independent variable.
For the ease of indexing the results, the constant is currently
included. This should be omitted when reporting the results to the
output text. [Greene2003]_
Parameters
----------
reg : regression object
output instance from a regression model
Returns
-------
vif_result : list of tuples
each tuple includes the vif and the tolerance, the
order of the variables corresponds to their order in
the reg.x matrix
Examples
--------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
Read the DBF associated with the Columbus data.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
Create the dependent variable vector.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Create the matrix of independent variables.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
Run an OLS regression.
>>> reg = OLS(y,X)
Calculate the variance inflation factor (VIF).
>>> testresult = diagnostics.vif(reg)
Select the tuple for the income variable.
>>> incvif = testresult[1]
Print the VIF for income.
>>> print("%12.12f"%incvif[0])
1.333117497189
Print the tolerance for income.
>>> print("%12.12f"%incvif[1])
0.750121427487
Repeat for the home value variable.
>>> hovalvif = testresult[2]
>>> print("%12.12f"%hovalvif[0])
1.333117497189
>>> print("%12.12f"%hovalvif[1])
0.750121427487
"""
X = reg.x
n, k = X.shape
vif_result = []
for j in range(k):
Z = X.copy()
Z = np.delete(Z, j, 1)
y = X[:, j]
import ols as OLS
aux = OLS.BaseOLS(y, Z)
mean_y = aux.mean_y
utu = aux.utu
ss_tot = sum((y - mean_y) ** 2)
if ss_tot == 0:
resj = pysal.MISSINGVALUE
else:
r2aux = 1 - utu / ss_tot
tolj = 1 - r2aux
vifj = 1 / tolj
resj = (vifj, tolj)
vif_result.append(resj)
return vif_result
def constant_check(array):
"""
Checks to see numpy array includes a constant.
Parameters
----------
array : array
an array of variables to be inspected
Returns
-------
constant : boolean
true signifies the presence of a constant
Example
-------
>>> import numpy as np
>>> import pysal
>>> import diagnostics
>>> from ols import OLS
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("HOVAL"))
>>> X = np.array(X).T
>>> reg = OLS(y,X)
>>> diagnostics.constant_check(reg.x)
True
"""
n, k = array.shape
constant = False
for j in range(k):
variable = array[:, j]
varmin = variable.min()
varmax = variable.max()
if varmin == varmax:
constant = True
break
return constant
def likratiotest(reg0, reg1):
"""
Likelihood ratio test statistic [Greene2003]_
Parameters
----------
reg0 : regression object for constrained model (H0)
reg1 : regression object for unconstrained model (H1)
Returns
-------
likratio : dictionary
contains the statistic (likr), the degrees of
freedom (df) and the p-value (pvalue)
likr : float
likelihood ratio statistic
df : integer
degrees of freedom
p-value : float
p-value
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> import scipy.stats as stats
>>> import pysal.spreg.ml_lag as lag
Use the baltim sample data set
>>> db = ps.open(ps.examples.get_path("baltim.dbf"),'r')
>>> y_name = "PRICE"
>>> y = np.array(db.by_col(y_name)).T
>>> y.shape = (len(y),1)
>>> x_names = ["NROOM","NBATH","PATIO","FIREPL","AC","GAR","AGE","LOTSZ","SQFT"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> ww = ps.open(ps.examples.get_path("baltim_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w.transform = 'r'
OLS regression
>>> ols1 = ps.spreg.OLS(y,x)
ML Lag regression
>>> mllag1 = lag.ML_Lag(y,x,w)
>>> lr = likratiotest(ols1,mllag1)
>>> print "Likelihood Ratio Test: {0:.4f} df: {1} p-value: {2:.4f}".format(lr["likr"],lr["df"],lr["p-value"])
Likelihood Ratio Test: 44.5721 df: 1 p-value: 0.0000
"""
likratio = {}
try:
likr = 2.0 * (reg1.logll - reg0.logll)
except AttributeError:
raise Exception, "Missing or improper log-likelihoods in regression objects"
if likr < 0.0: # always enforces positive likelihood ratio
likr = -likr
pvalue = stats.chisqprob(likr, 1)
likratio = {"likr": likr, "df": 1, "p-value": pvalue}
return likratio
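# Illustrative sketch (not part of the original module): stats.chisqprob is the
# chi-square survival function, so on newer SciPy releases the equivalent call
# is stats.chi2.sf(likr, df).  A generalized form of the test, for df
# restrictions instead of the hard-coded single restriction above, could look
# like this:
def _likratio_sketch(logll_restricted, logll_unrestricted, df):
    likr = abs(2.0 * (logll_unrestricted - logll_restricted))
    pvalue = stats.chi2.sf(likr, df)
    return {"likr": likr, "df": df, "p-value": pvalue}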
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| bsd-3-clause |
olologin/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
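# Each one-vs-all boundary satisfies coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0,
# so for a given x0 the boundary's x1 coordinate is
# x1 = -(coef[c, 0] * x0 + intercept[c]) / coef[c, 1], which is what line() computes below.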
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
qvit/django-color-captcha | color_captcha/utils.py | 1 | 1095 | # -*- coding: utf-8 -*-
class IncorrectCaptchaColorsFormatError(Exception):
message = "Incorrect 'CAPTCHA_COLORS' setting format (must be iterable of two-string-value tuples)"
def __str__(self):
return self.message
class TooFewCaptchaColorsError(Exception):
message = "Please specify al least two colors in 'CAPTCHA_COLORS' setting"
def __str__(self):
return self.message
def check_colors(COLORS):
def check_color_option(color_option):
try:
if not (len(color_option) == 2 and
isinstance(color_option[0], basestring) and
isinstance(color_option[1], basestring)):
raise IncorrectCaptchaColorsFormatError()
except IndexError:
raise IncorrectCaptchaColorsFormatError()
try:
iter(COLORS)
except TypeError:
raise IncorrectCaptchaColorsFormatError()
else:
if len(COLORS) < 2:
raise TooFewCaptchaColorsError()
else:
for color_option in COLORS:
check_color_option(color_option)
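if __name__ == '__main__':
    # Illustrative usage (not part of the original module): a valid setting is
    # any iterable of two-string tuples with at least two entries.
    EXAMPLE_CAPTCHA_COLORS = (
        ('red', 'Red'),
        ('green', 'Green'),
    )
    check_colors(EXAMPLE_CAPTCHA_COLORS)    # passes silently
    try:
        check_colors((('red', 'Red'),))     # only one color -> raises
    except TooFewCaptchaColorsError as exc:
        print(exc)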
| mit |
salabim/salabim | test/test_componentgenerator.py | 1 | 4345 | import salabim as sim
import pytest
class X(sim.Component):
def setup(self, color='red'):
self.color = color
self.enter(components)
class Vehicle(sim.Component):
def setup(self):
self.enter(components)
class Car(Vehicle):
pass
class Bus(Vehicle):
pass
class Truck(Vehicle):
pass
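# exp() is the shared harness for the tests below: it creates a fresh
# environment and queue, lets a ComponentGenerator emit components of class X
# with the given arguments, runs the model (optionally for run_time), and
# returns the queue of generated components.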
def exp(X, run_time=None, *args, **kwargs):
global components
env = sim.Environment()
components = sim.Queue()
sim.ComponentGenerator(X, *args, **kwargs)
env.run(run_time)
return components
def test_iat():
components = exp(X, iat=sim.Uniform(0, 2), at=500, till=1000, force_at=True)
assert len(components) == pytest.approx(500, rel=1e-2)
assert components[0].enter_time(components) == 500
assert 998 <= components[-1].enter_time(components) <= 1000
with pytest.raises(ValueError):
components = exp(X, iat=sim.Uniform(0, 2), at=500, till=1000, force_at=True, force_till=True)
components = exp(X, iat=sim.Uniform(0, 2), till=1000, force_at=True)
assert len(components) == pytest.approx(1000, rel=1e-2)
assert components[-1].enter_time(components) <= 1000
components = exp(X, iat=20,at=10, till=111,force_at=True)
assert len(components) == 6
assert components[0].enter_time(components) == 10
assert components[-1].enter_time(components) == 110
components = exp(X, iat=20,at=10, till=111)
assert len(components) == 5
assert components[-1].enter_time(components) == 110
components = exp(X, iat=20,at=10,number=5,force_at=True)
assert len(components) == 5
assert components[0].enter_time(components) == 10
assert components[-1].enter_time(components) == 90
components = exp(X, iat=20,at=10,number=5)
assert len(components) == 5
assert components[0].enter_time(components) == 30
assert components[-1].enter_time(components) == 110
components = exp(X, run_time=110, iat=20, at=10)
assert len(components) == 4
assert components[0].enter_time(components) == 30
assert components[-1].enter_time(components) == 90
def test_spread():
components = exp(X, at=100, till=200, number=10)
assert len(components) == 10
assert components[0].enter_time(components) > 100
assert components[-1].enter_time(components) < 200
components = exp(X, at=100, till=200, number=10, force_at=True)
assert len(components) == 10
assert components[0].enter_time(components) == 100
assert components[-1].enter_time(components) < 200
components = exp(X, at=100, till=200, number=10, force_till=True)
assert len(components) == 10
assert components[0].enter_time(components) > 100
assert components[-1].enter_time(components) == 200
components = exp(X, at=100, till=200, number=10, force_at=True, force_till=True)
assert len(components) == 10
assert components[0].enter_time(components) ==100
assert components[-1].enter_time(components) == 200
components = exp(X, at=100, till=200, number=1, force_till=True)
assert len(components) == 1
assert components[0].enter_time(components) == 200
components = exp(X, at=100, till=200, number=1, force_at=True)
assert len(components) == 1
assert components[0].enter_time(components) == 100
with pytest.raises(ValueError):
components = exp(X, at=100, till=200, number=1, force_at=True, force_till=True)
components = exp(X, at=100, till=200, number=0, force_till=True)
assert len(components) == 0
def test_propagate():
components = exp(X, number=1, iat=1)
assert components[0].color == 'red'
assert components[0].name() == 'x.0'
components = exp(X, number=1, iat=1, color='blue', name='my name,')
assert components[0].color == 'blue'
assert components[0].name() == 'my name.1'
def test_dis():
components = exp(sim.Pdf((Car, Bus, Truck), (50, 30, 20)), iat=1, number=1000)
names = sim.Monitor()
for component in components:
names.tally(component.name().split('.')[0])
# names.print_histogram(values=True, sort_on_weight=True)
if __name__ == "__main__":
pytest.main(["-vv", "-s", __file__])
| mit |
imatge-upc/unsupervised-2017-cvprw | autoencoder_train.py | 1 | 7127 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='1'
from os import listdir
import sys
import time
import tools.ops
import subprocess
import numpy as np
import tensorflow as tf
import scipy.misc as sm
from models.autoencoder_net import *
from tools.utilities import *
from tools.ops import *
from random import randint
flags = tf.app.flags
flags.DEFINE_integer('batch_size', 10, 'Batch size.')
flags.DEFINE_integer('num_epochs', 2000, 'Number of epochs.') # ~13 min per epoch
flags.DEFINE_integer('num_gpus', 4, 'Number of GPUs.')
flags.DEFINE_integer('seq_length', 16, 'Length of each video clip.')
flags.DEFINE_integer('height', 128, 'Height of video frame.')
flags.DEFINE_integer('width', 128, 'Width of video frame.')
flags.DEFINE_integer('channel', 3, 'Number of channels for each frame.')
flags.DEFINE_integer('num_sample', 10060, 'Number of samples in this dataset.')
FLAGS = flags.FLAGS
prefix = 'autoencoder'
model_save_dir = './ckpt/' + prefix
logs_save_dir = './logs/' + prefix
pred_save_dir = './output/' + prefix
loss_save_dir = './loss'
train_list_path = './dataset/trainlist.txt'
dataset_path = './dataset/UCF-101-tf-records'
evaluation_job = './jobs/autoencoder_val'
use_pretrained_model = True
save_predictions = True
def run_training():
# Create model directory
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
model_filename = "./mfb_ae_ucf24.model"
# Consturct computational graph
tower_grads = []
tower_losses, tower_rec_losses, tower_wd_losses = [], [], []
global_step = tf.get_variable(
'global_step',
[],
initializer=tf.constant_initializer(0),
trainable=False
)
starter_learning_rate = 1e-4
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
1000000, 0.8, staircase=True)
opt = tf.train.AdamOptimizer(learning_rate)
# Create a session for running Ops on the Graph.
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
coord = tf.train.Coordinator()
threads = None
train_list_file = open(train_list_path, 'r')
train_list = train_list_file.read().splitlines()
for i, line in enumerate(train_list):
train_list[i] = os.path.join(dataset_path, train_list[i])
assert(len(train_list) % FLAGS.num_gpus == 0)
num_for_each_gpu = len(train_list) // FLAGS.num_gpus
clips_list = []
with sess.as_default():
for i in range(FLAGS.num_gpus):
clips, _, _ = input_pipeline(train_list[i*num_for_each_gpu:(i+1)*num_for_each_gpu], \
FLAGS.batch_size, num_epochs=FLAGS.num_epochs, is_training=True)
clips_list.append(clips)
autoencoder_list = []
with tf.variable_scope('vars') as var_scope:
for gpu_index in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % (gpu_index)):
with tf.name_scope('%s_%d' % ('tower', gpu_index)) as scope:
# construct model
autoencoder = autoencoder_net(clips_list[gpu_index], FLAGS.height, FLAGS.width, FLAGS.seq_length, \
FLAGS.channel, FLAGS.batch_size)
autoencoder_list.append(autoencoder)
loss, rec_loss, wd_loss = tower_loss(scope, autoencoder, clips_list[gpu_index])
var_scope.reuse_variables()
vars_to_optimize = tf.trainable_variables()
grads = opt.compute_gradients(loss, var_list=vars_to_optimize)
tower_grads.append(grads)
tower_losses.append(loss)
tower_rec_losses.append(rec_loss)
tower_wd_losses.append(wd_loss)
# concatenate the losses of all towers
loss_op = tf.reduce_mean(tower_losses)
rec_loss_op = tf.reduce_mean(tower_rec_losses)
wd_loss_op = tf.reduce_mean(tower_wd_losses)
tf.summary.scalar('loss', loss_op)
tf.summary.scalar('rec_loss', rec_loss_op)
tf.summary.scalar('wd_loss', wd_loss_op)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
grads = average_gradients(tower_grads)
with tf.control_dependencies(update_ops):
train_op = opt.apply_gradients(grads, global_step=global_step)
# saver for saving checkpoints
saver = tf.train.Saver(max_to_keep=10)
init = tf.initialize_all_variables()
sess.run(init)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
if use_pretrained_model:
print('[*] Loading checkpoint ...')
model = tf.train.latest_checkpoint(model_save_dir)
if model is not None:
saver.restore(sess, model)
print('[*] Loading success: %s!'%model)
else:
print('[*] Loading failed ...')
# Create summary writer
merged = tf.summary.merge_all()
if not os.path.exists(logs_save_dir):
os.makedirs(logs_save_dir)
sum_writer = tf.summary.FileWriter(logs_save_dir, sess.graph)
# Create prediction output folder
if not os.path.exists(pred_save_dir):
os.makedirs(pred_save_dir)
# Create loss output folder
if not os.path.exists(loss_save_dir):
os.makedirs(loss_save_dir)
loss_file = open(os.path.join(loss_save_dir, prefix+'.txt'), 'w')
total_steps = (FLAGS.num_sample / (FLAGS.num_gpus * FLAGS.batch_size)) * FLAGS.num_epochs
# start queue runner
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
gpu_idx = 0
try:
with sess.as_default():
print('\n\n\n*********** start training ***********\n\n\n')
while not coord.should_stop():
# Run training steps or whatever
start_time = time.time()
sess.run(train_op)
duration = time.time() - start_time
step = global_step.eval()
if step == 1 or step % 10 == 0: # evaluate loss
loss, rec_loss, wd_loss, lr = sess.run([loss_op, rec_loss_op, wd_loss_op, learning_rate])
line = 'step %d/%d, loss=%.8f, rec=%.8f, lwd=%.8f, dur=%.3f, lr=%.8f' \
%(step, total_steps, loss, rec_loss, wd_loss, duration, lr)
print(line)
loss_file.write(line + '\n')
loss_file.flush()
if step == 1 or step % 10 == 0: # save summary
summary = summary_str = sess.run(merged)
sum_writer.add_summary(summary, step)
if step % 100 == 0 and save_predictions: # save current predictions
clips = clips_list[gpu_idx]
autoencoder = autoencoder_list[gpu_idx]
gt_vid, rec_vid = sess.run([clips[0], autoencoder.rec_vid[0]])
gt_vid, rec_vid = (gt_vid+1)/2*255.0, (rec_vid+1)/2*255.0
rec_img = gen_pred_vid(rec_vid)
gt_img = gen_pred_vid(gt_vid)
save_img = np.concatenate((rec_img, gt_img))
sm.imsave(os.path.join(pred_save_dir, '%07d.jpg'%step), save_img)
gpu_idx += 1
if gpu_idx == FLAGS.num_gpus:
gpu_idx = 0
if step % 500 == 0: # save checkpoint
saver.save(sess, os.path.join(model_save_dir, model_filename), global_step=global_step)
if step % 500 == 0:
pass
# launch a new script for validation (please modify it for your own script)
#subprocess.check_output(['python', evaluation_job])
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
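# Illustrative sketch (an assumption -- the real average_gradients() used above
# comes from tools.ops and is not shown here): multi-GPU training commonly
# averages each variable's gradient across the per-tower gradient lists like
# this.
def _average_gradients_sketch(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):              # one tuple per variable
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)    # mean over towers
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads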
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
| mit |
pichillilorenzo/JavaScriptEnhancements | src/libs/__init__.py | 1 | 1423 | from . import global_vars
from .javascript_enhancements_settings import javaScriptEnhancements
from . import util
from .node import NodeJS
from .npm import NPM
from .flow import main as flow
from .flow.flow_cli import FlowCLI
from .flow.flow_ide_server import FlowIDEServer, flow_ide_clients, JavascriptEnhancementsStartFlowIDEServerEventListener
from .animation_loader import AnimationLoader
from .repeated_timer import RepeatedTimer
from .hook import Hook
from .terminal import Terminal
from .popup_manager import popup_manager
from .socket import SocketClient
from .socket import SocketServer
from .folder_explorer import FolderExplorer
from .window_view import window_view_manager, WindowView, JavascriptEnhancementsWindowViewKeypressCommand,JavascriptEnhancementsWindowViewEventListener
from .execute_on_terminal import JavascriptEnhancementsExecuteOnTerminalCommand
__all__ = [
"global_vars",
"javaScriptEnhancements",
"util",
"NodeJS",
"NPM",
"AnimationLoader",
"RepeatedTimer",
"Hook",
"Terminal",
"popup_manager",
"SocketClient",
"SocketServer",
"FolderExplorer",
"window_view_manager",
"WindowView",
"JavascriptEnhancementsWindowViewKeypressCommand",
"JavascriptEnhancementsWindowViewEventListener",
"JavascriptEnhancementsExecuteOnTerminalCommand",
"flow",
"FlowCLI",
"FlowIDEServer",
"flow_ide_clients",
"JavascriptEnhancementsStartFlowIDEServerEventListener"
]
| mit |
ESOedX/edx-platform | lms/djangoapps/commerce/tests/test_signals.py | 1 | 13844 | # coding=UTF-8
"""
Tests for signal handling in commerce djangoapp.
"""
from __future__ import absolute_import, unicode_literals
import base64
import json
import ddt
import httpretty
import mock
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from django.test.utils import override_settings
from opaque_keys.edx.keys import CourseKey
from requests import Timeout
from six.moves.urllib.parse import urljoin # pylint: disable=import-error
from course_modes.models import CourseMode
from student.signals import REFUND_ORDER
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from ..models import CommerceConfiguration
from ..utils import _generate_refund_notification_body, _send_refund_notification, create_zendesk_ticket
from . import JSON
from .mocks import mock_create_refund, mock_process_refund
ZENDESK_URL = 'http://zendesk.example.com/'
ZENDESK_USER = '[email protected]'
ZENDESK_API_KEY = 'abc123'
@ddt.ddt
@override_settings(ZENDESK_URL=ZENDESK_URL, ZENDESK_USER=ZENDESK_USER, ZENDESK_API_KEY=ZENDESK_API_KEY)
class TestRefundSignal(TestCase):
"""
Exercises logic triggered by the REFUND_ORDER signal.
"""
def setUp(self):
super(TestRefundSignal, self).setUp()
# Ensure the E-Commerce service user exists
UserFactory(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME, is_staff=True)
self.requester = UserFactory(username="test-requester")
self.student = UserFactory(
username="test-student",
email="[email protected]",
)
self.course_enrollment = CourseEnrollmentFactory(
user=self.student,
course_id=CourseKey.from_string('course-v1:org+course+run'),
mode=CourseMode.VERIFIED,
)
self.course_enrollment.refundable = mock.Mock(return_value=True)
self.config = CommerceConfiguration.current()
self.config.enable_automatic_refund_approval = True
self.config.save()
def send_signal(self):
"""
DRY helper: emit the REFUND_ORDER signal, as is done in
common.djangoapps.student.models after a successful unenrollment.
"""
REFUND_ORDER.send(sender=None, course_enrollment=self.course_enrollment)
@override_settings(
ECOMMERCE_PUBLIC_URL_ROOT=None,
ECOMMERCE_API_URL=None,
)
def test_no_service(self):
"""
Ensure that the receiver quietly bypasses attempts to initiate
refunds when there is no external service configured.
"""
with mock.patch('lms.djangoapps.commerce.signals.refund_seat') as mock_refund_seat:
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('lms.djangoapps.commerce.signals.refund_seat')
def test_receiver(self, mock_refund_seat):
"""
Ensure that the REFUND_ORDER signal triggers correct calls to
refund_seat(), when it is appropriate to do so.
TODO (jsa): ideally we would assert that the signal receiver got wired
up independently of the import statement in this module. I'm not aware
of any reliable / sane way to do this.
"""
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# if the course_enrollment is not refundable, we should not try to initiate a refund.
mock_refund_seat.reset_mock()
self.course_enrollment.refundable = mock.Mock(return_value=False)
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('lms.djangoapps.commerce.signals.refund_seat')
@mock.patch('lms.djangoapps.commerce.signals.get_request_user', return_value=None)
def test_requester(self, mock_get_request_user, mock_refund_seat):
"""
Ensure the right requester is specified when initiating refunds.
"""
# no HTTP request/user: auth to commerce service as the unenrolled student.
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# HTTP user is the student: auth to commerce service as the unenrolled student.
mock_get_request_user.return_value = self.student
mock_refund_seat.reset_mock()
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# HTTP user is another user: auth to commerce service as the requester.
mock_get_request_user.return_value = self.requester
mock_refund_seat.reset_mock()
self.send_signal()
self.assertTrue(mock_refund_seat.called)
self.assertEqual(mock_refund_seat.call_args[0], (self.course_enrollment,))
# HTTP user is another server (AnonymousUser): do not try to initiate a refund at all.
mock_get_request_user.return_value = AnonymousUser()
mock_refund_seat.reset_mock()
self.send_signal()
self.assertFalse(mock_refund_seat.called)
@mock.patch('lms.djangoapps.commerce.signals.log.exception')
def test_error_logging(self, mock_log_exception):
"""
Ensure that unexpected Exceptions are logged as errors (but do not
break program flow).
"""
with mock_create_refund(status=500):
self.send_signal()
self.assertTrue(mock_log_exception.called)
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_notification_when_approval_fails(self, mock_send_notification):
"""
Ensure the notification function is triggered when refunds are initiated, and cannot be automatically approved.
"""
refund_id = 1
failed_refund_id = 2
with mock_create_refund(status=201, response=[refund_id, failed_refund_id]):
with mock_process_refund(refund_id, reset_on_exit=False):
with mock_process_refund(failed_refund_id, status=500, reset_on_exit=False):
self.send_signal()
self.assertTrue(mock_send_notification.called)
mock_send_notification.assert_called_with(self.course_enrollment.user, [failed_refund_id])
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_notification_if_automatic_approval_disabled(self, mock_send_notification):
"""
Ensure the notification is always sent if the automatic approval functionality is disabled.
"""
refund_id = 1
self.config.enable_automatic_refund_approval = False
self.config.save()
with mock_create_refund(status=201, response=[refund_id]):
self.send_signal()
self.assertTrue(mock_send_notification.called)
mock_send_notification.assert_called_with(self.course_enrollment.user, [refund_id])
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_no_notification_after_approval(self, mock_send_notification):
"""
        Ensure the notification function is NOT triggered when refunds are initiated and then automatically approved.
"""
refund_id = 1
with mock_create_refund(status=201, response=[refund_id]):
with mock_process_refund(refund_id, reset_on_exit=False):
self.send_signal()
self.assertFalse(mock_send_notification.called)
last_request = httpretty.last_request()
self.assertDictEqual(json.loads(last_request.body.decode('utf8')), {'action': 'approve_payment_only'})
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
def test_notification_no_refund(self, mock_send_notification):
"""
Ensure the notification function is NOT triggered when no refunds are
initiated
"""
with mock_create_refund(status=200, response=[]):
self.send_signal()
self.assertFalse(mock_send_notification.called)
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification')
@ddt.data(
CourseMode.HONOR,
CourseMode.PROFESSIONAL,
CourseMode.AUDIT,
CourseMode.NO_ID_PROFESSIONAL_MODE,
CourseMode.CREDIT_MODE,
)
def test_notification_not_verified(self, mode, mock_send_notification):
"""
Ensure the notification function is NOT triggered when the
unenrollment is for any mode other than verified (i.e. any mode other
than one for which refunds are presently supported). See the
TODO associated with XCOM-371 in the signals module in the commerce
package for more information.
"""
self.course_enrollment.mode = mode
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertFalse(mock_send_notification.called)
@mock.patch('lms.djangoapps.commerce.utils._send_refund_notification', side_effect=Exception("Splat!"))
@mock.patch('lms.djangoapps.commerce.utils.log.warning')
def test_notification_error(self, mock_log_warning, mock_send_notification):
"""
        Ensure an error occurring during notification does not break program
flow, but a warning is logged.
"""
with mock_create_refund(status=200, response=[1, 2, 3]):
self.send_signal()
self.assertTrue(mock_send_notification.called)
self.assertTrue(mock_log_warning.called)
@mock.patch('openedx.core.djangoapps.theming.helpers.is_request_in_themed_site', return_value=True)
def test_notification_themed_site(self, mock_is_request_in_themed_site): # pylint: disable=unused-argument
"""
Ensure the notification function raises an Exception if used in the
context of themed site.
"""
with self.assertRaises(NotImplementedError):
_send_refund_notification(self.course_enrollment.user, [1, 2, 3])
@ddt.data('[email protected]', 'üñî[email protected]')
@mock.patch('lms.djangoapps.commerce.utils.create_zendesk_ticket')
def test_send_refund_notification(self, student_email, mock_zendesk):
""" Verify the support team is notified of the refund request. """
refund_ids = [1, 2, 3]
# pass a student with unicode and ascii email to ensure that
# generate_refund_notification_body can handle formatting a unicode
# message
self.student.email = student_email
_send_refund_notification(self.course_enrollment.user, refund_ids)
body = _generate_refund_notification_body(self.student, refund_ids)
mock_zendesk.assert_called_with(
self.student.profile.name,
self.student.email,
"[Refund] User-Requested Refund",
body,
['auto_refund']
)
def _mock_zendesk_api(self, status=201):
""" Mock Zendesk's ticket creation API. """
httpretty.register_uri(httpretty.POST, urljoin(ZENDESK_URL, '/api/v2/tickets.json'), status=status,
body='{}', content_type=JSON)
def call_create_zendesk_ticket(self, name='Test user', email='[email protected]', subject='Test Ticket',
body='I want a refund!', tags=None):
""" Call the create_zendesk_ticket function. """
tags = tags or ['auto_refund']
return create_zendesk_ticket(name, email, subject, body, tags)
@override_settings(ZENDESK_URL=ZENDESK_URL, ZENDESK_USER=None, ZENDESK_API_KEY=None)
def test_create_zendesk_ticket_no_settings(self):
""" Verify the Zendesk API is not called if the settings are not all set. """
with mock.patch('requests.post') as mock_post:
success = self.call_create_zendesk_ticket()
self.assertFalse(success)
self.assertFalse(mock_post.called)
def test_create_zendesk_ticket_request_error(self):
"""
Verify exceptions are handled appropriately if the request to the Zendesk API fails.
We simply need to ensure the exception is not raised beyond the function.
"""
with mock.patch('requests.post', side_effect=Timeout) as mock_post:
success = self.call_create_zendesk_ticket()
self.assertFalse(success)
self.assertTrue(mock_post.called)
@httpretty.activate
def test_create_zendesk_ticket(self):
""" Verify the Zendesk API is called. """
self._mock_zendesk_api()
name = 'Test user'
email = '[email protected]'
subject = 'Test Ticket'
body = 'I want a refund!'
tags = ['auto_refund']
ticket_created = self.call_create_zendesk_ticket(name, email, subject, body, tags)
self.assertTrue(ticket_created)
last_request = httpretty.last_request()
# Verify the headers
expected = {
'content-type': JSON,
'Authorization': 'Basic {}'.format(base64.b64encode(
'{user}/token:{pwd}'.format(user=ZENDESK_USER, pwd=ZENDESK_API_KEY).encode('utf8')).decode('utf8')
)
}
self.assertDictContainsSubset(expected, last_request.headers)
# Verify the content
expected = {
'ticket': {
'requester': {
'name': name,
'email': email
},
'subject': subject,
'comment': {'body': body},
'tags': ['LMS'] + tags
}
}
self.assertDictEqual(json.loads(last_request.body.decode('utf8')), expected)
| agpl-3.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/encodings/punycode.py | 586 | 6813 | # -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
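if __name__ == '__main__':
    # Illustrative round trip (not part of the original codec module); the
    # expected values follow RFC 3492 (u'b\xfccher' <-> 'bcher-kva').
    encoded = punycode_encode(u'b\xfccher')
    print repr(encoded)                              # expected: 'bcher-kva'
    print repr(punycode_decode(encoded, 'strict'))   # expected: u'b\xfccher'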
| gpl-2.0 |
groschovskiy/keyczar | cpp/src/tools/swtoolkit/test/help_test.py | 18 | 2153 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test hammer displays SCons help for SCons help options (MEDIUM TEST)."""
import TestFramework
def main():
test = TestFramework.TestFramework()
expect = "usage: scons [OPTION] [TARGET] ..."
test.run(arguments="-h")
test.fail_test(test.stdout().find(expect) == -1)
test.run(arguments="--help")
test.fail_test(test.stdout().find(expect) == -1)
test.run(arguments="-H")
test.fail_test(test.stdout().find(expect) == -1)
test.run(arguments="--help-options")
test.fail_test(test.stdout().find(expect) == -1)
test.pass_test()
return 0
if __name__ == "__main__":
main()
| apache-2.0 |
erdc-cm/air-water-vv | 2d/floatingStructures/floating_caisson_chrono/redist_n.py | 12 | 3054 | from proteus.default_n import *
from proteus import (StepControl,
TimeIntegration,
NonlinearSolvers,
LinearSolvers,
LinearAlgebraTools,
NumericalFlux)
from proteus.mprans import RDLS
import redist_p as physics
from proteus import Context
ct = Context.get()
domain = ct.domain
nd = ct.domain.nd
mesh = domain.MeshOptions
# time stepping
runCFL = ct.runCFL
# mesh options
nLevels = ct.nLevels
parallelPartitioningType = mesh.parallelPartitioningType
nLayersOfOverlapForParallel = mesh.nLayersOfOverlapForParallel
restrictFineSolutionToAllMeshes = mesh.restrictFineSolutionToAllMeshes
triangleOptions = mesh.triangleOptions
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
femSpaces = {0: ct.basis}
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
massLumping = False
numericalFluxType = NumericalFlux.DoNothing
conservativeFlux = None
subgridError = RDLS.SubgridError(coefficients=physics.coefficients,
nd=ct.domain.nd)
shockCapturing = RDLS.ShockCapturing(coefficients=physics.coefficients,
nd=ct.domain.nd,
shockCapturingFactor=ct.rd_shockCapturingFactor,
lag=ct.rd_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = NonlinearSolvers.NLGaussSeidel
linearSmoother = None
matrix = LinearAlgebraTools.SparseMatrix
if ct.useOldPETSc:
multilevelLinearSolver = LinearSolvers.PETSc
levelLinearSolver = LinearSolvers.PETSc
else:
multilevelLinearSolver = LinearSolvers.KSP_petsc4py
levelLinearSolver = LinearSolvers.KSP_petsc4py
if ct.useSuperlu:
multilevelLinearSolver = LinearSolvers.LU
levelLinearSolver = LinearSolvers.LU
if ct.redist_Newton:
timeIntegration = TimeIntegration.NoIntegration
stepController = StepControl.Newton_controller
maxNonlinearIts = 25
maxLineSearches = 0
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
else:
timeIntegration = TimeIntegration.BackwardEuler_cfl
stepController = RDLS.PsiTC
runCFL = 0.5
psitc['nStepsForce'] = 6
psitc['nStepsMax'] = 25
psitc['reduceRatio'] = 3.0
psitc['startRatio'] = 1.0
rtol_res[0] = 0.0
atol_res[0] = ct.rd_nl_atol_res
useEisenstatWalker = False#True
maxNonlinearIts = 1
maxLineSearches = 0
nonlinearSolverConvergenceTest = 'rits'
levelNonlinearSolverConvergenceTest = 'rits'
linearSolverConvergenceTest = 'r-true'
linear_solver_options_prefix = 'rdls_'
nl_atol_res = ct.rd_nl_atol_res
tolFac = 0.0
linTolFac = 0.001
l_atol_res = 0.001*ct.rd_nl_atol_res
useEisenstatWalker = False#True
| mit |
ishanic/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
adamreis/nyc-jazz | src/lib/werkzeug/testsuite/multipart/collect.py | 78 | 1584 | #!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = file(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
file(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
    return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
| mit |
mikhail-gorobets/chipsec | chipsec/modules/tools/smm/smm_ptr.py | 8 | 24962 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
"""
CanSecWest 2015
`A New Class of Vulnerability in SMI Handlers of BIOS/UEFI Firmware <https://cansecwest.com/slides/2015/A%20New%20Class%20of%20Vulnin%20SMI%20-%20Andrew%20Furtak.pdf>`_
A tool to test SMI handlers for pointer validation vulnerabilities
Usage:
``chipsec_main -m tools.smm.smm_ptr -l log.txt \``
``[-a <mode>,<config_file>|<smic_start:smic_end>,<size>,<address>]``
- ``mode``: SMI fuzzing mode
* ``config`` = use SMI configuration file <config_file>
* ``fuzz`` = fuzz all SMI handlers with code in the range <smic_start:smic_end>
* ``fuzzmore`` = fuzz mode + pass 2nd-order pointers within buffer to SMI handlers
- ``size``: size of the memory buffer (in Hex)
- ``address``: physical address of memory buffer to pass in GP regs to SMI handlers (in Hex)
* ``smram`` = option passes address of SMRAM base (system may hang in this mode!)
In ``config`` mode, SMI configuration file should have the following format
::
SMI_code=<SMI code> or *
SMI_data=<SMI data> or *
RAX=<value of RAX> or * or PTR or VAL
RBX=<value of RBX> or * or PTR or VAL
RCX=<value of RCX> or * or PTR or VAL
RDX=<value of RDX> or * or PTR or VAL
RSI=<value of RSI> or * or PTR or VAL
RDI=<value of RDI> or * or PTR or VAL
[PTR_OFFSET=<offset to pointer in the buffer>]
[SIG=<signature>]
[SIG_OFFSET=<offset to signature in the buffer>]
[Name=<SMI name>]
[Desc=<SMI description>]
Where
- ``[]``: optional line
- ``*``: Don't Care (the module will replace * with 0x0)
- ``PTR``: Physical address SMI handler will write to (the module will replace PTR with physical address provided as a command-line argument)
- ``VAL``: Value SMI handler will write to PTR address (the module will replace VAL with hardcoded _FILL_VALUE_xx)
"""
from chipsec.module_common import *
from chipsec.file import *
from chipsec.hal.interrupts import Interrupts
#logger.VERBOSE = False
#################################################################
# Fuzzing configuration
#################################################################
#
# Logging option
#
# False - better performance, True - better results tracking
DUMP_MEMORY_ON_DETECT = False
# False - better performance, True - better results tracking
FLUSH_OUTPUT_ALWAYS = False
# makes sure SMI code is logged in case of a crash
FLUSH_OUTPUT_AFTER_SMI = True
# dump all registers in log before every SMI (True - large size of log file)
DUMP_GPRS_EVERY_SMI = True
#
# SMI fuzzing options
#
# stop fuzzing after the first potential issue detected
FUZZ_BAIL_ON_1ST_DETECT = True
# Consider SMI handler subfunctions are passed in RCX GP register
# Fuzz RCX as SMI subfunctions: from 0 to MAX_SMI_FUNCTIONS
# False - better performance, True - smarter fuzzing
FUZZ_SMI_FUNCTIONS_IN_ECX = True
MAX_SMI_FUNCTIONS = 0x10
# Max value of the value written to SMI data port (0xB3)
MAX_SMI_DATA = 0x100
#
# Pass the pointer to SMI handlers in all general-purpose registers
# rather than in one register
# True - faster, False - gives you specific GPR that the vulnerable SMI handler is consuming
#
PTR_IN_ALL_GPRS = False
#
# SMI handler may take a pointer/PA from (some offset of off) address passed in GPRs and write to it
# Treat contents at physical address passed in GPRs as pointers and check contents at that pointer
# If they changed, SMI handler might have modified them
#
#MODE_SECOND_ORDER_BUFFER = True
# Max offset of the pointer (physical address)
# of the 2nd order buffer written in the memory buffer passed to SMI
MAX_PTR_OFFSET_IN_BUFFER = 0x20
# very obscure option, don't even try to understand
GPR_2ADDR = False
#
# Defaults
#
_FILL_VALUE_QWORD = 0x5A5A5A5A5A5A5A5A
_FILL_VALUE_BYTE = 0x5A
_SMI_CODE_DATA = 0x0
_MEM_FILL_VALUE = chr(0x11)
_MEM_FILL_SIZE = 0x500
_MAX_ALLOC_PA = 0xFFFFFFFF
_DEFAULT_GPRS = {'rax' : _FILL_VALUE_QWORD, 'rbx' : _FILL_VALUE_QWORD, 'rcx' : _FILL_VALUE_QWORD, 'rdx' : _FILL_VALUE_QWORD, 'rsi' : _FILL_VALUE_QWORD, 'rdi' : _FILL_VALUE_QWORD}
_pth = 'smm_ptr'
class BadSMIDetected (RuntimeError):
pass
class smi_desc( object ):
def __init__(self):
self.smi_code = None
self.smi_data = None
self.name = 'smi'
self.desc = ''
self.gprs = _DEFAULT_GPRS
self.ptr_in_buffer = False
self.ptr = None
self.ptr_offset = 0
self.sig = None
self.sig_offset = 0
def DIFF( s, t, sz ):
return [ pos for pos in range( sz ) if s[pos] != t[pos] ]
def FILL_BUFFER( _fill_byte, _fill_size, _ptr_in_buffer, _ptr, _ptr_offset, _sig, _sig_offset ):
fill_buf = _fill_byte*_fill_size
if _ptr_in_buffer and _ptr is not None:
fill_buf = fill_buf[ : _ptr_offset ] + struct.pack('=I',_ptr&0xFFFFFFFF) + fill_buf[ _ptr_offset + 4 : ]
if _sig is not None:
fill_buf = fill_buf[ : _sig_offset ] + _sig + fill_buf[ _sig_offset + len(_sig) : ]
return fill_buf
class smm_ptr(BaseModule):
def __init__(self):
BaseModule.__init__(self)
self.interrupts = Interrupts( self.cs )
self.is_check_memory = True
self.test_ptr_in_buffer = False
self.fill_byte = _MEM_FILL_VALUE
self.fill_size = _MEM_FILL_SIZE
def is_supported(self):
return True
def fill_memory( self, _addr, is_ptr_in_buffer, _ptr, _ptr_offset, _sig, _sig_offset ):
#
# Fill in contents at PA = _addr with known pattern to check later if any SMI handler modifies them
#
fill_buf = FILL_BUFFER( self.fill_byte, self.fill_size, is_ptr_in_buffer, _ptr, _ptr_offset, _sig, _sig_offset )
s = "[*] writing 0x%X bytes at 0x%016X" % (self.fill_size, _addr)
if is_ptr_in_buffer: s += " -> PTR at +0x%X" % _ptr_offset
if _sig is not None: s += " -> SIG at +0x%X" % _sig_offset
self.logger.log( s )
self.cs.mem.write_physical_mem( _addr, self.fill_size, fill_buf )
if self.logger.VERBOSE:
self.logger.log( "filling in contents at PA 0x%016X:" % _addr )
chipsec.logger.print_buffer( fill_buf )
if is_ptr_in_buffer and _ptr is not None:
self.logger.log( "[*] writing buffer at PA 0x%016X with 0x%X bytes '%c'" % (_ptr, self.fill_size, self.fill_byte) )
self.cs.mem.write_physical_mem( _ptr, self.fill_size, self.fill_byte*self.fill_size )
return True
def send_smi( self, thread_id, smi_code, smi_data, name, desc, rax, rbx, rcx, rdx, rsi, rdi ):
self.logger.log( " > SMI %02X (data: %02X)" % (smi_code,smi_data) )
if DUMP_GPRS_EVERY_SMI:
self.logger.log( " RAX: 0x%016X\n RBX: 0x%016X\n RCX: 0x%016X\n RDX: 0x%016X\n RSI: 0x%016X\n RDI: 0x%016X" % (rax,rbx,rcx,rdx,rsi,rdi) )
self.interrupts.send_SW_SMI( thread_id, smi_code, smi_data, rax, rbx, rcx, rdx, rsi, rdi )
return True
def check_memory( self, _addr, _smi_desc, fn, restore_contents=False ):
_ptr = _smi_desc.ptr
filler = self.fill_byte*self.fill_size
#
# Check if contents have changed at physical address passed in GPRs to SMI handler
# If changed, SMI handler might have written to that address
#
self.logger.log( " < checking buffers" )
expected_buf = FILL_BUFFER( self.fill_byte, self.fill_size, _smi_desc.ptr_in_buffer, _smi_desc.ptr, _smi_desc.ptr_offset, _smi_desc.sig, _smi_desc.sig_offset )
buf = self.cs.mem.read_physical_mem( _addr, self.fill_size )
differences = DIFF( expected_buf, buf, self.fill_size )
_changed = (len(differences) > 0)
if self.logger.VERBOSE:
self.logger.log( "checking contents at PA 0x%016X:" % _addr )
chipsec.logger.print_buffer( buf )
self.logger.log( "expected contents:" )
chipsec.logger.print_buffer( expected_buf )
if _changed:
self.logger.log( " contents changed at 0x%016X +%s" % (_addr,differences) )
if restore_contents:
self.logger.log( " restoring 0x%X bytes at 0x%016X" % (self.fill_size, _addr) )
self.cs.mem.write_physical_mem( _addr, self.fill_size, expected_buf )
if DUMP_MEMORY_ON_DETECT:
_pth_smi = os.path.join( _pth, '%X_%s'% (_smi_desc.smi_code,_smi_desc.name) )
if not os.path.exists( _pth_smi ): os.makedirs( _pth_smi )
_f = os.path.join( _pth_smi, fn + '.dmp' )
self.logger.log( " dumping buffer to '%s'" % _f )
write_file( _f, buf )
_changed1 = False
expected_buf = filler
if _smi_desc.ptr_in_buffer and _ptr is not None:
buf1 = self.cs.mem.read_physical_mem( _ptr, self.fill_size )
differences1 = DIFF( expected_buf, buf1, self.fill_size )
_changed1 = (len(differences1) > 0)
if self.logger.VERBOSE:
self.logger.log( "checking contents at PA 0x%016X:" % _ptr )
chipsec.logger.print_buffer( buf1 )
if _changed1:
self.logger.log( " contents changed at 0x%016X +%s" % (_ptr,differences1) )
if restore_contents:
self.logger.log( " restoring 0x%X bytes at PA 0x%016X" % (self.fill_size, _ptr) )
self.cs.mem.write_physical_mem( _ptr, self.fill_size, expected_buf )
if DUMP_MEMORY_ON_DETECT:
_pth_smi = os.path.join( _pth, '%X_%s'% (_smi_desc.smi_code,_smi_desc.name) )
if not os.path.exists( _pth_smi ): os.makedirs( _pth_smi )
_f = os.path.join( _pth_smi, fn + ('_ptr%X.dmp' % _smi_desc.ptr_offset) )
self.logger.log( " dumping buffer to '%s'" % _f )
write_file( _f, buf1 )
return (_changed or _changed1)
def smi_fuzz_iter( self, thread_id, _addr, _smi_desc, fill_contents=True, restore_contents=False ):
#
# Fill memory buffer if not in 'No Fill' mode
#
if self.is_check_memory and fill_contents:
self.fill_memory( _addr, _smi_desc.ptr_in_buffer, _smi_desc.ptr, _smi_desc.ptr_offset, _smi_desc.sig, _smi_desc.sig_offset )
#
# Invoke SW SMI Handler
#
_rax = _smi_desc.gprs['rax']
_rbx = _smi_desc.gprs['rbx']
_rcx = _smi_desc.gprs['rcx']
_rdx = _smi_desc.gprs['rdx']
_rsi = _smi_desc.gprs['rsi']
_rdi = _smi_desc.gprs['rdi']
self.send_smi( thread_id, _smi_desc.smi_code, _smi_desc.smi_data, _smi_desc.name, _smi_desc.desc, _rax, _rbx, _rcx, _rdx, _rsi, _rdi )
#
# Check memory buffer if not in 'No Fill' mode
#
contents_changed = False
if self.is_check_memory:
fn = '%X-a%X_b%X_c%X_d%X_si%X_di%X' % (_smi_desc.smi_data,_rax,_rbx,_rcx,_rdx,_rsi,_rdi)
contents_changed = self.check_memory( _addr, _smi_desc, fn, restore_contents )
if contents_changed:
msg = "DETECTED: SMI# %X data %X (rax=%X rbx=%X rcx=%X rdx=%X rsi=%X rdi=%X)" % (_smi_desc.smi_code,_smi_desc.smi_data,_rax,_rbx,_rcx,_rdx,_rsi,_rdi)
self.logger.log_important( msg )
if FUZZ_BAIL_ON_1ST_DETECT: raise BadSMIDetected, msg
if FLUSH_OUTPUT_AFTER_SMI: self.logger.flush()
return contents_changed
def test_config( self, thread_id, _smi_config_fname, _addr, _addr1 ):
#
# Parse SMM config file describing SMI handlers and their call arguments
# Then invoke SMI handlers
#
fcfg = open( _smi_config_fname, 'r' )
self.logger.log( "\n[*] >>> Testing SMI handlers defined in '%s'.." % _smi_config_fname )
bad_ptr_cnt = 0
_smi_desc = smi_desc()
for line in fcfg:
if '' == line.strip():
self.logger.log( "\n[*] testing SMI# 0x%02X (data: 0x%02X) %s (%s)" % (_smi_desc.smi_code,_smi_desc.smi_data,_smi_desc.name,_smi_desc.desc) )
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc ): bad_ptr_cnt += 1
_smi_desc = None
_smi_desc = smi_desc()
else:
name, var = line.strip().partition('=')[::2]
_n = name.strip().lower()
if 'name' == _n: _smi_desc.name = var
elif 'desc' == _n: _smi_desc.desc = var
elif 'smi_code' == _n: _smi_desc.smi_code = int(var,16) if '*'!=var else _SMI_CODE_DATA
elif 'smi_data' == _n: _smi_desc.smi_data = int(var,16) if '*'!=var else _SMI_CODE_DATA
elif 'ptr_offset' == _n:
_smi_desc.ptr_in_buffer = True
_smi_desc.ptr_offset = int(var,16)
_smi_desc.ptr = _addr1
elif 'sig' == _n: _smi_desc.sig = str( bytearray.fromhex( var ) )
elif 'sig_offset' == _n: _smi_desc.sig_offset = int(var,16)
else: _smi_desc.gprs[ _n ] = ( _addr if 'PTR'==var else (_FILL_VALUE_BYTE if 'VAL'==var else int(var,16)) ) if '*'!=var else _FILL_VALUE_QWORD
return bad_ptr_cnt
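    #
    # Illustrative sketch of the config file format parsed by test_config() above. This is
    # NOT the actual chipsec smm_config.ini; the names and values below are hypothetical and
    # only use the keys the parser recognizes (name, desc, smi_code, smi_data, ptr_offset,
    # sig, sig_offset, plus GPR names). A blank line terminates an entry and triggers the SMI.
    #
    #   name=SW_SMI_EXAMPLE
    #   desc=hypothetical SW SMI handler
    #   smi_code=0x01
    #   smi_data=0x00
    #   rbx=PTR
    #   rcx=*
    #   ptr_offset=0x10
    #   sig=4550
    #   sig_offset=0x00
    #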
def test_fuzz( self, thread_id, smic_start, smic_end, _addr, _addr1 ):
gpr_value = ((_addr<<32)|_addr) if GPR_2ADDR else _addr
gprs_addr = {'rax' : gpr_value, 'rbx' : gpr_value, 'rcx' : gpr_value, 'rdx' : gpr_value, 'rsi' : gpr_value, 'rdi' : gpr_value}
gprs_fill = {'rax' : _FILL_VALUE_QWORD, 'rbx' : _FILL_VALUE_QWORD, 'rcx' : _FILL_VALUE_QWORD, 'rdx' : _FILL_VALUE_QWORD, 'rsi' : _FILL_VALUE_QWORD, 'rdi' : _FILL_VALUE_QWORD}
self.logger.log( "\n[*] >>> Fuzzing SMI handlers.." )
self.logger.log( "[*] AX in RAX will be overwridden with values of SW SMI ports 0xB2/0xB3" )
self.logger.log( " DX in RDX will be overwridden with value 0x00B2" )
bad_ptr_cnt = 0
_smi_desc = smi_desc()
_smi_desc.gprs = gprs_addr if PTR_IN_ALL_GPRS else gprs_fill
self.logger.log( "\n[*] Setting values of general purpose registers to 0x%016X" % _smi_desc.gprs['rax'] )
max_ptr_off = 1
if self.is_check_memory and self.test_ptr_in_buffer:
_smi_desc.ptr_in_buffer = True
_smi_desc.ptr = _addr1
max_ptr_off = MAX_PTR_OFFSET_IN_BUFFER+1
        # If we are not in 'fuzzmore' mode, i.e. we are not testing the pointer within the memory buffer,
        # then this outer loop will only have 1 iteration
for off in range(max_ptr_off):
_smi_desc.ptr_offset = off
self.logger.log( "\n[*] reloading buffer with PTR at offset 0x%X.." % off )
if self.is_check_memory:
self.fill_memory( _addr, _smi_desc.ptr_in_buffer, _smi_desc.ptr, _smi_desc.ptr_offset, None, None )
for smi_code in range(smic_start, smic_end + 1, 1):
_smi_desc.smi_code = smi_code
for smi_data in range(MAX_SMI_DATA):
_smi_desc.smi_data = smi_data
self.logger.log( "\n[*] fuzzing SMI# 0x%02X (data: 0x%02X)" % (smi_code,smi_data) )
if FUZZ_SMI_FUNCTIONS_IN_ECX:
for _rcx in range(MAX_SMI_FUNCTIONS):
self.logger.log( " >> function (RCX): 0x%016X" % _rcx )
_smi_desc.gprs['rcx'] = _rcx
if PTR_IN_ALL_GPRS:
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
else:
self.logger.log( " RBX: 0x%016X" % _addr )
_smi_desc.gprs['rbx'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rbx'] = _FILL_VALUE_QWORD
self.logger.log( " RSI: 0x%016X" % _addr )
_smi_desc.gprs['rsi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rsi'] = _FILL_VALUE_QWORD
self.logger.log( " RDI: 0x%016X" % _addr )
_smi_desc.gprs['rdi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rdi'] = _FILL_VALUE_QWORD
else:
if PTR_IN_ALL_GPRS:
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
else:
self.logger.log( " RBX: 0x%016X" % _addr )
_smi_desc.gprs['rbx'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rbx'] = _FILL_VALUE_QWORD
self.logger.log( " RCX: 0x%016X" % _addr )
_smi_desc.gprs['rcx'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rcx'] = _FILL_VALUE_QWORD
self.logger.log( " RSI: 0x%016X" % _addr )
_smi_desc.gprs['rsi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rsi'] = _FILL_VALUE_QWORD
self.logger.log( " RDI: 0x%016X" % _addr )
_smi_desc.gprs['rdi'] = gpr_value
if self.smi_fuzz_iter( thread_id, _addr, _smi_desc, False, True ): bad_ptr_cnt += 1
_smi_desc.gprs['rdi'] = _FILL_VALUE_QWORD
return bad_ptr_cnt
def run( self, module_argv ):
self.logger.start_test( "A tool to test SMI handlers for pointer validation vulnerabilies" )
self.logger.log( "Usage: chipsec_main -m tools.smm.smm_ptr [ -a <mode>,<config_file>|<smic_start:smic_end>,<size>,<address> ]" )
self.logger.log( " mode SMI handlers testing mode" )
self.logger.log( " = config use SMI configuration file <config_file>" )
self.logger.log( " = fuzz fuzz all SMI handlers with code in the range <smic_start:smic_end>" )
self.logger.log( " = fuzzmore fuzz mode + pass '2nd-order' pointers within buffer to SMI handlers")
self.logger.log( " size size of the memory buffer (in Hex)" )
self.logger.log( " address physical address of memory buffer to pass in GP regs to SMI handlers (in Hex)" )
self.logger.log( " = smram pass address of SMRAM base (system may hang in this mode!)\n" )
test_mode = 'config'
_smi_config_fname = 'chipsec/modules/tools/smm/smm_config.ini'
_addr = None
_addr1 = None
thread_id = 0x0
global DUMP_GPRS_EVERY_SMI
if len(module_argv) > 1:
test_mode = module_argv[0].lower()
if 'config' == test_mode:
_smi_config_fname = module_argv[1]
elif 'fuzz' == test_mode or 'fuzzmore' == test_mode:
smic_arr = module_argv[1].split(':')
smic_start = int(smic_arr[0],16)
smic_end = int(smic_arr[1],16)
if 'fuzzmore' == test_mode:
self.test_ptr_in_buffer = True
DUMP_GPRS_EVERY_SMI = False
else:
self.logger.error( "Unknown fuzzing mode '%s'" % module_argv[0] )
return ModuleResult.ERROR
if len(module_argv) > 2: self.fill_size = int(module_argv[2],16)
if len(module_argv) > 3:
if 'smram' == module_argv[3]:
(_addr, smram_limit, smram_size) = self.cs.cpu.get_SMRAM()
self.is_check_memory = False
self.logger.log( "[*] Using SMRAM base address (0x%016X) to pass to SMI handlers" % _addr )
else:
_addr = int(module_argv[3],16)
self.logger.log( "[*] Using address from command-line (0x%016X) to pass to SMI handlers" % _addr )
else:
(va, _addr) = self.cs.mem.alloc_physical_mem( self.fill_size, _MAX_ALLOC_PA )
self.logger.log( "[*] Allocated memory buffer (to pass to SMI handlers) : 0x%016X" % _addr )
if self.is_check_memory:
(va1, _addr1) = self.cs.mem.alloc_physical_mem( self.fill_size, _MAX_ALLOC_PA )
self.logger.log( "[*] Allocated 2nd buffer (address will be in the 1st buffer): 0x%016X" % _addr1 )
#
# @TODO: Need to check that SW/APMC SMI is enabled
#
self.logger.log( "\n[*] Configuration" )
self.logger.log( " SMI testing mode : %s" % test_mode )
if 'config' == test_mode:
self.logger.log( " Config file : %s" % _smi_config_fname )
else:
self.logger.log( " Range of SMI codes (B2) : 0x%02X:0x%02X" % (smic_start,smic_end) )
self.logger.log( " Memory buffer pointer : 0x%016X (address passed in GP regs to SMI)" % _addr )
self.logger.log( " Filling/checking memory? : %s" % ('YES' if self.is_check_memory else 'NO'))
if self.is_check_memory:
self.logger.log( " Second buffer pointer : 0x%016X (address written to memory buffer)" % _addr1 )
self.logger.log( " Number of bytes to fill : 0x%X" % self.fill_size )
self.logger.log( " Byte to fill with : 0x%X" % ord(self.fill_byte) )
self.logger.log( " Additional options (can be changed in the source code):" )
self.logger.log( " Fuzzing SMI functions in ECX? : %d" % FUZZ_SMI_FUNCTIONS_IN_ECX )
self.logger.log( " Max value of SMI function in ECX : 0x%X" % MAX_SMI_FUNCTIONS )
self.logger.log( " Max value of SMI data (B3) : 0x%X" % MAX_SMI_DATA )
self.logger.log( " Max offset of the pointer in the buffer: 0x%X" % MAX_PTR_OFFSET_IN_BUFFER )
self.logger.log( " Passing pointer in all GP registers? : %d" % PTR_IN_ALL_GPRS )
self.logger.log( " Default values of the registers : 0x%016X" % _FILL_VALUE_QWORD )
self.logger.log( " Dump all register values every SMI : %d" % DUMP_GPRS_EVERY_SMI )
self.logger.log( " Bail on first detection : %d" % FUZZ_BAIL_ON_1ST_DETECT )
self.logger.set_always_flush( FLUSH_OUTPUT_ALWAYS )
if DUMP_MEMORY_ON_DETECT and not os.path.exists( _pth ): os.makedirs( _pth )
bad_ptr_cnt = 0
try:
if 'config' == test_mode:
bad_ptr_cnt = self.test_config( thread_id, _smi_config_fname, _addr, _addr1 )
elif 'fuzz' == test_mode or 'fuzzmore' == test_mode:
bad_ptr_cnt = self.test_fuzz ( thread_id, smic_start, smic_end, _addr, _addr1 )
except BadSMIDetected, msg:
bad_ptr_cnt = 1
self.logger.log_important( "Potentially bad SMI detected! Stopped fuzing (see FUZZ_BAIL_ON_1ST_DETECT option)" )
if bad_ptr_cnt > 0: self.logger.log_bad( "<<< Done: found %d potential occurrences of unchecked input pointers" % bad_ptr_cnt )
else: self.logger.log_good( "<<< Done: didn't find unchecked input pointers in tested SMI handlers" )
res = ModuleResult.FAILED if (bad_ptr_cnt > 0) else ModuleResult.PASSED
return res
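#
# Illustrative command lines for this module, following the usage string printed by run()
# above; the SMI code range and buffer size are example values, not recommendations:
#
#   chipsec_main -m tools.smm.smm_ptr
#       'config' mode using the default chipsec/modules/tools/smm/smm_config.ini
#   chipsec_main -m tools.smm.smm_ptr -a fuzz,0x00:0xFF,0x100
#       fuzz SMI codes 0x00..0xFF passing a 0x100-byte memory buffer in GP registers
#   chipsec_main -m tools.smm.smm_ptr -a fuzzmore,0x00:0xFF,0x100,smram
#       fuzz mode plus 2nd-order pointers inside the buffer, using the SMRAM base address
#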
| gpl-2.0 |
afaheem88/rally | tests/unit/plugins/openstack/scenarios/sahara/test_node_group_templates.py | 12 | 3700 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.sahara import (node_group_templates
as ngts)
from tests.unit import test
SAHARA_NGTS = ("rally.plugins.openstack.scenarios.sahara.node_group_templates"
".SaharaNodeGroupTemplates")
class SaharaNodeGroupTemplatesTestCase(test.TestCase):
def setUp(self):
super(SaharaNodeGroupTemplatesTestCase, self).setUp()
self.context = test.get_test_context()
@mock.patch(SAHARA_NGTS + "._list_node_group_templates")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=object())
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=object)
def test_create_and_list_node_group_templates(
self,
mock__create_worker_node_group_template,
mock__create_master_node_group_template,
mock__list_node_group_templates):
ngts_scenario = ngts.SaharaNodeGroupTemplates(self.context)
ngts_scenario.create_and_list_node_group_templates("test_flavor",
"test_plugin",
"test_version")
mock__create_master_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__create_worker_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__list_node_group_templates.assert_called_once_with()
@mock.patch(SAHARA_NGTS + "._delete_node_group_template")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=mock.MagicMock(id=1))
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=mock.MagicMock(id=2))
def test_create_delete_node_group_templates(
self,
mock__create_worker_node_group_template,
mock__create_master_node_group_template,
mock__delete_node_group_template):
ngts_scenario = ngts.SaharaNodeGroupTemplates(self.context)
ngts_scenario.create_delete_node_group_templates(
"test_flavor",
"test_plugin",
"test_version")
mock__create_master_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__create_worker_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock__delete_node_group_template.assert_has_calls(calls=[
mock.call(mock__create_master_node_group_template.return_value),
mock.call(mock__create_worker_node_group_template.return_value)])
| apache-2.0 |
flingone/frameworks_base_cmds_remoted | libs/boost/libs/python/pyste/src/Pyste/infos.py | 13 | 9212 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os.path
import copy
import exporters
from ClassExporter import ClassExporter
from FunctionExporter import FunctionExporter
from EnumExporter import EnumExporter
from HeaderExporter import HeaderExporter
from VarExporter import VarExporter
from CodeExporter import CodeExporter
from exporterutils import FunctionWrapper
from utils import makeid
import warnings
#==============================================================================
# DeclarationInfo
#==============================================================================
class DeclarationInfo:
def __init__(self, otherInfo=None):
self.__infos = {}
self.__attributes = {}
if otherInfo is not None:
self.__infos = copy.deepcopy(otherInfo.__infos)
self.__attributes = copy.deepcopy(otherInfo.__attributes)
def __getitem__(self, name):
'Used to access sub-infos'
if name.startswith('__'):
raise AttributeError
default = DeclarationInfo()
default._Attribute('name', name)
return self.__infos.setdefault(name, default)
def __getattr__(self, name):
return self[name]
def _Attribute(self, name, value=None):
if value is None:
# get value
return self.__attributes.get(name)
else:
# set value
self.__attributes[name] = value
def AddExporter(self, exporter):
# this was causing a much serious bug, as reported by Niall Douglas:
# another solution must be found!
#if not exporters.importing:
if exporter not in exporters.exporters:
exporters.exporters.append(exporter)
exporter.interface_file = exporters.current_interface
#==============================================================================
# FunctionInfo
#==============================================================================
class FunctionInfo(DeclarationInfo):
def __init__(self, name, include, tail=None, otherOption=None,
exporter_class = FunctionExporter):
DeclarationInfo.__init__(self, otherOption)
self._Attribute('name', name)
self._Attribute('include', include)
self._Attribute('exclude', False)
# create a FunctionExporter
exporter = exporter_class(InfoWrapper(self), tail)
self.AddExporter(exporter)
#==============================================================================
# ClassInfo
#==============================================================================
class ClassInfo(DeclarationInfo):
def __init__(self, name, include, tail=None, otherInfo=None,
exporter_class = ClassExporter):
DeclarationInfo.__init__(self, otherInfo)
self._Attribute('name', name)
self._Attribute('include', include)
self._Attribute('exclude', False)
# create a ClassExporter
exporter = exporter_class(InfoWrapper(self), tail)
self.AddExporter(exporter)
#==============================================================================
# templates
#==============================================================================
def GenerateName(name, type_list):
name = name.replace('::', '_')
names = [name] + type_list
return makeid('_'.join(names))
class ClassTemplateInfo(DeclarationInfo):
def __init__(self, name, include,
exporter_class = ClassExporter):
DeclarationInfo.__init__(self)
self._Attribute('name', name)
self._Attribute('include', include)
self._exporter_class = exporter_class
def Instantiate(self, type_list, rename=None):
if not rename:
rename = GenerateName(self._Attribute('name'), type_list)
# generate code to instantiate the template
types = ', '.join(type_list)
tail = 'typedef %s< %s > %s;\n' % (self._Attribute('name'), types, rename)
tail += 'void __instantiate_%s()\n' % rename
tail += '{ sizeof(%s); }\n\n' % rename
# create a ClassInfo
class_ = ClassInfo(rename, self._Attribute('include'), tail, self,
exporter_class = self._exporter_class)
return class_
def __call__(self, types, rename=None):
if isinstance(types, str):
types = types.split()
return self.Instantiate(types, rename)
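# Illustrative sketch of how the template machinery above is meant to be used from a Pyste
# interface file (the class and header names are hypothetical):
#
#   vector = ClassTemplateInfo('std::vector', 'vector')
#   IntVector = vector('int', 'IntVector')
#
# The call generates "typedef std::vector< int > IntVector;" plus a dummy function forcing
# the instantiation, and returns a ClassInfo registered under the new 'IntVector' name.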
#==============================================================================
# EnumInfo
#==============================================================================
class EnumInfo(DeclarationInfo):
def __init__(self, name, include, exporter_class = EnumExporter):
DeclarationInfo.__init__(self)
self._Attribute('name', name)
self._Attribute('include', include)
self._Attribute('exclude', False)
self._Attribute('export_values', False)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# HeaderInfo
#==============================================================================
class HeaderInfo(DeclarationInfo):
def __init__(self, include, exporter_class = HeaderExporter):
warnings.warn('AllFromHeader is not working in all cases in the current version.')
DeclarationInfo.__init__(self)
self._Attribute('include', include)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# VarInfo
#==============================================================================
class VarInfo(DeclarationInfo):
def __init__(self, name, include, exporter_class = VarExporter):
DeclarationInfo.__init__(self)
self._Attribute('name', name)
self._Attribute('include', include)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# CodeInfo
#==============================================================================
class CodeInfo(DeclarationInfo):
def __init__(self, code, section, exporter_class = CodeExporter):
DeclarationInfo.__init__(self)
self._Attribute('code', code)
self._Attribute('section', section)
exporter = exporter_class(InfoWrapper(self))
self.AddExporter(exporter)
#==============================================================================
# InfoWrapper
#==============================================================================
class InfoWrapper:
'Provides a nicer interface for a info'
def __init__(self, info):
self.__dict__['_info'] = info # so __setattr__ is not called
def __getitem__(self, name):
return InfoWrapper(self._info[name])
def __getattr__(self, name):
return self._info._Attribute(name)
def __setattr__(self, name, value):
self._info._Attribute(name, value)
#==============================================================================
# Functions
#==============================================================================
def exclude(info):
info._Attribute('exclude', True)
def set_policy(info, policy):
info._Attribute('policy', policy)
def rename(info, name):
info._Attribute('rename', name)
def set_wrapper(info, wrapper):
if isinstance(wrapper, str):
wrapper = FunctionWrapper(wrapper)
info._Attribute('wrapper', wrapper)
def instantiate(template, types, rename=None):
if isinstance(types, str):
types = types.split()
return template.Instantiate(types, rename)
def use_shared_ptr(info):
info._Attribute('smart_ptr', 'boost::shared_ptr< %s >')
def use_auto_ptr(info):
info._Attribute('smart_ptr', 'std::auto_ptr< %s >')
def holder(info, function):
msg = "Expected a callable that accepts one string argument."
assert callable(function), msg
info._Attribute('holder', function)
def add_method(info, name, rename=None):
added = info._Attribute('__added__')
if added is None:
info._Attribute('__added__', [(name, rename)])
else:
added.append((name, rename))
def class_code(info, code):
added = info._Attribute('__code__')
if added is None:
info._Attribute('__code__', [code])
else:
added.append(code)
def final(info):
info._Attribute('no_override', True)
def export_values(info):
info._Attribute('export_values', True)
| apache-2.0 |
aerickson/ansible | lib/ansible/modules/network/nxos/_nxos_mtu.py | 59 | 11681 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_mtu
extends_documentation_fragment: nxos
version_added: "2.2"
deprecated: Deprecated in 2.3; use M(nxos_system)'s C(mtu) option.
short_description: Manages MTU settings on Nexus switch.
description:
- Manages MTU settings on Nexus switch.
author:
- Jason Edelman (@jedelman8)
notes:
- Either C(sysmtu) param is required or (C(interface) AND C(mtu)) parameters are required.
- C(state=absent) unconfigures a given MTU if that value is currently present.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1.
required: false
default: null
mtu:
description:
- MTU for a specific interface. Must be an even number between 576 and 9216.
required: false
default: null
sysmtu:
description:
- System jumbo MTU. Must be an even number between 576 and 9216.
required: false
default: null
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure system mtu is 9216
- nxos_mtu:
sysmtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/1 (routed interface)
- nxos_mtu:
interface: Ethernet1/1
mtu: 1600
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Config mtu on Eth1/3 (switched interface)
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Unconfigure mtu on a given interface
- nxos_mtu:
interface: Ethernet1/3
mtu: 9216
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mtu": "1700"}
existing:
description:
- k/v pairs of existing mtu/sysmtu on the interface/system
returned: always
type: dict
sample: {"mtu": "1600", "sysmtu": "9216"}
end_state:
description: k/v pairs of mtu/sysmtu values after module execution
returned: always
type: dict
sample: {"mtu": "1700", sysmtu": "9216"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface vlan10", "mtu 1700"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_mtu(interface, module):
command = 'show interface {0}'.format(interface)
mtu = {}
body = execute_show_command(command, module)
try:
mtu_table = body[0]['TABLE_interface']['ROW_interface']
mtu['mtu'] = str(
mtu_table.get('eth_mtu',
mtu_table.get('svi_mtu', 'unreadable_via_api')))
mtu['sysmtu'] = get_system_mtu(module)['sysmtu']
except KeyError:
mtu = {}
return mtu
def get_system_mtu(module):
command = 'show run all | inc jumbomtu'
sysmtu = ''
body = execute_show_command(command, module, command_type='cli_show_ascii')
if body:
sysmtu = str(body[0].split(' ')[-1])
try:
sysmtu = int(sysmtu)
except:
sysmtu = ""
return dict(sysmtu=str(sysmtu))
def get_commands_config_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'mtu {mtu}',
'sysmtu': 'system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
def get_commands_remove_mtu(delta, interface):
CONFIG_ARGS = {
'mtu': 'no mtu {mtu}',
'sysmtu': 'no system jumbomtu {sysmtu}',
}
commands = []
for param, value in delta.items():
command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
if command and command != 'DNE':
commands.append(command)
command = None
mtu_check = delta.get('mtu', None)
if mtu_check:
commands.insert(0, 'interface {0}'.format(interface))
return commands
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(
command, module, command_type='cli_show_ascii')[0]
if body == 'DNE':
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except (KeyError):
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
mode = 'unknown'
interface_table = {}
body = execute_show_command(command, module)
try:
interface_table = body[0]['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
return mode
if intf_type in ['ethernet', 'portchannel']:
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode in ['access', 'trunk']:
mode = 'layer2'
elif mode == 'routed':
mode = 'layer3'
elif intf_type in ['loopback', 'svi']:
mode = 'layer3'
return mode
def main():
argument_spec = dict(
mtu=dict(type='str'),
interface=dict(type='str'),
sysmtu=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_together=[['mtu', 'interface']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
interface = module.params['interface']
mtu = module.params['mtu']
sysmtu = module.params['sysmtu']
state = module.params['state']
if sysmtu and (interface or mtu):
module.fail_json(msg='Proper usage-- either just use the sysmtu param '
'or use interface AND mtu params')
if interface:
intf_type = get_interface_type(interface)
if intf_type != 'ethernet':
if is_default(interface, module) == 'DNE':
module.fail_json(msg='Invalid interface. It does not exist '
'on the switch.')
existing = get_mtu(interface, module)
else:
existing = get_system_mtu(module)
if interface and mtu:
if intf_type == 'loopback':
module.fail_json(msg='Cannot set MTU for loopback interface.')
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
if intf_type in ['ethernet', 'portchannel']:
if mtu not in [existing['sysmtu'], '1500']:
module.fail_json(msg='MTU on L2 interfaces can only be set'
' to the system default (1500) or '
'existing sysmtu value which is '
' {0}'.format(existing['sysmtu']))
elif mode == 'layer3':
if intf_type in ['ethernet', 'portchannel', 'svi']:
if ((int(mtu) < 576 or int(mtu) > 9216) or
((int(mtu) % 2) != 0)):
                    module.fail_json(msg='Invalid MTU for Layer 3 interface; '
                                         'needs to be an even number between '
                                         '576 and 9216')
if sysmtu:
if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
((int(sysmtu) % 2) != 0))):
            module.fail_json(msg='Invalid MTU - needs to be an even '
'number between 576 and 9216')
args = dict(mtu=mtu, sysmtu=sysmtu)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
changed = False
end_state = existing
commands = []
if state == 'present':
if delta:
command = get_commands_config_mtu(delta, interface)
commands.append(command)
elif state == 'absent':
common = set(proposed.items()).intersection(existing.items())
if common:
command = get_commands_remove_mtu(dict(common), interface)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
if interface:
end_state = get_mtu(interface, module)
else:
end_state = get_system_mtu(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
grpc/grpc | src/python/grpcio/support.py | 10 | 4388 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import shutil
import sys
import tempfile
from distutils import errors
import commands
C_PYTHON_DEV = """
#include <Python.h>
int main(int argc, char **argv) { return 0; }
"""
C_PYTHON_DEV_ERROR_MESSAGE = """
Could not find <Python.h>. This could mean the following:
* You're on Ubuntu and haven't run `apt-get install <PY_REPR>-dev`.
* You're on RHEL/Fedora and haven't run `yum install <PY_REPR>-devel` or
`dnf install <PY_REPR>-devel` (make sure you also have redhat-rpm-config
installed)
* You're on Mac OS X and the usual Python framework was somehow corrupted
(check your environment variables or try re-installing?)
* You're on Windows and your Python installation was somehow corrupted
(check your environment variables or try re-installing?)
"""
if sys.version_info[0] == 2:
PYTHON_REPRESENTATION = 'python'
elif sys.version_info[0] == 3:
PYTHON_REPRESENTATION = 'python3'
else:
raise NotImplementedError('Unsupported Python version: %s' % sys.version)
C_CHECKS = {
C_PYTHON_DEV:
C_PYTHON_DEV_ERROR_MESSAGE.replace('<PY_REPR>', PYTHON_REPRESENTATION),
}
def _compile(compiler, source_string):
tempdir = tempfile.mkdtemp()
cpath = os.path.join(tempdir, 'a.c')
with open(cpath, 'w') as cfile:
cfile.write(source_string)
try:
compiler.compile([cpath])
except errors.CompileError as error:
return error
finally:
shutil.rmtree(tempdir)
def _expect_compile(compiler, source_string, error_message):
if _compile(compiler, source_string) is not None:
sys.stderr.write(error_message)
raise commands.CommandError(
"Diagnostics found a compilation environment issue:\n{}".format(
error_message))
def diagnose_compile_error(build_ext, error):
"""Attempt to diagnose an error during compilation."""
for c_check, message in C_CHECKS.items():
_expect_compile(build_ext.compiler, c_check, message)
python_sources = [
source for source in build_ext.get_source_files()
if source.startswith('./src/python') and source.endswith('c')
]
for source in python_sources:
if not os.path.isfile(source):
raise commands.CommandError((
"Diagnostics found a missing Python extension source file:\n{}\n\n"
"This is usually because the Cython sources haven't been transpiled "
"into C yet and you're building from source.\n"
"Try setting the environment variable "
"`GRPC_PYTHON_BUILD_WITH_CYTHON=1` when invoking `setup.py` or "
"when using `pip`, e.g.:\n\n"
"pip install -rrequirements.txt\n"
"GRPC_PYTHON_BUILD_WITH_CYTHON=1 pip install .").format(source))
def diagnose_attribute_error(build_ext, error):
if any('_needs_stub' in arg for arg in error.args):
raise commands.CommandError(
"We expect a missing `_needs_stub` attribute from older versions of "
"setuptools. Consider upgrading setuptools.")
_ERROR_DIAGNOSES = {
errors.CompileError: diagnose_compile_error,
AttributeError: diagnose_attribute_error,
}
def diagnose_build_ext_error(build_ext, error, formatted):
diagnostic = _ERROR_DIAGNOSES.get(type(error))
if diagnostic is None:
raise commands.CommandError(
"\n\nWe could not diagnose your build failure. If you are unable to "
"proceed, please file an issue at http://www.github.com/grpc/grpc "
"with `[Python install]` in the title; please attach the whole log "
"(including everything that may have appeared above the Python "
"backtrace).\n\n{}".format(formatted))
else:
diagnostic(build_ext, error)
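# Illustrative sketch of how the diagnostics above are expected to be driven by the build
# (an assumption about the caller; 'build_ext_instance' is a placeholder, not actual gRPC
# build code):
#
#   import traceback
#   try:
#       build_ext_instance.build_extensions()
#   except Exception as error:
#       diagnose_build_ext_error(build_ext_instance, error, traceback.format_exc())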
| apache-2.0 |
loco-odoo/localizacion_co | openerp/addons-extra/print_receipt/reports/account_cheque_bancolombia.py | 3 | 1068 | # -*- coding: utf-8 -*-
import time
from openerp.report import report_sxw
from openerp import pooler
class account_voucher(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_voucher, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'getLines': self._lines_get,
})
self.context = context
def _lines_get(self, voucher):
voucherline_obj = pooler.get_pool(self.cr.dbname).get('account.voucher.line')
voucherlines = voucherline_obj.search(self.cr, self.uid,[('voucher_id','=',voucher.id)])
voucherlines = voucherline_obj.browse(self.cr, self.uid, voucherlines)
return voucherlines
report_sxw.report_sxw('report.account_cheque_bancolombia', 'account.voucher',
'addons/print_receipt/reports/account_cheque_bancolombia.rml',
parser=account_voucher)
| agpl-3.0 |
gxx/lettuce | tests/integration/lib/Django-1.3/django/core/management/sql.py | 229 | 8259 | import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
        # hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't specified the ENGINE setting for the database.\n" +
"Edit your settings file and change DATBASES['default']['ENGINE'] to something like\n" +
"'django.db.backends.postgresql' or 'django.db.backends.mysql'.")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models])
pending_references = {}
for model in app_models:
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.get_table_list(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = models.get_models(app, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append( (model, f) )
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_reset(app, style, connection):
"Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module."
# This command breaks a lot and should be deprecated
import warnings
warnings.warn(
'This command has been deprecated. The command ``sqlflush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
PendingDeprecationWarning
)
return sql_delete(app, style, connection) + sql_all(app, style, connection)
def sql_flush(style, connection, only_django=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
statements = connection.ops.sql_flush(
style, tables, connection.introspection.sequence_list()
)
return statements
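# Illustrative helper (not part of the upstream module): a minimal sketch of calling
# sql_flush() from Python code. The 'default' connection alias is an assumption about the
# project's DATABASES setting.
def _example_print_flush_sql():
    from django.core.management.color import no_style
    from django.db import connections
    for statement in sql_flush(no_style(), connections['default'], only_django=True):
        print statement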
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = get_models(app)
app_dir = os.path.normpath(os.path.join(os.path.dirname(app.__file__), 'sql'))
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in models.get_models(app):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dir = os.path.normpath(os.path.join(os.path.dirname(models.get_app(model._meta.app_label).__file__), 'sql'))
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
statements = re.compile(r";[ \t]*$", re.M)
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
for sql_file in sql_files:
if os.path.exists(sql_file):
fp = open(sql_file, 'U')
for statement in statements.split(fp.read().decode(settings.FILE_CHARSET)):
# Remove any comments from the file
statement = re.sub(ur"--.*([\n\Z]|$)", "", statement)
if statement.strip():
output.append(statement + u";")
fp.close()
return output
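# Example of the lookup above (the model, app, and backend names are hypothetical): for a
# model 'Book' in app 'library' with ENGINE 'django.db.backends.postgresql_psycopg2', the
# files checked are, in order:
#
#   library/sql/book.postgresql_psycopg2.sql
#   library/sql/book.sql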
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print "Running post-sync handlers for application", app_name
models.signals.post_syncdb.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
| gpl-3.0 |
dbarbier/privot-doc | src/UseCasesGuide/script_WhiteNoise.py | 1 | 1386 | from openturns import *
# Time grid over which all the processes will be defined
nt = 100
timeGrid = RegularGrid(0.0, 1.0, nt)
# Definition of the distribution
sigma = 1.0
myDistribution = Normal(0., sigma)
# Definition of the process
myProcess = WhiteNoise(myDistribution, timeGrid)
# We get a realization of the white noise process
realization = myProcess.getRealization()
# The realization is a time series
# we draw it as a function of time thanks to the drawMarginal method
# We rework the legend name and color to have a pretty graph
graph = Graph()
marginalDraw = realization.drawMarginal(0)
drawable = marginalDraw.getDrawable(0)
drawable.setLegendName('realization')
drawable.setColor('blue')
graph.add(drawable)
graph.setXTitle('Time')
graph.setYTitle('Values')
graph.setTitle("White noise process")
graph.setLegendPosition('topright')
graph.draw("whitenoise_realization", 800, 600, GraphImplementation.PNG)
# Several realizations ==> here we fix 5 in order to be able to compare and visualize differences
sample = myProcess.getSample(5)
graphSample = sample.drawMarginal(0)
graphSample.setTitle("5 realizations of the White noise process")
for k in range(5):
drawable = graphSample.getDrawable(k)
drawable.setLegendName('realization ' + str(k+1))
graphSample.setDrawable(drawable, k)
graphSample.draw("whitenoise_realizations", 800, 600, GraphImplementation.PNG)
| lgpl-2.1 |
AdrianGaudebert/elmo | vendor-local/lib/python/south/management/commands/datamigration.py | 10 | 4665 | """
Data migration creation command
"""
from __future__ import print_function
import sys
import os
import re
from optparse import make_option
try:
set
except NameError:
from sets import Set as set
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import models
from django.conf import settings
from south.migration import Migrations
from south.exceptions import NoMigrations
from south.creator import freezer
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--freeze', action='append', dest='freeze_list', type='string',
help='Freeze the specified app(s). Provide an app name with each; use the option multiple times for multiple apps'),
make_option('--stdout', action='store_true', dest='stdout', default=False,
help='Print the migration to stdout instead of writing it to a file.'),
)
help = "Creates a new template data migration for the given app"
usage_str = "Usage: ./manage.py datamigration appname migrationname [--stdout] [--freeze appname]"
def handle(self, app=None, name="", freeze_list=None, stdout=False, verbosity=1, **options):
# Any supposed lists that are None become empty lists
freeze_list = freeze_list or []
# --stdout means name = -
if stdout:
name = "-"
# Only allow valid names
if re.search('[^_\w]', name) and name != "-":
self.error("Migration names should contain only alphanumeric characters and underscores.")
# if not name, there's an error
if not name:
self.error("You must provide a name for this migration\n" + self.usage_str)
if not app:
self.error("You must provide an app to create a migration for.\n" + self.usage_str)
# Get the Migrations for this app (creating the migrations dir if needed)
migrations = Migrations(app, force_creation=True, verbose_creation=verbosity > 0)
# See what filename is next in line. We assume they use numbers.
new_filename = migrations.next_filename(name)
# Work out which apps to freeze
apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list)
# So, what's in this file, then?
file_contents = MIGRATION_TEMPLATE % {
"frozen_models": freezer.freeze_apps_to_string(apps_to_freeze),
"complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or ""
}
# - is a special name which means 'print to stdout'
if name == "-":
print(file_contents)
# Write the migration file if the name isn't -
else:
fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w")
fp.write(file_contents)
fp.close()
print("Created %s." % new_filename, file=sys.stderr)
def calc_frozen_apps(self, migrations, freeze_list):
"""
Works out, from the current app, settings, and the command line options,
which apps should be frozen.
"""
apps_to_freeze = []
for to_freeze in freeze_list:
if "." in to_freeze:
self.error("You cannot freeze %r; you must provide an app label, like 'auth' or 'books'." % to_freeze)
# Make sure it's a real app
if not models.get_app(to_freeze):
self.error("You cannot freeze %r; it's not an installed app." % to_freeze)
# OK, it's fine
apps_to_freeze.append(to_freeze)
if getattr(settings, 'SOUTH_AUTO_FREEZE_APP', True):
apps_to_freeze.append(migrations.app_label())
return apps_to_freeze
def error(self, message, code=1):
"""
Prints the error, and exits with the given code.
"""
print(message, file=sys.stderr)
sys.exit(code)
MIGRATION_TEMPLATE = """# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
def backwards(self, orm):
"Write your backwards methods here."
models = %(frozen_models)s
%(complete_apps)s
symmetrical = True
"""
| mpl-2.0 |
ehashman/oh-mainline | vendor/packages/sqlparse/tests/test_parse.py | 16 | 6668 | # -*- coding: utf-8 -*-
"""Tests sqlparse function."""
import pytest
from tests.utils import TestCaseBase
import sqlparse
import sqlparse.sql
from sqlparse import tokens as T
class SQLParseTest(TestCaseBase):
"""Tests sqlparse.parse()."""
def test_tokenize(self):
sql = 'select * from foo;'
stmts = sqlparse.parse(sql)
self.assertEqual(len(stmts), 1)
self.assertEqual(str(stmts[0]), sql)
def test_multistatement(self):
sql1 = 'select * from foo;'
sql2 = 'select * from bar;'
stmts = sqlparse.parse(sql1 + sql2)
self.assertEqual(len(stmts), 2)
self.assertEqual(str(stmts[0]), sql1)
self.assertEqual(str(stmts[1]), sql2)
def test_newlines(self):
sql = u'select\n*from foo;'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
sql = u'select\r\n*from foo'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
sql = u'select\r*from foo'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
sql = u'select\r\n*from foo\n'
p = sqlparse.parse(sql)[0]
self.assertEqual(unicode(p), sql)
def test_within(self):
sql = 'foo(col1, col2)'
p = sqlparse.parse(sql)[0]
col1 = p.tokens[0].tokens[1].tokens[1].tokens[0]
self.assert_(col1.within(sqlparse.sql.Function))
def test_child_of(self):
sql = '(col1, col2)'
p = sqlparse.parse(sql)[0]
self.assert_(p.tokens[0].tokens[1].is_child_of(p.tokens[0]))
sql = 'select foo'
p = sqlparse.parse(sql)[0]
self.assert_(not p.tokens[2].is_child_of(p.tokens[0]))
self.assert_(p.tokens[2].is_child_of(p))
def test_has_ancestor(self):
sql = 'foo or (bar, baz)'
p = sqlparse.parse(sql)[0]
baz = p.tokens[-1].tokens[1].tokens[-1]
self.assert_(baz.has_ancestor(p.tokens[-1].tokens[1]))
self.assert_(baz.has_ancestor(p.tokens[-1]))
self.assert_(baz.has_ancestor(p))
def test_float(self):
t = sqlparse.parse('.5')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
t = sqlparse.parse('.51')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
t = sqlparse.parse('1.5')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
t = sqlparse.parse('12.5')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is sqlparse.tokens.Number.Float)
def test_placeholder(self):
def _get_tokens(sql):
return sqlparse.parse(sql)[0].tokens[-1].tokens
t = _get_tokens('select * from foo where user = ?')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, '?')
t = _get_tokens('select * from foo where user = :1')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, ':1')
t = _get_tokens('select * from foo where user = :name')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, ':name')
t = _get_tokens('select * from foo where user = %s')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, '%s')
t = _get_tokens('select * from foo where user = $a')
self.assert_(t[-1].ttype is sqlparse.tokens.Name.Placeholder)
self.assertEqual(t[-1].value, '$a')
def test_access_symbol(self): # see issue27
t = sqlparse.parse('select a.[foo bar] as foo')[0].tokens
self.assert_(isinstance(t[-1], sqlparse.sql.Identifier))
self.assertEqual(t[-1].get_name(), 'foo')
self.assertEqual(t[-1].get_real_name(), '[foo bar]')
self.assertEqual(t[-1].get_parent_name(), 'a')
def test_keyword_like_identifier(self): # see issue47
t = sqlparse.parse('foo.key')[0].tokens
self.assertEqual(len(t), 1)
self.assert_(isinstance(t[0], sqlparse.sql.Identifier))
def test_function_parameter(self): # see issue94
t = sqlparse.parse('abs(some_col)')[0].tokens[0].get_parameters()
self.assertEqual(len(t), 1)
self.assert_(isinstance(t[0], sqlparse.sql.Identifier))
def test_function_param_single_literal(self):
t = sqlparse.parse('foo(5)')[0].tokens[0].get_parameters()
self.assertEqual(len(t), 1)
self.assert_(t[0].ttype is T.Number.Integer)
def test_nested_function(self):
t = sqlparse.parse('foo(bar(5))')[0].tokens[0].get_parameters()
self.assertEqual(len(t), 1)
self.assert_(type(t[0]) is sqlparse.sql.Function)
def test_quoted_identifier():
t = sqlparse.parse('select x.y as "z" from foo')[0].tokens
assert isinstance(t[2], sqlparse.sql.Identifier)
assert t[2].get_name() == 'z'
assert t[2].get_real_name() == 'y'
def test_psql_quotation_marks(): # issue83
# regression: make sure plain $$ work
t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $$
....
$$ LANGUAGE plpgsql;""")
assert len(t) == 2
# make sure $SOMETHING$ works too
t = sqlparse.split("""
CREATE OR REPLACE FUNCTION testfunc1(integer) RETURNS integer AS $PROC_1$
....
$PROC_1$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION testfunc2(integer) RETURNS integer AS $PROC_2$
....
$PROC_2$ LANGUAGE plpgsql;""")
assert len(t) == 2
@pytest.mark.parametrize('ph', ['?', ':1', ':foo', '%s', '%(foo)s'])
def test_placeholder(ph):
p = sqlparse.parse(ph)[0].tokens
assert len(p) == 1
assert p[0].ttype is T.Name.Placeholder
@pytest.mark.parametrize('num', ['6.67428E-8', '1.988e33', '1e-12'])
def test_scientific_numbers(num):
p = sqlparse.parse(num)[0].tokens
assert len(p) == 1
assert p[0].ttype is T.Number.Float
def test_single_quotes_are_strings():
p = sqlparse.parse("'foo'")[0].tokens
assert len(p) == 1
assert p[0].ttype is T.String.Single
def test_double_quotes_are_identifiers():
p = sqlparse.parse('"foo"')[0].tokens
assert len(p) == 1
assert isinstance(p[0], sqlparse.sql.Identifier)
def test_single_quotes_with_linebreaks(): # issue118
p = sqlparse.parse("'f\nf'")[0].tokens
assert len(p) == 1
assert p[0].ttype is T.String.Single
| agpl-3.0 |
sagiss/sardana | src/sardana/spock/magic.py | 1 | 9633 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
##
## This file is part of Sardana
##
## http://www.sardana-controls.org/
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Sardana is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Sardana is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""Initial magic commands and hooks for the spock IPython environment"""
__all__ = ['expconf', 'showscan', 'spsplot', 'debug_completer',
'debug', 'www',
'post_mortem', 'macrodata', 'edmac', 'spock_late_startup_hook',
'spock_pre_prompt_hook']
from .genutils import page, get_door, get_macro_server, ask_yes_no, arg_split
from .genutils import MSG_DONE, MSG_FAILED
from .genutils import get_ipapi
def expconf(self, parameter_s=''):
"""Launches a GUI for configuring the environment variables
for the experiments (scans)"""
try:
from sardana.taurus.qt.qtgui.extra_sardana import ExpDescriptionEditor
except:
print "Error importing ExpDescriptionEditor " \
"(hint: is taurus extra_sardana installed?)"
return
try:
doorname = get_door().name()
except TypeError:
# TODO: For Taurus 4 adaptation
doorname = get_door().fullname
#===========================================================================
## ugly hack to avoid ipython/qt thread problems #e.g. see
## https://sourceforge.net/p/sardana/tickets/10/
## this hack does not allow inter-process communication and leaves the
## widget open after closing spock
    ## @todo: investigate cause of segfaults when launching qt widgets from ipython
#
# w = ExpDescriptionEditor(door=doorname)
# w.show() #launching it like this, produces the problem of https://sourceforge.net/p/sardana/tickets/10/
import subprocess
import sys
fname = sys.modules[ExpDescriptionEditor.__module__].__file__
args = ['python', fname, doorname]
subprocess.Popen(args)
# ===========================================================================
def showscan(self, parameter_s=''):
"""Shows a scan in a GUI.
:param scan_id: scan number [default: None, meaning show last scan]"""
params = parameter_s.split()
door = get_door()
online, scan_nb = False, None
if len(params) > 0:
if params[0].lower() == 'online':
msg = 'To see the scans online, launch "expconf" and ' + \
'enable the plots from the "plots" button ' + \
'(top-right in the first tab)'
print msg
return
# show the scan plot, ignoring the plot configuration
elif params[0].lower() == 'online_raw':
online = True
else:
scan_nb = int(params[0])
door.show_scan(scan_nb, online=online)
def spsplot(self, parameter_s=''):
get_door().plot()
def debug_completer(self, event):
# calculate parameter index
param_idx = len(event.line.split()) - 1
if not event.line.endswith(' '):
param_idx -= 1
if param_idx == 0:
return ('off', 'on')
def debug(self, parameter_s=''):
"""Activate/Deactivate macro server debug output"""
params = parameter_s.split()
door = get_door()
if len(params) == 0:
s = door.getDebugMode() and 'on' or 'off'
print "debug mode is %s" % s
return
elif len(params) == 1:
s = params[0].lower()
if not s in ('off', 'on'):
print "Usage: debug [on|off]"
return
door.setDebugMode(s == 'on')
print "debug mode is now %s" % s
else:
print "Usage: debug [on|off]"
def www(self, parameter_s=''):
"""What went wrong. Prints the error message from the last macro execution"""
import PyTango
door = get_door()
try:
last_macro = door.getLastRunningMacro()
if last_macro is None:
door.writeln("No macro ran from this console yet!")
return
if not hasattr(last_macro, 'exc_stack') or last_macro.exc_stack is None:
door.writeln("Sorry, but no exception occurred running last " \
"macro (%s)." % last_macro.name)
return
exc = "".join(last_macro.exc_stack)
door.write(exc)
except Exception, e:
door.writeln("Unexpected exception occurred executing www:",
stream=door.Error)
door.writeln(str(e), stream=door.Error)
import traceback
traceback.print_exc()
def post_mortem(self, parameter_s='', from_www=False):
"""Post mortem analysis. Prints the local stream buffer. If no stream is
specified, it reads 'debug' stream. Valid values are output, critical,
error, warning, info, debug, result"""
params = parameter_s.split() or ['debug']
door = get_door()
logger = door.getLogObj(params[0])
msg = ""
if not from_www:
try:
msg = "\n".join(logger.read(cache=False).value)
except:
from_www = True
if from_www:
msg = "------------------------------\n" \
"Server is offline.\n" \
"This is a post mortem analysis\n" \
"------------------------------\n"
msg += "\n".join(logger.getLogBuffer())
page(msg)
def macrodata(self, parameter_s=''):
"""macrodata
Returns the data produced by the last macro"""
door = get_door()
macro_data = door.read_attribute("RecordData")
from taurus.core.util.codecs import CodecFactory
factory = CodecFactory()
data = factory.decode(macro_data.value)
return data
def edmac(self, parameter_s=''):
"""edmac <macro name> [<module>]
Returns the contents of the macro file which contains the macro code for
the given macro name. If the module is given and it does not exist a new
one is created. If the given module is a simple module name and it does
not exist, it will be created on the first directory mentioned in the
MacroPath"""
import os
import tempfile
import PyTango
ms = get_macro_server()
pars = arg_split(parameter_s, posix=True)
if len(pars) == 1:
macro_name = pars[0]
is_new_macro = False
else:
is_new_macro = True
macro_name, macro_lib = pars
macro_info_obj = ms.getMacroInfoObj(macro_name)
if not is_new_macro:
if macro_info_obj is None:
print "Macro '%s' could not be found" % macro_name
return
macro_lib = macro_info_obj.module
if is_new_macro:
if macro_info_obj is not None:
msg = ('Do you want to create macro "%s" in module "%s" that will'
' override the already existing macro in module "%s"'
% (macro_name, macro_lib, macro_info_obj.module))
if not ask_yes_no(msg, 'y'):
print "Aborting edition..."
return
macro_info = (macro_lib, macro_name)
print 'Opening %s.%s...' % macro_info
try:
remote_fname, code, line_nb = ms.GetMacroCode(macro_info)
except PyTango.DevFailed, e:
PyTango.Except.print_exception(e)
return
fd, local_fname = tempfile.mkstemp(prefix='spock_%s_' % pars[0],
suffix='.py', text=True)
os.write(fd, code)
os.close(fd)
cmd = 'edit -x -n %s %s' % (line_nb, local_fname)
ip = get_ipapi()
ip.magic(cmd)
if ask_yes_no('Do you want to apply the new code on the server?', 'y'):
print 'Storing...',
try:
f = file(local_fname)
try:
new_code = f.read()
ms.SetMacroCode([remote_fname, new_code])
print MSG_DONE
except Exception, e:
print MSG_FAILED
print 'Reason:', str(e)
f.close()
except:
print 'Could not open file \'%s\' for safe transfer to the ' \
'server' % local_fname
print 'Did you forget to save?'
else:
print "Discarding changes..."
# if os.path.exists(local_fname):
# if ask_yes_no('Delete temporary file \'%s\'?' % local_fname, 'y'):
# os.remove(local_fname)
# bkp = '%s~' % local_fname
# if os.path.exists(bkp):
# os.remove(bkp)
try:
os.remove(local_fname)
except:
pass
def spock_late_startup_hook(self):
try:
get_door().setConsoleReady(True)
except:
import traceback
print "Exception in spock_late_startup_hook:"
traceback.print_exc()
def spock_pre_prompt_hook(self):
try:
get_door().pre_prompt_hook(self)
except:
import traceback
print "Exception in spock_pre_prompt_hook:"
traceback.print_exc()
# def spock_pre_runcode_hook(self):
# print "spock_pre_runcode_hook"
# return None
| lgpl-3.0 |
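A hypothetical sketch of how the magics above could be wired into an IPython shell of that era; the actual registration happens elsewhere in spock (genutils), so the calls below are illustrative assumptions rather than sardana's own bootstrap code.

from IPython import get_ipython
from sardana.spock.magic import debug, debug_completer

ip = get_ipython()
if ip is not None:
    # legacy-style magic registration matching the (self, parameter_s) signature
    ip.define_magic('debug', debug)
    # tab-completion hook so "debug <TAB>" offers 'on'/'off'
    ip.set_hook('complete_command', debug_completer, str_key='debug')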
wjo1212/aliyun-log-python-sdk | aliyun/log/es_migration/migration_manager.py | 1 | 8762 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
import logging
import time
from multiprocessing import Pool
from aliyun.log import LogClient
from aliyun.log.es_migration.collection_task import (CollectionTaskStatus,
run_collection_task)
from aliyun.log.es_migration.collection_task_config import CollectionTaskConfig
from aliyun.log.es_migration.index_logstore_mappings import \
IndexLogstoreMappings
from aliyun.log.es_migration.mapping_index_converter import \
MappingIndexConverter
from aliyun.log.es_migration.util import split_and_strip
from aliyun.log.logexception import LogException
from elasticsearch import Elasticsearch
results = []
def log_result(result):
results.append(result)
class MigrationManager(object):
def __init__(self, hosts=None, indexes=None, query=None, scroll="5m", endpoint=None, project_name=None,
access_key_id=None, access_key=None, logstore_index_mappings=None, pool_size=10, time_reference=None,
source=None, topic=None, wait_time_in_secs=60):
"""
:param hosts: required, a comma-separated list of source ES nodes.
(example: "localhost:9200,other_host:9200")
:param indexes: optional, a comma-separated list of source index names.
(default: None, which will pull all indexes. example: "index1,index2")
:param query: optional, used to filter docs, so that you can specify the docs you want to migrate.
(default: None, example: '{"query":{"match":{"es_text":"text1"}}}')
:param scroll: optional, specify how long a consistent view of the index should be
maintained for scrolled search. (default: "5m", example: "10m")
:param endpoint: required, specify the endpoint of your log services.
(example: "cn-beijing.log.aliyuncs.com")
:param project_name: required, specify the project_name of your log services.
:param access_key_id: required, specify the access_key_id of your account.
:param access_key: required, specify the access_key of your account.
:param logstore_index_mappings: optional, specify the mappings of log service logstore and ES index.
(default is one-to-one mapping,
example: '{"logstore1": "my_index*","logstore2": "a_index,b_index"}')
:param pool_size: optional, specify the size of process pool.
The process pool will be used to run collection tasks.
(default: 10, example: 20)
:param time_reference: optional, specify what ES doc's field to use as log's time field.
(default: None, which will use current timestamp as log's time. example: "field1")
:param source: optional, specify the value of log's source field.
(default: None, which will be the value of hosts. example: "your_source")
:param topic: optional, specify the value of log's topic field.
(default: None, example: "your_topic")
        :param wait_time_in_secs: optional, specify the waiting time, in seconds, between initializing
                                  aliyun log and executing the data migration task.
                                  (default: 60, example: 120)
"""
self.hosts = hosts
self.indexes = indexes
self.query = query
self.scroll = scroll
self.endpoint = endpoint
self.project_name = project_name
self.access_key_id = access_key_id
self.access_key = access_key
self.logstore_index_mappings = logstore_index_mappings
self.pool_size = pool_size
self.time_reference = time_reference
self.source = source
self.topic = topic
self.wait_time_in_secs = wait_time_in_secs
def migrate(self):
es = Elasticsearch(split_and_strip(self.hosts))
log_client = LogClient(self.endpoint, self.access_key_id, self.access_key)
index_lst = self.get_index_lst(es, self.indexes)
index_logstore_mappings = IndexLogstoreMappings(index_lst, self.logstore_index_mappings)
self.init_aliyun_log(es, log_client, self.project_name, index_logstore_mappings, self.wait_time_in_secs)
shard_cnt = self.get_shard_count(es, self.indexes, self.query)
p = Pool(min(shard_cnt, self.pool_size))
for i in range(shard_cnt):
config = CollectionTaskConfig(task_id=i,
slice_id=i,
slice_max=shard_cnt,
hosts=self.hosts,
indexes=self.indexes,
query=self.query,
scroll=self.scroll,
endpoint=self.endpoint,
project=self.project_name,
access_key_id=self.access_key_id,
access_key=self.access_key,
index_logstore_mappings=index_logstore_mappings,
time_reference=self.time_reference,
source=self.source,
topic=self.topic)
p.apply_async(func=run_collection_task, args=(config,), callback=log_result)
p.close()
p.join()
self.logging_summary_info(shard_cnt)
@classmethod
def logging_summary_info(cls, shard_cnt):
total_started_task_cnt = shard_cnt
success_task_cnt = 0
fail_task_cnt = 0
doc_cnt = 0
logging.info("========Tasks Info========")
for res in results:
logging.info(res)
doc_cnt += res.count
if res.status == CollectionTaskStatus.SUCCESS:
success_task_cnt += 1
else:
fail_task_cnt += 1
logging.info("========Summary========")
logging.info("Total started task count: %d", total_started_task_cnt)
logging.info("Successful task count: %d", success_task_cnt)
logging.info("Failed task count: %d", fail_task_cnt)
logging.info("Total collected documentation count: %d", doc_cnt)
@classmethod
def get_shard_count(cls, es, indexes, query=None):
resp = es.count(index=indexes, body=query)
return resp["_shards"]["total"]
@classmethod
def get_index_lst(cls, es, indexes):
resp = es.indices.stats(index=indexes)
return resp["indices"].keys()
@classmethod
def init_aliyun_log(cls, es, log_client, project_name, index_logstore_mappings, wait_time_in_secs):
logging.info("Start to init aliyun log")
cls._create_logstores(log_client, project_name, index_logstore_mappings)
cls._create_index_configs(es, log_client, project_name, index_logstore_mappings)
logging.info("Init aliyun log successfully")
logging.info("Enter wating time, wait_time_in_secs=%d", wait_time_in_secs)
time.sleep(wait_time_in_secs)
logging.info("Exit wating time")
@classmethod
def _create_logstores(cls, log_client, project_name, index_logstore_mappings):
logstores = index_logstore_mappings.get_all_logstores()
for logstore in logstores:
try:
log_client.create_logstore(project_name=project_name, logstore_name=logstore)
except LogException as e:
if e.get_error_code() == "LogStoreAlreadyExist":
continue
else:
raise
@classmethod
def _create_index_configs(cls, es, log_client, project_name, index_logstore_mappings):
logstores = index_logstore_mappings.get_all_logstores()
for logstore in logstores:
indexes = index_logstore_mappings.get_indexes(logstore)
first_index = True
for index in indexes:
resp = es.indices.get(index=index)
for mapping in resp[index]["mappings"].values():
index_config = MappingIndexConverter.to_index_config(mapping)
if first_index:
try:
log_client.create_index(project_name, logstore, index_config)
first_index = False
except LogException as e:
if e.get_error_code() == "IndexAlreadyExist":
continue
else:
raise
else:
log_client.update_index(project_name, logstore, index_config)
| mit |
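A hypothetical invocation of the migration class above; the endpoint, project and credentials are placeholders, while the keyword names are taken from MigrationManager.__init__.

from aliyun.log.es_migration.migration_manager import MigrationManager

manager = MigrationManager(
    hosts="localhost:9200",                  # source Elasticsearch nodes
    indexes="index1,index2",                 # restrict which indexes are pulled
    endpoint="cn-beijing.log.aliyuncs.com",  # log service endpoint (placeholder)
    project_name="my-project",               # placeholder project
    access_key_id="YOUR_ACCESS_KEY_ID",      # placeholder credentials
    access_key="YOUR_ACCESS_KEY",
    pool_size=10,                            # size of the collection process pool
    wait_time_in_secs=60,                    # pause between init and collection
)
manager.migrate()  # one collection task per source shard, run in the pool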
luoyetx/mxnet | tools/ipynb2md.py | 41 | 2272 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Convert jupyter notebook into the markdown format. The notebook outputs will be
removed.
It is heavily adapted from https://gist.github.com/decabyte/0ed87372774cf5d34d7e
"""
import sys
import io
import os
import argparse
import nbformat
def remove_outputs(nb):
"""Removes the outputs cells for a jupyter notebook."""
for cell in nb.cells:
if cell.cell_type == 'code':
cell.outputs = []
def clear_notebook(old_ipynb, new_ipynb):
with io.open(old_ipynb, 'r') as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
remove_outputs(nb)
with io.open(new_ipynb, 'w', encoding='utf8') as f:
nbformat.write(nb, f, nbformat.NO_CONVERT)
def main():
parser = argparse.ArgumentParser(
description="Jupyter Notebooks to markdown"
)
parser.add_argument("notebook", nargs=1, help="The notebook to be converted.")
parser.add_argument("-o", "--output", help="output markdown file")
args = parser.parse_args()
old_ipynb = args.notebook[0]
new_ipynb = 'tmp.ipynb'
md_file = args.output
print md_file
if not md_file:
md_file = os.path.splitext(old_ipynb)[0] + '.md'
clear_notebook(old_ipynb, new_ipynb)
os.system('jupyter nbconvert ' + new_ipynb + ' --to markdown --output ' + md_file)
with open(md_file, 'a') as f:
f.write('<!-- INSERT SOURCE DOWNLOAD BUTTONS -->')
os.system('rm ' + new_ipynb)
if __name__ == '__main__':
main()
| apache-2.0 |
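Hypothetical ways to drive the converter above; 'demo.ipynb' is a placeholder and the import assumes the tools/ directory is on the Python path.

#   python ipynb2md.py demo.ipynb              # writes demo.md next to the notebook
#   python ipynb2md.py demo.ipynb -o out.md    # explicit markdown output file

# the cleaning step can also be reused programmatically before nbconvert runs
from ipynb2md import clear_notebook

clear_notebook('demo.ipynb', 'stripped.ipynb')  # copy of the notebook without outputs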
atpy/atpy | atpy/registry.py | 1 | 6496 | _readers = {}
_writers = {}
_set_readers = {}
_set_writers = {}
_extensions = {}
def register_reader(ttype, function, override=False):
'''
Register a table reader function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when reading.
*function*: [ function ]
The function to read in a single table.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _readers or override:
_readers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_writer(ttype, function, override=False):
'''
Register a table writer function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when writing.
*function*: [ function ]
The function to write out a single table.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _writers or override:
_writers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_set_reader(ttype, function, override=False):
'''
Register a table set reader function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when reading.
*function*: [ function ]
The function to read in a table set.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _set_readers or override:
_set_readers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_set_writer(ttype, function, override=False):
'''
Register a table set writer function.
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that will be used to
specify the table type when writing.
*function*: [ function ]
The function to write out a table set.
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any existing type if already present.
'''
if not ttype in _set_writers or override:
_set_writers[ttype] = function
else:
raise Exception("Type %s is already defined" % ttype)
def register_extensions(ttype, extensions, override=False):
'''
Associate file extensions with a specific table type
Required Arguments:
*ttype*: [ string ]
The table type identifier. This is the string that is used to
specify the table type when reading.
*extensions*: [ string or list or tuple ]
List of valid extensions for the table type - used for auto type
selection. All extensions should be given in lowercase as file
extensions are converted to lowercase before checking against this
list. If a single extension is given, it can be specified as a
string rather than a list of strings
Optional Keyword Arguments:
*override*: [ True | False ]
Whether to override any extensions if already present.
'''
if type(extensions) == str:
extensions = [extensions]
for extension in extensions:
if not extension in _extensions or override:
_extensions[extension] = ttype
else:
raise Exception("Extension %s is already defined" % extension)
def _determine_type(string, verbose):
if not isinstance(string, basestring):
raise Exception('Could not determine table type (non-string argument)')
s = str(string).lower()
if not '.' in s:
extension = s
else:
extension = s.split('.')[-1]
if extension.lower() in ['gz', 'bz2', 'bzip2']:
extension = s.split('.')[-2]
if extension in _extensions:
table_type = _extensions[extension]
if verbose:
print("Auto-detected table type: %s" % table_type)
else:
raise Exception('Could not determine table type for extension %s' % extension)
return table_type
from . import fitstable
register_reader('fits', fitstable.read)
register_writer('fits', fitstable.write)
register_set_reader('fits', fitstable.read_set)
register_set_writer('fits', fitstable.write_set)
register_extensions('fits', ['fit', 'fits'])
from . import votable
register_reader('vo', votable.read)
register_writer('vo', votable.write)
register_set_reader('vo', votable.read_set)
register_set_writer('vo', votable.write_set)
register_extensions('vo', ['xml', 'vot'])
from . import ipactable
register_reader('ipac', ipactable.read)
register_writer('ipac', ipactable.write)
register_extensions('ipac', ['ipac', 'tbl'])
from . import sqltable
register_reader('sql', sqltable.read)
register_writer('sql', sqltable.write)
register_set_reader('sql', sqltable.read_set)
register_set_writer('sql', sqltable.write_set)
register_extensions('sql', ['sqlite', 'postgres', 'mysql', 'db'])
from . import asciitables
register_reader('cds', asciitables.read_cds)
register_reader('mrt', asciitables.read_cds)
register_reader('latex', asciitables.read_latex)
register_writer('latex', asciitables.write_latex)
register_reader('rdb', asciitables.read_rdb)
register_writer('rdb', asciitables.write_rdb)
register_extensions('rdb', ['rdb'])
register_reader('daophot', asciitables.read_daophot)
register_reader('ascii', asciitables.read_ascii)
register_writer('ascii', asciitables.write_ascii)
from . import hdf5table
register_reader('hdf5', hdf5table.read)
register_set_reader('hdf5', hdf5table.read_set)
register_writer('hdf5', hdf5table.write)
register_set_writer('hdf5', hdf5table.write_set)
register_extensions('hdf5', ['hdf5', 'h5'])
from . import irsa_service
register_reader('irsa', irsa_service.read)
from . import vo_conesearch
register_reader('vo_conesearch', vo_conesearch.read)
from . import htmltable
register_writer('html', htmltable.write)
register_extensions('html', ['html', 'htm'])
| mit |
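A sketch of plugging a custom format into the registry above, assuming atpy and its optional back-ends are installed; the 'mycsv' type, its extension and the reader signature are illustrative placeholders.

from atpy import registry

def read_mycsv(table, filename, **kwargs):
    # populate `table` from `filename` here (placeholder reader)
    pass

registry.register_reader('mycsv', read_mycsv)
registry.register_extensions('mycsv', ['mycsv'])

# _determine_type() now auto-detects the new format from the file extension
assert registry._determine_type('data.mycsv', verbose=False) == 'mycsv'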
thomas-sterrenburg/fingerprinting-python | src/static/constants.py | 1 | 1239 | # Copyright 2017 Thomas Sterrenburg
#
# Licensed under the MIT License (the License); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at https://opensource.org/licenses/MIT#
import datetime
# file and directory names
CACHE = 'data/cache'
KNOWN = 'data/known'
REQUESTS = 'data/requests'
BLACKLIST = 'data/blacklist'
# CSV = 'aaa_' + str(datetime.datetime.now()).replace(' ', '_')[:-7] + '.csv'
CSV = 'aaa.csv'
# files to ignore in the requests directories
REQUEST_BLACKLIST = ['.keep', '.DS_Store']
# export csv with server names as columns, instead of hostnames
SERVER_NAMES = False
# failure handler times
PAUSE_TIME_AFTER_TIMEOUT = 1
MAX_ATTEMPTS_PER_HOST = 3
# logger formatting
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
LOGNAME_START = {
'logname': 'setup',
'host_index': 0,
'host_total': 0
}
# fingerprint attribute names
LEXICAL = 'LEXICAL'
SYNTACTIC = 'SYNTACTIC'
SEMANTIC = 'SEMANTIC'
NO_RESPONSE = 'NO_RESPONSE'
NO_RESPONSE_CODE = 'NO_RESPONSE_CODE'
NO_RESPONSE_TEXT = 'NONE'
DATA_LIST = 'LIST'
DATA_NONE = None
# TODO make verbose a possibility again
# TODO make part of arguments list
CSV_VERBOSE = True
EXPORT_CSV = True | mit |
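A small illustration of how the ANSI sequences defined above compose a coloured log prefix; 32 is the standard ANSI code for green, the message text is made up, and the import path simply follows the repository layout.

from src.static.constants import BOLD_SEQ, COLOR_SEQ, RESET_SEQ

GREEN = 32
prefix = (COLOR_SEQ % GREEN) + 'fingerprinting host' + RESET_SEQ
print(prefix + ' ' + BOLD_SEQ + 'done' + RESET_SEQ)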
jagguli/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/mutable_list.py | 405 | 10386 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
        Note that if _get_single_internal and _get_single_external return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
The type of exception to be raise on invalid index [Optional]
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, (int, long, slice)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, (int, long)):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = ( self._get_single_internal(i)
for i in xrange(origLen)
if i not in indexRange )
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
def __iter__(self):
"Iterate over the items in the list"
for i in xrange(len(self)):
yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n-1):
self.extend(cache)
return self
def __cmp__(self, other):
'cmp'
slen = len(self)
for i in range(slen):
try:
c = cmp(self[i], other[i])
except IndexError:
# must be other is shorter
return 1
else:
# elements not equal
if c: return c
return cmp(slen, len(other))
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, (int, long)):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=cmp, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v),v) for v in self]
temp.sort(cmp=cmp, key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
temp.sort(cmp=cmp, reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in xrange(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in xrange(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
| apache-2.0 |
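A minimal concrete subclass showing the contract from the ListMixin docstring; it is a sketch for illustration, not part of Django, and it keeps the mixin's Python 2 conventions.

from django.contrib.gis.geos.mutable_list import ListMixin

class UserList(ListMixin):
    "List-like object backed by a plain Python list."
    def __init__(self, items=None):
        self._items = list(items) if items is not None else []
        super(UserList, self).__init__()

    def __len__(self):
        return len(self._items)

    def _get_single_external(self, index):
        return self._items[index]

    def _set_list(self, length, items):
        # `items` may be a generator over _get_single_internal, so consume it
        # before clobbering the underlying storage.
        self._items = list(items)

lst = UserList([1, 2, 3])
lst.append(4)
lst[1:3] = [20, 30]
assert list(lst) == [1, 20, 30, 4]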
kochbeck/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/parse/__init__.py | 9 | 4491 | # Natural Language Toolkit: Parsers
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
"""
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as X{parsing}
the text, and the resulting tree structures are called the text's
X{parses}. Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called X{ambiguous} texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does I{not} distinguish these two types of
ambiguity.
The parser module defines C{ParserI}, a standard interface for parsing
texts; and two simple implementations of that interface,
C{ShiftReduceParser} and C{RecursiveDescentParser}. It also contains
sub-modules for specialized kinds of parsing:
- C{nltk.parser.chart} defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- C{nltk.parser.probabilistic} defines probabilistic parsing, which
associates a probability with each parse.
"""
from api import *
from chart import *
from featurechart import *
from pchart import *
from rd import *
from sr import *
from util import *
from viterbi import *
__all__ = [
# Parser interface
'ParserI',
# Parsers
'RecursiveDescentParser', 'SteppingRecursiveDescentParser',
'ShiftReduceParser', 'SteppingShiftReduceParser',
'EarleyChartParser', 'ChartParser', 'SteppingChartParser',
'BottomUpChartParser', 'InsideChartParser', 'RandomChartParser',
'UnsortedChartParser', 'LongestChartParser', 'ViterbiParser',
'FeatureEarleyChartParser',
]
######################################################################
#{ Deprecated
######################################################################
from nltk.internals import Deprecated
class ParseI(ParserI, Deprecated):
"""Use nltk.ParserI instead."""
class AbstractParse(AbstractParser, Deprecated):
"""Use nltk.ParserI instead."""
class RecursiveDescent(RecursiveDescentParser, Deprecated):
"""Use nltk.RecursiveDescentParser instead."""
class SteppingRecursiveDescent(SteppingRecursiveDescentParser, Deprecated):
"""Use nltk.SteppingRecursiveDescentParser instead."""
class ShiftReduce(ShiftReduceParser, Deprecated):
"""Use nltk.ShiftReduceParser instead."""
class SteppingShiftReduce(SteppingShiftReduceParser, Deprecated):
"""Use nltk.SteppingShiftReduceParser instead."""
class EarleyChartParse(EarleyChartParser, Deprecated):
"""Use nltk.EarleyChartParser instead."""
class FeatureEarleyChartParse(FeatureEarleyChartParser, Deprecated):
"""Use nltk.FeatureEarleyChartParser instead."""
class ChartParse(ChartParser, Deprecated):
"""Use nltk.ChartParser instead."""
class SteppingChartParse(SteppingChartParser, Deprecated):
"""Use nltk.SteppingChartParser instead."""
class BottomUpChartParse(BottomUpChartParser, Deprecated):
"""Use nltk.BottomUpChartParser instead."""
class InsideParse(InsideChartParser, Deprecated):
"""Use nltk.InsideChartParser instead."""
class RandomParse(RandomChartParser, Deprecated):
"""Use nltk.RandomChartParser instead."""
class UnsortedParse(UnsortedChartParser, Deprecated):
"""Use nltk.UnsortedChartParser instead."""
class LongestParse(LongestChartParser, Deprecated):
"""Use nltk.LongestChartParser instead."""
class ViterbiParse(ViterbiParser, Deprecated):
"""Use nltk.ViterbiParser instead."""
class GrammarFile(Deprecated):
"""Use nltk.data.load() instead."""
# [xx] had directives: %start, %kimmo, %tagger_file?
def __init__(self, filename=None, verbose=False):
raise ValueError("GrammarFile is no longer supported -- "
"use nltk.data.load() instead.")
| gpl-3.0 |
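A hedged sketch of driving one of the parsers exported above; it is written against the present-day nltk API (CFG.fromstring, parse() returning an iterator), which differs in detail from the nltk 0.9.2 module shown here.

import nltk

grammar = nltk.CFG.fromstring("""
    S  -> NP VP
    NP -> 'the' N
    VP -> V
    N  -> 'dog'
    V  -> 'barks'
""")
parser = nltk.RecursiveDescentParser(grammar)
for tree in parser.parse(['the', 'dog', 'barks']):
    print(tree)  # (S (NP the (N dog)) (VP (V barks)))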
pbougue/navitia | source/jormungandr/jormungandr/interfaces/v1/Places.py | 2 | 13833 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask_restful import abort
from flask.globals import g
from jormungandr.authentication import get_all_available_instances
from jormungandr.interfaces.v1.decorators import get_serializer
from jormungandr.interfaces.v1.serializer.api import PlacesSerializer, PlacesNearbySerializer
from jormungandr import i_manager, timezone, global_autocomplete, authentication
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.parsers import default_count_arg_type
from copy import deepcopy
from jormungandr.interfaces.v1.transform_id import transform_id
from jormungandr.exceptions import TechnicalError, InvalidArguments
from datetime import datetime
from jormungandr.parking_space_availability.parking_places_manager import ManageParkingPlaces
import ujson as json
from jormungandr.scenarios.utils import places_type
from navitiacommon import parser_args_type
from navitiacommon.parser_args_type import (
TypeSchema,
CoordFormat,
CustomSchemaType,
BooleanType,
OptionValue,
DateTimeFormat,
DepthArgument,
)
from jormungandr.interfaces.common import add_poi_infos_types, handle_poi_infos
import six
class geojson_argument(CustomSchemaType):
def __call__(self, value):
decoded = json.loads(value)
if not decoded:
raise ValueError('invalid shape')
return parser_args_type.geojson_argument(decoded)
def schema(self):
return TypeSchema(type=str) # TODO a better description of the geojson
class Places(ResourceUri):
def __init__(self, *args, **kwargs):
ResourceUri.__init__(
self, authentication=False, output_type_serializer=PlacesSerializer, *args, **kwargs
)
self.parsers["get"].add_argument("q", type=six.text_type, required=True, help="The data to search")
self.parsers["get"].add_argument(
"type[]",
type=OptionValue(list(places_type.keys())),
action="append",
default=["stop_area", "address", "poi", "administrative_region"],
help="The type of data to search",
)
self.parsers["get"].add_argument(
"count", type=default_count_arg_type, default=10, help="The maximum number of places returned"
)
self.parsers["get"].add_argument(
"search_type", type=int, default=0, hidden=True, help="Type of search: firstletter or type error"
)
self.parsers["get"].add_argument(
"_main_stop_area_weight_factor",
type=float,
default=1.0,
hidden=True,
help="multiplicator for the weight of main stop area",
)
self.parsers["get"].add_argument(
"admin_uri[]",
type=six.text_type,
action="append",
help="If filled, will restrain the search within the " "given admin uris",
)
self.parsers["get"].add_argument("depth", type=DepthArgument(), default=1, help="The depth of objects")
self.parsers["get"].add_argument(
"_current_datetime",
type=DateTimeFormat(),
schema_metadata={'default': 'now'},
hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.',
)
self.parsers['get'].add_argument(
"disable_geojson", type=BooleanType(), default=False, help="remove geojson from the response"
)
self.parsers['get'].add_argument(
"from",
type=CoordFormat(nullable=True),
help="Coordinates longitude;latitude used to prioritize " "the objects around this coordinate",
)
self.parsers['get'].add_argument(
"_autocomplete",
type=six.text_type,
hidden=True,
help="name of the autocomplete service, used under the hood",
)
self.parsers['get'].add_argument(
'shape', type=geojson_argument(), help='Geographical shape to limit the search.'
)
def get(self, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
self._register_interpreted_parameters(args)
if len(args['q']) == 0:
abort(400, message="Search word absent")
if args['disable_geojson']:
g.disable_geojson = True
user = authentication.get_user(token=authentication.get_token(), abort_if_no_token=False)
if args['shape'] is None and user and user.shape:
args['shape'] = json.loads(user.shape)
if user and user.default_coord:
if args['from'] is None:
args['from'] = CoordFormat()(user.default_coord)
else:
if args['from'] == '':
raise InvalidArguments("if 'from' is provided it cannot be null")
# If a region or coords are asked, we do the search according
# to the region, else, we do a word wide search
if any([region, lon, lat]):
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
response = i_manager.dispatch(args, "places", instance_name=self.region)
else:
available_instances = get_all_available_instances(user)
autocomplete = global_autocomplete.get('bragi')
if not autocomplete:
raise TechnicalError('world wide autocompletion service not available')
response = autocomplete.get(args, instances=available_instances)
return response, 200
def options(self, **kwargs):
return self.api_description(**kwargs)
class PlaceUri(ResourceUri):
def __init__(self, *args, **kwargs):
ResourceUri.__init__(
self, authentication=False, output_type_serializer=PlacesSerializer, *args, **kwargs
)
self.parsers["get"].add_argument(
"bss_stands",
type=BooleanType(),
default=False,
deprecated=True,
help="DEPRECATED, Use add_poi_infos[]=bss_stands",
)
self.parsers["get"].add_argument(
"add_poi_infos[]",
type=OptionValue(add_poi_infos_types),
default=['bss_stands', 'car_park'],
dest="add_poi_infos",
action="append",
help="Show more information about the poi if it's available, for instance, "
"show BSS/car park availability in the pois(BSS/car park) of the response",
)
self.parsers['get'].add_argument(
"disable_geojson", type=BooleanType(), default=False, help="remove geojson from the response"
)
self.parsers['get'].add_argument(
"disable_disruption", type=BooleanType(), default=False, help="remove disruptions from the response"
)
args = self.parsers["get"].parse_args()
if handle_poi_infos(args["add_poi_infos"], args["bss_stands"]):
self.get_decorators.insert(1, ManageParkingPlaces(self, 'places'))
if args['disable_geojson']:
g.disable_geojson = True
self.parsers['get'].add_argument(
"_autocomplete",
type=six.text_type,
hidden=True,
help="name of the autocomplete service, used under the hood",
)
def get(self, id, region=None, lon=None, lat=None):
args = self.parsers["get"].parse_args()
args.update({"uri": transform_id(id), "_current_datetime": datetime.utcnow()})
if any([region, lon, lat]):
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
response = i_manager.dispatch(args, "place_uri", instance_name=self.region)
else:
user = authentication.get_user(token=authentication.get_token(), abort_if_no_token=False)
available_instances = get_all_available_instances(user)
autocomplete = global_autocomplete.get('bragi')
if not autocomplete:
raise TechnicalError('world wide autocompletion service not available')
response = autocomplete.get_by_uri(args["uri"], instances=available_instances)
return response, 200
def options(self, **kwargs):
return self.api_description(**kwargs)
places_types = {
'stop_areas',
'stop_points',
'pois',
'addresses',
'coords',
'places',
'coord',
} # add admins when possible
class PlacesNearby(ResourceUri):
def __init__(self, *args, **kwargs):
ResourceUri.__init__(self, output_type_serializer=PlacesNearbySerializer, *args, **kwargs)
parser_get = self.parsers["get"]
parser_get.add_argument(
"type[]",
type=OptionValue(list(places_type.keys())),
action="append",
default=["stop_area", "stop_point", "poi"],
help="Type of the objects to return",
)
parser_get.add_argument("filter", type=six.text_type, default="", help="Filter your objects")
parser_get.add_argument("distance", type=int, default=500, help="Distance range of the query in meters")
parser_get.add_argument("count", type=default_count_arg_type, default=10, help="Elements per page")
parser_get.add_argument("depth", type=DepthArgument(), default=1, help="Maximum depth on objects")
parser_get.add_argument("start_page", type=int, default=0, help="The page number of the ptref result")
parser_get.add_argument(
"bss_stands",
type=BooleanType(),
default=False,
deprecated=True,
help="DEPRECATED, Use add_poi_infos[]=bss_stands",
)
parser_get.add_argument(
"add_poi_infos[]",
type=OptionValue(add_poi_infos_types),
default=['bss_stands', 'car_park'],
dest="add_poi_infos",
action="append",
help="Show more information about the poi if it's available, for instance, "
"show BSS/car park availability in the pois(BSS/car park) of the response",
)
parser_get.add_argument(
"_current_datetime",
type=DateTimeFormat(),
schema_metadata={'default': 'now'},
hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.',
)
parser_get.add_argument(
"disable_geojson", type=BooleanType(), default=False, help="remove geojson from the response"
)
parser_get.add_argument(
"disable_disruption", type=BooleanType(), default=False, help="remove disruptions from the response"
)
args = parser_get.parse_args()
if handle_poi_infos(args["add_poi_infos"], args["bss_stands"]):
self.get_decorators.insert(1, ManageParkingPlaces(self, 'places_nearby'))
@get_serializer(serpy=PlacesNearbySerializer)
def get(self, region=None, lon=None, lat=None, uri=None):
self.region = i_manager.get_region(region, lon, lat)
timezone.set_request_timezone(self.region)
args = self.parsers["get"].parse_args()
if args['disable_geojson']:
g.disable_geojson = True
if uri:
if uri[-1] == '/':
uri = uri[:-1]
uris = uri.split("/")
if len(uris) >= 2:
args["uri"] = transform_id(uris[-1])
# for coherence we check the type of the object
obj_type = uris[-2]
if obj_type not in places_types:
abort(404, message='places_nearby api not available for {}'.format(obj_type))
else:
abort(404)
elif lon and lat:
# check if lon and lat can be converted to float
float(lon)
float(lat)
args["uri"] = "coord:{}:{}".format(lon, lat)
else:
abort(404)
args["filter"] = args["filter"].replace(".id", ".uri")
self._register_interpreted_parameters(args)
response = i_manager.dispatch(args, "places_nearby", instance_name=self.region)
return response, 200
def options(self, **kwargs):
return self.api_description(**kwargs)
| agpl-3.0 |
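A hypothetical sketch of the custom-argument pattern used above: a validator class exposing __call__ for parsing and schema() for documentation. The 'lat_lon' type below is invented for illustration and is not part of navitia.

from navitiacommon.parser_args_type import CustomSchemaType, TypeSchema

class lat_lon_argument(CustomSchemaType):
    def __call__(self, value):
        lat, _, lon = value.partition(';')
        if not lat or not lon:
            raise ValueError('invalid "lat;lon" value: {}'.format(value))
        return float(lat), float(lon)

    def schema(self):
        return TypeSchema(type=str)

# it would then plug into a resource parser the same way geojson_argument does:
#     parser_get.add_argument('around', type=lat_lon_argument(),
#                             help='lat;lon coordinate to search around')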
achabotl/pambox | setup.py | 1 | 3387 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import codecs
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = read('README.rst')
def check_dependencies():
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
return
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
raise ImportError("pambox requires numpy")
try:
import scipy
except ImportError:
raise ImportError("pambox requires scipy")
try:
import matplotlib
except ImportError:
raise ImportError("pambox requires matplotlib")
try:
import pandas
except ImportError:
raise ImportError("pambox requires pandas")
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--runslow', 'pambox/tests']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
if __name__ == '__main__':
import sys
if not (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands', 'egg_info', '--version',
'clean'))):
check_dependencies()
setup(
name='pambox',
description='A Python toolbox for auditory modeling',
author='Alexandre Chabot-Leclerc',
author_email='[email protected]',
version=find_version('pambox', '__init__.py'),
url='https://bitbucket.org/achabotl/pambox',
license='Modified BSD License',
tests_require=['pytest'],
install_requires=[
'six>=1.4.1',
],
cmdclass={'test': PyTest},
long_description=long_description,
packages=['pambox'],
include_package_data=True,
platforms='any',
test_suite='pambox.tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
extras_require={
'testing': ['pytest']
}
)
| bsd-3-clause |
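A quick illustration of the version lookup used above; the file contents are a stand-in, while the regular expression is the one from find_version().

import re

fake_init = '__version__ = "0.1.dev0"\n'
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", fake_init, re.M)
assert match.group(1) == "0.1.dev0"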
tseaver/gcloud-python | container/google/cloud/container_v1/proto/cluster_service_pb2.py | 1 | 251488 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/container_v1/proto/cluster_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/container_v1/proto/cluster_service.proto',
package='google.container.v1',
syntax='proto3',
serialized_pb=_b('\n5google/cloud/container_v1/proto/cluster_service.proto\x12\x13google.container.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xed\x03\n\nNodeConfig\x12\x14\n\x0cmachine_type\x18\x01 \x01(\t\x12\x14\n\x0c\x64isk_size_gb\x18\x02 \x01(\x05\x12\x14\n\x0coauth_scopes\x18\x03 \x03(\t\x12\x17\n\x0fservice_account\x18\t \x01(\t\x12?\n\x08metadata\x18\x04 \x03(\x0b\x32-.google.container.v1.NodeConfig.MetadataEntry\x12\x12\n\nimage_type\x18\x05 \x01(\t\x12;\n\x06labels\x18\x06 \x03(\x0b\x32+.google.container.v1.NodeConfig.LabelsEntry\x12\x17\n\x0flocal_ssd_count\x18\x07 \x01(\x05\x12\x0c\n\x04tags\x18\x08 \x03(\t\x12\x13\n\x0bpreemptible\x18\n \x01(\x08\x12<\n\x0c\x61\x63\x63\x65lerators\x18\x0b \x03(\x0b\x32&.google.container.v1.AcceleratorConfig\x12\x18\n\x10min_cpu_platform\x18\r \x01(\t\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd1\x01\n\nMasterAuth\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\x12O\n\x19\x63lient_certificate_config\x18\x03 \x01(\x0b\x32,.google.container.v1.ClientCertificateConfig\x12\x1e\n\x16\x63luster_ca_certificate\x18\x64 \x01(\t\x12\x1a\n\x12\x63lient_certificate\x18\x65 \x01(\t\x12\x12\n\nclient_key\x18\x66 \x01(\t\";\n\x17\x43lientCertificateConfig\x12 \n\x18issue_client_certificate\x18\x01 \x01(\x08\"\xb7\x02\n\x0c\x41\x64\x64onsConfig\x12\x43\n\x13http_load_balancing\x18\x01 \x01(\x0b\x32&.google.container.v1.HttpLoadBalancing\x12Q\n\x1ahorizontal_pod_autoscaling\x18\x02 \x01(\x0b\x32-.google.container.v1.HorizontalPodAutoscaling\x12\x46\n\x14kubernetes_dashboard\x18\x03 \x01(\x0b\x32(.google.container.v1.KubernetesDashboard\x12G\n\x15network_policy_config\x18\x04 \x01(\x0b\x32(.google.container.v1.NetworkPolicyConfig\"%\n\x11HttpLoadBalancing\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08\",\n\x18HorizontalPodAutoscaling\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08\"\'\n\x13KubernetesDashboard\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08\"\'\n\x13NetworkPolicyConfig\x12\x10\n\x08\x64isabled\x18\x01 \x01(\x08\"\xbc\x01\n\x1eMasterAuthorizedNetworksConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12R\n\x0b\x63idr_blocks\x18\x02 \x03(\x0b\x32=.google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock\x1a\x35\n\tCidrBlock\x12\x14\n\x0c\x64isplay_name\x18\x01 \x01(\t\x12\x12\n\ncidr_block\x18\x02 \x01(\t\"\x1d\n\nLegacyAbac\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\"\x91\x01\n\rNetworkPolicy\x12=\n\x08provider\x18\x01 \x01(\x0e\x32+.google.container.v1.NetworkPolicy.Provider\x12\x0f\n\x07\x65nabled\x18\x02 \x01(\x08\"0\n\x08Provider\x12\x18\n\x14PROVIDER_UNSPECIFIED\x10\x00\x12\n\n\x06\x43\x41LICO\x10\x01\"\xdd\x02\n\x12IPAllocationPolicy\x12\x16\n\x0euse_ip_aliases\x18\x01 \x01(\x08\x12\x19\n\x11\x63reate_subnetwork\x18\x02 \x01(\x08\x12\x17\n\x0fsubnetwork_name\x18\x03 \x01(\t\x12\x19\n\x11\x63luster_ipv4_cidr\x18\x04 \x01(\t\x12\x16\n\x0enode_ipv4_cidr\x18\x05 \x01(\t\x12\x1a\n\x12services_ipv4_cidr\x18\x06 \x01(\t\x12$\n\x1c\x63luster_secondary_range_name\x18\x07 \x01(\t\x12%\n\x1dservices_secondary_range_name\x18\x08 \x01(\t\x12\x1f\n\x17\x63luster_ipv4_cidr_block\x18\t \x01(\t\x12\x1c\n\x14node_ipv4_cidr_block\x18\n \x01(\t\x12 \n\x18services_ipv4_cidr_block\x18\x0b \x01(\t\"\xaa\x0b\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x1a\n\x12initial_node_count\x18\x03 
\x01(\x05\x12\x34\n\x0bnode_config\x18\x04 \x01(\x0b\x32\x1f.google.container.v1.NodeConfig\x12\x34\n\x0bmaster_auth\x18\x05 \x01(\x0b\x32\x1f.google.container.v1.MasterAuth\x12\x17\n\x0flogging_service\x18\x06 \x01(\t\x12\x1a\n\x12monitoring_service\x18\x07 \x01(\t\x12\x0f\n\x07network\x18\x08 \x01(\t\x12\x19\n\x11\x63luster_ipv4_cidr\x18\t \x01(\t\x12\x38\n\raddons_config\x18\n \x01(\x0b\x32!.google.container.v1.AddonsConfig\x12\x12\n\nsubnetwork\x18\x0b \x01(\t\x12\x31\n\nnode_pools\x18\x0c \x03(\x0b\x32\x1d.google.container.v1.NodePool\x12\x11\n\tlocations\x18\r \x03(\t\x12\x1f\n\x17\x65nable_kubernetes_alpha\x18\x0e \x01(\x08\x12I\n\x0fresource_labels\x18\x0f \x03(\x0b\x32\x30.google.container.v1.Cluster.ResourceLabelsEntry\x12\x19\n\x11label_fingerprint\x18\x10 \x01(\t\x12\x34\n\x0blegacy_abac\x18\x12 \x01(\x0b\x32\x1f.google.container.v1.LegacyAbac\x12:\n\x0enetwork_policy\x18\x13 \x01(\x0b\x32\".google.container.v1.NetworkPolicy\x12\x45\n\x14ip_allocation_policy\x18\x14 \x01(\x0b\x32\'.google.container.v1.IPAllocationPolicy\x12^\n!master_authorized_networks_config\x18\x16 \x01(\x0b\x32\x33.google.container.v1.MasterAuthorizedNetworksConfig\x12\x42\n\x12maintenance_policy\x18\x17 \x01(\x0b\x32&.google.container.v1.MaintenancePolicy\x12\x11\n\tself_link\x18\x64 \x01(\t\x12\x0c\n\x04zone\x18\x65 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x66 \x01(\t\x12\x1f\n\x17initial_cluster_version\x18g \x01(\t\x12\x1e\n\x16\x63urrent_master_version\x18h \x01(\t\x12\x1c\n\x14\x63urrent_node_version\x18i \x01(\t\x12\x13\n\x0b\x63reate_time\x18j \x01(\t\x12\x33\n\x06status\x18k \x01(\x0e\x32#.google.container.v1.Cluster.Status\x12\x16\n\x0estatus_message\x18l \x01(\t\x12\x1b\n\x13node_ipv4_cidr_size\x18m \x01(\x05\x12\x1a\n\x12services_ipv4_cidr\x18n \x01(\t\x12\x1b\n\x13instance_group_urls\x18o \x03(\t\x12\x1a\n\x12\x63urrent_node_count\x18p \x01(\x05\x12\x13\n\x0b\x65xpire_time\x18q \x01(\t\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"i\n\x06Status\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\x10\n\x0cPROVISIONING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x0f\n\x0bRECONCILING\x10\x03\x12\x0c\n\x08STOPPING\x10\x04\x12\t\n\x05\x45RROR\x10\x05\"\xc1\x03\n\rClusterUpdate\x12\x1c\n\x14\x64\x65sired_node_version\x18\x04 \x01(\t\x12\"\n\x1a\x64\x65sired_monitoring_service\x18\x05 \x01(\t\x12@\n\x15\x64\x65sired_addons_config\x18\x06 \x01(\x0b\x32!.google.container.v1.AddonsConfig\x12\x1c\n\x14\x64\x65sired_node_pool_id\x18\x07 \x01(\t\x12\x1a\n\x12\x64\x65sired_image_type\x18\x08 \x01(\t\x12O\n\x1d\x64\x65sired_node_pool_autoscaling\x18\t \x01(\x0b\x32(.google.container.v1.NodePoolAutoscaling\x12\x19\n\x11\x64\x65sired_locations\x18\n \x03(\t\x12\x66\n)desired_master_authorized_networks_config\x18\x0c \x01(\x0b\x32\x33.google.container.v1.MasterAuthorizedNetworksConfig\x12\x1e\n\x16\x64\x65sired_master_version\x18\x64 \x01(\t\"\xe5\x05\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12;\n\x0eoperation_type\x18\x03 \x01(\x0e\x32#.google.container.v1.Operation.Type\x12\x35\n\x06status\x18\x04 \x01(\x0e\x32%.google.container.v1.Operation.Status\x12\x0e\n\x06\x64\x65tail\x18\x08 \x01(\t\x12\x16\n\x0estatus_message\x18\x05 \x01(\t\x12\x11\n\tself_link\x18\x06 \x01(\t\x12\x13\n\x0btarget_link\x18\x07 \x01(\t\x12\x12\n\nstart_time\x18\n \x01(\t\x12\x10\n\x08\x65nd_time\x18\x0b 
\x01(\t\"R\n\x06Status\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03\x12\x0c\n\x08\x41\x42ORTING\x10\x04\"\xfd\x02\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0e\x43REATE_CLUSTER\x10\x01\x12\x12\n\x0e\x44\x45LETE_CLUSTER\x10\x02\x12\x12\n\x0eUPGRADE_MASTER\x10\x03\x12\x11\n\rUPGRADE_NODES\x10\x04\x12\x12\n\x0eREPAIR_CLUSTER\x10\x05\x12\x12\n\x0eUPDATE_CLUSTER\x10\x06\x12\x14\n\x10\x43REATE_NODE_POOL\x10\x07\x12\x14\n\x10\x44\x45LETE_NODE_POOL\x10\x08\x12\x1c\n\x18SET_NODE_POOL_MANAGEMENT\x10\t\x12\x15\n\x11\x41UTO_REPAIR_NODES\x10\n\x12\x16\n\x12\x41UTO_UPGRADE_NODES\x10\x0b\x12\x0e\n\nSET_LABELS\x10\x0c\x12\x13\n\x0fSET_MASTER_AUTH\x10\r\x12\x16\n\x12SET_NODE_POOL_SIZE\x10\x0e\x12\x16\n\x12SET_NETWORK_POLICY\x10\x0f\x12\x1a\n\x16SET_MAINTENANCE_POLICY\x10\x10\"g\n\x14\x43reateClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12-\n\x07\x63luster\x18\x03 \x01(\x0b\x32\x1c.google.container.v1.Cluster\"I\n\x11GetClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\"\x80\x01\n\x14UpdateClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x32\n\x06update\x18\x04 \x01(\x0b\x32\".google.container.v1.ClusterUpdate\"\x8d\x01\n\x15UpdateNodePoolRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\x12\x14\n\x0cnode_version\x18\x05 \x01(\t\x12\x12\n\nimage_type\x18\x06 \x01(\t\"\xaa\x01\n\x1dSetNodePoolAutoscalingRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\x12=\n\x0b\x61utoscaling\x18\x05 \x01(\x0b\x32(.google.container.v1.NodePoolAutoscaling\"i\n\x18SetLoggingServiceRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x17\n\x0flogging_service\x18\x04 \x01(\t\"o\n\x1bSetMonitoringServiceRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x1a\n\x12monitoring_service\x18\x04 \x01(\t\"\x88\x01\n\x16SetAddonsConfigRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x38\n\raddons_config\x18\x04 \x01(\x0b\x32!.google.container.v1.AddonsConfig\"^\n\x13SetLocationsRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x11\n\tlocations\x18\x04 \x03(\t\"c\n\x13UpdateMasterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x16\n\x0emaster_version\x18\x04 \x01(\t\"\x91\x02\n\x14SetMasterAuthRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12@\n\x06\x61\x63tion\x18\x04 \x01(\x0e\x32\x30.google.container.v1.SetMasterAuthRequest.Action\x12/\n\x06update\x18\x05 \x01(\x0b\x32\x1f.google.container.v1.MasterAuth\"P\n\x06\x41\x63tion\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x10\n\x0cSET_PASSWORD\x10\x01\x12\x15\n\x11GENERATE_PASSWORD\x10\x02\x12\x10\n\x0cSET_USERNAME\x10\x03\"L\n\x14\x44\x65leteClusterRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 
\x01(\t\"7\n\x13ListClustersRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\"]\n\x14ListClustersResponse\x12.\n\x08\x63lusters\x18\x01 \x03(\x0b\x32\x1c.google.container.v1.Cluster\x12\x15\n\rmissing_zones\x18\x02 \x03(\t\"M\n\x13GetOperationRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x14\n\x0coperation_id\x18\x03 \x01(\t\"9\n\x15ListOperationsRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\"P\n\x16\x43\x61ncelOperationRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x14\n\x0coperation_id\x18\x03 \x01(\t\"c\n\x16ListOperationsResponse\x12\x32\n\noperations\x18\x01 \x03(\x0b\x32\x1e.google.container.v1.Operation\x12\x15\n\rmissing_zones\x18\x02 \x03(\t\":\n\x16GetServerConfigRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\"\xa2\x01\n\x0cServerConfig\x12\x1f\n\x17\x64\x65\x66\x61ult_cluster_version\x18\x01 \x01(\t\x12\x1b\n\x13valid_node_versions\x18\x03 \x03(\t\x12\x1a\n\x12\x64\x65\x66\x61ult_image_type\x18\x04 \x01(\t\x12\x19\n\x11valid_image_types\x18\x05 \x03(\t\x12\x1d\n\x15valid_master_versions\x18\x06 \x03(\t\"\x7f\n\x15\x43reateNodePoolRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x30\n\tnode_pool\x18\x04 \x01(\x0b\x32\x1d.google.container.v1.NodePool\"c\n\x15\x44\x65leteNodePoolRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\"L\n\x14ListNodePoolsRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\"`\n\x12GetNodePoolRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\"\xf0\x03\n\x08NodePool\x12\x0c\n\x04name\x18\x01 \x01(\t\x12/\n\x06\x63onfig\x18\x02 \x01(\x0b\x32\x1f.google.container.v1.NodeConfig\x12\x1a\n\x12initial_node_count\x18\x03 \x01(\x05\x12\x11\n\tself_link\x18\x64 \x01(\t\x12\x0f\n\x07version\x18\x65 \x01(\t\x12\x1b\n\x13instance_group_urls\x18\x66 \x03(\t\x12\x34\n\x06status\x18g \x01(\x0e\x32$.google.container.v1.NodePool.Status\x12\x16\n\x0estatus_message\x18h \x01(\t\x12=\n\x0b\x61utoscaling\x18\x04 \x01(\x0b\x32(.google.container.v1.NodePoolAutoscaling\x12\x37\n\nmanagement\x18\x05 \x01(\x0b\x32#.google.container.v1.NodeManagement\"\x81\x01\n\x06Status\x12\x16\n\x12STATUS_UNSPECIFIED\x10\x00\x12\x10\n\x0cPROVISIONING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x16\n\x12RUNNING_WITH_ERROR\x10\x03\x12\x0f\n\x0bRECONCILING\x10\x04\x12\x0c\n\x08STOPPING\x10\x05\x12\t\n\x05\x45RROR\x10\x06\"}\n\x0eNodeManagement\x12\x14\n\x0c\x61uto_upgrade\x18\x01 \x01(\x08\x12\x13\n\x0b\x61uto_repair\x18\x02 \x01(\x08\x12@\n\x0fupgrade_options\x18\n \x01(\x0b\x32\'.google.container.v1.AutoUpgradeOptions\"J\n\x12\x41utoUpgradeOptions\x12\x1f\n\x17\x61uto_upgrade_start_time\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\"K\n\x11MaintenancePolicy\x12\x36\n\x06window\x18\x01 \x01(\x0b\x32&.google.container.v1.MaintenanceWindow\"n\n\x11MaintenanceWindow\x12O\n\x18\x64\x61ily_maintenance_window\x18\x02 \x01(\x0b\x32+.google.container.v1.DailyMaintenanceWindowH\x00\x42\x08\n\x06policy\">\n\x16\x44\x61ilyMaintenanceWindow\x12\x12\n\nstart_time\x18\x02 \x01(\t\x12\x10\n\x08\x64uration\x18\x03 
\x01(\t\"\xa3\x01\n\x1cSetNodePoolManagementRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\x12\x37\n\nmanagement\x18\x05 \x01(\x0b\x32#.google.container.v1.NodeManagement\"x\n\x16SetNodePoolSizeRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\x12\x12\n\nnode_count\x18\x05 \x01(\x05\"l\n\x1eRollbackNodePoolUpgradeRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x14\n\x0cnode_pool_id\x18\x04 \x01(\t\"J\n\x15ListNodePoolsResponse\x12\x31\n\nnode_pools\x18\x01 \x03(\x0b\x32\x1d.google.container.v1.NodePool\"V\n\x13NodePoolAutoscaling\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x16\n\x0emin_node_count\x18\x02 \x01(\x05\x12\x16\n\x0emax_node_count\x18\x03 \x01(\x05\"\xee\x01\n\x10SetLabelsRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12R\n\x0fresource_labels\x18\x04 \x03(\x0b\x32\x39.google.container.v1.SetLabelsRequest.ResourceLabelsEntry\x12\x19\n\x11label_fingerprint\x18\x05 \x01(\t\x1a\x35\n\x13ResourceLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"]\n\x14SetLegacyAbacRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x04 \x01(\x08\"N\n\x16StartIPRotationRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\"Q\n\x19\x43ompleteIPRotationRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\"H\n\x11\x41\x63\x63\x65leratorConfig\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x01 \x01(\x03\x12\x18\n\x10\x61\x63\x63\x65lerator_type\x18\x02 \x01(\t\"\x8b\x01\n\x17SetNetworkPolicyRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12:\n\x0enetwork_policy\x18\x04 \x01(\x0b\x32\".google.container.v1.NetworkPolicy\"\x97\x01\n\x1bSetMaintenancePolicyRequest\x12\x12\n\nproject_id\x18\x01 \x01(\t\x12\x0c\n\x04zone\x18\x02 \x01(\t\x12\x12\n\ncluster_id\x18\x03 \x01(\t\x12\x42\n\x12maintenance_policy\x18\x04 
\x01(\x0b\x32&.google.container.v1.MaintenancePolicy2\xe3*\n\x0e\x43lusterManager\x12\x9c\x01\n\x0cListClusters\x12(.google.container.v1.ListClustersRequest\x1a).google.container.v1.ListClustersResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/projects/{project_id}/zones/{zone}/clusters\x12\x98\x01\n\nGetCluster\x12&.google.container.v1.GetClusterRequest\x1a\x1c.google.container.v1.Cluster\"D\x82\xd3\xe4\x93\x02>\x12</v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}\x12\x96\x01\n\rCreateCluster\x12).google.container.v1.CreateClusterRequest\x1a\x1e.google.container.v1.Operation\":\x82\xd3\xe4\x93\x02\x34\"//v1/projects/{project_id}/zones/{zone}/clusters:\x01*\x12\xa3\x01\n\rUpdateCluster\x12).google.container.v1.UpdateClusterRequest\x1a\x1e.google.container.v1.Operation\"G\x82\xd3\xe4\x93\x02\x41\x1a</v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:\x01*\x12\xc5\x01\n\x0eUpdateNodePool\x12*.google.container.v1.UpdateNodePoolRequest\x1a\x1e.google.container.v1.Operation\"g\x82\xd3\xe4\x93\x02\x61\"\\/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/update:\x01*\x12\xda\x01\n\x16SetNodePoolAutoscaling\x12\x32.google.container.v1.SetNodePoolAutoscalingRequest\x1a\x1e.google.container.v1.Operation\"l\x82\xd3\xe4\x93\x02\x66\"a/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/autoscaling:\x01*\x12\xb3\x01\n\x11SetLoggingService\x12-.google.container.v1.SetLoggingServiceRequest\x1a\x1e.google.container.v1.Operation\"O\x82\xd3\xe4\x93\x02I\"D/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/logging:\x01*\x12\xbc\x01\n\x14SetMonitoringService\x12\x30.google.container.v1.SetMonitoringServiceRequest\x1a\x1e.google.container.v1.Operation\"R\x82\xd3\xe4\x93\x02L\"G/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/monitoring:\x01*\x12\xae\x01\n\x0fSetAddonsConfig\x12+.google.container.v1.SetAddonsConfigRequest\x1a\x1e.google.container.v1.Operation\"N\x82\xd3\xe4\x93\x02H\"C/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/addons:\x01*\x12\xab\x01\n\x0cSetLocations\x12(.google.container.v1.SetLocationsRequest\x1a\x1e.google.container.v1.Operation\"Q\x82\xd3\xe4\x93\x02K\"F/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/locations:\x01*\x12\xa8\x01\n\x0cUpdateMaster\x12(.google.container.v1.UpdateMasterRequest\x1a\x1e.google.container.v1.Operation\"N\x82\xd3\xe4\x93\x02H\"C/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/master:\x01*\x12\xb1\x01\n\rSetMasterAuth\x12).google.container.v1.SetMasterAuthRequest\x1a\x1e.google.container.v1.Operation\"U\x82\xd3\xe4\x93\x02O\"J/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setMasterAuth:\x01*\x12\xa0\x01\n\rDeleteCluster\x12).google.container.v1.DeleteClusterRequest\x1a\x1e.google.container.v1.Operation\"D\x82\xd3\xe4\x93\x02>*</v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}\x12\xa4\x01\n\x0eListOperations\x12*.google.container.v1.ListOperationsRequest\x1a+.google.container.v1.ListOperationsResponse\"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1/projects/{project_id}/zones/{zone}/operations\x12\xa2\x01\n\x0cGetOperation\x12(.google.container.v1.GetOperationRequest\x1a\x1e.google.container.v1.Operation\"H\x82\xd3\xe4\x93\x02\x42\x12@/v1/projects/{project_id}/zones/{zone}/operations/{operation_id}\x12\xaa\x01\n\x0f\x43\x61ncelOperation\x12+.google.container.v1.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"R\x82\xd3\xe4\x93\x02L\"G/v1/projects/{project_id}/zones/{zone}/operations/{operation_id}:ca
ncel:\x01*\x12\x9e\x01\n\x0fGetServerConfig\x12+.google.container.v1.GetServerConfigRequest\x1a!.google.container.v1.ServerConfig\";\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/zones/{zone}/serverconfig\x12\xb6\x01\n\rListNodePools\x12).google.container.v1.ListNodePoolsRequest\x1a*.google.container.v1.ListNodePoolsResponse\"N\x82\xd3\xe4\x93\x02H\x12\x46/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools\x12\xb4\x01\n\x0bGetNodePool\x12\'.google.container.v1.GetNodePoolRequest\x1a\x1d.google.container.v1.NodePool\"]\x82\xd3\xe4\x93\x02W\x12U/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}\x12\xaf\x01\n\x0e\x43reateNodePool\x12*.google.container.v1.CreateNodePoolRequest\x1a\x1e.google.container.v1.Operation\"Q\x82\xd3\xe4\x93\x02K\"F/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools:\x01*\x12\xbb\x01\n\x0e\x44\x65leteNodePool\x12*.google.container.v1.DeleteNodePoolRequest\x1a\x1e.google.container.v1.Operation\"]\x82\xd3\xe4\x93\x02W*U/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}\x12\xd9\x01\n\x17RollbackNodePoolUpgrade\x12\x33.google.container.v1.RollbackNodePoolUpgradeRequest\x1a\x1e.google.container.v1.Operation\"i\x82\xd3\xe4\x93\x02\x63\"^/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}:rollback:\x01*\x12\xda\x01\n\x15SetNodePoolManagement\x12\x31.google.container.v1.SetNodePoolManagementRequest\x1a\x1e.google.container.v1.Operation\"n\x82\xd3\xe4\x93\x02h\"c/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setManagement:\x01*\x12\xaa\x01\n\tSetLabels\x12%.google.container.v1.SetLabelsRequest\x1a\x1e.google.container.v1.Operation\"V\x82\xd3\xe4\x93\x02P\"K/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/resourceLabels:\x01*\x12\xae\x01\n\rSetLegacyAbac\x12).google.container.v1.SetLegacyAbacRequest\x1a\x1e.google.container.v1.Operation\"R\x82\xd3\xe4\x93\x02L\"G/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/legacyAbac:\x01*\x12\xb7\x01\n\x0fStartIPRotation\x12+.google.container.v1.StartIPRotationRequest\x1a\x1e.google.container.v1.Operation\"W\x82\xd3\xe4\x93\x02Q\"L/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:startIpRotation:\x01*\x12\xc0\x01\n\x12\x43ompleteIPRotation\x12..google.container.v1.CompleteIPRotationRequest\x1a\x1e.google.container.v1.Operation\"Z\x82\xd3\xe4\x93\x02T\"O/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:completeIpRotation:\x01*\x12\xc8\x01\n\x0fSetNodePoolSize\x12+.google.container.v1.SetNodePoolSizeRequest\x1a\x1e.google.container.v1.Operation\"h\x82\xd3\xe4\x93\x02\x62\"]/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setSize:\x01*\x12\xba\x01\n\x10SetNetworkPolicy\x12,.google.container.v1.SetNetworkPolicyRequest\x1a\x1e.google.container.v1.Operation\"X\x82\xd3\xe4\x93\x02R\"M/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setNetworkPolicy:\x01*\x12\xc6\x01\n\x14SetMaintenancePolicy\x12\x30.google.container.v1.SetMaintenancePolicyRequest\x1a\x1e.google.container.v1.Operation\"\\\x82\xd3\xe4\x93\x02V\"Q/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setMaintenancePolicy:\x01*B\xa6\x01\n\x17\x63om.google.container.v1B\x13\x43lusterServiceProtoP\x01Z<google.golang.org/genproto/googleapis/container/v1;container\xaa\x02\x19Google.Cloud.Container.V1\xca\x02\x19Google\\Cloud\\Container\\V1b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
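# NOTE: the block above is the protoc-generated FileDescriptor for
# google/cloud/container_v1/proto/cluster_service.proto, with its two imports
# (google/api/annotations.proto and google/protobuf/empty.proto) listed as
# dependencies. The enum and message descriptors that follow are generated
# views into that same serialized descriptor: their serialized_start /
# serialized_end values are byte offsets into serialized_pb, so none of the
# generated values below should be edited by hand.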
_NETWORKPOLICY_PROVIDER = _descriptor.EnumDescriptor(
name='Provider',
full_name='google.container.v1.NetworkPolicy.Provider',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PROVIDER_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CALICO', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1707,
serialized_end=1755,
)
_sym_db.RegisterEnumDescriptor(_NETWORKPOLICY_PROVIDER)
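# _NETWORKPOLICY_PROVIDER backs the nested enum
# google.container.v1.NetworkPolicy.Provider (PROVIDER_UNSPECIFIED = 0,
# CALICO = 1). A minimal usage sketch, assuming the NetworkPolicy message
# class is built from this descriptor later in the module, as protoc
# normally emits:
#
#   policy = NetworkPolicy(provider=NetworkPolicy.Provider.Value('CALICO'),
#                          enabled=True)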
_CLUSTER_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='google.container.v1.Cluster.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROVISIONING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECONCILING', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOPPING', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=3455,
serialized_end=3560,
)
_sym_db.RegisterEnumDescriptor(_CLUSTER_STATUS)
_OPERATION_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='google.container.v1.Operation.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DONE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ABORTING', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4290,
serialized_end=4372,
)
_sym_db.RegisterEnumDescriptor(_OPERATION_STATUS)
_OPERATION_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.container.v1.Operation.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_CLUSTER', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE_CLUSTER', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPGRADE_MASTER', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPGRADE_NODES', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REPAIR_CLUSTER', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPDATE_CLUSTER', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_NODE_POOL', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DELETE_NODE_POOL', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_NODE_POOL_MANAGEMENT', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTO_REPAIR_NODES', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUTO_UPGRADE_NODES', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_LABELS', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_MASTER_AUTH', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_NODE_POOL_SIZE', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_NETWORK_POLICY', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_MAINTENANCE_POLICY', index=16, number=16,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4375,
serialized_end=4756,
)
_sym_db.RegisterEnumDescriptor(_OPERATION_TYPE)
_SETMASTERAUTHREQUEST_ACTION = _descriptor.EnumDescriptor(
name='Action',
full_name='google.container.v1.SetMasterAuthRequest.Action',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_PASSWORD', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GENERATE_PASSWORD', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SET_USERNAME', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6136,
serialized_end=6216,
)
_sym_db.RegisterEnumDescriptor(_SETMASTERAUTHREQUEST_ACTION)
_NODEPOOL_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='google.container.v1.NodePool.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROVISIONING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING_WITH_ERROR', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECONCILING', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOPPING', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=6, number=6,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7768,
serialized_end=7897,
)
_sym_db.RegisterEnumDescriptor(_NODEPOOL_STATUS)
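# The EnumDescriptors above (NetworkPolicy.Provider, Cluster.Status,
# Operation.Status, Operation.Type, SetMasterAuthRequest.Action,
# NodePool.Status) are all registered in the default symbol database.
# The Descriptors that follow describe the messages themselves, starting
# with NodeConfig and its nested map-entry types.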
_NODECONFIG_METADATAENTRY = _descriptor.Descriptor(
name='MetadataEntry',
full_name='google.container.v1.NodeConfig.MetadataEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.container.v1.NodeConfig.MetadataEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.container.v1.NodeConfig.MetadataEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=537,
serialized_end=584,
)
_NODECONFIG_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='google.container.v1.NodeConfig.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.container.v1.NodeConfig.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.container.v1.NodeConfig.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=586,
serialized_end=631,
)
_NODECONFIG = _descriptor.Descriptor(
name='NodeConfig',
full_name='google.container.v1.NodeConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='machine_type', full_name='google.container.v1.NodeConfig.machine_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disk_size_gb', full_name='google.container.v1.NodeConfig.disk_size_gb', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='oauth_scopes', full_name='google.container.v1.NodeConfig.oauth_scopes', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service_account', full_name='google.container.v1.NodeConfig.service_account', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='google.container.v1.NodeConfig.metadata', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_type', full_name='google.container.v1.NodeConfig.image_type', index=5,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='google.container.v1.NodeConfig.labels', index=6,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='local_ssd_count', full_name='google.container.v1.NodeConfig.local_ssd_count', index=7,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='google.container.v1.NodeConfig.tags', index=8,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preemptible', full_name='google.container.v1.NodeConfig.preemptible', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accelerators', full_name='google.container.v1.NodeConfig.accelerators', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_cpu_platform', full_name='google.container.v1.NodeConfig.min_cpu_platform', index=11,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_NODECONFIG_METADATAENTRY, _NODECONFIG_LABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=631,
)
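# NodeConfig's MetadataEntry and LabelsEntry above are the synthesized
# map-entry messages for its metadata and labels fields; the MessageOptions
# blob _b('8\001') on each sets map_entry=true, which is how the runtime
# knows to expose them as map<string, string> fields rather than as plain
# repeated messages.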
_MASTERAUTH = _descriptor.Descriptor(
name='MasterAuth',
full_name='google.container.v1.MasterAuth',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='google.container.v1.MasterAuth.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='password', full_name='google.container.v1.MasterAuth.password', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='client_certificate_config', full_name='google.container.v1.MasterAuth.client_certificate_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_ca_certificate', full_name='google.container.v1.MasterAuth.cluster_ca_certificate', index=3,
number=100, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='client_certificate', full_name='google.container.v1.MasterAuth.client_certificate', index=4,
number=101, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='client_key', full_name='google.container.v1.MasterAuth.client_key', index=5,
number=102, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=634,
serialized_end=843,
)
_CLIENTCERTIFICATECONFIG = _descriptor.Descriptor(
name='ClientCertificateConfig',
full_name='google.container.v1.ClientCertificateConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='issue_client_certificate', full_name='google.container.v1.ClientCertificateConfig.issue_client_certificate', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=845,
serialized_end=904,
)
_ADDONSCONFIG = _descriptor.Descriptor(
name='AddonsConfig',
full_name='google.container.v1.AddonsConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='http_load_balancing', full_name='google.container.v1.AddonsConfig.http_load_balancing', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='horizontal_pod_autoscaling', full_name='google.container.v1.AddonsConfig.horizontal_pod_autoscaling', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kubernetes_dashboard', full_name='google.container.v1.AddonsConfig.kubernetes_dashboard', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network_policy_config', full_name='google.container.v1.AddonsConfig.network_policy_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=907,
serialized_end=1218,
)
_HTTPLOADBALANCING = _descriptor.Descriptor(
name='HttpLoadBalancing',
full_name='google.container.v1.HttpLoadBalancing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disabled', full_name='google.container.v1.HttpLoadBalancing.disabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1220,
serialized_end=1257,
)
_HORIZONTALPODAUTOSCALING = _descriptor.Descriptor(
name='HorizontalPodAutoscaling',
full_name='google.container.v1.HorizontalPodAutoscaling',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disabled', full_name='google.container.v1.HorizontalPodAutoscaling.disabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1259,
serialized_end=1303,
)
_KUBERNETESDASHBOARD = _descriptor.Descriptor(
name='KubernetesDashboard',
full_name='google.container.v1.KubernetesDashboard',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disabled', full_name='google.container.v1.KubernetesDashboard.disabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1305,
serialized_end=1344,
)
_NETWORKPOLICYCONFIG = _descriptor.Descriptor(
name='NetworkPolicyConfig',
full_name='google.container.v1.NetworkPolicyConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disabled', full_name='google.container.v1.NetworkPolicyConfig.disabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1346,
serialized_end=1385,
)
_MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK = _descriptor.Descriptor(
name='CidrBlock',
full_name='google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='display_name', full_name='google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock.display_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cidr_block', full_name='google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock.cidr_block', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1523,
serialized_end=1576,
)
_MASTERAUTHORIZEDNETWORKSCONFIG = _descriptor.Descriptor(
name='MasterAuthorizedNetworksConfig',
full_name='google.container.v1.MasterAuthorizedNetworksConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='google.container.v1.MasterAuthorizedNetworksConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cidr_blocks', full_name='google.container.v1.MasterAuthorizedNetworksConfig.cidr_blocks', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1388,
serialized_end=1576,
)
_LEGACYABAC = _descriptor.Descriptor(
name='LegacyAbac',
full_name='google.container.v1.LegacyAbac',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='google.container.v1.LegacyAbac.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1578,
serialized_end=1607,
)
_NETWORKPOLICY = _descriptor.Descriptor(
name='NetworkPolicy',
full_name='google.container.v1.NetworkPolicy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='provider', full_name='google.container.v1.NetworkPolicy.provider', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enabled', full_name='google.container.v1.NetworkPolicy.enabled', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_NETWORKPOLICY_PROVIDER,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1610,
serialized_end=1755,
)
_IPALLOCATIONPOLICY = _descriptor.Descriptor(
name='IPAllocationPolicy',
full_name='google.container.v1.IPAllocationPolicy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='use_ip_aliases', full_name='google.container.v1.IPAllocationPolicy.use_ip_aliases', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create_subnetwork', full_name='google.container.v1.IPAllocationPolicy.create_subnetwork', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subnetwork_name', full_name='google.container.v1.IPAllocationPolicy.subnetwork_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_ipv4_cidr', full_name='google.container.v1.IPAllocationPolicy.cluster_ipv4_cidr', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_ipv4_cidr', full_name='google.container.v1.IPAllocationPolicy.node_ipv4_cidr', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='services_ipv4_cidr', full_name='google.container.v1.IPAllocationPolicy.services_ipv4_cidr', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_secondary_range_name', full_name='google.container.v1.IPAllocationPolicy.cluster_secondary_range_name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='services_secondary_range_name', full_name='google.container.v1.IPAllocationPolicy.services_secondary_range_name', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_ipv4_cidr_block', full_name='google.container.v1.IPAllocationPolicy.cluster_ipv4_cidr_block', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_ipv4_cidr_block', full_name='google.container.v1.IPAllocationPolicy.node_ipv4_cidr_block', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='services_ipv4_cidr_block', full_name='google.container.v1.IPAllocationPolicy.services_ipv4_cidr_block', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1758,
serialized_end=2107,
)
_CLUSTER_RESOURCELABELSENTRY = _descriptor.Descriptor(
name='ResourceLabelsEntry',
full_name='google.container.v1.Cluster.ResourceLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.container.v1.Cluster.ResourceLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.container.v1.Cluster.ResourceLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3400,
serialized_end=3453,
)
_CLUSTER = _descriptor.Descriptor(
name='Cluster',
full_name='google.container.v1.Cluster',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.container.v1.Cluster.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='google.container.v1.Cluster.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_node_count', full_name='google.container.v1.Cluster.initial_node_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_config', full_name='google.container.v1.Cluster.node_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='master_auth', full_name='google.container.v1.Cluster.master_auth', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logging_service', full_name='google.container.v1.Cluster.logging_service', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='monitoring_service', full_name='google.container.v1.Cluster.monitoring_service', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network', full_name='google.container.v1.Cluster.network', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_ipv4_cidr', full_name='google.container.v1.Cluster.cluster_ipv4_cidr', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='addons_config', full_name='google.container.v1.Cluster.addons_config', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subnetwork', full_name='google.container.v1.Cluster.subnetwork', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pools', full_name='google.container.v1.Cluster.node_pools', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locations', full_name='google.container.v1.Cluster.locations', index=12,
number=13, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_kubernetes_alpha', full_name='google.container.v1.Cluster.enable_kubernetes_alpha', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_labels', full_name='google.container.v1.Cluster.resource_labels', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_fingerprint', full_name='google.container.v1.Cluster.label_fingerprint', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='legacy_abac', full_name='google.container.v1.Cluster.legacy_abac', index=16,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network_policy', full_name='google.container.v1.Cluster.network_policy', index=17,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip_allocation_policy', full_name='google.container.v1.Cluster.ip_allocation_policy', index=18,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='master_authorized_networks_config', full_name='google.container.v1.Cluster.master_authorized_networks_config', index=19,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='maintenance_policy', full_name='google.container.v1.Cluster.maintenance_policy', index=20,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='self_link', full_name='google.container.v1.Cluster.self_link', index=21,
number=100, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.Cluster.zone', index=22,
number=101, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endpoint', full_name='google.container.v1.Cluster.endpoint', index=23,
number=102, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_cluster_version', full_name='google.container.v1.Cluster.initial_cluster_version', index=24,
number=103, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='current_master_version', full_name='google.container.v1.Cluster.current_master_version', index=25,
number=104, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='current_node_version', full_name='google.container.v1.Cluster.current_node_version', index=26,
number=105, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create_time', full_name='google.container.v1.Cluster.create_time', index=27,
number=106, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='google.container.v1.Cluster.status', index=28,
number=107, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_message', full_name='google.container.v1.Cluster.status_message', index=29,
number=108, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_ipv4_cidr_size', full_name='google.container.v1.Cluster.node_ipv4_cidr_size', index=30,
number=109, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='services_ipv4_cidr', full_name='google.container.v1.Cluster.services_ipv4_cidr', index=31,
number=110, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_group_urls', full_name='google.container.v1.Cluster.instance_group_urls', index=32,
number=111, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='current_node_count', full_name='google.container.v1.Cluster.current_node_count', index=33,
number=112, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='expire_time', full_name='google.container.v1.Cluster.expire_time', index=34,
number=113, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_CLUSTER_RESOURCELABELSENTRY, ],
enum_types=[
_CLUSTER_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2110,
serialized_end=3560,
)
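# Note on the generated FieldDescriptor entries above and below: the numeric
# codes follow descriptor.proto conventions (type 9 = string, 11 = message,
# 14 = enum, 5 = int32, 8 = bool; cpp_type 9 = string, 10 = message,
# 8 = enum, 1 = int32, 7 = bool; label 1 = optional, 3 = repeated), and
# serialized_start/serialized_end are byte offsets into the serialized
# FileDescriptorProto held by DESCRIPTOR, so none of these values should be
# edited by hand.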
_CLUSTERUPDATE = _descriptor.Descriptor(
name='ClusterUpdate',
full_name='google.container.v1.ClusterUpdate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='desired_node_version', full_name='google.container.v1.ClusterUpdate.desired_node_version', index=0,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_monitoring_service', full_name='google.container.v1.ClusterUpdate.desired_monitoring_service', index=1,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_addons_config', full_name='google.container.v1.ClusterUpdate.desired_addons_config', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_node_pool_id', full_name='google.container.v1.ClusterUpdate.desired_node_pool_id', index=3,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_image_type', full_name='google.container.v1.ClusterUpdate.desired_image_type', index=4,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_node_pool_autoscaling', full_name='google.container.v1.ClusterUpdate.desired_node_pool_autoscaling', index=5,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_locations', full_name='google.container.v1.ClusterUpdate.desired_locations', index=6,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_master_authorized_networks_config', full_name='google.container.v1.ClusterUpdate.desired_master_authorized_networks_config', index=7,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desired_master_version', full_name='google.container.v1.ClusterUpdate.desired_master_version', index=8,
number=100, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3563,
serialized_end=4012,
)
_OPERATION = _descriptor.Descriptor(
name='Operation',
full_name='google.container.v1.Operation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.container.v1.Operation.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.Operation.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation_type', full_name='google.container.v1.Operation.operation_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='google.container.v1.Operation.status', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detail', full_name='google.container.v1.Operation.detail', index=4,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_message', full_name='google.container.v1.Operation.status_message', index=5,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='self_link', full_name='google.container.v1.Operation.self_link', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_link', full_name='google.container.v1.Operation.target_link', index=7,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_time', full_name='google.container.v1.Operation.start_time', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_time', full_name='google.container.v1.Operation.end_time', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_OPERATION_STATUS,
_OPERATION_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4015,
serialized_end=4756,
)
_CREATECLUSTERREQUEST = _descriptor.Descriptor(
name='CreateClusterRequest',
full_name='google.container.v1.CreateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.CreateClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.CreateClusterRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster', full_name='google.container.v1.CreateClusterRequest.cluster', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4758,
serialized_end=4861,
)
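# Comment-only usage sketch for the CreateClusterRequest message described
# above (kept as comments so this generated module stays import-safe). It
# assumes the classic GAPIC wrapper google.cloud.container_v1; the exact
# client signature may differ between library versions:
#
#   from google.cloud import container_v1
#   client = container_v1.ClusterManagerClient()
#   cluster = container_v1.types.Cluster(name='demo', initial_node_count=3)
#   operation = client.create_cluster(
#       project_id='my-project', zone='us-central1-a', cluster=cluster)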
_GETCLUSTERREQUEST = _descriptor.Descriptor(
name='GetClusterRequest',
full_name='google.container.v1.GetClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.GetClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.GetClusterRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.GetClusterRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4863,
serialized_end=4936,
)
_UPDATECLUSTERREQUEST = _descriptor.Descriptor(
name='UpdateClusterRequest',
full_name='google.container.v1.UpdateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.UpdateClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.UpdateClusterRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.UpdateClusterRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='google.container.v1.UpdateClusterRequest.update', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4939,
serialized_end=5067,
)
_UPDATENODEPOOLREQUEST = _descriptor.Descriptor(
name='UpdateNodePoolRequest',
full_name='google.container.v1.UpdateNodePoolRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.UpdateNodePoolRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.UpdateNodePoolRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.UpdateNodePoolRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.UpdateNodePoolRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_version', full_name='google.container.v1.UpdateNodePoolRequest.node_version', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_type', full_name='google.container.v1.UpdateNodePoolRequest.image_type', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5070,
serialized_end=5211,
)
_SETNODEPOOLAUTOSCALINGREQUEST = _descriptor.Descriptor(
name='SetNodePoolAutoscalingRequest',
full_name='google.container.v1.SetNodePoolAutoscalingRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetNodePoolAutoscalingRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetNodePoolAutoscalingRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetNodePoolAutoscalingRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.SetNodePoolAutoscalingRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='autoscaling', full_name='google.container.v1.SetNodePoolAutoscalingRequest.autoscaling', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5214,
serialized_end=5384,
)
_SETLOGGINGSERVICEREQUEST = _descriptor.Descriptor(
name='SetLoggingServiceRequest',
full_name='google.container.v1.SetLoggingServiceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetLoggingServiceRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetLoggingServiceRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetLoggingServiceRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logging_service', full_name='google.container.v1.SetLoggingServiceRequest.logging_service', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5386,
serialized_end=5491,
)
_SETMONITORINGSERVICEREQUEST = _descriptor.Descriptor(
name='SetMonitoringServiceRequest',
full_name='google.container.v1.SetMonitoringServiceRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetMonitoringServiceRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetMonitoringServiceRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetMonitoringServiceRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='monitoring_service', full_name='google.container.v1.SetMonitoringServiceRequest.monitoring_service', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5493,
serialized_end=5604,
)
_SETADDONSCONFIGREQUEST = _descriptor.Descriptor(
name='SetAddonsConfigRequest',
full_name='google.container.v1.SetAddonsConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetAddonsConfigRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetAddonsConfigRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetAddonsConfigRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='addons_config', full_name='google.container.v1.SetAddonsConfigRequest.addons_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5607,
serialized_end=5743,
)
_SETLOCATIONSREQUEST = _descriptor.Descriptor(
name='SetLocationsRequest',
full_name='google.container.v1.SetLocationsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetLocationsRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetLocationsRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetLocationsRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locations', full_name='google.container.v1.SetLocationsRequest.locations', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5745,
serialized_end=5839,
)
_UPDATEMASTERREQUEST = _descriptor.Descriptor(
name='UpdateMasterRequest',
full_name='google.container.v1.UpdateMasterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.UpdateMasterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.UpdateMasterRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.UpdateMasterRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='master_version', full_name='google.container.v1.UpdateMasterRequest.master_version', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5841,
serialized_end=5940,
)
_SETMASTERAUTHREQUEST = _descriptor.Descriptor(
name='SetMasterAuthRequest',
full_name='google.container.v1.SetMasterAuthRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetMasterAuthRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetMasterAuthRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetMasterAuthRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action', full_name='google.container.v1.SetMasterAuthRequest.action', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='google.container.v1.SetMasterAuthRequest.update', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_SETMASTERAUTHREQUEST_ACTION,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5943,
serialized_end=6216,
)
_DELETECLUSTERREQUEST = _descriptor.Descriptor(
name='DeleteClusterRequest',
full_name='google.container.v1.DeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.DeleteClusterRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.DeleteClusterRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.DeleteClusterRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6218,
serialized_end=6294,
)
_LISTCLUSTERSREQUEST = _descriptor.Descriptor(
name='ListClustersRequest',
full_name='google.container.v1.ListClustersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.ListClustersRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.ListClustersRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6296,
serialized_end=6351,
)
_LISTCLUSTERSRESPONSE = _descriptor.Descriptor(
name='ListClustersResponse',
full_name='google.container.v1.ListClustersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clusters', full_name='google.container.v1.ListClustersResponse.clusters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='missing_zones', full_name='google.container.v1.ListClustersResponse.missing_zones', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6353,
serialized_end=6446,
)
_GETOPERATIONREQUEST = _descriptor.Descriptor(
name='GetOperationRequest',
full_name='google.container.v1.GetOperationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.GetOperationRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.GetOperationRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation_id', full_name='google.container.v1.GetOperationRequest.operation_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6448,
serialized_end=6525,
)
_LISTOPERATIONSREQUEST = _descriptor.Descriptor(
name='ListOperationsRequest',
full_name='google.container.v1.ListOperationsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.ListOperationsRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.ListOperationsRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6527,
serialized_end=6584,
)
_CANCELOPERATIONREQUEST = _descriptor.Descriptor(
name='CancelOperationRequest',
full_name='google.container.v1.CancelOperationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.CancelOperationRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.CancelOperationRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation_id', full_name='google.container.v1.CancelOperationRequest.operation_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6586,
serialized_end=6666,
)
_LISTOPERATIONSRESPONSE = _descriptor.Descriptor(
name='ListOperationsResponse',
full_name='google.container.v1.ListOperationsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operations', full_name='google.container.v1.ListOperationsResponse.operations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='missing_zones', full_name='google.container.v1.ListOperationsResponse.missing_zones', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6668,
serialized_end=6767,
)
_GETSERVERCONFIGREQUEST = _descriptor.Descriptor(
name='GetServerConfigRequest',
full_name='google.container.v1.GetServerConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.GetServerConfigRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.GetServerConfigRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6769,
serialized_end=6827,
)
_SERVERCONFIG = _descriptor.Descriptor(
name='ServerConfig',
full_name='google.container.v1.ServerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='default_cluster_version', full_name='google.container.v1.ServerConfig.default_cluster_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='valid_node_versions', full_name='google.container.v1.ServerConfig.valid_node_versions', index=1,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_image_type', full_name='google.container.v1.ServerConfig.default_image_type', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='valid_image_types', full_name='google.container.v1.ServerConfig.valid_image_types', index=3,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='valid_master_versions', full_name='google.container.v1.ServerConfig.valid_master_versions', index=4,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6830,
serialized_end=6992,
)
_CREATENODEPOOLREQUEST = _descriptor.Descriptor(
name='CreateNodePoolRequest',
full_name='google.container.v1.CreateNodePoolRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.CreateNodePoolRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.CreateNodePoolRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.CreateNodePoolRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool', full_name='google.container.v1.CreateNodePoolRequest.node_pool', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6994,
serialized_end=7121,
)
_DELETENODEPOOLREQUEST = _descriptor.Descriptor(
name='DeleteNodePoolRequest',
full_name='google.container.v1.DeleteNodePoolRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.DeleteNodePoolRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.DeleteNodePoolRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.DeleteNodePoolRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.DeleteNodePoolRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7123,
serialized_end=7222,
)
_LISTNODEPOOLSREQUEST = _descriptor.Descriptor(
name='ListNodePoolsRequest',
full_name='google.container.v1.ListNodePoolsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.ListNodePoolsRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.ListNodePoolsRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.ListNodePoolsRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7224,
serialized_end=7300,
)
_GETNODEPOOLREQUEST = _descriptor.Descriptor(
name='GetNodePoolRequest',
full_name='google.container.v1.GetNodePoolRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.GetNodePoolRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.GetNodePoolRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.GetNodePoolRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.GetNodePoolRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7302,
serialized_end=7398,
)
_NODEPOOL = _descriptor.Descriptor(
name='NodePool',
full_name='google.container.v1.NodePool',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.container.v1.NodePool.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config', full_name='google.container.v1.NodePool.config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_node_count', full_name='google.container.v1.NodePool.initial_node_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='self_link', full_name='google.container.v1.NodePool.self_link', index=3,
number=100, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='google.container.v1.NodePool.version', index=4,
number=101, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instance_group_urls', full_name='google.container.v1.NodePool.instance_group_urls', index=5,
number=102, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='google.container.v1.NodePool.status', index=6,
number=103, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_message', full_name='google.container.v1.NodePool.status_message', index=7,
number=104, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='autoscaling', full_name='google.container.v1.NodePool.autoscaling', index=8,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='management', full_name='google.container.v1.NodePool.management', index=9,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_NODEPOOL_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7401,
serialized_end=7897,
)
_NODEMANAGEMENT = _descriptor.Descriptor(
name='NodeManagement',
full_name='google.container.v1.NodeManagement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='auto_upgrade', full_name='google.container.v1.NodeManagement.auto_upgrade', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auto_repair', full_name='google.container.v1.NodeManagement.auto_repair', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upgrade_options', full_name='google.container.v1.NodeManagement.upgrade_options', index=2,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7899,
serialized_end=8024,
)
_AUTOUPGRADEOPTIONS = _descriptor.Descriptor(
name='AutoUpgradeOptions',
full_name='google.container.v1.AutoUpgradeOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='auto_upgrade_start_time', full_name='google.container.v1.AutoUpgradeOptions.auto_upgrade_start_time', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='google.container.v1.AutoUpgradeOptions.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8026,
serialized_end=8100,
)
_MAINTENANCEPOLICY = _descriptor.Descriptor(
name='MaintenancePolicy',
full_name='google.container.v1.MaintenancePolicy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window', full_name='google.container.v1.MaintenancePolicy.window', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8102,
serialized_end=8177,
)
_MAINTENANCEWINDOW = _descriptor.Descriptor(
name='MaintenanceWindow',
full_name='google.container.v1.MaintenanceWindow',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='daily_maintenance_window', full_name='google.container.v1.MaintenanceWindow.daily_maintenance_window', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='policy', full_name='google.container.v1.MaintenanceWindow.policy',
index=0, containing_type=None, fields=[]),
],
serialized_start=8179,
serialized_end=8289,
)
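# MaintenanceWindow declares a single oneof named 'policy' whose only member
# in this version is daily_maintenance_window; on generated message instances
# WhichOneof('policy') reports whether that variant has been set.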
_DAILYMAINTENANCEWINDOW = _descriptor.Descriptor(
name='DailyMaintenanceWindow',
full_name='google.container.v1.DailyMaintenanceWindow',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start_time', full_name='google.container.v1.DailyMaintenanceWindow.start_time', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='duration', full_name='google.container.v1.DailyMaintenanceWindow.duration', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8291,
serialized_end=8353,
)
_SETNODEPOOLMANAGEMENTREQUEST = _descriptor.Descriptor(
name='SetNodePoolManagementRequest',
full_name='google.container.v1.SetNodePoolManagementRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetNodePoolManagementRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetNodePoolManagementRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetNodePoolManagementRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.SetNodePoolManagementRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='management', full_name='google.container.v1.SetNodePoolManagementRequest.management', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8356,
serialized_end=8519,
)
_SETNODEPOOLSIZEREQUEST = _descriptor.Descriptor(
name='SetNodePoolSizeRequest',
full_name='google.container.v1.SetNodePoolSizeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetNodePoolSizeRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetNodePoolSizeRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetNodePoolSizeRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.SetNodePoolSizeRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_count', full_name='google.container.v1.SetNodePoolSizeRequest.node_count', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8521,
serialized_end=8641,
)
_ROLLBACKNODEPOOLUPGRADEREQUEST = _descriptor.Descriptor(
name='RollbackNodePoolUpgradeRequest',
full_name='google.container.v1.RollbackNodePoolUpgradeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.RollbackNodePoolUpgradeRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.RollbackNodePoolUpgradeRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.RollbackNodePoolUpgradeRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='node_pool_id', full_name='google.container.v1.RollbackNodePoolUpgradeRequest.node_pool_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8643,
serialized_end=8751,
)
_LISTNODEPOOLSRESPONSE = _descriptor.Descriptor(
name='ListNodePoolsResponse',
full_name='google.container.v1.ListNodePoolsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_pools', full_name='google.container.v1.ListNodePoolsResponse.node_pools', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8753,
serialized_end=8827,
)
_NODEPOOLAUTOSCALING = _descriptor.Descriptor(
name='NodePoolAutoscaling',
full_name='google.container.v1.NodePoolAutoscaling',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='google.container.v1.NodePoolAutoscaling.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_node_count', full_name='google.container.v1.NodePoolAutoscaling.min_node_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_node_count', full_name='google.container.v1.NodePoolAutoscaling.max_node_count', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8829,
serialized_end=8915,
)
_SETLABELSREQUEST_RESOURCELABELSENTRY = _descriptor.Descriptor(
name='ResourceLabelsEntry',
full_name='google.container.v1.SetLabelsRequest.ResourceLabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.container.v1.SetLabelsRequest.ResourceLabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='google.container.v1.SetLabelsRequest.ResourceLabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3400,
serialized_end=3453,
)
_SETLABELSREQUEST = _descriptor.Descriptor(
name='SetLabelsRequest',
full_name='google.container.v1.SetLabelsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetLabelsRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetLabelsRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetLabelsRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_labels', full_name='google.container.v1.SetLabelsRequest.resource_labels', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_fingerprint', full_name='google.container.v1.SetLabelsRequest.label_fingerprint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SETLABELSREQUEST_RESOURCELABELSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8918,
serialized_end=9156,
)
_SETLEGACYABACREQUEST = _descriptor.Descriptor(
name='SetLegacyAbacRequest',
full_name='google.container.v1.SetLegacyAbacRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetLegacyAbacRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetLegacyAbacRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetLegacyAbacRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enabled', full_name='google.container.v1.SetLegacyAbacRequest.enabled', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9158,
serialized_end=9251,
)
_STARTIPROTATIONREQUEST = _descriptor.Descriptor(
name='StartIPRotationRequest',
full_name='google.container.v1.StartIPRotationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.StartIPRotationRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.StartIPRotationRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.StartIPRotationRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9253,
serialized_end=9331,
)
_COMPLETEIPROTATIONREQUEST = _descriptor.Descriptor(
name='CompleteIPRotationRequest',
full_name='google.container.v1.CompleteIPRotationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.CompleteIPRotationRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.CompleteIPRotationRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.CompleteIPRotationRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9333,
serialized_end=9414,
)
_ACCELERATORCONFIG = _descriptor.Descriptor(
name='AcceleratorConfig',
full_name='google.container.v1.AcceleratorConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='accelerator_count', full_name='google.container.v1.AcceleratorConfig.accelerator_count', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accelerator_type', full_name='google.container.v1.AcceleratorConfig.accelerator_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9416,
serialized_end=9488,
)
_SETNETWORKPOLICYREQUEST = _descriptor.Descriptor(
name='SetNetworkPolicyRequest',
full_name='google.container.v1.SetNetworkPolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetNetworkPolicyRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetNetworkPolicyRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetNetworkPolicyRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='network_policy', full_name='google.container.v1.SetNetworkPolicyRequest.network_policy', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9491,
serialized_end=9630,
)
_SETMAINTENANCEPOLICYREQUEST = _descriptor.Descriptor(
name='SetMaintenancePolicyRequest',
full_name='google.container.v1.SetMaintenancePolicyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='project_id', full_name='google.container.v1.SetMaintenancePolicyRequest.project_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zone', full_name='google.container.v1.SetMaintenancePolicyRequest.zone', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.container.v1.SetMaintenancePolicyRequest.cluster_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='maintenance_policy', full_name='google.container.v1.SetMaintenancePolicyRequest.maintenance_policy', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9633,
serialized_end=9784,
)
_NODECONFIG_METADATAENTRY.containing_type = _NODECONFIG
_NODECONFIG_LABELSENTRY.containing_type = _NODECONFIG
_NODECONFIG.fields_by_name['metadata'].message_type = _NODECONFIG_METADATAENTRY
_NODECONFIG.fields_by_name['labels'].message_type = _NODECONFIG_LABELSENTRY
_NODECONFIG.fields_by_name['accelerators'].message_type = _ACCELERATORCONFIG
_MASTERAUTH.fields_by_name['client_certificate_config'].message_type = _CLIENTCERTIFICATECONFIG
_ADDONSCONFIG.fields_by_name['http_load_balancing'].message_type = _HTTPLOADBALANCING
_ADDONSCONFIG.fields_by_name['horizontal_pod_autoscaling'].message_type = _HORIZONTALPODAUTOSCALING
_ADDONSCONFIG.fields_by_name['kubernetes_dashboard'].message_type = _KUBERNETESDASHBOARD
_ADDONSCONFIG.fields_by_name['network_policy_config'].message_type = _NETWORKPOLICYCONFIG
_MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK.containing_type = _MASTERAUTHORIZEDNETWORKSCONFIG
_MASTERAUTHORIZEDNETWORKSCONFIG.fields_by_name['cidr_blocks'].message_type = _MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK
_NETWORKPOLICY.fields_by_name['provider'].enum_type = _NETWORKPOLICY_PROVIDER
_NETWORKPOLICY_PROVIDER.containing_type = _NETWORKPOLICY
_CLUSTER_RESOURCELABELSENTRY.containing_type = _CLUSTER
_CLUSTER.fields_by_name['node_config'].message_type = _NODECONFIG
_CLUSTER.fields_by_name['master_auth'].message_type = _MASTERAUTH
_CLUSTER.fields_by_name['addons_config'].message_type = _ADDONSCONFIG
_CLUSTER.fields_by_name['node_pools'].message_type = _NODEPOOL
_CLUSTER.fields_by_name['resource_labels'].message_type = _CLUSTER_RESOURCELABELSENTRY
_CLUSTER.fields_by_name['legacy_abac'].message_type = _LEGACYABAC
_CLUSTER.fields_by_name['network_policy'].message_type = _NETWORKPOLICY
_CLUSTER.fields_by_name['ip_allocation_policy'].message_type = _IPALLOCATIONPOLICY
_CLUSTER.fields_by_name['master_authorized_networks_config'].message_type = _MASTERAUTHORIZEDNETWORKSCONFIG
_CLUSTER.fields_by_name['maintenance_policy'].message_type = _MAINTENANCEPOLICY
_CLUSTER.fields_by_name['status'].enum_type = _CLUSTER_STATUS
_CLUSTER_STATUS.containing_type = _CLUSTER
_CLUSTERUPDATE.fields_by_name['desired_addons_config'].message_type = _ADDONSCONFIG
_CLUSTERUPDATE.fields_by_name['desired_node_pool_autoscaling'].message_type = _NODEPOOLAUTOSCALING
_CLUSTERUPDATE.fields_by_name['desired_master_authorized_networks_config'].message_type = _MASTERAUTHORIZEDNETWORKSCONFIG
_OPERATION.fields_by_name['operation_type'].enum_type = _OPERATION_TYPE
_OPERATION.fields_by_name['status'].enum_type = _OPERATION_STATUS
_OPERATION_STATUS.containing_type = _OPERATION
_OPERATION_TYPE.containing_type = _OPERATION
_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = _CLUSTER
_UPDATECLUSTERREQUEST.fields_by_name['update'].message_type = _CLUSTERUPDATE
_SETNODEPOOLAUTOSCALINGREQUEST.fields_by_name['autoscaling'].message_type = _NODEPOOLAUTOSCALING
_SETADDONSCONFIGREQUEST.fields_by_name['addons_config'].message_type = _ADDONSCONFIG
_SETMASTERAUTHREQUEST.fields_by_name['action'].enum_type = _SETMASTERAUTHREQUEST_ACTION
_SETMASTERAUTHREQUEST.fields_by_name['update'].message_type = _MASTERAUTH
_SETMASTERAUTHREQUEST_ACTION.containing_type = _SETMASTERAUTHREQUEST
_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = _CLUSTER
_LISTOPERATIONSRESPONSE.fields_by_name['operations'].message_type = _OPERATION
_CREATENODEPOOLREQUEST.fields_by_name['node_pool'].message_type = _NODEPOOL
_NODEPOOL.fields_by_name['config'].message_type = _NODECONFIG
_NODEPOOL.fields_by_name['status'].enum_type = _NODEPOOL_STATUS
_NODEPOOL.fields_by_name['autoscaling'].message_type = _NODEPOOLAUTOSCALING
_NODEPOOL.fields_by_name['management'].message_type = _NODEMANAGEMENT
_NODEPOOL_STATUS.containing_type = _NODEPOOL
_NODEMANAGEMENT.fields_by_name['upgrade_options'].message_type = _AUTOUPGRADEOPTIONS
_MAINTENANCEPOLICY.fields_by_name['window'].message_type = _MAINTENANCEWINDOW
_MAINTENANCEWINDOW.fields_by_name['daily_maintenance_window'].message_type = _DAILYMAINTENANCEWINDOW
_MAINTENANCEWINDOW.oneofs_by_name['policy'].fields.append(
_MAINTENANCEWINDOW.fields_by_name['daily_maintenance_window'])
_MAINTENANCEWINDOW.fields_by_name['daily_maintenance_window'].containing_oneof = _MAINTENANCEWINDOW.oneofs_by_name['policy']
_SETNODEPOOLMANAGEMENTREQUEST.fields_by_name['management'].message_type = _NODEMANAGEMENT
_LISTNODEPOOLSRESPONSE.fields_by_name['node_pools'].message_type = _NODEPOOL
_SETLABELSREQUEST_RESOURCELABELSENTRY.containing_type = _SETLABELSREQUEST
_SETLABELSREQUEST.fields_by_name['resource_labels'].message_type = _SETLABELSREQUEST_RESOURCELABELSENTRY
_SETNETWORKPOLICYREQUEST.fields_by_name['network_policy'].message_type = _NETWORKPOLICY
_SETMAINTENANCEPOLICYREQUEST.fields_by_name['maintenance_policy'].message_type = _MAINTENANCEPOLICY
DESCRIPTOR.message_types_by_name['NodeConfig'] = _NODECONFIG
DESCRIPTOR.message_types_by_name['MasterAuth'] = _MASTERAUTH
DESCRIPTOR.message_types_by_name['ClientCertificateConfig'] = _CLIENTCERTIFICATECONFIG
DESCRIPTOR.message_types_by_name['AddonsConfig'] = _ADDONSCONFIG
DESCRIPTOR.message_types_by_name['HttpLoadBalancing'] = _HTTPLOADBALANCING
DESCRIPTOR.message_types_by_name['HorizontalPodAutoscaling'] = _HORIZONTALPODAUTOSCALING
DESCRIPTOR.message_types_by_name['KubernetesDashboard'] = _KUBERNETESDASHBOARD
DESCRIPTOR.message_types_by_name['NetworkPolicyConfig'] = _NETWORKPOLICYCONFIG
DESCRIPTOR.message_types_by_name['MasterAuthorizedNetworksConfig'] = _MASTERAUTHORIZEDNETWORKSCONFIG
DESCRIPTOR.message_types_by_name['LegacyAbac'] = _LEGACYABAC
DESCRIPTOR.message_types_by_name['NetworkPolicy'] = _NETWORKPOLICY
DESCRIPTOR.message_types_by_name['IPAllocationPolicy'] = _IPALLOCATIONPOLICY
DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER
DESCRIPTOR.message_types_by_name['ClusterUpdate'] = _CLUSTERUPDATE
DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UpdateClusterRequest'] = _UPDATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UpdateNodePoolRequest'] = _UPDATENODEPOOLREQUEST
DESCRIPTOR.message_types_by_name['SetNodePoolAutoscalingRequest'] = _SETNODEPOOLAUTOSCALINGREQUEST
DESCRIPTOR.message_types_by_name['SetLoggingServiceRequest'] = _SETLOGGINGSERVICEREQUEST
DESCRIPTOR.message_types_by_name['SetMonitoringServiceRequest'] = _SETMONITORINGSERVICEREQUEST
DESCRIPTOR.message_types_by_name['SetAddonsConfigRequest'] = _SETADDONSCONFIGREQUEST
DESCRIPTOR.message_types_by_name['SetLocationsRequest'] = _SETLOCATIONSREQUEST
DESCRIPTOR.message_types_by_name['UpdateMasterRequest'] = _UPDATEMASTERREQUEST
DESCRIPTOR.message_types_by_name['SetMasterAuthRequest'] = _SETMASTERAUTHREQUEST
DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST
DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE
DESCRIPTOR.message_types_by_name['GetOperationRequest'] = _GETOPERATIONREQUEST
DESCRIPTOR.message_types_by_name['ListOperationsRequest'] = _LISTOPERATIONSREQUEST
DESCRIPTOR.message_types_by_name['CancelOperationRequest'] = _CANCELOPERATIONREQUEST
DESCRIPTOR.message_types_by_name['ListOperationsResponse'] = _LISTOPERATIONSRESPONSE
DESCRIPTOR.message_types_by_name['GetServerConfigRequest'] = _GETSERVERCONFIGREQUEST
DESCRIPTOR.message_types_by_name['ServerConfig'] = _SERVERCONFIG
DESCRIPTOR.message_types_by_name['CreateNodePoolRequest'] = _CREATENODEPOOLREQUEST
DESCRIPTOR.message_types_by_name['DeleteNodePoolRequest'] = _DELETENODEPOOLREQUEST
DESCRIPTOR.message_types_by_name['ListNodePoolsRequest'] = _LISTNODEPOOLSREQUEST
DESCRIPTOR.message_types_by_name['GetNodePoolRequest'] = _GETNODEPOOLREQUEST
DESCRIPTOR.message_types_by_name['NodePool'] = _NODEPOOL
DESCRIPTOR.message_types_by_name['NodeManagement'] = _NODEMANAGEMENT
DESCRIPTOR.message_types_by_name['AutoUpgradeOptions'] = _AUTOUPGRADEOPTIONS
DESCRIPTOR.message_types_by_name['MaintenancePolicy'] = _MAINTENANCEPOLICY
DESCRIPTOR.message_types_by_name['MaintenanceWindow'] = _MAINTENANCEWINDOW
DESCRIPTOR.message_types_by_name['DailyMaintenanceWindow'] = _DAILYMAINTENANCEWINDOW
DESCRIPTOR.message_types_by_name['SetNodePoolManagementRequest'] = _SETNODEPOOLMANAGEMENTREQUEST
DESCRIPTOR.message_types_by_name['SetNodePoolSizeRequest'] = _SETNODEPOOLSIZEREQUEST
DESCRIPTOR.message_types_by_name['RollbackNodePoolUpgradeRequest'] = _ROLLBACKNODEPOOLUPGRADEREQUEST
DESCRIPTOR.message_types_by_name['ListNodePoolsResponse'] = _LISTNODEPOOLSRESPONSE
DESCRIPTOR.message_types_by_name['NodePoolAutoscaling'] = _NODEPOOLAUTOSCALING
DESCRIPTOR.message_types_by_name['SetLabelsRequest'] = _SETLABELSREQUEST
DESCRIPTOR.message_types_by_name['SetLegacyAbacRequest'] = _SETLEGACYABACREQUEST
DESCRIPTOR.message_types_by_name['StartIPRotationRequest'] = _STARTIPROTATIONREQUEST
DESCRIPTOR.message_types_by_name['CompleteIPRotationRequest'] = _COMPLETEIPROTATIONREQUEST
DESCRIPTOR.message_types_by_name['AcceleratorConfig'] = _ACCELERATORCONFIG
DESCRIPTOR.message_types_by_name['SetNetworkPolicyRequest'] = _SETNETWORKPOLICYREQUEST
DESCRIPTOR.message_types_by_name['SetMaintenancePolicyRequest'] = _SETMAINTENANCEPOLICYREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NodeConfig = _reflection.GeneratedProtocolMessageType('NodeConfig', (_message.Message,), dict(
MetadataEntry = _reflection.GeneratedProtocolMessageType('MetadataEntry', (_message.Message,), dict(
DESCRIPTOR = _NODECONFIG_METADATAENTRY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
# @@protoc_insertion_point(class_scope:google.container.v1.NodeConfig.MetadataEntry)
))
,
LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _NODECONFIG_LABELSENTRY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
# @@protoc_insertion_point(class_scope:google.container.v1.NodeConfig.LabelsEntry)
))
,
DESCRIPTOR = _NODECONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Parameters that describe the nodes in a cluster.
Attributes:
machine_type:
The name of a Google Compute Engine `machine type
</compute/docs/machine-types>`__ (e.g. ``n1-standard-1``). If
unspecified, the default machine type is ``n1-standard-1``.
disk_size_gb:
Size of the disk attached to each node, specified in GB. The
smallest allowed disk size is 10GB. If unspecified, the
default disk size is 100GB.
oauth_scopes:
The set of Google API scopes to be made available on all of
the node VMs under the "default" service account. The
following scopes are recommended, but not required, and by
default are not included: -
``https://www.googleapis.com/auth/compute`` is required for
mounting persistent storage on your nodes. -
``https://www.googleapis.com/auth/devstorage.read_only`` is
required for communicating with **gcr.io** (the `Google
Container Registry </container-registry/>`__). If
unspecified, no scopes are added, unless Cloud Logging or
Cloud Monitoring are enabled, in which case their required
scopes will be added.
service_account:
The Google Cloud Platform Service Account to be used by the
node VMs. If no Service Account is specified, the "default"
service account is used.
metadata:
The metadata key/value pairs assigned to instances in the
cluster. Keys must conform to the regexp [a-zA-Z0-9-\_]+ and
be less than 128 bytes in length. These are reflected as part
of a URL in the metadata server. Additionally, to avoid
ambiguity, keys must not conflict with any other metadata keys
for the project or be one of the four reserved keys:
"instance-template", "kube-env", "startup-script", and "user-
data" Values are free-form strings, and only have meaning as
interpreted by the image running in the instance. The only
restriction placed on them is that each value's size must be
less than or equal to 32 KB. The total size of all keys and
values must be less than 512 KB.
image_type:
The image type to use for this node. Note that for a given
image type, the latest version of it will be used.
labels:
The map of Kubernetes labels (key/value pairs) to be applied
to each node. These will be added in addition to any default
label(s) that Kubernetes may apply to the node. In case of
conflict in label keys, the applied set may differ depending
on the Kubernetes version -- it's best to assume the behavior
is undefined and conflicts should be avoided. For more
information, including usage and the valid values, see:
https://kubernetes.io/docs/concepts/overview/working-with-
objects/labels/
local_ssd_count:
The number of local SSD disks to be attached to the node. The
limit for this value is dependent upon the maximum number of
disks available on a machine per zone. See:
https://cloud.google.com/compute/docs/disks/local-
ssd#local\_ssd\_limits for more information.
tags:
The list of instance tags applied to all nodes. Tags are used
to identify valid sources or targets for network firewalls and
are specified by the client during cluster or node pool
creation. Each tag within the list must comply with RFC1035.
preemptible:
Whether the nodes are created as preemptible VM instances.
See:
https://cloud.google.com/compute/docs/instances/preemptible
for more information about preemptible VM instances.
accelerators:
A list of hardware accelerators to be attached to each node.
See https://cloud.google.com/compute/docs/gpus for more
information about support for GPUs.
min_cpu_platform:
Minimum CPU platform to be used by this instance. The instance
may be scheduled on the specified or newer CPU platform.
Applicable values are the friendly names of CPU platforms,
such as minCpuPlatform: "Intel Haswell" or minCpuPlatform:
"Intel Sandy Bridge". For more information, read `how to
specify min CPU platform
<https://cloud.google.com/compute/docs/instances/specify-min-
cpu-platform>`__
""",
# @@protoc_insertion_point(class_scope:google.container.v1.NodeConfig)
))
_sym_db.RegisterMessage(NodeConfig)
_sym_db.RegisterMessage(NodeConfig.MetadataEntry)
_sym_db.RegisterMessage(NodeConfig.LabelsEntry)
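# Illustrative sketch only (not part of the generated descriptors): one way to
# construct the NodeConfig message registered above. The concrete field values
# below are hypothetical.
def _example_node_config():
    return NodeConfig(
        machine_type='n1-standard-1',   # Compute Engine machine type
        disk_size_gb=100,               # boot disk size per node, in GB
        labels={'env': 'test'},         # Kubernetes labels applied to each node
        preemptible=False,              # regular (non-preemptible) VM instances
    )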
MasterAuth = _reflection.GeneratedProtocolMessageType('MasterAuth', (_message.Message,), dict(
DESCRIPTOR = _MASTERAUTH,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """The authentication information for accessing the master endpoint.
Authentication can be done using HTTP basic auth or using client
certificates.
Attributes:
username:
The username to use for HTTP basic authentication to the
master endpoint. For clusters v1.6.0 and later, you can
disable basic authentication by providing an empty username.
password:
The password to use for HTTP basic authentication to the
master endpoint. Because the master endpoint is open to the
Internet, you should create a strong password. If a password
is provided for cluster creation, username must be non-empty.
client_certificate_config:
Configuration for client certificate authentication on the
cluster. If no configuration is specified, a client
certificate is issued.
cluster_ca_certificate:
[Output only] Base64-encoded public certificate that is the
root of trust for the cluster.
client_certificate:
[Output only] Base64-encoded public certificate used by
clients to authenticate to the cluster endpoint.
client_key:
[Output only] Base64-encoded private key used by clients to
authenticate to the cluster endpoint.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.MasterAuth)
))
_sym_db.RegisterMessage(MasterAuth)
ClientCertificateConfig = _reflection.GeneratedProtocolMessageType('ClientCertificateConfig', (_message.Message,), dict(
DESCRIPTOR = _CLIENTCERTIFICATECONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration for client certificates on the cluster.
Attributes:
issue_client_certificate:
Issue a client certificate.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ClientCertificateConfig)
))
_sym_db.RegisterMessage(ClientCertificateConfig)
AddonsConfig = _reflection.GeneratedProtocolMessageType('AddonsConfig', (_message.Message,), dict(
DESCRIPTOR = _ADDONSCONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration for the addons that can be automatically spun up in the
cluster, enabling additional functionality.
Attributes:
http_load_balancing:
Configuration for the HTTP (L7) load balancing controller
addon, which makes it easy to set up HTTP load balancers for
services in a cluster.
horizontal_pod_autoscaling:
Configuration for the horizontal pod autoscaling feature,
which increases or decreases the number of replica pods a
replication controller has based on the resource usage of the
existing pods.
kubernetes_dashboard:
Configuration for the Kubernetes Dashboard.
network_policy_config:
Configuration for NetworkPolicy. This only tracks whether the
addon is enabled or not on the Master, it does not track
whether network policy is enabled for the nodes.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.AddonsConfig)
))
_sym_db.RegisterMessage(AddonsConfig)
HttpLoadBalancing = _reflection.GeneratedProtocolMessageType('HttpLoadBalancing', (_message.Message,), dict(
DESCRIPTOR = _HTTPLOADBALANCING,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration options for the HTTP (L7) load balancing controller addon,
which makes it easy to set up HTTP load balancers for services in a
cluster.
Attributes:
disabled:
Whether the HTTP Load Balancing controller is enabled in the
cluster. When enabled, it runs a small pod in the cluster that
manages the load balancers.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.HttpLoadBalancing)
))
_sym_db.RegisterMessage(HttpLoadBalancing)
HorizontalPodAutoscaling = _reflection.GeneratedProtocolMessageType('HorizontalPodAutoscaling', (_message.Message,), dict(
DESCRIPTOR = _HORIZONTALPODAUTOSCALING,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration options for the horizontal pod autoscaling feature, which
increases or decreases the number of replica pods a replication
controller has based on the resource usage of the existing pods.
Attributes:
disabled:
Whether the Horizontal Pod Autoscaling feature is enabled in
the cluster. When enabled, it ensures that a Heapster pod is
running in the cluster, which is also used by the Cloud
Monitoring service.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.HorizontalPodAutoscaling)
))
_sym_db.RegisterMessage(HorizontalPodAutoscaling)
KubernetesDashboard = _reflection.GeneratedProtocolMessageType('KubernetesDashboard', (_message.Message,), dict(
DESCRIPTOR = _KUBERNETESDASHBOARD,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration for the Kubernetes Dashboard.
Attributes:
disabled:
Whether the Kubernetes Dashboard is enabled for this cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.KubernetesDashboard)
))
_sym_db.RegisterMessage(KubernetesDashboard)
NetworkPolicyConfig = _reflection.GeneratedProtocolMessageType('NetworkPolicyConfig', (_message.Message,), dict(
DESCRIPTOR = _NETWORKPOLICYCONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration for NetworkPolicy. This only tracks whether the addon is
enabled or not on the Master, it does not track whether network policy
is enabled for the nodes.
Attributes:
disabled:
Whether NetworkPolicy is enabled for this cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.NetworkPolicyConfig)
))
_sym_db.RegisterMessage(NetworkPolicyConfig)
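# Illustrative sketch only: composing an AddonsConfig from the addon messages
# registered above; each addon is toggled through its "disabled" flag.
def _example_addons_config():
    return AddonsConfig(
        http_load_balancing=HttpLoadBalancing(disabled=False),
        horizontal_pod_autoscaling=HorizontalPodAutoscaling(disabled=False),
        kubernetes_dashboard=KubernetesDashboard(disabled=True),
        network_policy_config=NetworkPolicyConfig(disabled=True),
    )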
MasterAuthorizedNetworksConfig = _reflection.GeneratedProtocolMessageType('MasterAuthorizedNetworksConfig', (_message.Message,), dict(
CidrBlock = _reflection.GeneratedProtocolMessageType('CidrBlock', (_message.Message,), dict(
DESCRIPTOR = _MASTERAUTHORIZEDNETWORKSCONFIG_CIDRBLOCK,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """CidrBlock contains an optional name and one CIDR block.
Attributes:
display_name:
display\_name is an optional field for users to identify CIDR
blocks.
cidr_block:
cidr\_block must be specified in CIDR notation.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock)
))
,
DESCRIPTOR = _MASTERAUTHORIZEDNETWORKSCONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Master authorized networks is a Beta feature. Configuration options for
the master authorized networks feature. Enabled master authorized
networks will disallow all external traffic to access Kubernetes master
through HTTPS except traffic from the given CIDR blocks, Google Compute
Engine Public IPs and Google Prod IPs.
Attributes:
enabled:
Whether or not master authorized networks is enabled.
cidr_blocks:
cidr\_blocks define up to 10 external networks that could
access Kubernetes master through HTTPS.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.MasterAuthorizedNetworksConfig)
))
_sym_db.RegisterMessage(MasterAuthorizedNetworksConfig)
_sym_db.RegisterMessage(MasterAuthorizedNetworksConfig.CidrBlock)
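# Illustrative sketch only: a MasterAuthorizedNetworksConfig with a single
# authorized CIDR block. The display name and network range are hypothetical
# (203.0.113.0/24 is a documentation-only range).
def _example_master_authorized_networks_config():
    return MasterAuthorizedNetworksConfig(
        enabled=True,
        cidr_blocks=[
            MasterAuthorizedNetworksConfig.CidrBlock(
                display_name='office',         # optional label for the block
                cidr_block='203.0.113.0/24',   # must be specified in CIDR notation
            ),
        ],
    )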
LegacyAbac = _reflection.GeneratedProtocolMessageType('LegacyAbac', (_message.Message,), dict(
DESCRIPTOR = _LEGACYABAC,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration for the legacy Attribute Based Access Control
authorization mode.
Attributes:
enabled:
Whether the ABAC authorizer is enabled for this cluster. When
enabled, identities in the system, including service accounts,
nodes, and controllers, will have statically granted
permissions beyond those provided by the RBAC configuration or
IAM.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.LegacyAbac)
))
_sym_db.RegisterMessage(LegacyAbac)
NetworkPolicy = _reflection.GeneratedProtocolMessageType('NetworkPolicy', (_message.Message,), dict(
DESCRIPTOR = _NETWORKPOLICY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration options for the NetworkPolicy feature.
https://kubernetes.io/docs/concepts/services-networking/networkpolicies/
Attributes:
provider:
The selected network policy provider.
enabled:
Whether network policy is enabled on the cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.NetworkPolicy)
))
_sym_db.RegisterMessage(NetworkPolicy)
IPAllocationPolicy = _reflection.GeneratedProtocolMessageType('IPAllocationPolicy', (_message.Message,), dict(
DESCRIPTOR = _IPALLOCATIONPOLICY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Configuration for controlling how IPs are allocated in the cluster.
Attributes:
use_ip_aliases:
Whether alias IPs will be used for pod IPs in the cluster.
create_subnetwork:
Whether a new subnetwork will be created automatically for the
cluster. This field is only applicable when
``use_ip_aliases`` is true.
subnetwork_name:
A custom subnetwork name to be used if ``create_subnetwork``
is true. If this field is empty, then an automatic name will
be chosen for the new subnetwork.
cluster_ipv4_cidr:
This field is deprecated, use cluster\_ipv4\_cidr\_block.
node_ipv4_cidr:
This field is deprecated, use node\_ipv4\_cidr\_block.
services_ipv4_cidr:
This field is deprecated, use services\_ipv4\_cidr\_block.
cluster_secondary_range_name:
The name of the secondary range to be used for the cluster
CIDR block. The secondary range will be used for pod IP
addresses. This must be an existing secondary range associated
with the cluster subnetwork. This field is only applicable
when use\_ip\_aliases is true and create\_subnetwork is false.
services_secondary_range_name:
The name of the secondary range to be used for the services
CIDR block. The secondary range will be used for service
ClusterIPs. This must be an existing secondary range
associated with the cluster subnetwork. This field is only
applicable when use\_ip\_aliases is true and
create\_subnetwork is false.
cluster_ipv4_cidr_block:
The IP address range for the cluster pod IPs. If this field is
set, then ``cluster.cluster_ipv4_cidr`` must be left blank.
This field is only applicable when ``use_ip_aliases`` is true.
Set to blank to have a range chosen with the default size.
Set to /netmask (e.g. ``/14``) to have a range chosen with a
specific netmask. Set to a `CIDR
<http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing>`__ notation (e.g. ``10.96.0.0/14``) from the
RFC-1918 private networks (e.g. ``10.0.0.0/8``,
``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific
range to use.
node_ipv4_cidr_block:
The IP address range of the instance IPs in this cluster.
This is applicable only if ``create_subnetwork`` is true. Set
to blank to have a range chosen with the default size. Set to
/netmask (e.g. ``/14``) to have a range chosen with a specific
netmask. Set to a `CIDR
<http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing>`__ notation (e.g. ``10.96.0.0/14``) from the
RFC-1918 private networks (e.g. ``10.0.0.0/8``,
``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific
range to use.
services_ipv4_cidr_block:
The IP address range of the services IPs in this cluster. If
blank, a range will be automatically chosen with the default
size. This field is only applicable when ``use_ip_aliases``
is true. Set to blank to have a range chosen with the default
size. Set to /netmask (e.g. ``/14``) to have a range chosen
with a specific netmask. Set to a `CIDR
<http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing>`__ notation (e.g. ``10.96.0.0/14``) from the
RFC-1918 private networks (e.g. ``10.0.0.0/8``,
``172.16.0.0/12``, ``192.168.0.0/16``) to pick a specific
range to use.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.IPAllocationPolicy)
))
_sym_db.RegisterMessage(IPAllocationPolicy)
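# Illustrative sketch only: an IPAllocationPolicy that enables alias IPs and
# pins the pod and service ranges. The CIDR blocks shown are hypothetical.
def _example_ip_allocation_policy():
    return IPAllocationPolicy(
        use_ip_aliases=True,
        create_subnetwork=False,
        cluster_ipv4_cidr_block='10.96.0.0/14',     # pod IP range
        services_ipv4_cidr_block='10.100.0.0/20',   # service ClusterIP range
    )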
Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict(
ResourceLabelsEntry = _reflection.GeneratedProtocolMessageType('ResourceLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _CLUSTER_RESOURCELABELSENTRY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
# @@protoc_insertion_point(class_scope:google.container.v1.Cluster.ResourceLabelsEntry)
))
,
DESCRIPTOR = _CLUSTER,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """A Google Container Engine cluster.
Attributes:
name:
The name of this cluster. The name must be unique within this
project and zone, and can be up to 40 characters with the
following restrictions: - Lowercase letters, numbers, and
hyphens only. - Must start with a letter. - Must end with a
number or a letter.
description:
An optional description of this cluster.
initial_node_count:
The number of nodes to create in this cluster. You must ensure
that your Compute Engine resource quota is sufficient for this
number of instances. You must also have available firewall and
routes quota. For requests, this field should only be used in
lieu of a "node\_pool" object, since this configuration (along
with the "node\_config") will be used to create a "NodePool"
object with an auto-generated name. Do not use this and a
node\_pool at the same time.
node_config:
Parameters used in creating the cluster's nodes. See
``nodeConfig`` for the description of its properties. For
requests, this field should only be used in lieu of a
"node\_pool" object, since this configuration (along with the
"initial\_node\_count") will be used to create a "NodePool"
object with an auto-generated name. Do not use this and a
node\_pool at the same time. For responses, this field will be
populated with the node configuration of the first node pool.
If unspecified, the defaults are used.
master_auth:
The authentication information for accessing the master
endpoint.
logging_service:
The logging service the cluster should use to write logs.
Currently available options: - ``logging.googleapis.com`` -
the Google Cloud Logging service. - ``none`` - no logs will
be exported from the cluster. - if left as an empty string,\
``logging.googleapis.com`` will be used.
monitoring_service:
The monitoring service the cluster should use to write
metrics. Currently available options: -
``monitoring.googleapis.com`` - the Google Cloud Monitoring
service. - ``none`` - no metrics will be exported from the
cluster. - if left as an empty string,
``monitoring.googleapis.com`` will be used.
network:
The name of the Google Compute Engine `network
</compute/docs/networks-and-firewalls#networks>`__ to which
the cluster is connected. If left unspecified, the ``default``
network will be used.
cluster_ipv4_cidr:
The IP address range of the container pods in this cluster, in
`CIDR <http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing>`__ notation (e.g. ``10.96.0.0/14``). Leave
blank to have one automatically chosen or specify a ``/14``
block in ``10.0.0.0/8``.
addons_config:
Configurations for the various addons available to run in the
cluster.
subnetwork:
The name of the Google Compute Engine `subnetwork
</compute/docs/subnetworks>`__ to which the cluster is
connected.
node_pools:
The node pools associated with this cluster. This field should
not be set if "node\_config" or "initial\_node\_count" are
specified.
locations:
The list of Google Compute Engine `locations
</compute/docs/zones#available>`__ in which the cluster's
nodes should be located.
enable_kubernetes_alpha:
Kubernetes alpha features are enabled on this cluster. This
includes alpha API groups (e.g. v1alpha1) and features that
may not be production ready in the kubernetes version of the
master and nodes. The cluster has no SLA for uptime and
master/node upgrades are disabled. Alpha enabled clusters are
automatically deleted thirty days after creation.
resource_labels:
The resource labels for the cluster to use to annotate any
related Google Compute Engine resources.
label_fingerprint:
The fingerprint of the set of labels for this cluster.
legacy_abac:
Configuration for the legacy ABAC authorization mode.
network_policy:
Configuration options for the NetworkPolicy feature.
ip_allocation_policy:
Configuration for cluster IP allocation.
master_authorized_networks_config:
Master authorized networks is a Beta feature. The
configuration options for master authorized networks feature.
maintenance_policy:
Configure the maintenance policy for this cluster.
self_link:
[Output only] Server-defined URL for the resource.
zone:
[Output only] The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
endpoint:
[Output only] The IP address of this cluster's master
endpoint. The endpoint can be accessed from the internet at
``https://username:password@endpoint/``. See the
``masterAuth`` property of this resource for username and
password information.
initial_cluster_version:
The initial Kubernetes version for this cluster. Valid
versions are those found in validMasterVersions returned by
getServerConfig. The version can be upgraded over time; such
upgrades are reflected in currentMasterVersion and
currentNodeVersion.
current_master_version:
[Output only] The current software version of the master
endpoint.
current_node_version:
[Output only] The current version of the node software
components. If they are currently at multiple versions because
they're in the process of being upgraded, this reflects the
minimum version of all nodes.
create_time:
[Output only] The time the cluster was created, in `RFC3339
<https://www.ietf.org/rfc/rfc3339.txt>`__ text format.
status:
[Output only] The current status of this cluster.
status_message:
[Output only] Additional information about the current status
of this cluster, if available.
node_ipv4_cidr_size:
[Output only] The size of the address space on each node for
hosting containers. This is provisioned from within the
``container_ipv4_cidr`` range.
services_ipv4_cidr:
[Output only] The IP address range of the Kubernetes services
in this cluster, in `CIDR
<http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing>`__ notation (e.g. ``1.2.3.4/29``). Service
addresses are typically put in the last ``/16`` from the
container CIDR.
instance_group_urls:
Deprecated. Use node\_pools.instance\_group\_urls.
current_node_count:
[Output only] The number of nodes currently in the cluster.
expire_time:
[Output only] The time the cluster will be automatically
deleted in `RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__
text format.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.Cluster)
))
_sym_db.RegisterMessage(Cluster)
_sym_db.RegisterMessage(Cluster.ResourceLabelsEntry)
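# Illustrative sketch only: a minimal Cluster message following the docstring
# above, using initial_node_count plus node_config in lieu of an explicit
# node_pool. Names, counts, and labels are hypothetical.
def _example_cluster():
    return Cluster(
        name='example-cluster',
        initial_node_count=3,
        node_config=NodeConfig(machine_type='n1-standard-1'),
        resource_labels={'team': 'demo'},   # map field backed by ResourceLabelsEntry
    )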
ClusterUpdate = _reflection.GeneratedProtocolMessageType('ClusterUpdate', (_message.Message,), dict(
DESCRIPTOR = _CLUSTERUPDATE,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ClusterUpdate describes an update to the cluster. Exactly one update can
be applied to a cluster with each request, so at most one field can be
provided.
Attributes:
desired_node_version:
The Kubernetes version to change the nodes to (typically an
upgrade). Use ``-`` to upgrade to the latest version supported
by the server.
desired_monitoring_service:
The monitoring service the cluster should use to write
metrics. Currently available options: -
"monitoring.googleapis.com" - the Google Cloud Monitoring
service - "none" - no metrics will be exported from the
cluster
desired_addons_config:
Configurations for the various addons available to run in the
cluster.
desired_node_pool_id:
The node pool to be upgraded. This field is mandatory if
"desired\_node\_version", "desired\_image\_family" or
"desired\_node\_pool\_autoscaling" is specified and there is
more than one node pool on the cluster.
desired_image_type:
The desired image type for the node pool. NOTE: Set the
"desired\_node\_pool" field as well.
desired_node_pool_autoscaling:
Autoscaler configuration for the node pool specified in
desired\_node\_pool\_id. If there is only one pool in the
cluster and desired\_node\_pool\_id is not provided then the
change applies to that single node pool.
desired_locations:
The desired list of Google Compute Engine `locations
</compute/docs/zones#available>`__ in which the cluster's
nodes should be located. Changing the locations a cluster is
in will result in nodes being either created or removed from
the cluster, depending on whether locations are being added or
removed. This list must always include the cluster's primary
zone.
desired_master_authorized_networks_config:
Master authorized networks is a Beta feature. The desired
configuration options for master authorized networks feature.
desired_master_version:
The Kubernetes version to change the master to. The only valid
value is the latest supported version. Use "-" to have the
server automatically select the latest version.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ClusterUpdate)
))
_sym_db.RegisterMessage(ClusterUpdate)
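# Illustrative sketch only: a ClusterUpdate carrying exactly one desired change,
# as the docstring above requires ("-" asks for the latest supported version).
def _example_cluster_update():
    return ClusterUpdate(desired_node_version='-')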
Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), dict(
DESCRIPTOR = _OPERATION,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """This operation resource represents operations that may have happened or
are happening on the cluster. All fields are output only.
Attributes:
name:
The server-assigned ID for the operation.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the operation is
taking place.
operation_type:
The operation type.
status:
The current status of the operation.
detail:
Detailed operation progress, if available.
status_message:
If an error has occurred, a textual description of the error.
self_link:
Server-defined URL for the resource.
target_link:
Server-defined URL for the target of the operation.
start_time:
[Output only] The time the operation started, in `RFC3339
<https://www.ietf.org/rfc/rfc3339.txt>`__ text format.
end_time:
[Output only] The time the operation completed, in `RFC3339
<https://www.ietf.org/rfc/rfc3339.txt>`__ text format.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.Operation)
))
_sym_db.RegisterMessage(Operation)
CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECLUSTERREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """CreateClusterRequest creates a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster:
A `cluster resource </container-
engine/reference/rest/v1/projects.zones.clusters>`__
""",
# @@protoc_insertion_point(class_scope:google.container.v1.CreateClusterRequest)
))
_sym_db.RegisterMessage(CreateClusterRequest)
GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTERREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """GetClusterRequest gets the settings of a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to retrieve.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.GetClusterRequest)
))
_sym_db.RegisterMessage(GetClusterRequest)
UpdateClusterRequest = _reflection.GeneratedProtocolMessageType('UpdateClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATECLUSTERREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """UpdateClusterRequest updates the settings of a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
update:
A description of the update.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.UpdateClusterRequest)
))
_sym_db.RegisterMessage(UpdateClusterRequest)
UpdateNodePoolRequest = _reflection.GeneratedProtocolMessageType('UpdateNodePoolRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATENODEPOOLREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """UpdateNodePoolRequests update a node pool's image and/or version.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
node_pool_id:
The name of the node pool to upgrade.
node_version:
The Kubernetes version to change the nodes to (typically an
upgrade). Use ``-`` to upgrade to the latest version supported
by the server.
image_type:
The desired image type for the node pool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.UpdateNodePoolRequest)
))
_sym_db.RegisterMessage(UpdateNodePoolRequest)
SetNodePoolAutoscalingRequest = _reflection.GeneratedProtocolMessageType('SetNodePoolAutoscalingRequest', (_message.Message,), dict(
DESCRIPTOR = _SETNODEPOOLAUTOSCALINGREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetNodePoolAutoscalingRequest sets the autoscaler settings of a node
pool.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
node_pool_id:
The name of the node pool to upgrade.
autoscaling:
Autoscaling configuration for the node pool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetNodePoolAutoscalingRequest)
))
_sym_db.RegisterMessage(SetNodePoolAutoscalingRequest)
SetLoggingServiceRequest = _reflection.GeneratedProtocolMessageType('SetLoggingServiceRequest', (_message.Message,), dict(
DESCRIPTOR = _SETLOGGINGSERVICEREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetLoggingServiceRequest sets the logging service of a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
logging_service:
          The logging service the cluster should use to write logs.
          Currently available options: - "logging.googleapis.com" -
          the Google Cloud Logging service - "none" - no logs will be
          exported from the cluster
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetLoggingServiceRequest)
))
_sym_db.RegisterMessage(SetLoggingServiceRequest)
SetMonitoringServiceRequest = _reflection.GeneratedProtocolMessageType('SetMonitoringServiceRequest', (_message.Message,), dict(
DESCRIPTOR = _SETMONITORINGSERVICEREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetMonitoringServiceRequest sets the monitoring service of a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
monitoring_service:
The monitoring service the cluster should use to write
metrics. Currently available options: -
"monitoring.googleapis.com" - the Google Cloud Monitoring
service - "none" - no metrics will be exported from the
cluster
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetMonitoringServiceRequest)
))
_sym_db.RegisterMessage(SetMonitoringServiceRequest)
SetAddonsConfigRequest = _reflection.GeneratedProtocolMessageType('SetAddonsConfigRequest', (_message.Message,), dict(
DESCRIPTOR = _SETADDONSCONFIGREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetAddonsConfigRequest sets the addons associated with the cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
addons_config:
The desired configurations for the various addons available to
run in the cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetAddonsConfigRequest)
))
_sym_db.RegisterMessage(SetAddonsConfigRequest)
SetLocationsRequest = _reflection.GeneratedProtocolMessageType('SetLocationsRequest', (_message.Message,), dict(
DESCRIPTOR = _SETLOCATIONSREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetLocationsRequest sets the locations of the cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
locations:
The desired list of Google Compute Engine `locations
</compute/docs/zones#available>`__ in which the cluster's
nodes should be located. Changing the locations a cluster is
in will result in nodes being either created or removed from
the cluster, depending on whether locations are being added or
removed. This list must always include the cluster's primary
zone.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetLocationsRequest)
))
_sym_db.RegisterMessage(SetLocationsRequest)
UpdateMasterRequest = _reflection.GeneratedProtocolMessageType('UpdateMasterRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATEMASTERREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """UpdateMasterRequest updates the master of the cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
master_version:
The Kubernetes version to change the master to. The only valid
value is the latest supported version. Use "-" to have the
server automatically select the latest version.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.UpdateMasterRequest)
))
_sym_db.RegisterMessage(UpdateMasterRequest)
SetMasterAuthRequest = _reflection.GeneratedProtocolMessageType('SetMasterAuthRequest', (_message.Message,), dict(
DESCRIPTOR = _SETMASTERAUTHREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetMasterAuthRequest updates the admin password of a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to upgrade.
action:
The exact form of action to be taken on the master auth.
update:
A description of the update.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetMasterAuthRequest)
))
_sym_db.RegisterMessage(SetMasterAuthRequest)
DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECLUSTERREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """DeleteClusterRequest deletes a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to delete.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.DeleteClusterRequest)
))
_sym_db.RegisterMessage(DeleteClusterRequest)
ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ListClustersRequest lists clusters.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides, or "-" for all zones.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ListClustersRequest)
))
_sym_db.RegisterMessage(ListClustersRequest)
ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSRESPONSE,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ListClustersResponse is the result of ListClustersRequest.
Attributes:
clusters:
A list of clusters in the project in the specified zone, or
          across all zones.
missing_zones:
If any zones are listed here, the list of clusters returned
may be missing those zones.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ListClustersResponse)
))
_sym_db.RegisterMessage(ListClustersResponse)
GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), dict(
DESCRIPTOR = _GETOPERATIONREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """GetOperationRequest gets a single operation.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
operation_id:
The server-assigned ``name`` of the operation.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.GetOperationRequest)
))
_sym_db.RegisterMessage(GetOperationRequest)
ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTOPERATIONSREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ListOperationsRequest lists operations.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ to return operations for,
or ``-`` for all zones.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ListOperationsRequest)
))
_sym_db.RegisterMessage(ListOperationsRequest)
CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), dict(
DESCRIPTOR = _CANCELOPERATIONREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """CancelOperationRequest cancels a single operation.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the operation
resides.
operation_id:
The server-assigned ``name`` of the operation.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.CancelOperationRequest)
))
_sym_db.RegisterMessage(CancelOperationRequest)
ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTOPERATIONSRESPONSE,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ListOperationsResponse is the result of ListOperationsRequest.
Attributes:
operations:
A list of operations in the project in the specified zone.
missing_zones:
If any zones are listed here, the list of operations returned
may be missing the operations from those zones.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ListOperationsResponse)
))
_sym_db.RegisterMessage(ListOperationsResponse)
GetServerConfigRequest = _reflection.GeneratedProtocolMessageType('GetServerConfigRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSERVERCONFIGREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Gets the current Container Engine service configuration.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ to return operations for.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.GetServerConfigRequest)
))
_sym_db.RegisterMessage(GetServerConfigRequest)
ServerConfig = _reflection.GeneratedProtocolMessageType('ServerConfig', (_message.Message,), dict(
DESCRIPTOR = _SERVERCONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Container Engine service configuration.
Attributes:
default_cluster_version:
Version of Kubernetes the service deploys by default.
valid_node_versions:
List of valid node upgrade target versions.
default_image_type:
Default image type.
valid_image_types:
List of valid image types.
valid_master_versions:
List of valid master versions.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ServerConfig)
))
_sym_db.RegisterMessage(ServerConfig)
CreateNodePoolRequest = _reflection.GeneratedProtocolMessageType('CreateNodePoolRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATENODEPOOLREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """CreateNodePoolRequest creates a node pool for a cluster.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
node_pool:
The node pool to create.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.CreateNodePoolRequest)
))
_sym_db.RegisterMessage(CreateNodePoolRequest)
DeleteNodePoolRequest = _reflection.GeneratedProtocolMessageType('DeleteNodePoolRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETENODEPOOLREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """DeleteNodePoolRequest deletes a node pool for a cluster.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
node_pool_id:
The name of the node pool to delete.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.DeleteNodePoolRequest)
))
_sym_db.RegisterMessage(DeleteNodePoolRequest)
ListNodePoolsRequest = _reflection.GeneratedProtocolMessageType('ListNodePoolsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTNODEPOOLSREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ListNodePoolsRequest lists the node pool(s) for a cluster.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ListNodePoolsRequest)
))
_sym_db.RegisterMessage(ListNodePoolsRequest)
GetNodePoolRequest = _reflection.GeneratedProtocolMessageType('GetNodePoolRequest', (_message.Message,), dict(
DESCRIPTOR = _GETNODEPOOLREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """GetNodePoolRequest retrieves a node pool for a cluster.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
node_pool_id:
The name of the node pool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.GetNodePoolRequest)
))
_sym_db.RegisterMessage(GetNodePoolRequest)
NodePool = _reflection.GeneratedProtocolMessageType('NodePool', (_message.Message,), dict(
DESCRIPTOR = _NODEPOOL,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """NodePool contains the name and configuration for a cluster's node pool.
  Node pools are sets of nodes (i.e. VMs) with a common configuration
and specification, under the control of the cluster master. They may
have a set of Kubernetes labels applied to them, which may be used to
reference them during pod scheduling. They may also be resized up or
down, to accommodate the workload.
Attributes:
name:
The name of the node pool.
config:
The node configuration of the pool.
initial_node_count:
The initial node count for the pool. You must ensure that your
Compute Engine resource quota is sufficient for this number of
instances. You must also have available firewall and routes
quota.
self_link:
[Output only] Server-defined URL for the resource.
version:
The version of the Kubernetes of this node.
instance_group_urls:
[Output only] The resource URLs of the `managed instance
groups </compute/docs/instance-groups/creating-groups-of-
managed-instances>`__ associated with this node pool.
status:
[Output only] The status of the nodes in this pool instance.
status_message:
[Output only] Additional information about the current status
of this node pool instance, if available.
autoscaling:
Autoscaler configuration for this NodePool. Autoscaler is
enabled only if a valid configuration is present.
management:
NodeManagement configuration for this NodePool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.NodePool)
))
_sym_db.RegisterMessage(NodePool)
NodeManagement = _reflection.GeneratedProtocolMessageType('NodeManagement', (_message.Message,), dict(
DESCRIPTOR = _NODEMANAGEMENT,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """NodeManagement defines the set of node management services turned on for
the node pool.
Attributes:
auto_upgrade:
A flag that specifies whether node auto-upgrade is enabled for
the node pool. If enabled, node auto-upgrade helps keep the
nodes in your node pool up to date with the latest release
version of Kubernetes.
auto_repair:
A flag that specifies whether the node auto-repair is enabled
for the node pool. If enabled, the nodes in this node pool
will be monitored and, if they fail health checks too many
times, an automatic repair action will be triggered.
upgrade_options:
Specifies the Auto Upgrade knobs for the node pool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.NodeManagement)
))
_sym_db.RegisterMessage(NodeManagement)
AutoUpgradeOptions = _reflection.GeneratedProtocolMessageType('AutoUpgradeOptions', (_message.Message,), dict(
DESCRIPTOR = _AUTOUPGRADEOPTIONS,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """AutoUpgradeOptions defines the set of options for the user to control
how the Auto Upgrades will proceed.
Attributes:
auto_upgrade_start_time:
[Output only] This field is set when upgrades are about to
commence with the approximate start time for the upgrades, in
`RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__ text
format.
description:
[Output only] This field is set when upgrades are about to
commence with the description of the upgrade.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.AutoUpgradeOptions)
))
_sym_db.RegisterMessage(AutoUpgradeOptions)
MaintenancePolicy = _reflection.GeneratedProtocolMessageType('MaintenancePolicy', (_message.Message,), dict(
DESCRIPTOR = _MAINTENANCEPOLICY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """MaintenancePolicy defines the maintenance policy to be used for the
cluster.
Attributes:
window:
Specifies the maintenance window in which maintenance may be
performed.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.MaintenancePolicy)
))
_sym_db.RegisterMessage(MaintenancePolicy)
MaintenanceWindow = _reflection.GeneratedProtocolMessageType('MaintenanceWindow', (_message.Message,), dict(
DESCRIPTOR = _MAINTENANCEWINDOW,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """MaintenanceWindow defines the maintenance window to be used for the
cluster.
Attributes:
daily_maintenance_window:
DailyMaintenanceWindow specifies a daily maintenance operation
window.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.MaintenanceWindow)
))
_sym_db.RegisterMessage(MaintenanceWindow)
DailyMaintenanceWindow = _reflection.GeneratedProtocolMessageType('DailyMaintenanceWindow', (_message.Message,), dict(
DESCRIPTOR = _DAILYMAINTENANCEWINDOW,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """Time window specified for daily maintenance operations.
Attributes:
start_time:
Time within the maintenance window to start the maintenance
operations. Time format should be in `RFC3339
          <https://www.ietf.org/rfc/rfc3339.txt>`__ format "HH:MM",
where HH : [00-23] and MM : [00-59] GMT.
duration:
[Output only] Duration of the time window, automatically
chosen to be smallest possible in the given scenario. Duration
will be in `RFC3339 <https://www.ietf.org/rfc/rfc3339.txt>`__
format "PTnHnMnS".
""",
# @@protoc_insertion_point(class_scope:google.container.v1.DailyMaintenanceWindow)
))
_sym_db.RegisterMessage(DailyMaintenanceWindow)
SetNodePoolManagementRequest = _reflection.GeneratedProtocolMessageType('SetNodePoolManagementRequest', (_message.Message,), dict(
DESCRIPTOR = _SETNODEPOOLMANAGEMENTREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetNodePoolManagementRequest sets the node management properties of a
node pool.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to update.
node_pool_id:
The name of the node pool to update.
management:
NodeManagement configuration for the node pool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetNodePoolManagementRequest)
))
_sym_db.RegisterMessage(SetNodePoolManagementRequest)
SetNodePoolSizeRequest = _reflection.GeneratedProtocolMessageType('SetNodePoolSizeRequest', (_message.Message,), dict(
DESCRIPTOR = _SETNODEPOOLSIZEREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetNodePoolSizeRequest sets the size a node pool.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to update.
node_pool_id:
The name of the node pool to update.
node_count:
The desired node count for the pool.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetNodePoolSizeRequest)
))
_sym_db.RegisterMessage(SetNodePoolSizeRequest)
RollbackNodePoolUpgradeRequest = _reflection.GeneratedProtocolMessageType('RollbackNodePoolUpgradeRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLBACKNODEPOOLUPGRADEREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or
Failed NodePool upgrade. This will be an no-op if the last upgrade
successfully completed.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to rollback.
node_pool_id:
The name of the node pool to rollback.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.RollbackNodePoolUpgradeRequest)
))
_sym_db.RegisterMessage(RollbackNodePoolUpgradeRequest)
ListNodePoolsResponse = _reflection.GeneratedProtocolMessageType('ListNodePoolsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTNODEPOOLSRESPONSE,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """ListNodePoolsResponse is the result of ListNodePoolsRequest.
Attributes:
node_pools:
A list of node pools for a cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.ListNodePoolsResponse)
))
_sym_db.RegisterMessage(ListNodePoolsResponse)
NodePoolAutoscaling = _reflection.GeneratedProtocolMessageType('NodePoolAutoscaling', (_message.Message,), dict(
DESCRIPTOR = _NODEPOOLAUTOSCALING,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """NodePoolAutoscaling contains information required by cluster autoscaler
to adjust the size of the node pool to the current cluster usage.
Attributes:
enabled:
Is autoscaling enabled for this node pool.
min_node_count:
Minimum number of nodes in the NodePool. Must be >= 1 and <=
max\_node\_count.
max_node_count:
Maximum number of nodes in the NodePool. Must be >=
          min\_node\_count. There has to be enough quota to scale up the
cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.NodePoolAutoscaling)
))
_sym_db.RegisterMessage(NodePoolAutoscaling)
SetLabelsRequest = _reflection.GeneratedProtocolMessageType('SetLabelsRequest', (_message.Message,), dict(
ResourceLabelsEntry = _reflection.GeneratedProtocolMessageType('ResourceLabelsEntry', (_message.Message,), dict(
DESCRIPTOR = _SETLABELSREQUEST_RESOURCELABELSENTRY,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
# @@protoc_insertion_point(class_scope:google.container.v1.SetLabelsRequest.ResourceLabelsEntry)
))
,
DESCRIPTOR = _SETLABELSREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetLabelsRequest sets the Google Cloud Platform labels on a Google
Container Engine cluster, which will in turn set them for Google Compute
  Engine resources used by that cluster.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
resource_labels:
The labels to set for that cluster.
label_fingerprint:
The fingerprint of the previous set of labels for this
resource, used to detect conflicts. The fingerprint is
initially generated by Container Engine and changes after
every request to modify or update labels. You must always
provide an up-to-date fingerprint hash when updating or
changing labels. Make a get() request to the resource to get
the latest fingerprint.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetLabelsRequest)
))
_sym_db.RegisterMessage(SetLabelsRequest)
_sym_db.RegisterMessage(SetLabelsRequest.ResourceLabelsEntry)
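# --- Illustrative sketch, not part of the generated descriptor code ---
# The SetLabelsRequest docstring above describes a read-modify-write flow:
# fetch the cluster's current label fingerprint with a get() request, then
# send it back together with the new labels so the server can detect
# conflicting updates. A minimal, hypothetical request built from the class
# registered above could look like this (all literal values are placeholders;
# the fingerprint would normally come from a prior get() of the cluster):
def _example_set_labels_request():
    return SetLabelsRequest(
        project_id='my-project',                  # hypothetical project ID
        zone='us-central1-a',                     # hypothetical zone
        cluster_id='example-cluster',             # hypothetical cluster name
        resource_labels={'env': 'staging'},       # labels to set on the cluster
        label_fingerprint='example-fingerprint',  # placeholder fingerprint
    )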
SetLegacyAbacRequest = _reflection.GeneratedProtocolMessageType('SetLegacyAbacRequest', (_message.Message,), dict(
DESCRIPTOR = _SETLEGACYABACREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetLegacyAbacRequest enables or disables the ABAC authorization
mechanism for a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to update.
enabled:
Whether ABAC authorization will be enabled in the cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetLegacyAbacRequest)
))
_sym_db.RegisterMessage(SetLegacyAbacRequest)
StartIPRotationRequest = _reflection.GeneratedProtocolMessageType('StartIPRotationRequest', (_message.Message,), dict(
DESCRIPTOR = _STARTIPROTATIONREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """StartIPRotationRequest creates a new IP for the cluster and then
performs a node upgrade on each node pool to point to the new IP.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.StartIPRotationRequest)
))
_sym_db.RegisterMessage(StartIPRotationRequest)
CompleteIPRotationRequest = _reflection.GeneratedProtocolMessageType('CompleteIPRotationRequest', (_message.Message,), dict(
DESCRIPTOR = _COMPLETEIPROTATIONREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """CompleteIPRotationRequest moves the cluster master back into single-IP
mode.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.CompleteIPRotationRequest)
))
_sym_db.RegisterMessage(CompleteIPRotationRequest)
AcceleratorConfig = _reflection.GeneratedProtocolMessageType('AcceleratorConfig', (_message.Message,), dict(
DESCRIPTOR = _ACCELERATORCONFIG,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """AcceleratorConfig represents a Hardware Accelerator request.
Attributes:
accelerator_count:
The number of the accelerator cards exposed to an instance.
accelerator_type:
The accelerator type resource name. List of supported
accelerators `here </compute/docs/gpus/#Introduction>`__
""",
# @@protoc_insertion_point(class_scope:google.container.v1.AcceleratorConfig)
))
_sym_db.RegisterMessage(AcceleratorConfig)
SetNetworkPolicyRequest = _reflection.GeneratedProtocolMessageType('SetNetworkPolicyRequest', (_message.Message,), dict(
DESCRIPTOR = _SETNETWORKPOLICYREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetNetworkPolicyRequest enables/disables network policy for a cluster.
Attributes:
project_id:
          The Google Developers Console `project ID or project number
          <https://developers.google.com/console/help/new/#projectnumber>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster.
network_policy:
Configuration options for the NetworkPolicy feature.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetNetworkPolicyRequest)
))
_sym_db.RegisterMessage(SetNetworkPolicyRequest)
SetMaintenancePolicyRequest = _reflection.GeneratedProtocolMessageType('SetMaintenancePolicyRequest', (_message.Message,), dict(
DESCRIPTOR = _SETMAINTENANCEPOLICYREQUEST,
__module__ = 'google.cloud.container_v1.proto.cluster_service_pb2'
,
__doc__ = """SetMaintenancePolicyRequest sets the maintenance policy for a cluster.
Attributes:
project_id:
The Google Developers Console `project ID or project number
<https://support.google.com/cloud/answer/6158840>`__.
zone:
The name of the Google Compute Engine `zone
</compute/docs/zones#available>`__ in which the cluster
resides.
cluster_id:
The name of the cluster to update.
maintenance_policy:
The maintenance policy to be set for the cluster. An empty
field clears the existing maintenance policy.
""",
# @@protoc_insertion_point(class_scope:google.container.v1.SetMaintenancePolicyRequest)
))
_sym_db.RegisterMessage(SetMaintenancePolicyRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\027com.google.container.v1B\023ClusterServiceProtoP\001Z<google.golang.org/genproto/googleapis/container/v1;container\252\002\031Google.Cloud.Container.V1\312\002\031Google\\Cloud\\Container\\V1'))
_NODECONFIG_METADATAENTRY.has_options = True
_NODECONFIG_METADATAENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_NODECONFIG_LABELSENTRY.has_options = True
_NODECONFIG_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CLUSTER_RESOURCELABELSENTRY.has_options = True
_CLUSTER_RESOURCELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_SETLABELSREQUEST_RESOURCELABELSENTRY.has_options = True
_SETLABELSREQUEST_RESOURCELABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_CLUSTERMANAGER = _descriptor.ServiceDescriptor(
name='ClusterManager',
full_name='google.container.v1.ClusterManager',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=9787,
serialized_end=15262,
methods=[
_descriptor.MethodDescriptor(
name='ListClusters',
full_name='google.container.v1.ClusterManager.ListClusters',
index=0,
containing_service=None,
input_type=_LISTCLUSTERSREQUEST,
output_type=_LISTCLUSTERSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\022//v1/projects/{project_id}/zones/{zone}/clusters')),
),
_descriptor.MethodDescriptor(
name='GetCluster',
full_name='google.container.v1.ClusterManager.GetCluster',
index=1,
containing_service=None,
input_type=_GETCLUSTERREQUEST,
output_type=_CLUSTER,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>\022</v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}')),
),
_descriptor.MethodDescriptor(
name='CreateCluster',
full_name='google.container.v1.ClusterManager.CreateCluster',
index=2,
containing_service=None,
input_type=_CREATECLUSTERREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0024\"//v1/projects/{project_id}/zones/{zone}/clusters:\001*')),
),
_descriptor.MethodDescriptor(
name='UpdateCluster',
full_name='google.container.v1.ClusterManager.UpdateCluster',
index=3,
containing_service=None,
input_type=_UPDATECLUSTERREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002A\032</v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:\001*')),
),
_descriptor.MethodDescriptor(
name='UpdateNodePool',
full_name='google.container.v1.ClusterManager.UpdateNodePool',
index=4,
containing_service=None,
input_type=_UPDATENODEPOOLREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002a\"\\/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/update:\001*')),
),
_descriptor.MethodDescriptor(
name='SetNodePoolAutoscaling',
full_name='google.container.v1.ClusterManager.SetNodePoolAutoscaling',
index=5,
containing_service=None,
input_type=_SETNODEPOOLAUTOSCALINGREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002f\"a/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/autoscaling:\001*')),
),
_descriptor.MethodDescriptor(
name='SetLoggingService',
full_name='google.container.v1.ClusterManager.SetLoggingService',
index=6,
containing_service=None,
input_type=_SETLOGGINGSERVICEREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002I\"D/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/logging:\001*')),
),
_descriptor.MethodDescriptor(
name='SetMonitoringService',
full_name='google.container.v1.ClusterManager.SetMonitoringService',
index=7,
containing_service=None,
input_type=_SETMONITORINGSERVICEREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002L\"G/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/monitoring:\001*')),
),
_descriptor.MethodDescriptor(
name='SetAddonsConfig',
full_name='google.container.v1.ClusterManager.SetAddonsConfig',
index=8,
containing_service=None,
input_type=_SETADDONSCONFIGREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\"C/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/addons:\001*')),
),
_descriptor.MethodDescriptor(
name='SetLocations',
full_name='google.container.v1.ClusterManager.SetLocations',
index=9,
containing_service=None,
input_type=_SETLOCATIONSREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002K\"F/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/locations:\001*')),
),
_descriptor.MethodDescriptor(
name='UpdateMaster',
full_name='google.container.v1.ClusterManager.UpdateMaster',
index=10,
containing_service=None,
input_type=_UPDATEMASTERREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\"C/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/master:\001*')),
),
_descriptor.MethodDescriptor(
name='SetMasterAuth',
full_name='google.container.v1.ClusterManager.SetMasterAuth',
index=11,
containing_service=None,
input_type=_SETMASTERAUTHREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002O\"J/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setMasterAuth:\001*')),
),
_descriptor.MethodDescriptor(
name='DeleteCluster',
full_name='google.container.v1.ClusterManager.DeleteCluster',
index=12,
containing_service=None,
input_type=_DELETECLUSTERREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>*</v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}')),
),
_descriptor.MethodDescriptor(
name='ListOperations',
full_name='google.container.v1.ClusterManager.ListOperations',
index=13,
containing_service=None,
input_type=_LISTOPERATIONSREQUEST,
output_type=_LISTOPERATIONSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0023\0221/v1/projects/{project_id}/zones/{zone}/operations')),
),
_descriptor.MethodDescriptor(
name='GetOperation',
full_name='google.container.v1.ClusterManager.GetOperation',
index=14,
containing_service=None,
input_type=_GETOPERATIONREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002B\022@/v1/projects/{project_id}/zones/{zone}/operations/{operation_id}')),
),
_descriptor.MethodDescriptor(
name='CancelOperation',
full_name='google.container.v1.ClusterManager.CancelOperation',
index=15,
containing_service=None,
input_type=_CANCELOPERATIONREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002L\"G/v1/projects/{project_id}/zones/{zone}/operations/{operation_id}:cancel:\001*')),
),
_descriptor.MethodDescriptor(
name='GetServerConfig',
full_name='google.container.v1.ClusterManager.GetServerConfig',
index=16,
containing_service=None,
input_type=_GETSERVERCONFIGREQUEST,
output_type=_SERVERCONFIG,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0025\0223/v1/projects/{project_id}/zones/{zone}/serverconfig')),
),
_descriptor.MethodDescriptor(
name='ListNodePools',
full_name='google.container.v1.ClusterManager.ListNodePools',
index=17,
containing_service=None,
input_type=_LISTNODEPOOLSREQUEST,
output_type=_LISTNODEPOOLSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\022F/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools')),
),
_descriptor.MethodDescriptor(
name='GetNodePool',
full_name='google.container.v1.ClusterManager.GetNodePool',
index=18,
containing_service=None,
input_type=_GETNODEPOOLREQUEST,
output_type=_NODEPOOL,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002W\022U/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}')),
),
_descriptor.MethodDescriptor(
name='CreateNodePool',
full_name='google.container.v1.ClusterManager.CreateNodePool',
index=19,
containing_service=None,
input_type=_CREATENODEPOOLREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002K\"F/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools:\001*')),
),
_descriptor.MethodDescriptor(
name='DeleteNodePool',
full_name='google.container.v1.ClusterManager.DeleteNodePool',
index=20,
containing_service=None,
input_type=_DELETENODEPOOLREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002W*U/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}')),
),
_descriptor.MethodDescriptor(
name='RollbackNodePoolUpgrade',
full_name='google.container.v1.ClusterManager.RollbackNodePoolUpgrade',
index=21,
containing_service=None,
input_type=_ROLLBACKNODEPOOLUPGRADEREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002c\"^/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}:rollback:\001*')),
),
_descriptor.MethodDescriptor(
name='SetNodePoolManagement',
full_name='google.container.v1.ClusterManager.SetNodePoolManagement',
index=22,
containing_service=None,
input_type=_SETNODEPOOLMANAGEMENTREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002h\"c/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setManagement:\001*')),
),
_descriptor.MethodDescriptor(
name='SetLabels',
full_name='google.container.v1.ClusterManager.SetLabels',
index=23,
containing_service=None,
input_type=_SETLABELSREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002P\"K/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/resourceLabels:\001*')),
),
_descriptor.MethodDescriptor(
name='SetLegacyAbac',
full_name='google.container.v1.ClusterManager.SetLegacyAbac',
index=24,
containing_service=None,
input_type=_SETLEGACYABACREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002L\"G/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/legacyAbac:\001*')),
),
_descriptor.MethodDescriptor(
name='StartIPRotation',
full_name='google.container.v1.ClusterManager.StartIPRotation',
index=25,
containing_service=None,
input_type=_STARTIPROTATIONREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002Q\"L/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:startIpRotation:\001*')),
),
_descriptor.MethodDescriptor(
name='CompleteIPRotation',
full_name='google.container.v1.ClusterManager.CompleteIPRotation',
index=26,
containing_service=None,
input_type=_COMPLETEIPROTATIONREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002T\"O/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:completeIpRotation:\001*')),
),
_descriptor.MethodDescriptor(
name='SetNodePoolSize',
full_name='google.container.v1.ClusterManager.SetNodePoolSize',
index=27,
containing_service=None,
input_type=_SETNODEPOOLSIZEREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002b\"]/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}/nodePools/{node_pool_id}/setSize:\001*')),
),
_descriptor.MethodDescriptor(
name='SetNetworkPolicy',
full_name='google.container.v1.ClusterManager.SetNetworkPolicy',
index=28,
containing_service=None,
input_type=_SETNETWORKPOLICYREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002R\"M/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setNetworkPolicy:\001*')),
),
_descriptor.MethodDescriptor(
name='SetMaintenancePolicy',
full_name='google.container.v1.ClusterManager.SetMaintenancePolicy',
index=29,
containing_service=None,
input_type=_SETMAINTENANCEPOLICYREQUEST,
output_type=_OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002V\"Q/v1/projects/{project_id}/zones/{zone}/clusters/{cluster_id}:setMaintenancePolicy:\001*')),
),
])
_sym_db.RegisterServiceDescriptor(_CLUSTERMANAGER)
DESCRIPTOR.services_by_name['ClusterManager'] = _CLUSTERMANAGER
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
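The generated module above only defines the message classes and the ClusterManager service descriptor; it does not show how those messages are populated in practice. The sketch below is built solely from fields documented in the docstrings above: the import path is taken from the file's __module__ string, the project, zone and cluster names are hypothetical placeholders, and actually sending the request would additionally require a gRPC stub or client that is not shown here.

from google.cloud.container_v1.proto import cluster_service_pb2 as cs

# Ask the server to move the cluster's nodes to the latest supported version.
# Per the ClusterUpdate docstring, at most one desired_* field may be provided
# per request, and '-' means "latest version supported by the server".
request = cs.UpdateClusterRequest(
    project_id='my-project',       # hypothetical project ID or number
    zone='us-central1-a',          # hypothetical Compute Engine zone
    cluster_id='example-cluster',  # hypothetical cluster name
    update=cs.ClusterUpdate(desired_node_version='-'),
)
print(request)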
vipulroxx/kivy | examples/demo/multistroke/historymanager.py | 38 | 9447 | __all__ = ('GestureHistoryManager', 'GestureVisualizer')
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.graphics import Color, Line, Rectangle
from kivy.properties import ObjectProperty, BooleanProperty, NumericProperty
from kivy.compat import PY2
# local libraries
from helpers import InformationPopup
from settings import MultistrokeSettingsContainer
# refuse heap permute for gestures with more strokes than 3
# (you can increase it, but 4 strokes = 384 templates, 5 = 3840)
MAX_PERMUTE_STROKES = 3
Builder.load_file('historymanager.kv')
class GestureHistoryManager(GridLayout):
selected = ObjectProperty(None, allownone=True)
def __init__(self, **kwargs):
super(GestureHistoryManager, self).__init__(**kwargs)
self.gesturesettingsform = GestureSettingsForm()
rr = self.gesturesettingsform.rrdetails
rr.bind(on_reanalyze_selected=self.reanalyze_selected)
self.infopopup = InformationPopup()
self.recognizer = App.get_running_app().recognizer
def reanalyze_selected(self, *l):
# recognize() can block the UI with max_gpf=100, show a message
self.infopopup.text = 'Please wait, analyzing ..'
self.infopopup.auto_dismiss = False
self.infopopup.open()
# Get a reference to the original GestureContainer object
gesture_obj = self.selected._result_obj._gesture_obj
# Reanalyze the candidate strokes using current database
res = self.recognizer.recognize(gesture_obj.get_vectors(),
max_gpf=100)
# Tag the result with the gesture object (it didn't change)
res._gesture_obj = gesture_obj
# Tag the selected item with the updated ProgressTracker
self.selected._result_obj = res
res.bind(on_complete=self._reanalyze_complete)
def _reanalyze_complete(self, *l):
self.gesturesettingsform.load_visualizer(self.selected)
self.infopopup.dismiss()
def add_selected_to_database(self, *l):
if self.selected is None:
raise Exception('add_gesture_to_database before load_visualizer?')
if self.gesturesettingsform.addsettings is None:
            raise Exception('add_gesture_to_database missing addsettings?')
ids = self.gesturesettingsform.addsettings.ids
name = ids.name.value.strip()
if name == '':
self.infopopup.auto_dismiss = True
self.infopopup.text = 'You must specify a name for the gesture'
self.infopopup.open()
return
permute = ids.permute.value
sensitive = ids.orientation_sens.value
strokelen = ids.stroke_sens.value
angle_sim = ids.angle_sim.value
cand = self.selected._result_obj._gesture_obj.get_vectors()
if permute and len(cand) > MAX_PERMUTE_STROKES:
t = "Can't heap permute %d-stroke gesture " % (len(cand))
self.infopopup.text = t
self.infopopup.auto_dismiss = True
self.infopopup.open()
return
self.recognizer.add_gesture(
name,
cand,
use_strokelen=strokelen,
orientation_sensitive=sensitive,
angle_similarity=angle_sim,
permute=permute)
self.infopopup.text = 'Gesture added to database'
self.infopopup.auto_dismiss = True
self.infopopup.open()
def clear_history(self, *l):
if self.selected:
self.visualizer_deselect()
self.ids.history.clear_widgets()
def visualizer_select(self, visualizer, *l):
if self.selected is not None:
self.selected.selected = False
else:
self.add_widget(self.gesturesettingsform)
self.gesturesettingsform.load_visualizer(visualizer)
self.selected = visualizer
def visualizer_deselect(self, *l):
self.selected = None
self.remove_widget(self.gesturesettingsform)
def add_recognizer_result(self, result, *l):
'''The result object is a ProgressTracker with additional
data; in main.py it is tagged with the original GestureContainer
that was analyzed (._gesture_obj)'''
# Create a GestureVisualizer that draws the gesture on canvas
visualizer = GestureVisualizer(result._gesture_obj,
size_hint=(None, None), size=(150, 150))
# Tag it with the result object so AddGestureForm.load_visualizer
# has the results to build labels in the scrollview
visualizer._result_obj = result
visualizer.bind(on_select=self.visualizer_select)
visualizer.bind(on_deselect=self.visualizer_deselect)
# Add the visualizer to the list of gestures in 'history' screen
self.ids.history.add_widget(visualizer)
self._trigger_layout()
self.ids.scrollview.update_from_scroll()
class RecognizerResultLabel(Label):
'''This Label subclass is used to show a single result from the
gesture matching process (is a child of GestureHistoryManager)'''
pass
class RecognizerResultDetails(BoxLayout):
'''Contains a ScrollView of RecognizerResultLabels, ie the list of
matched gestures and their score/distance (is a child of
GestureHistoryManager)'''
def __init__(self, **kwargs):
super(RecognizerResultDetails, self).__init__(**kwargs)
self.register_event_type('on_reanalyze_selected')
def on_reanalyze_selected(self, *l):
pass
class AddGestureSettings(MultistrokeSettingsContainer):
pass
class GestureSettingsForm(BoxLayout):
'''This is the main content of the GestureHistoryManager, the form for
adding a new gesture to the recognizer. It is added to the widget tree
when a GestureVisualizer is selected.'''
def __init__(self, **kwargs):
super(GestureSettingsForm, self).__init__(**kwargs)
self.infopopup = InformationPopup()
self.rrdetails = RecognizerResultDetails()
self.addsettings = None
self.app = App.get_running_app()
def load_visualizer(self, visualizer):
if self.addsettings is None:
self.addsettings = AddGestureSettings()
self.ids.settings.add_widget(self.addsettings)
self.visualizer = visualizer
analysis = self.ids.analysis
analysis.clear_widgets()
analysis.add_widget(self.rrdetails)
scrollv = self.rrdetails.ids.result_scrollview
resultlist = self.rrdetails.ids.result_list
resultlist.clear_widgets()
r = visualizer._result_obj.results
if not len(r):
lbl = RecognizerResultLabel(text='[b]No match[/b]')
resultlist.add_widget(lbl)
scrollv.scroll_y = 1
return
if PY2:
d = r.iteritems
else:
d = r.items
for one in sorted(d(), key=lambda x: x[1]['score'],
reverse=True):
data = one[1]
lbl = RecognizerResultLabel(
text='Name: [b]' + data['name'] + '[/b]' +
'\n Score: ' + str(data['score']) +
'\n Distance: ' + str(data['dist']))
resultlist.add_widget(lbl)
# Make sure the top is visible
scrollv.scroll_y = 1
class GestureVisualizer(Widget):
selected = BooleanProperty(False)
def __init__(self, gesturecontainer, **kwargs):
super(GestureVisualizer, self).__init__(**kwargs)
self._gesture_container = gesturecontainer
self._trigger_draw = Clock.create_trigger(self._draw_item, 0)
self.bind(pos=self._trigger_draw, size=self._trigger_draw)
self._trigger_draw()
self.register_event_type('on_select')
self.register_event_type('on_deselect')
def on_touch_down(self, touch):
if not self.collide_point(touch.x, touch.y):
return
self.selected = not self.selected
self.dispatch(self.selected and 'on_select' or 'on_deselect')
# FIXME: This seems inefficient, is there a better way??
def _draw_item(self, dt):
g = self._gesture_container
bb = g.bbox
minx, miny, maxx, maxy = bb['minx'], bb['miny'], bb['maxx'], bb['maxy']
width, height = self.size
xpos, ypos = self.pos
if g.height > g.width:
to_self = (height * 0.85) / g.height
else:
to_self = (width * 0.85) / g.width
self.canvas.remove_group('gesture')
cand = g.get_vectors()
col = g.color
for stroke in cand:
out = []
append = out.append
for vec in stroke:
x, y = vec
x = (x - minx) * to_self
w = (maxx - minx) * to_self
append(x + xpos + (width - w) * .85 / 2)
y = (y - miny) * to_self
h = (maxy - miny) * to_self
append(y + ypos + (height - h) * .85 / 2)
with self.canvas:
Color(col[0], col[1], col[2], mode='rgb')
Line(points=out, group='gesture', width=2)
def on_select(self, *l):
pass
def on_deselect(self, *l):
pass
| mit |
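The history manager above delegates all matching to App.get_running_app().recognizer and only consumes its add_gesture()/recognize() API. A minimal standalone sketch of that API follows, assuming the recognizer is kivy.multistroke.Recognizer with the same keyword arguments used in historymanager.py; the stroke data is a made-up one-stroke square, and inside a running Kivy app the Clock drives the matching over subsequent frames before on_complete fires.

from kivy.vector import Vector
from kivy.multistroke import Recognizer

gdb = Recognizer()

# Strokes are lists of Vector lists, matching the get_vectors() output that
# GestureHistoryManager feeds to recognize() and add_gesture() above.
square = [[Vector(0, 0), Vector(0, 10), Vector(10, 10), Vector(10, 0), Vector(0, 0)]]
gdb.add_gesture('square', square, permute=False)

# recognize() returns a progress/result object; bind on_complete to read the
# best match once the frame-scheduled matching has finished.
progress = gdb.recognize(square, max_gpf=10)
progress.bind(on_complete=lambda result, *args: print(result.best))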
aviciimaxwell/odoo | addons/hr_timesheet_invoice/wizard/hr_timesheet_final_invoice_create.py | 337 | 3000 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
#
# Create an final invoice based on selected timesheet lines
#
#
# TODO: check unit of measure !!!
#
class final_invoice_create(osv.osv_memory):
_name = 'hr.timesheet.invoice.create.final'
_description = 'Create invoice from timesheet final'
_columns = {
'date': fields.boolean('Date', help='Display date in the history of works'),
'time': fields.boolean('Time Spent', help='Display time in the history of works'),
'name': fields.boolean('Log of Activity', help='Display detail of work in the invoice line.'),
'price': fields.boolean('Cost', help='Display cost of the item you reinvoice'),
'product': fields.many2one('product.product', 'Product', help='The product that will be used to invoice the remaining amount'),
}
def do_create(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
# hack for fixing small issue (context should not propagate implicitly between actions)
if 'default_type' in context:
del context['default_type']
ids = self.pool.get('account.analytic.line').search(cr, uid, [('invoice_id','=',False),('to_invoice','<>', False), ('account_id', 'in', context['active_ids'])], context=context)
invs = self.pool.get('account.analytic.line').invoice_cost_create(cr, uid, ids, data, context=context)
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
mod_ids = mod_obj.search(cr, uid, [('name', '=', 'action_invoice_tree1')], context=context)[0]
res_id = mod_obj.read(cr, uid, mod_ids, ['res_id'], context=context)['res_id']
act_win = act_obj.read(cr, uid, [res_id], context=context)[0]
act_win['domain'] = [('id','in',invs),('type','=','out_invoice')]
act_win['name'] = _('Invoices')
return act_win
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hmendozap/auto-sklearn | autosklearn/metalearning/metafeatures/plot_metafeatures.py | 1 | 20297 | from __future__ import print_function
import argparse
import cPickle
import itertools
import os
import StringIO
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
try:
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
import sklearn.metrics.pairwise
except:
print("Failed to load TSNE, probably you're using sklearn 0.14.X")
from pyMetaLearn.metalearning.meta_base import MetaBase
import pyMetaLearn.metalearning.create_datasets
import pyMetaLearn.data_repositories.openml.apiconnector
def load_dataset(dataset, dataset_directory):
dataset_dir = os.path.abspath(os.path.join(dataset_directory, dataset))
fh = open(os.path.join(dataset_dir, dataset + ".pkl"))
ds = cPickle.load(fh)
fh.close()
data_frame = ds.convert_arff_structure_to_pandas(ds
.get_unprocessed_files())
class_ = data_frame.keys()[-1]
attributes = data_frame.keys()[0:-1]
X = data_frame[attributes]
Y = data_frame[class_]
return X, Y
def plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeatures_times,
runs, method='pca', seed=1, depth=1, distance='l2'):
"""Project datasets in a 2d space and plot them.
arguments:
* metafeatures_plot_dir: a directory to save the generated plots
    * metafeatures: a pandas DataFrame of metafeatures from the MetaBase
    * metafeatures_times: a pandas DataFrame of metafeature calculation times
    * runs: a dictionary of runs from the MetaBase
    * method: either pca or t-sne
    * seed: only used for t-sne
    * depth: if 1, a one-step look-ahead is performed
    * distance: 'l2', 'l1' or 'runs'; only used for t-sne
"""
if type(metafeatures) != pd.DataFrame:
raise ValueError("Argument metafeatures must be of type pd.Dataframe "
"but is %s" % str(type(metafeatures)))
############################################################################
# Write out the datasets and their size as a TEX table
# TODO put this in an own function
dataset_tex = StringIO.StringIO()
dataset_tex.write('\\begin{tabular}{lrrr}\n')
dataset_tex.write('\\textbf{Dataset name} & '
'\\textbf{\#features} & '
'\\textbf{\#patterns} & '
'\\textbf{\#classes} \\\\\n')
num_features = []
num_instances = []
num_classes = []
for dataset in sorted(metafeatures.index):
dataset_tex.write('%s & %d & %d & %d \\\\\n' % (
dataset.replace('larochelle_etal_2007_', '').replace(
'_', '-'),
metafeatures.loc[dataset]['number_of_features'],
metafeatures.loc[dataset]['number_of_instances'],
metafeatures.loc[dataset]['number_of_classes']))
num_features.append(metafeatures.loc[dataset]['number_of_features'])
num_instances.append(metafeatures.loc[dataset]['number_of_instances'])
num_classes.append(metafeatures.loc[dataset]['number_of_classes'])
dataset_tex.write('Minimum & %.1f & %.1f & %.1f \\\\\n' %
(np.min(num_features), np.min(num_instances), np.min(num_classes)))
dataset_tex.write('Maximum & %.1f & %.1f & %.1f \\\\\n' %
(np.max(num_features), np.max(num_instances), np.max(num_classes)))
dataset_tex.write('Mean & %.1f & %.1f & %.1f \\\\\n' %
(np.mean(num_features), np.mean(num_instances), np.mean(num_classes)))
dataset_tex.write('10\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 10), np.percentile(num_instances, 10),
np.percentile(num_classes, 10)))
dataset_tex.write('90\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 90), np.percentile(num_instances, 90),
np.percentile(num_classes, 90)))
dataset_tex.write('median & %.1f & %.1f & %.1f \\\\\n' % (
np.percentile(num_features, 50), np.percentile(num_instances, 50),
np.percentile(num_classes, 50)))
dataset_tex.write('\\end{tabular}')
dataset_tex.seek(0)
dataset_tex_output = os.path.join(metafeatures_plot_dir, 'datasets.tex')
with open(dataset_tex_output, 'w') as fh:
fh.write(dataset_tex.getvalue())
############################################################################
# Write out a list of metafeatures, each with the min/max/mean
# calculation time and the min/max/mean value
metafeatures_tex = StringIO.StringIO()
metafeatures_tex.write('\\begin{tabular}{lrrrrrr}\n')
metafeatures_tex.write('\\textbf{Metafeature} & '
'\\textbf{Minimum} & '
'\\textbf{Mean} & '
'\\textbf{Maximum} &'
'\\textbf{Minimum time} &'
'\\textbf{Mean time} &'
'\\textbf{Maximum time} '
'\\\\\n')
for mf_name in sorted(metafeatures.columns):
metafeatures_tex.write('%s & %.2f & %.2f & %.2f & %.2f & %.2f & %.2f \\\\\n'
% (mf_name.replace('_', '-'),
metafeatures.loc[:,mf_name].min(),
metafeatures.loc[:,mf_name].mean(),
metafeatures.loc[:,mf_name].max(),
                                  metafeatures_times.loc[:, mf_name].min(),
                                  metafeatures_times.loc[:, mf_name].mean(),
                                  metafeatures_times.loc[:, mf_name].max()))
metafeatures_tex.write('\\end{tabular}')
metafeatures_tex.seek(0)
metafeatures_tex_output = os.path.join(metafeatures_plot_dir, 'metafeatures.tex')
with open(metafeatures_tex_output, 'w') as fh:
fh.write(metafeatures_tex.getvalue())
# Without this scaling the transformation for visualization purposes is
# useless
metafeatures = metafeatures.copy()
X_min = np.nanmin(metafeatures, axis=0)
X_max = np.nanmax(metafeatures, axis=0)
metafeatures = (metafeatures - X_min) / (X_max - X_min)
# PCA
if method == 'pca':
pca = PCA(2)
transformation = pca.fit_transform(metafeatures.values)
elif method == 't-sne':
if distance == 'l2':
distance_matrix = sklearn.metrics.pairwise.pairwise_distances(
metafeatures.values, metric='l2')
elif distance == 'l1':
distance_matrix = sklearn.metrics.pairwise.pairwise_distances(
metafeatures.values, metric='l1')
elif distance == 'runs':
names_to_indices = dict()
for metafeature in metafeatures.index:
idx = len(names_to_indices)
names_to_indices[metafeature] = idx
X, Y = pyMetaLearn.metalearning.create_datasets\
.create_predict_spearman_rank(metafeatures, runs,
'combination')
# Make a metric matrix out of Y
distance_matrix = np.zeros((metafeatures.shape[0],
metafeatures.shape[0]), dtype=np.float64)
for idx in Y.index:
dataset_names = idx.split("_")
d1 = names_to_indices[dataset_names[0]]
d2 = names_to_indices[dataset_names[1]]
distance_matrix[d1][d2] = Y.loc[idx]
distance_matrix[d2][d1] = Y.loc[idx]
else:
raise NotImplementedError()
# For whatever reason, tsne doesn't accept l1 metric
tsne = TSNE(random_state=seed, perplexity=50, verbose=1)
transformation = tsne.fit_transform(distance_matrix)
# Transform the transformation back to range [0, 1] to ease plotting
transformation_min = np.nanmin(transformation, axis=0)
transformation_max = np.nanmax(transformation, axis=0)
transformation = (transformation - transformation_min) / \
(transformation_max - transformation_min)
print(transformation_min, transformation_max)
#for i, dataset in enumerate(directory_content):
# print dataset, meta_feature_array[i]
fig = plt.figure(dpi=600, figsize=(12, 12))
ax = plt.subplot(111)
# The dataset names must be aligned at the borders of the plot in a way
# the arrows don't cross each other. First, define the different slots
# where the labels will be positioned and then figure out the optimal
# order of the labels
slots = []
# 25 datasets on the top y-axis
slots.extend([(-0.1 + 0.05 * i, 1.1) for i in range(25)])
# 24 datasets on the right x-axis
slots.extend([(1.1, 1.05 - 0.05 * i) for i in range(24)])
# 25 datasets on the bottom y-axis
slots.extend([(-0.1 + 0.05 * i, -0.1) for i in range(25)])
# 24 datasets on the left x-axis
slots.extend([(-0.1, 1.05 - 0.05 * i) for i in range(24)])
# Align the labels on the outer axis
labels_top = []
labels_left = []
labels_right = []
labels_bottom = []
for values in zip(metafeatures.index,
transformation[:, 0], transformation[:, 1]):
label, x, y = values
        # Although the plot area goes up to (1.1, 1.1), the range of all the
        # points lies inside [0, 1]
if x >= y and x < 1.0 - y:
labels_bottom.append((x, label))
elif x >= y and x >= 1.0 - y:
labels_right.append((y, label))
elif y > x and x <= 1.0 -y:
labels_left.append((y, label))
else:
labels_top.append((x, label))
# Sort the labels according to their alignment
labels_bottom.sort()
labels_left.sort()
labels_left.reverse()
labels_right.sort()
labels_right.reverse()
labels_top.sort()
# Build an index label -> x, y
points = {}
for values in zip(metafeatures.index,
transformation[:, 0], transformation[:, 1]):
label, x, y = values
points[label] = (x, y)
# Find out the final positions...
positions_top = {}
positions_left = {}
positions_right = {}
positions_bottom = {}
# Find the actual positions
for i, values in enumerate(labels_bottom):
y, label = values
margin = 1.2 / len(labels_bottom)
positions_bottom[label] = (-0.05 + i * margin, -0.1,)
for i, values in enumerate(labels_left):
x, label = values
margin = 1.2 / len(labels_left)
positions_left[label] = (-0.1, 1.1 - i * margin)
for i, values in enumerate(labels_top):
y, label = values
margin = 1.2 / len(labels_top)
positions_top[label] = (-0.05 + i * margin, 1.1)
for i, values in enumerate(labels_right):
y, label = values
margin = 1.2 / len(labels_right)
positions_right[label] = (1.1, 1.05 - i * margin)
# Do greedy resorting if it decreases the number of intersections...
def resort(label_positions, marker_positions, maxdepth=1):
# TODO: are the inputs dicts or lists
# TODO: two-step look-ahead
def intersect(start1, end1, start2, end2):
# Compute if there is an intersection, for the algorithm see
# Computer Graphics by F.S.Hill
# If one vector is just a point, it cannot intersect with a line...
for v in [start1, start2, end1, end2]:
if not np.isfinite(v).all():
return False # Obviously there is no intersection
def perpendicular(d):
return np.array((-d[1], d[0]))
d1 = end1 - start1 # denoted b
d2 = end2 - start2 # denoted d
d2_1 = start2 - start1 # denoted c
d1_perp = perpendicular(d1) # denoted by b_perp
d2_perp = perpendicular(d2) # denoted by d_perp
t = np.dot(d2_1, d2_perp) / np.dot(d1, d2_perp)
u = - np.dot(d2_1, d1_perp) / np.dot(d2, d1_perp)
if 0 <= t <= 1 and 0 <= u <= 1:
return True # There is an intersection
else:
return False # There is no intersection
def number_of_intersections(label_positions, marker_positions):
num = 0
for key1, key2 in itertools.permutations(label_positions, r=2):
s1 = np.array(label_positions[key1])
e1 = np.array(marker_positions[key1])
s2 = np.array(label_positions[key2])
e2 = np.array(marker_positions[key2])
if intersect(s1, e1, s2, e2):
num += 1
return num
# test if swapping two lines would decrease the number of intersections
# TODO: if this was done with a datastructure different than dicts,
# it could be much faster, because there is a lot of redundant
# computing performed in the second iteration
def swap(label_positions, marker_positions, depth=0,
maxdepth=maxdepth, best_found=sys.maxint):
if len(label_positions) <= 1:
return
two_step_look_ahead = False
while True:
improvement = False
for key1, key2 in itertools.combinations(label_positions, r=2):
before = number_of_intersections(label_positions, marker_positions)
# swap:
tmp = label_positions[key1]
label_positions[key1] = label_positions[key2]
label_positions[key2] = tmp
if depth < maxdepth and two_step_look_ahead:
swap(label_positions, marker_positions,
depth=depth+1, best_found=before)
after = number_of_intersections(label_positions, marker_positions)
if best_found > after and before > after:
improvement = True
print(before, after)
print("Depth %d: Swapped %s with %s" %
(depth, key1, key2))
else: # swap back...
tmp = label_positions[key1]
label_positions[key1] = label_positions[key2]
label_positions[key2] = tmp
if after == 0:
break
# If it is not yet sorted perfectly, do another pass with
# two-step lookahead
if before == 0:
print("Sorted perfectly...")
break
print(depth, two_step_look_ahead)
if two_step_look_ahead:
break
if maxdepth == depth:
print("Reached maximum recursion depth...")
break
if not improvement and depth < maxdepth:
print("Still %d errors, trying two-step lookahead" % before)
two_step_look_ahead = True
swap(label_positions, marker_positions, maxdepth=maxdepth)
resort(positions_bottom, points, maxdepth=depth)
resort(positions_left, points, maxdepth=depth)
resort(positions_right, points, maxdepth=depth)
resort(positions_top, points, maxdepth=depth)
# Helper function
def plot(x, y, label_x, label_y, label, ha, va, relpos, rotation=0):
ax.scatter(x, y, marker='o', label=label, s=80, linewidths=0.1,
color='blue', edgecolor='black')
label = label.replace('larochelle_etal_2007_', '')
x = ax.annotate(label, xy=(x, y), xytext=(label_x, label_y),
ha=ha, va=va, rotation=rotation,
bbox=dict(boxstyle='round', fc='gray', alpha=0.5),
arrowprops=dict(arrowstyle='->', color='black',
relpos=relpos))
# Do the plotting
for i, key in enumerate(positions_bottom):
x, y = positions_bottom[key]
plot(points[key][0], points[key][1], x, y,
key, ha='right', va='top', rotation=45, relpos=(1, 1))
for i, key in enumerate(positions_left):
x, y = positions_left[key]
plot(points[key][0], points[key][1], x, y, key,
ha='right', va='top', rotation=45, relpos=(1, 1))
for i, key in enumerate(positions_top):
x, y = positions_top[key]
plot(points[key][0], points[key][1], x, y, key,
ha='left', va='bottom', rotation=45, relpos=(0, 0))
for i, key in enumerate(positions_right):
x, y = positions_right[key]
plot(points[key][0], points[key][1], x, y, key,
ha='left', va='bottom', rotation=45, relpos=(0, 0))
# Resize everything
box = ax.get_position()
remove = 0.05 * box.width
ax.set_position([box.x0 + remove, box.y0 + remove,
box.width - remove*2, box.height - remove*2])
locs_x = ax.get_xticks()
locs_y = ax.get_yticks()
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xlim((-0.1, 1.1))
ax.set_ylim((-0.1, 1.1))
plt.savefig(os.path.join(metafeatures_plot_dir, "pca.png"))
plt.savefig(os.path.join(metafeatures_plot_dir, "pca.pdf"))
plt.clf()
# Relation of features to each other...
#correlations = []
#for mf_1, mf_2 in itertools.combinations(metafeatures.columns, 2):
# x = metafeatures.loc[:, mf_1]
# y = metafeatures.loc[:, mf_2]
# rho, p = scipy.stats.spearmanr(x, y)
# correlations.append((rho, "%s-%s" % (mf_1, mf_2)))
# plt.figure()
# plt.plot(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01))
# plt.plot(x, y, "x")
# plt.xlabel(mf_1)
# plt.ylabel(mf_2)
# plt.xlim((0, 1))
# plt.ylim((0, 1))
# plt.savefig(os.path.join(target_directory, mf_1 + "__" + mf_2 + "
# .png"))
# plt.close()
#correlations.sort()
#for cor in correlations:
#print cor
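# -----------------------------------------------------------------------------
# Hedged illustration (not part of the original script): the scale-then-project
# step performed inside plot_metafeatures(), reduced to its essentials on
# made-up data. Only numpy/pandas/sklearn APIs already imported above are used.
def _example_project_metafeatures(n_datasets=10, n_metafeatures=4):
    """Sketch of min-max scaling followed by a 2d PCA projection to [0, 1]."""
    example = pd.DataFrame(np.random.rand(n_datasets, n_metafeatures))
    X_min = np.nanmin(example, axis=0)
    X_max = np.nanmax(example, axis=0)
    scaled = (example - X_min) / (X_max - X_min)
    transformation = PCA(2).fit_transform(scaled.values)
    # Rescale the 2d embedding back to [0, 1], as done above before plotting.
    t_min = np.nanmin(transformation, axis=0)
    t_max = np.nanmax(transformation, axis=0)
    return (transformation - t_min) / (t_max - t_min)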
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tasks", required=True, type=str)
parser.add_argument("--runs", type=str)
parser.add_argument("experiment_directory", type=str)
parser.add_argument("-m", "--method", default='pca',
choices=['pca', 't-sne'],
help="Dimensionality reduction method")
parser.add_argument("--distance", choices=[None, 'l1', 'l2', 'runs'],
default='l2')
parser.add_argument("-s", "--seed", default=1, type=int)
parser.add_argument("-d", "--depth", default=0, type=int)
parser.add_argument("--subset", default='all', choices=['all', 'pfahringer_2000_experiment1'])
args = parser.parse_args()
with open(args.tasks) as fh:
task_files_list = fh.readlines()
# Load all the experiment run data only if needed
if args.distance == 'runs':
with open(args.runs) as fh:
experiments_file_list = fh.readlines()
else:
experiments_file_list = StringIO.StringIO()
for i in range(len(task_files_list)):
experiments_file_list.write("\n")
experiments_file_list.seek(0)
pyMetaLearn.data_repositories.openml.apiconnector.set_local_directory(
args.experiment_directory)
meta_base = MetaBase(task_files_list, experiments_file_list)
metafeatures = meta_base.get_all_metafeatures_as_pandas(
metafeature_subset=args.subset)
metafeature_times = meta_base.get_all_metafeatures_times_as_pandas(
metafeature_subset=args.subset)
#if args.subset:
# metafeatures = metafeatures.loc[:,subsets[args.subset]]
# metafeature_times = metafeature_times.loc[:,subsets[args.subset]]
runs = meta_base.get_all_runs()
general_plot_directory = os.path.join(args.experiment_directory, "plots")
try:
os.mkdir(general_plot_directory)
except:
pass
metafeatures_plot_dir = os.path.join(general_plot_directory, "metafeatures")
try:
os.mkdir(metafeatures_plot_dir)
except:
pass
plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeature_times,
runs, method=args.method, seed=args.seed,
depth=args.depth, distance=args.distance)
| bsd-3-clause |
rahlk/Experimental-Algorithms | multiProc/src/parGALE.py | 1 | 4233 | """
"""
from __future__ import print_function, division
import os
from demo import *
import subprocess
import sys
sys.path.append(os.path.abspath('../problems/'))
# Get the git root directory
root=repo_dir = subprocess.Popen(['git'
,'rev-parse'
, '--show-toplevel']
, stdout=subprocess.PIPE
).communicate()[0].rstrip()
sys.path.append(root)
from pdb import set_trace
from dtlz2 import DTLZ2
from multiprocessing import Pool
from random import seed as rseed, randint as randi
import numpy as np
from time import time
from tools.quality import measure
def gale0(model=DTLZ2(n_dec=30,n_obj=3), new=[], pop=int(1e4)):
"""
Recursive FASTMAP clustering.
"""
if len(new)==0:
frontier = model.generate(pop)
else:
frontier=new
frontier.extend(model.generate(pop-len(new)))
N = np.shape(frontier)[0]
leaf = []
norm = np.max(frontier, axis=0) - np.min(frontier, axis=0)
def cdom(x, y, better=['less','less','less']):
def loss1(i,x,y):
return (x - y) if better[i] == 'less' else (y - x)
def expLoss(i,x,y,n):
return np.exp(loss1(i,x,y) / n)
def loss(x, y):
n = min(len(x), len(y)) #lengths should be equal
losses = [expLoss(i,xi,yi,n) for i, (xi, yi) in enumerate(zip(x,y))]
return sum(losses)/n
"x dominates y if it losses least"
return loss(x,y) < loss(y,x)
def distant(lst):
R, C = np.shape(lst)
farthest=lambda one,rest: sorted(rest, key=lambda F: aDist(F,one))[-1]
one=lst[randi(0,R-1)]
mid=farthest(one, lst)
two=farthest(mid, lst)
return one, two
def mutate(lst,good,g=0.15):
new=[]
for l in lst:
new.append([a+(b-a)*g for a,b in zip(l,good)])
return new
def aDist(one, two):
return np.sqrt(np.sum((np.array(one)/norm-np.array(two)/norm)**2))
def recurse(dataset):
R, C = np.shape(dataset) # No. of Rows and Col
# Find the two most distance points.
one, two = distant(dataset)
# Project each case on
def proj(test):
a = aDist(one, test)
b = aDist(two, test)
c = aDist(one, two)
return (a**2-b**2+c**2)/(2*c)
if R<np.sqrt(N):
leaf.extend(dataset)
else:
half1 = cdom(model.solve(one), model.solve(two))
if half1:
_ = recurse(sorted(dataset,key=lambda F:proj(F))[:int(R/2)])
else:
_ = recurse(sorted(dataset,key=lambda F:proj(F))[int(R/2):])
recurse(frontier)
a,b=distant(leaf)
(good, bad) = (a,b) if cdom(model.solve(a), model.solve(b)) else (b,a)
new=mutate(leaf,good,g=0.5)
return new
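# Hedged illustration (not part of the original file): the continuous
# domination (cdom) test used inside gale0(), rewritten as a standalone helper
# over two plain objective vectors so the loss arithmetic can be checked in
# isolation. All objectives are assumed to be minimized, as in gale0().
def example_cdom(x, y):
    """Return True if x continuous-dominates y, e.g. example_cdom([0.1, 0.2], [0.3, 0.4]) -> True."""
    n = min(len(x), len(y))
    def mean_exp_loss(a, b):
        # Smaller value means 'a' loses less against 'b'.
        return sum(np.exp((ai - bi) / n) for ai, bi in zip(a, b)) / n
    return mean_exp_loss(x, y) < mean_exp_loss(y, x)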
def gale1(iter=1000,pop=1600,model=DTLZ2(n_dec=30, n_obj=3)):
n_proc = int(1000.00/iter)
new = gale0(model,new=[],pop=int(pop/n_proc))
while iter:
iter-=1
new=gale0(model, new, pop=int(pop/n_proc))
return new
def gale2(pop):
model = DTLZ2(n_dec=30,n_obj=3)
# set_trace()
return gale0(new=model.generate(pop))
def GALE2(n_proc=10,frontSize=100,iters=1000,model=DTLZ2(n_dec=30, n_obj=3)):
"""
WHY do threads take more time than single processors?? FIX THIS!!!
:param n_proc:
:param frontSize:
:param iters:
:param model:
:return:
"""
t = time()
collect=[]
final = []
popSize = [int(frontSize/n_proc)]*n_proc
# initpop = [(model, model.generate(1000), 1000) for _ in xrange(n_proc)]
p=Pool(processes=n_proc)
collect.extend(p.map(gale2, popSize))
for cc in collect: final.extend(cc)
# set_trace()
ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
print('Time Taken: ', time()-t)
return ret
def GALE(n_proc=10,frontSize=100,iters=100):
t = time()
collect=[]
final = []
per = [iters/n_proc]*n_proc
popSize = [frontSize/n_proc]*n_proc
p=Pool(processes=n_proc)
collect.extend(p.map(gale1, per))
for cc in collect: final.extend(cc)
ret = gale0(model=DTLZ2(n_dec=30, n_obj=3),new=final,pop=len(final))
print('Time Taken: ', time()-t)
# true = DTLZ2(n_dec=30, n_obj=3).get_pareto()
m = measure(model=DTLZ2(n_dec=30, n_obj=3))
conv = m.convergence(ret)
print("Convergence:",conv)
# set_trace()
return
if __name__=="__main__":
eval(cmd()) | mit |
campbe13/openhatch | vendor/packages/gdata/src/gdata/youtube/client.py | 96 | 9463 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a client to communicate with the YouTube servers.
A quick and dirty port of the YouTube GDATA 1.0 Python client
libraries to version 2.0 of the GDATA library.
"""
# __author__ = '[email protected] (John Skidgel)'
import logging
import gdata.client
import gdata.youtube.data
import atom.data
import atom.http_core
# Constants
# -----------------------------------------------------------------------------
YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime',
'flv')
YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month',
'all_time')
YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating',
'relevance')
YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured',
'top_rated', 'most_viewed','watch_on_mobile')
YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
YOUTUBE_SERVER = 'gdata.youtube.com/feeds/api'
YOUTUBE_SERVICE = 'youtube'
YOUTUBE_VIDEO_FEED_URI = 'http://%s/videos' % YOUTUBE_SERVER
YOUTUBE_USER_FEED_URI = 'http://%s/users/' % YOUTUBE_SERVER
# Takes a youtube video ID.
YOUTUBE_CAPTION_FEED_URI = 'http://gdata.youtube.com/feeds/api/videos/%s/captions'
# Takes a youtube video ID and a caption track ID.
YOUTUBE_CAPTION_URI = 'http://gdata.youtube.com/feeds/api/videos/%s/captiondata/%s'
YOUTUBE_CAPTION_MIME_TYPE = 'application/vnd.youtube.timedtext; charset=UTF-8'
# Classes
# -----------------------------------------------------------------------------
class Error(Exception):
"""Base class for errors within the YouTube service."""
pass
class RequestError(Error):
"""Error class that is thrown in response to an invalid HTTP Request."""
pass
class YouTubeError(Error):
"""YouTube service specific error class."""
pass
class YouTubeClient(gdata.client.GDClient):
"""Client for the YouTube service.
Performs a partial list of Google Data YouTube API functions, such as
retrieving the videos feed for a user and the feed for a video.
YouTube Service requires authentication for any write, update or delete
actions.
"""
api_version = '2'
auth_service = YOUTUBE_SERVICE
auth_scopes = ['https://%s' % YOUTUBE_SERVER]
ssl = True
def get_videos(self, uri=YOUTUBE_VIDEO_FEED_URI, auth_token=None,
desired_class=gdata.youtube.data.VideoFeed,
**kwargs):
"""Retrieves a YouTube video feed.
Args:
uri: A string representing the URI of the feed that is to be retrieved.
Returns:
A YouTubeVideoFeed if successfully retrieved.
"""
return self.get_feed(uri, auth_token=auth_token,
desired_class=desired_class,
**kwargs)
GetVideos = get_videos
def get_user_feed(self, uri=None, username=None):
"""Retrieve a YouTubeVideoFeed of user uploaded videos.
    Either a uri or a username must be provided. This will retrieve the list
    of videos uploaded by the specified user. The uri will be of the format
    "http://gdata.youtube.com/feeds/api/users/{username}/uploads".
Args:
uri: An optional string representing the URI of the user feed that is
to be retrieved.
username: An optional string representing the username.
Returns:
A YouTubeUserFeed if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a username to the
GetYouTubeUserFeed() method.
"""
if uri is None and username is None:
raise YouTubeError('You must provide at least a uri or a username '
'to the GetYouTubeUserFeed() method')
elif username and not uri:
uri = '%s%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads')
return self.get_feed(uri, desired_class=gdata.youtube.data.VideoFeed)
GetUserFeed = get_user_feed
def get_video_entry(self, uri=None, video_id=None,
auth_token=None, **kwargs):
"""Retrieve a YouTubeVideoEntry.
Either a uri or a video_id must be provided.
Args:
uri: An optional string representing the URI of the entry that is to
be retrieved.
video_id: An optional string representing the ID of the video.
Returns:
      A YouTubeVideoEntry if successfully retrieved.
Raises:
YouTubeError: You must provide at least a uri or a video_id to the
GetYouTubeVideoEntry() method.
"""
if uri is None and video_id is None:
raise YouTubeError('You must provide at least a uri or a video_id '
'to the get_youtube_video_entry() method')
elif video_id and uri is None:
uri = '%s/%s' % (YOUTUBE_VIDEO_FEED_URI, video_id)
return self.get_feed(uri,
desired_class=gdata.youtube.data.VideoEntry,
auth_token=auth_token,
**kwargs)
GetVideoEntry = get_video_entry
def get_caption_feed(self, uri):
"""Retrieve a Caption feed of tracks.
Args:
uri: A string representing the caption feed's URI to be retrieved.
Returns:
A YouTube CaptionFeed if successfully retrieved.
"""
return self.get_feed(uri, desired_class=gdata.youtube.data.CaptionFeed)
GetCaptionFeed = get_caption_feed
def get_caption_track(self, track_url, client_id,
developer_key, auth_token=None, **kwargs):
http_request = atom.http_core.HttpRequest(uri = track_url, method = 'GET')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Authorization': authsub,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
return self.request(http_request=http_request, **kwargs)
GetCaptionTrack = get_caption_track
def create_track(self, video_id, title, language, body, client_id,
developer_key, auth_token=None, title_type='text', **kwargs):
"""Creates a closed-caption track and adds to an existing YouTube video.
"""
new_entry = gdata.youtube.data.TrackEntry(
content = gdata.youtube.data.TrackContent(text = body, lang = language))
uri = YOUTUBE_CAPTION_FEED_URI % video_id
http_request = atom.http_core.HttpRequest(uri = uri, method = 'POST')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Content-Type': YOUTUBE_CAPTION_MIME_TYPE,
'Content-Language': language,
'Slug': title,
'Authorization': authsub,
'GData-Version': self.api_version,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
http_request.add_body_part(body, http_request.headers['Content-Type'])
return self.request(http_request = http_request,
desired_class = new_entry.__class__, **kwargs)
CreateTrack = create_track
def delete_track(self, video_id, track, client_id, developer_key,
auth_token=None, **kwargs):
"""Deletes a track."""
if isinstance(track, gdata.youtube.data.TrackEntry):
track_id_text_node = track.get_id().split(':')
track_id = track_id_text_node[3]
else:
track_id = track
uri = YOUTUBE_CAPTION_URI % (video_id, track_id)
http_request = atom.http_core.HttpRequest(uri = uri, method = 'DELETE')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Authorization': authsub,
'GData-Version': self.api_version,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
return self.request(http_request=http_request, **kwargs)
DeleteTrack = delete_track
def update_track(self, video_id, track, body, client_id, developer_key,
auth_token=None, **kwargs):
"""Updates a closed-caption track for an existing YouTube video.
"""
track_id_text_node = track.get_id().split(':')
track_id = track_id_text_node[3]
uri = YOUTUBE_CAPTION_URI % (video_id, track_id)
http_request = atom.http_core.HttpRequest(uri = uri, method = 'PUT')
dev_key = 'key=' + developer_key
authsub = 'AuthSub token="' + str(auth_token) + '"'
http_request.headers = {
'Content-Type': YOUTUBE_CAPTION_MIME_TYPE,
'Authorization': authsub,
'GData-Version': self.api_version,
'X-GData-Client': client_id,
'X-GData-Key': dev_key
}
http_request.add_body_part(body, http_request.headers['Content-Type'])
return self.request(http_request = http_request,
desired_class = track.__class__, **kwargs)
UpdateTrack = update_track
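# Hedged usage sketch (not part of the library): a typical read-only call
# against the client defined above. The username is a placeholder and a live
# network connection is required for a real request.
def _example_list_uploads(username='someuser'):
  """Return the titles of a user's uploaded videos."""
  client = YouTubeClient()
  feed = client.get_user_feed(username=username)
  return [entry.title.text for entry in feed.entry]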
| agpl-3.0 |
wangjun/odoo | addons/sales_team/sales_team.py | 180 | 6131 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
from openerp import tools
from openerp.osv import fields, osv
class crm_case_section(osv.osv):
_name = "crm.case.section"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Teams"
_order = "complete_name"
_period_number = 5
def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
                                                    'tooltip': (str) bar_column_tooltip,
}
]
"""
month_begin = date.today().replace(day=1)
section_result = [{
'value': 0,
'tooltip': tools.ustr((month_begin + relativedelta.relativedelta(months=-i)).strftime('%B %Y')),
} for i in range(self._period_number - 1, -1, -1)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if obj.fields_get(cr, uid, groupby_field)[groupby_field]['type'] == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern)
month_delta = relativedelta.relativedelta(month_begin, group_begin_date)
section_result[self._period_number - (month_delta.months + 1)] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field, 0)}
return section_result
_columns = {
'name': fields.char('Sales Team', size=64, required=True, translate=True),
'complete_name': fields.function(get_full_name, type='char', size=256, readonly=True, store=True),
'code': fields.char('Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to "\
"false, it will allow you to hide the sales team without removing it."),
'change_responsible': fields.boolean('Reassign Escalated', help="When escalating to this team override the salesman with the team leader."),
'user_id': fields.many2one('res.users', 'Team Leader'),
'member_ids': fields.many2many('res.users', 'sale_member_rel', 'section_id', 'member_id', 'Team Members'),
'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by Odoo about cases in this sales team"),
'parent_id': fields.many2one('crm.case.section', 'Parent Team'),
'child_ids': fields.one2many('crm.case.section', 'parent_id', 'Child Teams'),
'note': fields.text('Description'),
'working_hours': fields.float('Working Hours', digits=(16, 2)),
'color': fields.integer('Color Index'),
}
_defaults = {
'active': 1,
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
]
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive Sales team.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
"""Overrides orm name_get method"""
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
reads = self.read(cr, uid, ids, ['name', 'parent_id'], context)
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1] + ' / ' + name
res.append((record['id'], name))
return res
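# Hedged illustration (not part of the addon): the month-bucketing arithmetic
# used by __get_bar_values() above, isolated from the ORM so the sparkline
# slot index can be checked by hand. The five-slot window mirrors
# _period_number on crm_case_section.
def _example_month_slot(group_date_str, period_number=5):
    """Return the sparkline slot index for a group beginning at group_date_str."""
    month_begin = date.today().replace(day=1)
    group_begin = datetime.strptime(group_date_str, tools.DEFAULT_SERVER_DATE_FORMAT)
    month_delta = relativedelta.relativedelta(month_begin, group_begin)
    return period_number - (month_delta.months + 1)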
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'default_section_id': fields.many2one('crm.case.section', 'Default Sales Team'),
}
def __init__(self, pool, cr):
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.extend(['default_section_id'])
return init_res
| agpl-3.0 |
StackPointCloud/libcloud | contrib/generate_provider_feature_matrix_table.py | 6 | 18754 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import inspect
from collections import OrderedDict
from os.path import join as pjoin
this_dir = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, os.path.join(this_dir, '../'))
from libcloud.compute.base import NodeDriver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.providers import DRIVERS as COMPUTE_DRIVERS
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.loadbalancer.base import Driver as LBDriver
from libcloud.loadbalancer.providers import get_driver as get_lb_driver
from libcloud.loadbalancer.providers import DRIVERS as LB_DRIVERS
from libcloud.loadbalancer.types import Provider as LBProvider
from libcloud.storage.base import StorageDriver
from libcloud.storage.providers import get_driver as get_storage_driver
from libcloud.storage.providers import DRIVERS as STORAGE_DRIVERS
from libcloud.storage.types import Provider as StorageProvider
from libcloud.dns.base import DNSDriver
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.providers import DRIVERS as DNS_DRIVERS
from libcloud.dns.types import Provider as DNSProvider
from libcloud.container.base import ContainerDriver
from libcloud.container.providers import get_driver as get_container_driver
from libcloud.container.providers import DRIVERS as CONTAINER_DRIVERS
from libcloud.container.types import Provider as ContainerProvider
from libcloud.backup.base import BackupDriver
from libcloud.backup.providers import get_driver as get_backup_driver
from libcloud.backup.providers import DRIVERS as BACKUP_DRIVERS
from libcloud.backup.types import Provider as BackupProvider
HEADER = ('.. NOTE: This file has been generated automatically using '
'generate_provider_feature_matrix_table.py script, don\'t manually '
'edit it')
BASE_API_METHODS = {
'compute_main': ['list_nodes', 'create_node', 'reboot_node',
'destroy_node', 'list_images', 'list_sizes',
'deploy_node'],
'compute_image_management': ['list_images', 'get_image',
'create_image', 'delete_image', 'copy_image'],
'compute_block_storage': ['list_volumes', 'create_volume',
'destroy_volume',
'attach_volume', 'detach_volume',
'list_volume_snapshots',
'create_volume_snapshot'],
'compute_key_pair_management': ['list_key_pairs', 'get_key_pair',
'create_key_pair',
'import_key_pair_from_string',
'import_key_pair_from_file',
'delete_key_pair'],
'loadbalancer': ['create_balancer', 'list_balancers',
'balancer_list_members', 'balancer_attach_member',
'balancer_detach_member', 'balancer_attach_compute_node'],
'storage_main': ['list_containers', 'list_container_objects',
'iterate_containers', 'iterate_container_objects',
'create_container', 'delete_container', 'upload_object',
'upload_object_via_stream', 'download_object',
'download_object_as_stream', 'delete_object'],
'storage_cdn': ['enable_container_cdn', 'enable_object_cdn',
'get_container_cdn_url', 'get_object_cdn_url'],
'dns': ['list_zones', 'list_records', 'iterate_zones', 'iterate_records',
'create_zone', 'update_zone', 'create_record', 'update_record',
'delete_zone', 'delete_record'],
'container': ['install_image', 'list_images', 'deploy_container',
'get_container', 'start_container', 'stop_container',
'restart_container', 'destroy_container', 'list_containers',
'list_locations', 'create_cluster', 'destroy_cluster',
'list_clusters'],
'backup': ['get_supported_target_types', 'list_targets', 'create_target', 'create_target_from_node',
'create_target_from_storage_container', 'update_target', 'delete_target', 'list_recovery_points',
'recover_target', 'recover_target_out_of_place', 'list_target_jobs', 'create_target_job',
'resume_target_job', 'suspend_target_job', 'cancel_target_job']
}
FRIENDLY_METHODS_NAMES = {
'compute_main': {
'list_nodes': 'list nodes',
'create_node': 'create node',
'reboot_node': 'reboot node',
'destroy_node': 'destroy node',
'list_images': 'list images',
'list_sizes': 'list sizes',
'deploy_node': 'deploy node'
},
'compute_image_management': {
'list_images': 'list images',
'get_image': 'get image',
'create_image': 'create image',
'copy_image': 'copy image',
'delete_image': 'delete image'
},
'compute_block_storage': {
'list_volumes': 'list volumes',
'create_volume': 'create volume',
'destroy_volume': 'destroy volume',
'attach_volume': 'attach volume',
'detach_volume': 'detach volume',
'list_volume_snapshots': 'list snapshots',
'create_volume_snapshot': 'create snapshot'
},
'compute_key_pair_management': {
'list_key_pairs': 'list key pairs',
'get_key_pair': 'get key pair',
'create_key_pair': 'create key pair',
'import_key_pair_from_string': 'import public key from string',
'import_key_pair_from_file': 'import public key from file',
'delete_key_pair': 'delete key pair'
},
'loadbalancer': {
'create_balancer': 'create balancer',
'list_balancers': 'list balancers',
'balancer_list_members': 'list members',
'balancer_attach_member': 'attach member',
'balancer_detach_member': 'detach member',
'balancer_attach_compute_node': 'attach compute node'
},
'storage_main': {
'list_containers': 'list containers',
'list_container_objects': 'list objects',
'create_container': 'create container',
'delete_container': 'delete container',
'upload_object': 'upload object',
'upload_object_via_stream': 'streaming object upload',
'download_object': 'download object',
'download_object_as_stream': 'streaming object download',
'delete_object': 'delete object'
},
'storage_cdn': {
'enable_container_cdn': 'enable container cdn',
'enable_object_cdn': 'enable object cdn',
'get_container_cdn_url': 'get container cdn URL',
'get_object_cdn_url': 'get object cdn URL',
},
'dns': {
'list_zones': 'list zones',
'list_records': 'list records',
'create_zone': 'create zone',
'update_zone': 'update zone',
'create_record': 'create record',
'update_record': 'update record',
'delete_zone': 'delete zone',
'delete_record': 'delete record'
},
'container': {
'install_image': 'install image',
'list_images': 'list images',
'deploy_container': 'deploy container',
'get_container': 'get container',
'list_containers': 'list containers',
'start_container': 'start container',
'stop_container': 'stop container',
'restart_container': 'restart container',
'destroy_container': 'destroy container',
'list_locations': 'list locations',
'create_cluster': 'create cluster',
'destroy_cluster': 'destroy cluster',
'list_clusters': 'list clusters'
},
'backup': {
'get_supported_target_types': 'get supported target types',
'list_targets': 'list targets',
'create_target': 'create target',
'create_target_from_node': 'create target from node',
'create_target_from_storage_container': 'create target from storage container',
'update_target': 'update target',
'delete_target': 'delete target',
'list_recovery_points': 'list recovery points',
'recover_target': 'recover target',
'recover_target_out_of_place': 'recover target out of place',
'list_target_jobs': 'list target jobs',
'create_target_job': 'create target job',
'resume_target_job': 'resume target job',
'suspend_target_job': 'suspend target job',
'cancel_target_job': 'cancel target job'
}
}
IGNORED_PROVIDERS = [
'dummy',
# Deprecated constants
'cloudsigma_us',
'cloudfiles_swift'
]
def get_provider_api_names(Provider):
names = [key for key, value in Provider.__dict__.items() if
not key.startswith('__')]
return names
def generate_providers_table(api):
result = {}
if api in ['compute_main', 'compute_image_management',
'compute_block_storage', 'compute_key_pair_management']:
driver = NodeDriver
drivers = COMPUTE_DRIVERS
provider = ComputeProvider
get_driver_method = get_compute_driver
elif api == 'loadbalancer':
driver = LBDriver
drivers = LB_DRIVERS
provider = LBProvider
get_driver_method = get_lb_driver
elif api in ['storage_main', 'storage_cdn']:
driver = StorageDriver
drivers = STORAGE_DRIVERS
provider = StorageProvider
get_driver_method = get_storage_driver
elif api == 'dns':
driver = DNSDriver
drivers = DNS_DRIVERS
provider = DNSProvider
get_driver_method = get_dns_driver
elif api == 'container':
driver = ContainerDriver
drivers = CONTAINER_DRIVERS
provider = ContainerProvider
get_driver_method = get_container_driver
elif api == 'backup':
driver = BackupDriver
drivers = BACKUP_DRIVERS
provider = BackupProvider
get_driver_method = get_backup_driver
else:
raise Exception('Invalid api: %s' % (api))
names = get_provider_api_names(provider)
result = OrderedDict()
for name in names:
enum = getattr(provider, name)
try:
cls = get_driver_method(enum)
except Exception as e:
# Deprecated providers throw an exception
print('Ignoring deprecated constant "%s": %s' % (enum, str(e)))
continue
# Hack for providers which expose multiple classes and support multiple
# API versions
# TODO: Make entry per version
if name.lower() == 'cloudsigma':
from libcloud.compute.drivers.cloudsigma import \
CloudSigma_2_0_NodeDriver
cls = CloudSigma_2_0_NodeDriver
elif name.lower() == 'opennebula':
from libcloud.compute.drivers.opennebula import \
OpenNebula_3_8_NodeDriver
cls = OpenNebula_3_8_NodeDriver
elif name.lower() == 'digital_ocean' and api.startswith('compute'):
from libcloud.compute.drivers.digitalocean import \
DigitalOcean_v2_NodeDriver
cls = DigitalOcean_v2_NodeDriver
if name.lower() in IGNORED_PROVIDERS:
continue
driver_methods = dict(inspect.getmembers(cls,
predicate=inspect.isfunction))
base_methods = dict(inspect.getmembers(driver,
predicate=inspect.isfunction))
base_api_methods = BASE_API_METHODS[api]
result[name] = {'name': cls.name, 'website': cls.website,
'constant': name, 'module': drivers[enum][0],
'class': drivers[enum][1],
'cls': cls,
'methods': {}}
for method_name in base_api_methods:
base_method = base_methods[method_name]
driver_method = driver_methods[method_name]
if method_name == 'deploy_node':
features = getattr(cls, 'features', {}).get('create_node', [])
is_implemented = len(features) >= 1
else:
is_implemented = (id(driver_method) !=
id(base_method))
result[name]['methods'][method_name] = is_implemented
return result
def generate_rst_table(data):
cols = len(data[0])
col_len = [max(len(r[i]) for r in data) for i in range(cols)]
formatter = ' '.join('{:<%d}' % c for c in col_len)
header = formatter.format(*['=' * c for c in col_len])
rows = [formatter.format(*row) for row in data]
result = header + '\n' + rows[0] + '\n' + header + '\n' +\
'\n'.join(rows[1:]) + '\n' + header
return result
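# Hedged illustration (not part of the original script): what
# generate_rst_table() produces for a tiny hand-made matrix, to make the
# column-width logic above easier to follow. The provider names are fake.
def _example_rst_table():
    rows = [
        ['Provider', 'create_node', 'list_nodes'],
        ['DummyA', 'yes', 'yes'],
        ['DummyB', 'no', 'yes'],
    ]
    # Each column is padded to its widest cell and framed with '=' rules.
    return generate_rst_table(rows)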
def generate_supported_methods_table(api, provider_matrix):
base_api_methods = BASE_API_METHODS[api]
data = []
header = [FRIENDLY_METHODS_NAMES[api][method_name] for method_name in
base_api_methods if not method_name.startswith('iterate_')]
data.append(['Provider'] + header)
for provider, values in sorted(provider_matrix.items()):
provider_name = '`%s`_' % (values['name'])
row = [provider_name]
# TODO: Make it nicer
# list_* methods don't need to be implemented if iterate_* methods are
# implemented
if api == 'storage_main':
if values['methods']['iterate_containers']:
values['methods']['list_containers'] = True
if values['methods']['iterate_container_objects']:
values['methods']['list_container_objects'] = True
elif api == 'dns':
            # list_zones and list_records don't need to be implemented if
            # iterate_zones and iterate_records are implemented
if values['methods']['iterate_zones']:
values['methods']['list_zones'] = True
if values['methods']['iterate_records']:
values['methods']['list_records'] = True
for method in base_api_methods:
# TODO: ghetto
if method.startswith('iterate_'):
continue
supported = values['methods'][method]
if supported:
row.append('yes')
else:
row.append('no')
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_supported_providers_table(api, provider_matrix):
data = []
header = ['Provider', 'Documentation', 'Provider Constant',
'Supported Regions', 'Module', 'Class Name']
data.append(header)
for provider, values in sorted(provider_matrix.items()):
name_str = '`%s`_' % (values['name'])
module_str = ':mod:`%s`' % (values['module'])
class_str = ':class:`%s`' % (values['class'])
params = {'api': api, 'provider': provider.lower()}
driver_docs_path = pjoin(this_dir,
'../docs/%(api)s/drivers/%(provider)s.rst'
% params)
if os.path.exists(driver_docs_path):
docs_link = ':doc:`Click </%(api)s/drivers/%(provider)s>`' % params
else:
docs_link = ''
cls = values['cls']
supported_regions = cls.list_regions() if hasattr(cls, 'list_regions') \
else None
if supported_regions:
# Sort the regions to achieve stable output
supported_regions = sorted(supported_regions)
supported_regions = ', '.join(supported_regions)
else:
supported_regions = 'single region driver'
row = [name_str, docs_link, values['constant'], supported_regions,
module_str, class_str]
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_tables():
apis = BASE_API_METHODS.keys()
for api in apis:
result = generate_providers_table(api)
docs_dir = api
if api.startswith('compute'):
docs_dir = 'compute'
elif api.startswith('storage'):
docs_dir = 'storage'
supported_providers = generate_supported_providers_table(docs_dir,
result)
supported_methods = generate_supported_methods_table(api, result)
current_path = os.path.dirname(__file__)
target_dir = os.path.abspath(pjoin(current_path,
'../docs/%s/' % (docs_dir)))
file_name_1 = '_supported_providers.rst'
file_name_2 = '_supported_methods.rst'
if api == 'compute_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'compute_image_management':
file_name_2 = '_supported_methods_image_management.rst'
elif api == 'compute_block_storage':
file_name_2 = '_supported_methods_block_storage.rst'
elif api == 'compute_key_pair_management':
file_name_2 = '_supported_methods_key_pair_management.rst'
elif api == 'storage_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'storage_cdn':
file_name_2 = '_supported_methods_cdn.rst'
supported_providers_path = pjoin(target_dir, file_name_1)
supported_methods_path = pjoin(target_dir, file_name_2)
with open(supported_providers_path, 'w') as fp:
fp.write(HEADER + '\n\n')
fp.write(supported_providers)
with open(supported_methods_path, 'w') as fp:
fp.write(HEADER + '\n\n')
fp.write(supported_methods)
generate_tables()
| apache-2.0 |
odoo-brazil/l10n-brazil-wip | l10n_br_account_product/__manifest__.py | 3 | 2375 | # -*- coding: utf-8 -*-
# Copyright (C) 2013 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localization Account Product',
'summary': "Brazilian Localization Account Product",
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '8.0.3.0.0',
'depends': [
'l10n_br_data_account',
'account_product_fiscal_classification',
],
'data': [
'l10n_br_account_product_sequence.xml',
'account_invoice_workflow.xml',
'data/l10n_br_account_product.cfop.csv',
'data/l10n_br_account.fiscal.document.csv',
'data/l10n_br_account_data.xml',
'data/l10n_br_account_product_data.xml',
'data/l10n_br_tax.icms_partition.csv',
'data/ir_cron.xml',
'views/l10n_br_account_product_view.xml',
'views/l10n_br_account_view.xml',
'views/l10n_br_account_product_view.xml',
'views/account_view.xml',
'views/account_invoice_view.xml',
'wizard/l10n_br_account_invoice_costs_ratio_view.xml',
'views/nfe/account_invoice_nfe_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/account_product_fiscal_classification_view.xml',
'views/product_view.xml',
'views/res_country_view.xml',
'wizard/l10n_br_account_nfe_export_invoice_view.xml',
'wizard/l10n_br_account_nfe_export_view.xml',
'wizard/l10n_br_account_document_status_sefaz_view.xml',
'wizard/account_invoice_refund_view.xml',
'security/l10n_br_account_product_security.xml',
'security/ir.model.access.csv',
'report/account_invoice_report_view.xml',
],
'demo': [
'demo/account_tax_code_demo.xml',
'demo/account_tax_demo.xml',
'demo/base_demo.xml',
'demo/product_demo.xml',
'demo/l10n_br_account_product_demo.xml',
'demo/account_fiscal_position_rule_demo.xml',
'demo/product_taxes.yml',
],
'test': [
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_invoice_refund.yml',
'test/nfe_export.yml',
],
'installable': False,
'auto_install': False,
}
| agpl-3.0 |
msingh172/youtube-dl | youtube_dl/extractor/screencastomatic.py | 149 | 1713 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
js_to_json,
)
class ScreencastOMaticIE(InfoExtractor):
_VALID_URL = r'https?://screencast-o-matic\.com/watch/(?P<id>[0-9a-zA-Z]+)'
_TEST = {
'url': 'http://screencast-o-matic.com/watch/c2lD3BeOPl',
'md5': '483583cb80d92588f15ccbedd90f0c18',
'info_dict': {
'id': 'c2lD3BeOPl',
'ext': 'mp4',
'title': 'Welcome to 3-4 Philosophy @ DECV!',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'as the title says! also: some general info re 1) VCE philosophy and 2) distance learning.',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
setup_js = self._search_regex(
r"(?s)jwplayer\('mp4Player'\).setup\((\{.*?\})\);",
webpage, 'setup code')
data = self._parse_json(setup_js, video_id, transform_source=js_to_json)
try:
video_data = next(
m for m in data['modes'] if m.get('type') == 'html5')
except StopIteration:
raise ExtractorError('Could not find any video entries!')
video_url = compat_urlparse.urljoin(url, video_data['config']['file'])
thumbnail = data.get('image')
return {
'id': video_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'url': video_url,
'ext': 'mp4',
'thumbnail': thumbnail,
}
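# Hedged usage sketch (not part of youtube-dl): extractors such as the one
# above are normally driven through YoutubeDL rather than instantiated
# directly. The URL is the one from _TEST; a real extraction needs network
# access, so this is illustration only.
def _example_extract(url='http://screencast-o-matic.com/watch/c2lD3BeOPl'):
    from youtube_dl import YoutubeDL
    with YoutubeDL({'quiet': True}) as ydl:
        # Returns the info dict produced by _real_extract() above.
        return ydl.extract_info(url, download=False)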
| unlicense |
domeger/SplunkTAforPuppetEnterprise | bin/splunktaforpuppetenterprise/solnlib/packages/splunklib/searchcommands/validators.py | 16 | 11478 | # coding=utf-8
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from json.encoder import encode_basestring_ascii as json_encode_string
from collections import namedtuple
from cStringIO import StringIO
from io import open
import csv
import os
import re
class Validator(object):
""" Base class for validators that check and format search command options.
You must inherit from this class and override :code:`Validator.__call__` and
:code:`Validator.format`. :code:`Validator.__call__` should convert the
value it receives as argument and then return it or raise a
:code:`ValueError`, if the value will not convert.
:code:`Validator.format` should return a human readable version of the value
it receives as argument the same way :code:`str` does.
"""
def __call__(self, value):
raise NotImplementedError()
def format(self, value):
raise NotImplementedError()
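# Hedged illustration (not part of the library): a minimal Validator subclass
# following the contract described above: __call__ converts the raw option
# value (or raises ValueError) and format renders it back to text. The option
# semantics are invented for the example.
class _ExampleLowercase(Validator):
    """ Accepts any value and normalizes it to a lower-case unicode string.
    """
    def __call__(self, value):
        return None if value is None else unicode(value).lower()
    def format(self, value):
        return value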
class Boolean(Validator):
""" Validates Boolean option values.
"""
truth_values = {
'1': True, '0': False,
't': True, 'f': False,
'true': True, 'false': False,
'y': True, 'n': False,
'yes': True, 'no': False
}
def __call__(self, value):
if not (value is None or isinstance(value, bool)):
value = unicode(value).lower()
if value not in Boolean.truth_values:
raise ValueError('Unrecognized truth value: {0}'.format(value))
value = Boolean.truth_values[value]
return value
def format(self, value):
return None if value is None else 't' if value else 'f'
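# Hedged usage sketch (not from the original source): Boolean maps the truth
# strings above to bool and formats back to Splunk-style 't'/'f'. The helper
# name _boolean_example is hypothetical.
def _boolean_example():
    validate = Boolean()
    assert validate('Yes') is True       # lookup is lower-cased first
    assert validate('0') is False
    assert validate(None) is None        # None passes through unchanged
    assert validate.format(True) == 't'
    return validate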
class Code(Validator):
""" Validates code option values.
This validator compiles an option value into a Python code object that can be executed by :func:`exec` or evaluated
by :func:`eval`. The value returned is a :func:`namedtuple` with two members: object, the result of compilation, and
source, the original option value.
"""
def __init__(self, mode='eval'):
"""
:param mode: Specifies what kind of code must be compiled; it can be :const:`'exec'`, if source consists of a
sequence of statements, :const:`'eval'`, if it consists of a single expression, or :const:`'single'` if it
consists of a single interactive statement. In the latter case, expression statements that evaluate to
something other than :const:`None` will be printed.
:type mode: unicode or bytes
"""
self._mode = mode
def __call__(self, value):
if value is None:
return None
try:
return Code.object(compile(value, 'string', self._mode), unicode(value))
except (SyntaxError, TypeError) as error:
raise ValueError(error.message)
def format(self, value):
return None if value is None else value.source
object = namedtuple(b'Code', (b'object', 'source'))
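# Hedged usage sketch (not from the original source): Code returns a namedtuple
# whose .object can be handed to eval() and whose .source keeps the original
# option text. The helper name _code_example is hypothetical.
def _code_example():
    validate = Code(mode='eval')
    code = validate('1 + 2')
    assert eval(code.object) == 3
    assert validate.format(code) == '1 + 2'   # format() returns the source text
    return code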
class Fieldname(Validator):
""" Validates field name option values.
"""
pattern = re.compile(r'''[_.a-zA-Z-][_.a-zA-Z0-9-]*$''')
def __call__(self, value):
if value is not None:
value = unicode(value)
if Fieldname.pattern.match(value) is None:
raise ValueError('Illegal characters in fieldname: {}'.format(value))
return value
def format(self, value):
return value
class File(Validator):
""" Validates file option values.
"""
def __init__(self, mode='rt', buffering=None, directory=None):
self.mode = mode
self.buffering = buffering
self.directory = File._var_run_splunk if directory is None else directory
def __call__(self, value):
if value is None:
return value
path = unicode(value)
if not os.path.isabs(path):
path = os.path.join(self.directory, path)
try:
value = open(path, self.mode) if self.buffering is None else open(path, self.mode, self.buffering)
except IOError as error:
raise ValueError('Cannot open {0} with mode={1} and buffering={2}: {3}'.format(
value, self.mode, self.buffering, error))
return value
def format(self, value):
return None if value is None else value.name
_var_run_splunk = os.path.join(
os.environ['SPLUNK_HOME'] if 'SPLUNK_HOME' in os.environ else os.getcwdu(), 'var', 'run', 'splunk')
class Integer(Validator):
""" Validates integer option values.
"""
def __init__(self, minimum=None, maximum=None):
if minimum is not None and maximum is not None:
def check_range(value):
if not (minimum <= value <= maximum):
raise ValueError('Expected integer in the range [{0},{1}], not {2}'.format(minimum, maximum, value))
return
elif minimum is not None:
def check_range(value):
if value < minimum:
raise ValueError('Expected integer in the range [{0},+∞], not {1}'.format(minimum, value))
return
elif maximum is not None:
def check_range(value):
if value > maximum:
raise ValueError('Expected integer in the range [-∞,{0}], not {1}'.format(maximum, value))
return
else:
def check_range(value):
return
self.check_range = check_range
return
def __call__(self, value):
if value is None:
return None
try:
value = long(value)
except ValueError:
raise ValueError('Expected integer value, not {}'.format(json_encode_string(value)))
self.check_range(value)
return value
def format(self, value):
return None if value is None else unicode(long(value))
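# Hedged usage sketch (not from the original source): Integer coerces strings
# to long and enforces the optional [minimum, maximum] range. The helper name
# _integer_example is hypothetical.
def _integer_example():
    validate = Integer(minimum=1, maximum=100)
    assert validate('42') == 42
    assert validate.format(42) == '42'
    try:
        validate('0')                    # below the minimum
    except ValueError:
        pass
    return validate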
class Duration(Validator):
""" Validates duration option values.
"""
def __call__(self, value):
if value is None:
return None
p = value.split(':', 2)
result = None
_60 = Duration._60
_unsigned = Duration._unsigned
try:
if len(p) == 1:
result = _unsigned(p[0])
if len(p) == 2:
result = 60 * _unsigned(p[0]) + _60(p[1])
if len(p) == 3:
result = 3600 * _unsigned(p[0]) + 60 * _60(p[1]) + _60(p[2])
except ValueError:
raise ValueError('Invalid duration value: {0}'.format(value))
return result
def format(self, value):
if value is None:
return None
value = int(value)
s = value % 60
m = value // 60 % 60
h = value // (60 * 60)
return '{0:02d}:{1:02d}:{2:02d}'.format(h, m, s)
_60 = Integer(0, 59)
_unsigned = Integer(0)
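# Hedged usage sketch (not from the original source): Duration accepts
# [[HH:]MM:]SS option values and returns total seconds; format() renders the
# value back as HH:MM:SS. The helper name _duration_example is hypothetical.
def _duration_example():
    validate = Duration()
    assert validate('90') == 90          # bare seconds
    assert validate('01:30') == 90       # MM:SS
    assert validate('01:00:30') == 3630  # HH:MM:SS
    assert validate.format(3630) == '01:00:30'
    return validate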
class List(Validator):
""" Validates a list of strings
"""
class Dialect(csv.Dialect):
""" Describes the properties of list option values. """
strict = True
delimiter = b','
quotechar = b'"'
doublequote = True
lineterminator = b'\n'
skipinitialspace = True
quoting = csv.QUOTE_MINIMAL
def __init__(self, validator=None):
if not (validator is None or isinstance(validator, Validator)):
            raise ValueError('Expected a Validator instance or None for validator, not {}'.format(repr(validator)))
self._validator = validator
def __call__(self, value):
if value is None or isinstance(value, list):
return value
try:
value = csv.reader([value], self.Dialect).next()
except csv.Error as error:
raise ValueError(error)
if self._validator is None:
return value
try:
for index, item in enumerate(value):
value[index] = self._validator(item)
except ValueError as error:
raise ValueError('Could not convert item {}: {}'.format(index, error))
return value
def format(self, value):
output = StringIO()
writer = csv.writer(output, List.Dialect)
writer.writerow(value)
value = output.getvalue()
return value[:-1]
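# Hedged usage sketch (not from the original source): List splits a
# comma-separated option value with the Dialect above and, when an item
# validator is supplied, converts each element. The helper name _list_example
# is hypothetical.
def _list_example():
    validate = List(validator=Integer(0))
    assert validate('1, 2, 3') == [1, 2, 3]   # skipinitialspace strips blanks
    assert validate.format([1, 2, 3]) == '1,2,3'
    return validate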
class Map(Validator):
""" Validates map option values.
"""
def __init__(self, **kwargs):
self.membership = kwargs
def __call__(self, value):
if value is None:
return None
value = unicode(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {0}'.format(value))
return self.membership[value]
def format(self, value):
return None if value is None else self.membership.keys()[self.membership.values().index(value)]
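# Hedged usage sketch (not from the original source): Map translates a fixed
# vocabulary of option strings into arbitrary values and maps them back in
# format(). The keyword names and helper name _map_example are hypothetical.
def _map_example():
    validate = Map(ascending=1, descending=-1)
    assert validate('ascending') == 1
    assert validate.format(-1) == 'descending'
    return validate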
class Match(Validator):
""" Validates that a value matches a regular expression pattern.
"""
def __init__(self, name, pattern, flags=0):
self.name = unicode(name)
self.pattern = re.compile(pattern, flags)
def __call__(self, value):
if value is None:
return None
value = unicode(value)
if self.pattern.match(value) is None:
raise ValueError('Expected {}, not {}'.format(self.name, json_encode_string(value)))
return value
def format(self, value):
return None if value is None else unicode(value)
class OptionName(Validator):
""" Validates option names.
"""
pattern = re.compile(r'''(?=\w)[^\d]\w*$''', re.UNICODE)
def __call__(self, value):
if value is not None:
value = unicode(value)
if OptionName.pattern.match(value) is None:
raise ValueError('Illegal characters in option name: {}'.format(value))
return value
def format(self, value):
return None if value is None else unicode(value)
class RegularExpression(Validator):
""" Validates regular expression option values.
"""
def __call__(self, value):
if value is None:
return None
try:
value = re.compile(unicode(value))
except re.error as error:
raise ValueError('{}: {}'.format(unicode(error).capitalize(), value))
return value
def format(self, value):
return None if value is None else value.pattern
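# Hedged usage sketch (not from the original source): the validator compiles
# the option value and format() recovers the pattern text. The helper name
# _regular_expression_example is hypothetical.
def _regular_expression_example():
    validate = RegularExpression()
    pattern = validate(r'\d+')
    assert pattern.match('123') is not None
    assert validate.format(pattern) == r'\d+'
    return pattern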
class Set(Validator):
""" Validates set option values.
"""
def __init__(self, *args):
self.membership = set(args)
def __call__(self, value):
if value is None:
return None
value = unicode(value)
if value not in self.membership:
raise ValueError('Unrecognized value: {}'.format(value))
return value
def format(self, value):
return self.__call__(value)
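# Hedged usage sketch (not from the original source): Set restricts an option
# to a fixed membership and rejects anything outside it. The helper name
# _set_example is hypothetical.
def _set_example():
    validate = Set('json', 'csv', 'xml')
    assert validate('csv') == 'csv'
    try:
        validate('yaml')                 # not in the allowed set
    except ValueError:
        pass
    return validate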
__all__ = ['Boolean', 'Code', 'Duration', 'File', 'Integer', 'List', 'Map', 'RegularExpression', 'Set']
| apache-2.0 |
wsmith323/django | django/contrib/sites/management.py | 467 | 1564 | """
Creates the default Site object.
"""
from django.apps import apps
from django.conf import settings
from django.core.management.color import no_style
from django.db import DEFAULT_DB_ALIAS, connections, router
def create_default_site(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
try:
Site = apps.get_model('sites', 'Site')
except LookupError:
return
if not router.allow_migrate_model(using, Site):
return
if not Site.objects.using(using).exists():
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=getattr(settings, 'SITE_ID', 1), domain="example.com", name="example.com").save(using=using)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[using].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
with connections[using].cursor() as cursor:
for command in sequence_sql:
cursor.execute(command)
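# Hedged sketch, not part of this module: a handler like create_default_site
# is typically connected to the post_migrate signal from an AppConfig.ready()
# method, so the default Site row is created right after the sites tables are
# migrated. The SitesConfig name below is illustrative only.
#
#     from django.apps import AppConfig
#     from django.db.models.signals import post_migrate
#
#     class SitesConfig(AppConfig):
#         name = 'django.contrib.sites'
#
#         def ready(self):
#             post_migrate.connect(create_default_site, sender=self)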
| bsd-3-clause |
linktlh/Toontown-journey | toontown/shtiker/ShtikerBook.py | 1 | 19967 | from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
from direct.gui.DirectGui import *
from direct.showbase import DirectObject
from pandac.PandaModules import *
from toontown.effects import DistributedFireworkShow
from toontown.nametag import NametagGlobals
from toontown.parties import DistributedPartyFireworksActivity
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
class ShtikerBook(DirectFrame, StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('ShtikerBook')
def __init__(self, doneEvent):
DirectFrame.__init__(self, relief=None, sortOrder=DGG.BACKGROUND_SORT_INDEX)
self.initialiseoptions(ShtikerBook)
StateData.StateData.__init__(self, doneEvent)
self.pages = []
self.pageTabs = []
self.currPageTabIndex = None
self.pageTabFrame = DirectFrame(parent=self, relief=None, pos=(0.93, 1, 0.575), scale=1.25)
self.pageTabFrame.hide()
self.currPageIndex = None
self.pageBeforeNews = None
self.tempLeft = None
self.tempRight = None
self.entered = 0
self.safeMode = 0
self.__obscured = 0
self.__shown = 0
self.__isOpen = 0
self.hide()
self.setPos(0, 0, 0.1)
self.pageOrder = [TTLocalizer.OptionsPageTitle,
TTLocalizer.ShardPageTitle,
TTLocalizer.MapPageTitle,
TTLocalizer.InventoryPageTitle,
TTLocalizer.QuestPageToonTasks,
TTLocalizer.TrackPageShortTitle,
TTLocalizer.SuitPageTitle,
TTLocalizer.FishPageTitle,
TTLocalizer.KartPageTitle,
TTLocalizer.DisguisePageTitle,
TTLocalizer.NPCFriendPageTitle,
TTLocalizer.GardenPageTitle,
TTLocalizer.GolfPageTitle,
TTLocalizer.EventsPageName,
TTLocalizer.AchievementsPageTitle,
TTLocalizer.NewsPageName]
return
def setSafeMode(self, setting):
self.safeMode = setting
def enter(self):
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Open')
if self.entered:
return
self.entered = 1
messenger.send('releaseDirector')
messenger.send('stickerBookEntered')
base.playSfx(self.openSound)
base.disableMouse()
base.render.hide()
base.setBackgroundColor(0.05, 0.15, 0.4)
base.setCellsActive([base.rightCells[0]], 0)
NametagGlobals.setForce2dNametags(True)
NametagGlobals.setForceOnscreenChat(True)
self.__isOpen = 1
self.__setButtonVisibility()
self.show()
self.showPageArrows()
self.tempLeft = 'arrow_left'
self.tempRight = 'arrow_right'
if not self.safeMode:
self.accept('shtiker-page-done', self.__pageDone)
self.accept(ToontownGlobals.StickerBookHotkey, self.__close)
self.accept(ToontownGlobals.OptionsPageHotkey, self.__close)
self.accept('disable-hotkeys', self.__disableHotkeys)
self.accept('enable-hotkeys', self.__enableHotkeys)
self.pageTabFrame.show()
self.pages[self.currPageIndex].enter()
if hasattr(localAvatar, 'newsButtonMgr') and localAvatar.newsButtonMgr:
localAvatar.newsButtonMgr.hideNewIssueButton()
def exit(self):
if not self.entered:
return
self.entered = 0
messenger.send('stickerBookExited')
base.playSfx(self.closeSound)
self.pages[self.currPageIndex].exit()
base.render.show()
setBlackBackground = 0
for obj in base.cr.doId2do.values():
if isinstance(obj, DistributedFireworkShow.DistributedFireworkShow) or isinstance(obj, DistributedPartyFireworksActivity.DistributedPartyFireworksActivity):
setBlackBackground = 1
if setBlackBackground:
base.setBackgroundColor(Vec4(0, 0, 0, 1))
else:
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
gsg = base.win.getGsg()
if gsg:
base.render.prepareScene(gsg)
base.setCellsActive([base.rightCells[0]], 1)
NametagGlobals.setForce2dNametags(False)
NametagGlobals.setForceOnscreenChat(False)
self.__isOpen = 0
self.hide()
self.hideButton()
cleanupDialog('globalDialog')
self.pageTabFrame.hide()
self.ignore('shtiker-page-done')
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore(ToontownGlobals.OptionsPageHotkey)
self.ignore(self.tempRight)
self.ignore(self.tempLeft)
self.ignore('disable-hotkeys')
self.ignore('enable-hotkeys')
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Close')
def load(self):
self.checkGardenStarted = localAvatar.getGardenStarted()
bookModel = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
self['image'] = bookModel.find('**/big_book')
self['image_scale'] = (2, 1, 1.5)
self.resetFrameSize()
self.bookOpenButton = DirectButton(image=(bookModel.find('**/BookIcon_CLSD'), bookModel.find('**/BookIcon_OPEN'), bookModel.find('**/BookIcon_RLVR')), relief=None, pos=(-0.158, 0, 0.17), parent=base.a2dBottomRight, scale=0.305, command=self.__open)
self.bookCloseButton = DirectButton(image=(bookModel.find('**/BookIcon_OPEN'), bookModel.find('**/BookIcon_CLSD'), bookModel.find('**/BookIcon_RLVR2')), relief=None, pos=(-0.158, 0, 0.17), parent=base.a2dBottomRight, scale=0.305, command=self.__close)
self.bookOpenButton.hide()
self.bookCloseButton.hide()
self.nextArrow = DirectButton(parent=self, relief=None, image=(bookModel.find('**/arrow_button'), bookModel.find('**/arrow_down'), bookModel.find('**/arrow_rollover')), scale=(0.1, 0.1, 0.1), pos=(0.838, 0, -0.661), command=self.__pageChange, extraArgs=[1])
self.prevArrow = DirectButton(parent=self, relief=None, image=(bookModel.find('**/arrow_button'), bookModel.find('**/arrow_down'), bookModel.find('**/arrow_rollover')), scale=(-0.1, 0.1, 0.1), pos=(-0.838, 0, -0.661), command=self.__pageChange, extraArgs=[-1])
bookModel.removeNode()
self.openSound = base.loadSfx('phase_3.5/audio/sfx/GUI_stickerbook_open.ogg')
self.closeSound = base.loadSfx('phase_3.5/audio/sfx/GUI_stickerbook_delete.ogg')
self.pageSound = base.loadSfx('phase_3.5/audio/sfx/GUI_stickerbook_turn.ogg')
return
def unload(self):
loader.unloadModel('phase_3.5/models/gui/stickerbook_gui')
self.destroy()
self.bookOpenButton.destroy()
del self.bookOpenButton
self.bookCloseButton.destroy()
del self.bookCloseButton
self.nextArrow.destroy()
del self.nextArrow
self.prevArrow.destroy()
del self.prevArrow
for page in self.pages:
page.unload()
del self.pages
for pageTab in self.pageTabs:
pageTab.destroy()
del self.pageTabs
del self.currPageTabIndex
del self.openSound
del self.closeSound
del self.pageSound
del self.tempLeft
del self.tempRight
def addPage(self, page, pageName = 'Page'):
if pageName not in self.pageOrder:
self.notify.error('Trying to add page %s in the ShtickerBook. Page not listed in the order.' % pageName)
return
pageIndex = 0
if len(self.pages):
newIndex = len(self.pages)
prevIndex = newIndex - 1
            # Keep the News page last: insert new pages just before it and
            # bump the current page index if it points at or past the slot.
            if self.pages[prevIndex].pageName == TTLocalizer.NewsPageName:
self.pages.insert(prevIndex, page)
pageIndex = prevIndex
if self.currPageIndex >= pageIndex:
self.currPageIndex += 1
else:
self.pages.append(page)
pageIndex = len(self.pages) - 1
else:
self.pages.append(page)
pageIndex = len(self.pages) - 1
page.setBook(self)
page.setPageName(pageName)
page.reparentTo(self)
self.addPageTab(page, pageIndex, pageName)
from toontown.shtiker import MapPage
if isinstance(page, MapPage.MapPage):
self.pageBeforeNews = page
def addPageTab(self, page, pageIndex, pageName = 'Page'):
tabIndex = len(self.pageTabs)
def goToPage():
messenger.send('wakeup')
base.playSfx(self.pageSound)
self.setPage(page)
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Browse tabs %s' % page.pageName)
localAvatar.newsButtonMgr.setGoingToNewsPageFromStickerBook(False)
localAvatar.newsButtonMgr.showAppropriateButton()
yOffset = 0.07 * pageIndex
iconGeom = None
iconImage = None
iconScale = 1
iconColor = Vec4(1)
buttonPressedCommand = goToPage
extraArgs = []
if pageName == TTLocalizer.OptionsPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/switch')
iconModels.detachNode()
elif pageName == TTLocalizer.ShardPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/district')
iconModels.detachNode()
elif pageName == TTLocalizer.MapPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/teleportIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.InventoryPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/inventory_icons')
iconGeom = iconModels.find('**/inventory_tart')
iconScale = 7
iconModels.detachNode()
elif pageName == TTLocalizer.QuestPageToonTasks:
iconModels = loader.loadModel('phase_3.5/models/gui/stickerbook_gui')
iconGeom = iconModels.find('**/questCard')
iconScale = 0.9
iconModels.detachNode()
elif pageName == TTLocalizer.TrackPageShortTitle:
iconGeom = iconModels = loader.loadModel('phase_3.5/models/gui/filmstrip')
iconScale = 1.1
iconColor = Vec4(0.7, 0.7, 0.7, 1)
iconModels.detachNode()
elif pageName == TTLocalizer.SuitPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/gui_gear')
iconModels.detachNode()
elif pageName == TTLocalizer.FishPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/fish')
iconModels.detachNode()
elif pageName == TTLocalizer.GardenPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/gardenIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.DisguisePageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/disguise2')
iconColor = Vec4(0.7, 0.7, 0.7, 1)
iconModels.detachNode()
elif pageName == TTLocalizer.NPCFriendPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/playingCard')
iconImage = iconModels.find('**/card_back')
iconGeom = iconModels.find('**/logo')
iconScale = 0.22
iconModels.detachNode()
elif pageName == TTLocalizer.KartPageTitle:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/kartIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.GolfPageTitle:
iconModels = loader.loadModel('phase_6/models/golf/golf_gui')
iconGeom = iconModels.find('**/score_card_icon')
iconModels.detachNode()
elif pageName == TTLocalizer.EventsPageName:
iconModels = loader.loadModel('phase_4/models/parties/partyStickerbook')
iconGeom = iconModels.find('**/Stickerbook_PartyIcon')
iconModels.detachNode()
elif pageName == TTLocalizer.PhotoPageTitle:
iconGeom = iconModels = loader.loadModel('phase_4/models/minigames/photogame_filmroll')
iconScale = (1.9, 1.5, 1.5)
iconModels.detachNode()
elif pageName == TTLocalizer.NewsPageName:
iconModels = loader.loadModel('phase_3.5/models/gui/sos_textures')
iconGeom = iconModels.find('**/tt_t_gui_sbk_newsPageTab')
iconModels.detachNode()
buttonPressedCommand = self.goToNewsPage
extraArgs = [page]
if pageName == TTLocalizer.OptionsPageTitle:
pageName = TTLocalizer.OptionsTabTitle
        pageTab = DirectButton(
            parent=self.pageTabFrame, relief=DGG.RAISED,
            frameSize=(-0.575, 0.575, -0.575, 0.575), borderWidth=(0.05, 0.05),
            text=('', '', pageName, ''), text_align=TextNode.ALeft,
            text_pos=(1, -0.2), text_scale=TTLocalizer.SBpageTab,
            text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1),
            image=iconImage, image_scale=iconScale,
            geom=iconGeom, geom_scale=iconScale, geom_color=iconColor,
            pos=(0, 0, -yOffset), scale=0.06,
            command=buttonPressedCommand, extraArgs=extraArgs)
self.pageTabs.insert(pageIndex, pageTab)
return
def setPage(self, page, enterPage = True):
if self.currPageIndex is not None:
self.pages[self.currPageIndex].exit()
self.currPageIndex = self.pages.index(page)
self.setPageTabIndex(self.currPageIndex)
if enterPage:
self.showPageArrows()
page.enter()
from toontown.shtiker import NewsPage
if not isinstance(page, NewsPage.NewsPage):
self.pageBeforeNews = page
return
def setPageBeforeNews(self, enterPage = True):
self.setPage(self.pageBeforeNews, enterPage)
self.accept(ToontownGlobals.StickerBookHotkey, self.__close)
self.accept(ToontownGlobals.OptionsPageHotkey, self.__close)
def setPageTabIndex(self, pageTabIndex):
if self.currPageTabIndex is not None and pageTabIndex != self.currPageTabIndex:
self.pageTabs[self.currPageTabIndex]['relief'] = DGG.RAISED
self.currPageTabIndex = pageTabIndex
self.pageTabs[self.currPageTabIndex]['relief'] = DGG.SUNKEN
return
def isOnPage(self, page):
result = False
if self.currPageIndex is not None:
curPage = self.pages[self.currPageIndex]
if curPage == page:
result = True
return result
def obscureButton(self, obscured):
self.__obscured = obscured
self.__setButtonVisibility()
def isObscured(self):
return self.__obscured
def showButton(self):
self.__shown = 1
self.__setButtonVisibility()
localAvatar.newsButtonMgr.showAppropriateButton()
def hideButton(self):
self.__shown = 0
self.__setButtonVisibility()
localAvatar.newsButtonMgr.request('Hidden')
def __setButtonVisibility(self):
if self.__isOpen:
self.bookOpenButton.hide()
self.bookCloseButton.show()
elif self.__shown and not self.__obscured:
self.bookOpenButton.show()
self.bookCloseButton.hide()
else:
self.bookOpenButton.hide()
self.bookCloseButton.hide()
def shouldBookButtonBeHidden(self):
result = False
if self.__isOpen:
pass
elif self.__shown and not self.__obscured:
pass
else:
result = True
return result
def __open(self):
messenger.send('enterStickerBook')
if not localAvatar.getGardenStarted():
for tab in self.pageTabs:
if tab['text'][2] == TTLocalizer.GardenPageTitle:
tab.hide()
def __close(self):
base.playSfx(self.closeSound)
self.doneStatus = {'mode': 'close'}
messenger.send('exitStickerBook')
messenger.send(self.doneEvent)
def closeBook(self):
self.__close()
def __pageDone(self):
page = self.pages[self.currPageIndex]
pageDoneStatus = page.getDoneStatus()
if pageDoneStatus:
if pageDoneStatus['mode'] == 'close':
self.__close()
else:
self.doneStatus = pageDoneStatus
messenger.send(self.doneEvent)
def __pageChange(self, offset):
messenger.send('wakeup')
base.playSfx(self.pageSound)
self.pages[self.currPageIndex].exit()
self.currPageIndex = self.currPageIndex + offset
messenger.send('stickerBookPageChange-' + str(self.currPageIndex))
self.currPageIndex = max(self.currPageIndex, 0)
self.currPageIndex = min(self.currPageIndex, len(self.pages) - 1)
self.setPageTabIndex(self.currPageIndex)
self.showPageArrows()
page = self.pages[self.currPageIndex]
from toontown.shtiker import NewsPage
if isinstance(page, NewsPage.NewsPage):
self.goToNewsPage(page)
else:
page.enter()
self.pageBeforeNews = page
def showPageArrows(self):
if self.currPageIndex == len(self.pages) - 1:
self.prevArrow.show()
self.nextArrow.hide()
else:
self.prevArrow.show()
self.nextArrow.show()
self.__checkForNewsPage()
if self.currPageIndex == 0:
self.prevArrow.hide()
self.nextArrow.show()
def __checkForNewsPage(self):
from toontown.shtiker import NewsPage
self.ignore(self.tempLeft)
self.ignore(self.tempRight)
if isinstance(self.pages[self.currPageIndex], NewsPage.NewsPage):
self.ignore(self.tempLeft)
self.ignore(self.tempRight)
else:
self.accept(self.tempRight, self.__pageChange, [1])
self.accept(self.tempLeft, self.__pageChange, [-1])
def goToNewsPage(self, page):
messenger.send('wakeup')
base.playSfx(self.pageSound)
localAvatar.newsButtonMgr.setGoingToNewsPageFromStickerBook(True)
localAvatar.newsButtonMgr.showAppropriateButton()
self.setPage(page)
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: SHTICKERBOOK: Browse tabs %s' % page.pageName)
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore(ToontownGlobals.OptionsPageHotkey)
localAvatar.newsButtonMgr.acceptEscapeKeyPress()
def disableBookCloseButton(self):
if self.bookCloseButton:
self.bookCloseButton['command'] = None
return
def enableBookCloseButton(self):
if self.bookCloseButton:
self.bookCloseButton['command'] = self.__close
def disableAllPageTabs(self):
for button in self.pageTabs:
button['state'] = DGG.DISABLED
def enableAllPageTabs(self):
for button in self.pageTabs:
button['state'] = DGG.NORMAL
def __disableHotkeys(self):
self.ignore(ToontownGlobals.StickerBookHotkey)
self.ignore(ToontownGlobals.OptionsPageHotkey)
def __enableHotkeys(self):
self.accept(ToontownGlobals.StickerBookHotkey, self.__close)
self.accept(ToontownGlobals.OptionsPageHotkey, self.__close)
| apache-2.0 |