Column string lengths: file_name 3–137; prefix 0–918k; suffix 0–962k; middle 0–812k.

file_name | prefix | suffix | middle
---|---|---|---|
component.ts | // deno-lint-ignore no-explicit-any
type Class<S> = new (...args: any[]) => S;
const registry: { [name: string]: Class<BaseComponent> } = {};
const registeredElements: HTMLElement[] = [];
export function Component(name: string) {
return (ctor: Class<BaseComponent>) => {
registry[name] = ctor;
};
}
export abstract class BaseComponent {
static readonly _name: string;
protected readonly ctx: HTMLElement;
protected constructor(ctx: HTMLElement) {
this.ctx = ctx;
} |
declare global {
interface HTMLElement {
component: BaseComponent;
}
}
export function bootComponents() {
function initializeComponents() {
document.querySelectorAll<HTMLElement>(`[data-cmp]`).forEach((ctx) => {
// do not initialize more than once
if (!registeredElements.includes(ctx)) {
const cmp = registry[ctx.dataset.cmp || ""];
if (!cmp) {
throw new Error(`No component with the name ${ctx.dataset.cmp} found.`);
}
registeredElements.push(ctx);
ctx.component = new cmp(ctx);
}
});
}
if (document.readyState === "loading") {
document.addEventListener("DOMContentLoaded", initializeComponents, false);
} else {
initializeComponents();
}
}
export function destroyComponents() {
while (registeredElements.length > 0) {
registeredElements.pop()?.component.destructor();
}
} |
destructor(): void {
}
} |
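Each row in this dump follows the same fill-in-the-middle (FIM) layout: the `prefix` and `suffix` cells are printed first, and the held-out `middle` follows the final `|`. A minimal sketch of how a row reassembles into the original file, with the component.ts row above abbreviated for illustration:

```python
def reassemble(row: dict) -> str:
    """Rebuild the original source file from one FIM row."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Abbreviated component.ts row: the middle (the destructor stub that
# closes BaseComponent) slots in between the prefix and the suffix.
row = {
    "prefix": "export abstract class BaseComponent {\n  /* ... */\n",
    "middle": "  destructor(): void {\n  }\n}\n",
    "suffix": "declare global {\n  /* ... */\n}\n",
}
print(reassemble(row))
```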
test_userid.py | import pytest
import time
from tests.live import testlib
from pandevice import base
class TestUserID_FW(object):
"""Tests UserID on live Firewall."""
def test_01_fw_login(self, fw, state_map):
state = state_map.setdefault(fw)
user, ip = testlib.random_name(), testlib.random_ip()
fw.userid.login(user, ip)
state.single_user = [user, ip]
def test_02_fw_logins(self, fw, state_map):
state = state_map.setdefault(fw)
users = [(testlib.random_name(), testlib.random_ip()) for i in range(10)]
fw.userid.logins(users)
state.multi_user = users
def test_03_fw_logout(self, fw, state_map):
state = state_map.setdefault(fw)
if not state.single_user:
raise Exception("User not logged in yet")
user, ip = state.single_user |
def test_04_fw_logouts(self, fw, state_map):
state = state_map.setdefault(fw)
if not state.multi_user:
raise Exception("User not logged in yet")
fw.userid.logouts(state.multi_user)
def test_05_register_str(self, fw, state_map):
state = state_map.setdefault(fw)
ip, tag = testlib.random_ip(), testlib.random_name()
fw.userid.register(ip, tag)
state.single_register = [ip, tag]
def test_06_unregister_str(self, fw, state_map):
state = state_map.setdefault(fw)
if not state.single_register:
raise Exception("No single_register")
ip, tag = state.single_register
fw.userid.unregister(ip, tag)
def test_07_register_lst(self, fw, state_map):
state = state_map.setdefault(fw)
ips = [testlib.random_ip() for x in range(10)]
tags = [testlib.random_name() for i in range(15)]
fw.userid.register(ips, tags)
state.multi_register_01 = [ips, tags]
def test_08_get_registered_ip(self, fw, state_map):
state = state_map.setdefault(fw)
if not state.multi_register_01:
raise Exception("Multi register not set")
ips, tags = state.multi_register_01
test1 = set(fw.userid.get_registered_ip())
assert test1 == set(ips)
test2 = set(fw.userid.get_registered_ip(ips[0:3], tags))
assert test2 == set(ips[0:3])
test3 = set(fw.userid.get_registered_ip(ips[0:3], tags[0:5]))
assert test3 == set(ips[0:3])
test4 = set(fw.userid.get_registered_ip(ips, tags[0:5]))
assert test4 == set(ips)
test5 = set(fw.userid.get_registered_ip(ips[0], tags[0]))
assert test5 == set([ips[0],])
tests = [test1, test2, test3, test4, test5]
assert len(test5) != 0
assert all([test1 >= x for x in tests])
assert all([x >= test5 for x in tests])
assert test2 >= test3
assert test4 >= test3
def test_09_audit_registered_ip(self, fw, state_map):
state = state_map.setdefault(fw)
original = set(fw.userid.get_registered_ip())
new_ips = [testlib.random_ip() for x in range(5)]
new_tags = [testlib.random_name() for i in range(8)]
ip_tags_pairs = dict([(ip, tuple(new_tags)) for ip in new_ips])
fw.userid.audit_registered_ip(ip_tags_pairs)
state.multi_register_02 = [new_ips, new_tags]
new_set = set(fw.userid.get_registered_ip())
assert len(new_set) < len(original)
assert new_set == set(new_ips)
def test_10_clear_registered_ip(self, fw, state_map):
state = state_map.setdefault(fw)
if not state.multi_register_02:
raise Exception("Multi register not set")
ips, tags = state.multi_register_02
original = list(fw.userid.get_registered_ip())
fw.userid.clear_registered_ip(ips[0], tags[0])
mod1 = list(fw.userid.get_registered_ip())
fw.userid.clear_registered_ip(ips[0:4], tags[0:5])
mod2 = list(fw.userid.get_registered_ip())
fw.userid.clear_registered_ip(ips[0:4], tags)
mod3 = list(fw.userid.get_registered_ip())
fw.userid.clear_registered_ip(ips, tags[0:7])
mod4 = list(fw.userid.get_registered_ip())
fw.userid.clear_registered_ip()
mod5 = list(fw.userid.get_registered_ip())
assert len(mod3) < len(mod2)
assert len(mod3) < len(mod1)
assert len(mod3) < len(original)
assert len(mod5) == 0
def test_11_batch(self, fw, state_map):
fw.userid.clear_registered_ip() # Fresh start
fw.userid.batch_start()
users = [(testlib.random_name(), testlib.random_ip()) for i in range(5)]
fw.userid.logins(users)
ips = [testlib.random_ip() for x in range(5)]
tags = [testlib.random_name() for y in range(5)]
fw.userid.register(ips, tags)
fw.userid.unregister(ips[2], tags[4])
fw.userid.get_registered_ip(ips[0:3], tags[2:4])
new_ips = [testlib.random_ip() for x in range(3)]
new_tags = [testlib.random_name() for y in range(3)]
fw.userid.audit_registered_ip(dict([(ip, tuple(new_tags)) for ip in new_ips]))
fw.userid.get_registered_ip()
fw.userid.unregister(new_ips, new_tags)
fw.userid.batch_end()
def test_12_uidmessage(self, fw, state_map):
state = state_map.setdefault(fw)
state.uid = fw.userid._create_uidmessage()
def test_13_send(self, fw, state_map):
state = state_map.setdefault(fw)
if not state.uid:
raise Exception("No UID")
fw.userid.send(
state.uid[0]
) # State.uid returns length-two tuple of XML elements | fw.userid.logout(user, ip) |
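For orientation, the object under test is pandevice's User-ID API. A minimal usage sketch of the calls exercised above; the firewall address and credentials are placeholders (the tests receive `fw` from a fixture instead):

```python
from pandevice.firewall import Firewall

# Placeholder connection details.
fw = Firewall("192.0.2.1", api_username="admin", api_password="admin")

fw.userid.login("alice", "10.0.0.5")          # map user -> IP
fw.userid.register("10.0.0.5", "vpn-users")   # tag the IP for dynamic groups
fw.userid.unregister("10.0.0.5", "vpn-users")
fw.userid.logout("alice", "10.0.0.5")

# Several updates can be coalesced into a single API call, as in test_11:
fw.userid.batch_start()
fw.userid.logins([("bob", "10.0.0.6"), ("carol", "10.0.0.7")])
fw.userid.batch_end()
```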
files.go | package filehandler
import (
"bytes"
"html/template"
"os"
"path/filepath"
)
func check(e error) {
if e != nil {
panic(e)
}
}
// CreateDirIfNotExist ...
func CreateDirIfNotExist(dir string) {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
check(err)
}
}
// GetAllFilePathsInDirectory recursively returns all file paths in a specified directory, including sub-directories.
func | (dirpath string) ([]string, error) {
// Get all the .tmpl files in the directory.
var paths []string
extension := ".tmpl"
err := filepath.Walk(dirpath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() && filepath.Ext(path) == extension {
paths = append(paths, path)
}
return nil
})
if err != nil {
return nil, err
}
return paths, nil
}
// process applies the data structure 'vars' onto an already
// parsed template 't', and returns the resulting string.
func process(t *template.Template, vars interface{}) string {
var tmplBytes bytes.Buffer
err := t.Execute(&tmplBytes, vars)
check(err)
return tmplBytes.String()
}
// ProcessFile ...
func ProcessFile(fileName string, vars interface{}) string {
tmpl, err := template.ParseFiles(fileName)
check(err)
return process(tmpl, vars)
}
// WriteToFile ...
func WriteToFile(filename string, data string) {
println("Writing file: " + filename)
file, err := os.Create(filename)
check(err)
defer file.Close()
file.WriteString(data)
}
| GetAllFilePathsInDirectory |
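For comparison, the same recursive walk-and-filter can be expressed with Python's pathlib; a sketch under the same `.tmpl` extension assumption as the Go version:

```python
from pathlib import Path

def get_all_template_paths(dirpath: str) -> list[str]:
    """Recursively collect all .tmpl file paths, like GetAllFilePathsInDirectory."""
    return [str(p) for p in sorted(Path(dirpath).rglob("*.tmpl")) if p.is_file()]
```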
transform_group_by_partial.rs | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;
use bumpalo::Bump;
use common_datablocks::DataBlock;
use common_datablocks::HashMethod;
use common_datablocks::HashMethodKind;
use common_datavalues::arrays::BinaryArrayBuilder;
use common_datavalues::prelude::*;
use common_exception::Result;
use common_infallible::RwLock;
use common_planners::Expression;
use common_streams::DataBlockStream;
use common_streams::SendableDataBlockStream;
use common_tracing::tracing;
use futures::stream::StreamExt;
use crate::pipelines::processors::EmptyProcessor;
use crate::pipelines::processors::Processor;
pub struct GroupByPartialTransform {
aggr_exprs: Vec<Expression>,
group_exprs: Vec<Expression>,
schema: DataSchemaRef,
schema_before_group_by: DataSchemaRef,
input: Arc<dyn Processor>,
}
impl GroupByPartialTransform {
pub fn create(
schema: DataSchemaRef,
schema_before_group_by: DataSchemaRef,
aggr_exprs: Vec<Expression>,
group_exprs: Vec<Expression>,
) -> Self {
Self {
aggr_exprs,
group_exprs,
schema,
schema_before_group_by,
input: Arc::new(EmptyProcessor::create()),
}
}
}
#[async_trait::async_trait]
impl Processor for GroupByPartialTransform {
fn name(&self) -> &str {
"GroupByPartialTransform"
}
fn connect_to(&mut self, input: Arc<dyn Processor>) -> Result<()> {
self.input = input;
Ok(())
}
fn inputs(&self) -> Vec<Arc<dyn Processor>> {
vec![self.input.clone()]
}
fn as_any(&self) -> &dyn Any {
self
}
/// Create hash group based on row index and apply the function with vector.
/// For example:
/// row_idx, A
/// 0, 1
/// 1, 2
/// 2, 3
/// 3, 4
/// 4, 5
///
/// grouping by [A%3]
/// 1.1)
/// row_idx, group_key, A
/// 0, 1, 1
/// 1, 2, 2
/// 2, 0, 3
/// 3, 1, 4
/// 4, 2, 5
///
/// 1.2) make indices group(for vector compute)
/// group_key, indices
/// 0, [2]
/// 1, [0, 3]
/// 2, [1, 4]
///
/// 1.3) apply aggregate function(SUM(A)) to the take block
/// group_key, SUM(A)
/// <0, 3>
/// <1, 1+4>
/// <2, 2+5>
async fn execute(&self) -> Result<SendableDataBlockStream> {
tracing::debug!("execute...");
let aggr_len = self.aggr_exprs.len();
let start = Instant::now();
let schema_before_group_by = self.schema_before_group_by.clone();
let mut funcs = Vec::with_capacity(self.aggr_exprs.len());
let mut arg_names = Vec::with_capacity(self.aggr_exprs.len());
let mut aggr_cols = Vec::with_capacity(self.aggr_exprs.len());
for expr in self.aggr_exprs.iter() {
funcs.push(expr.to_aggregate_function(&schema_before_group_by)?);
arg_names.push(expr.to_aggregate_function_names()?);
aggr_cols.push(expr.column_name());
}
let group_cols = self
.group_exprs
.iter()
.map(|x| x.column_name())
.collect::<Vec<_>>();
let mut stream = self.input.execute().await?;
let arena = Bump::new();
let sample_block = DataBlock::empty_with_schema(self.schema.clone());
let method = DataBlock::choose_hash_method(&sample_block, &group_cols)?;
macro_rules! apply {
($hash_method: ident, $key_array_builder: ty, $group_func_table: ty) => {{
// Table for <group_key, (place, keys) >
type GroupFuncTable = $group_func_table;
let groups_locker = GroupFuncTable::default();
while let Some(block) = stream.next().await {
let block = block?;
// 1.1 and 1.2.
let group_blocks = $hash_method.group_by(&block, &group_cols)?;
// 1.3 Apply take blocks to aggregate function by group_key.
{
for (group_key, group_keys, take_block) in group_blocks {
let rows = take_block.num_rows();
let mut groups = groups_locker.write();
match groups.get_mut(&group_key) {
// New group.
None => {
let mut places = Vec::with_capacity(aggr_cols.len());
for (idx, _aggr_col) in aggr_cols.iter().enumerate() {
let func = funcs[idx].clone();
let place = funcs[idx].allocate_state(&arena);
let arg_columns = arg_names[idx]
.iter()
.map(|arg| {
take_block
.try_column_by_name(arg)
.map(|c| c.clone())
})
.collect::<Result<Vec<DataColumn>>>()?;
func.accumulate(place, &arg_columns, rows)?;
places.push(place);
}
groups.insert(group_key.clone(), (places, group_keys));
}
// Accumulate result against the take block by indices.
Some((places, _)) => {
for (idx, _aggr_col) in aggr_cols.iter().enumerate() {
let arg_columns = arg_names[idx]
.iter()
.map(|arg| {
take_block
.try_column_by_name(arg)
.map(|c| c.clone())
})
.collect::<Result<Vec<DataColumn>>>()?;
funcs[idx].accumulate(places[idx], &arg_columns, rows)?
}
}
}
}
}
}
let delta = start.elapsed();
tracing::debug!("Group by partial cost: {:?}", delta);
let groups = groups_locker.read();
if groups.is_empty() {
return Ok(Box::pin(DataBlockStream::create(
DataSchemaRefExt::create(vec![]),
None,
vec![],
)));
}
let mut group_arrays = Vec::with_capacity(group_cols.len());
for _i in 0..group_cols.len() {
group_arrays.push(Vec::with_capacity(groups.len()));
}
// Builders.
let mut state_builders: Vec<BinaryArrayBuilder> = (0..aggr_len)
.map(|_| BinaryArrayBuilder::new(groups.len() * 4))
.collect();
type KeyBuilder = $key_array_builder;
let mut group_key_builder = KeyBuilder::new(groups.len());
for (key, (places, values)) in groups.iter() {
for (idx, func) in funcs.iter().enumerate() {
let mut writer = vec![];
func.serialize(places[idx], &mut writer)?;
state_builders[idx].append_value(&writer);
}
for (i, value) in values.iter().enumerate() {
group_arrays[i].push(value.clone());
}
// Keys
group_key_builder.append_value((*key).clone());
}
let mut columns: Vec<Series> = Vec::with_capacity(self.schema.fields().len());
for mut builder in state_builders {
columns.push(builder.finish().into_series());
}
for (i, values) in group_arrays.iter().enumerate() {
columns.push(DataValue::try_into_data_array(
values,
&self.group_exprs[i].to_data_type(&self.schema_before_group_by)?,
)?)
}
let array = group_key_builder.finish();
columns.push(array.into_series());
let block = DataBlock::create_by_array(self.schema.clone(), columns);
Ok(Box::pin(DataBlockStream::create(
self.schema.clone(),
None,
vec![block],
)))
}};
}
macro_rules! match_hash_method_and_apply {
($method: ident, $apply: ident) => {{
match $method {
HashMethodKind::Serializer(hash_method) => {
apply! { hash_method, BinaryArrayBuilder , RwLock<HashMap<Vec<u8>, (Vec<usize>, Vec<DataValue>), ahash::RandomState>>}
} | apply! { hash_method , DFUInt8ArrayBuilder, RwLock<HashMap<u8, (Vec<usize>, Vec<DataValue>), ahash::RandomState>> }
}
HashMethodKind::KeysU16(hash_method) => {
apply! { hash_method , DFUInt16ArrayBuilder, RwLock<HashMap<u16, (Vec<usize>, Vec<DataValue>), ahash::RandomState>> }
}
HashMethodKind::KeysU32(hash_method) => {
apply! { hash_method , DFUInt32ArrayBuilder, RwLock<HashMap<u32, (Vec<usize>, Vec<DataValue>), ahash::RandomState>> }
}
HashMethodKind::KeysU64(hash_method) => {
apply! { hash_method , DFUInt64ArrayBuilder, RwLock<HashMap<u64, (Vec<usize>, Vec<DataValue>), ahash::RandomState>> }
}
}
}};
}
match_hash_method_and_apply! {method, apply}
}
} | HashMethodKind::KeysU8(hash_method) => { |
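The three numbered steps in the doc comment (compute a group key per row, gather row indices per key, aggregate each take block) are easy to mirror outside Rust; a minimal Python sketch of the `A % 3` / `SUM(A)` example:

```python
from collections import defaultdict

rows = [1, 2, 3, 4, 5]            # column A, row_idx 0..4

# 1.1 compute a group key per row (grouping by A % 3)
keys = [a % 3 for a in rows]      # [1, 2, 0, 1, 2]

# 1.2 make indices groups (for vectorized compute)
groups = defaultdict(list)
for idx, key in enumerate(keys):
    groups[key].append(idx)       # {1: [0, 3], 2: [1, 4], 0: [2]}

# 1.3 apply the aggregate SUM(A) to each take block
sums = {k: sum(rows[i] for i in idxs) for k, idxs in groups.items()}
print(sums)                       # {1: 5, 2: 7, 0: 3}
```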
specs.py | # MIT License
# This project is a software package to automate the performance tracking of the HPC algorithms
# Copyright (c) 2021. Victor Tuah Kumi, Ahmed Iqbal, Javier Vite, Aidan Forester
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Gets specifications for GPU/CPU on which rocm is run"""
import model
def | (hardware_ids, rocm_versions):
"""Gets specification for all specified rocm hardwares"""
specs_info = []
for rocm in rocm_versions:
for hardw_id in hardware_ids:
specs = model.get_specs(hardw_id, rocm) #returns dictionary
if specs is not None:
title = f'{rocm} specs'
info = f'''
```
{title}
Host info:
hostname: {specs['hostname']}
cpu info: {specs['cpu_info']}
ram: {specs['ram']}
distro: {specs['distro']}
kernel version: {specs['kernel']}
rocm version: {specs['rocm']}
Device info:
device: {specs['device']}
vbios version: {specs['vbios']}
vram: {specs['vram']}
performance level: {specs['performance']}
system clock: {specs['sys_clock']}
memory clock: {specs['mem_clock']}
```
'''
specs_info.append(info)
return specs_info
| get_specs |
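A hypothetical call, just to show the shape of the result; the hardware ID and ROCm version strings here are made up, and `model.get_specs` is this project's own lookup, so this is illustrative rather than runnable standalone:

```python
# Hypothetical usage of the completed get_specs() above.
infos = get_specs(hardware_ids=["gfx906"], rocm_versions=["rocm-4.2"])
for block in infos:
    print(block)  # one fenced spec block per (rocm, hardware) pair that resolved
```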
extension.py | from waldur_core.core import WaldurExtension
class PlaybookJobsExtension(WaldurExtension):
class Settings: | WALDUR_PLAYBOOK_JOBS = {
'PLAYBOOKS_DIR_NAME': 'ansible_playbooks',
'PLAYBOOK_ICON_SIZE': (64, 64),
}
@staticmethod
def django_app():
return 'waldur_ansible.playbook_jobs'
@staticmethod
def rest_urls():
from .urls import register_in
return register_in
@staticmethod
def is_assembly():
return True | |
build_requires_test.py | import unittest
from nose_parameterized.parameterized import parameterized
from conans.test.utils.tools import TestClient
from conans.paths import CONANFILE
tool_conanfile = """
import os
from conans import ConanFile
class Tool(ConanFile):
name = "Tool"
version = "0.1"
def package_info(self):
self.env_info.TOOL_PATH.append("MyToolPath")
"""
tool_conanfile2 = tool_conanfile.replace("0.1", "0.3")
conanfile = """
import os
from conans import ConanFile, tools
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
{}
def build(self):
self.output.info("ToolPath: %s" % os.getenv("TOOL_PATH"))
"""
requires = conanfile.format('build_requires = "Tool/0.1@lasote/stable"')
requires_range = conanfile.format('build_requires = "Tool/[>0.0]@lasote/stable"')
requirements = conanfile.format("""def build_requirements(self):
self.build_requires("Tool/0.1@lasote/stable")""")
override = conanfile.format("""build_requires = "Tool/0.2@user/channel"
def build_requirements(self):
self.build_requires("Tool/0.1@lasote/stable")""")
profile = """
[build_requires]
Tool/0.3@lasote/stable
nonexistingpattern*: SomeTool/1.2@user/channel
"""
class BuildRequiresTest(unittest.TestCase):
@parameterized.expand([(requires, ), (requires_range, ), (requirements, ), (override, )])
def | (self, conanfile):
client = TestClient()
client.save({CONANFILE: tool_conanfile}, clean_first=True)
client.run("export lasote/stable")
client.save({CONANFILE: conanfile}, clean_first=True)
client.run("export lasote/stable")
client.run("install MyLib/0.1@lasote/stable --build missing")
self.assertIn("Tool/0.1@lasote/stable: Generating the package", client.user_io.out)
self.assertIn("ToolPath: MyToolPath", client.user_io.out)
client.run("install MyLib/0.1@lasote/stable")
self.assertNotIn("Tool", client.user_io.out)
self.assertIn("MyLib/0.1@lasote/stable: Already installed!", client.user_io.out)
@parameterized.expand([(requires, ), (requires_range, ), (requirements, ), (override, )])
def test_profile_override(self, conanfile):
client = TestClient()
client.save({CONANFILE: tool_conanfile2}, clean_first=True)
client.run("export lasote/stable")
client.save({CONANFILE: conanfile,
"profile.txt": profile,
"profile2.txt": profile.replace("0.3", "[>0.2]")}, clean_first=True)
client.run("export lasote/stable")
client.run("install MyLib/0.1@lasote/stable --profile ./profile.txt --build missing")
self.assertNotIn("Tool/0.1", client.user_io.out)
self.assertNotIn("Tool/0.2", client.user_io.out)
self.assertIn("Tool/0.3@lasote/stable: Generating the package", client.user_io.out)
self.assertIn("ToolPath: MyToolPath", client.user_io.out)
client.run("install MyLib/0.1@lasote/stable")
self.assertNotIn("Tool", client.user_io.out)
self.assertIn("MyLib/0.1@lasote/stable: Already installed!", client.user_io.out)
client.run("install MyLib/0.1@lasote/stable --profile ./profile2.txt --build")
self.assertNotIn("Tool/0.1", client.user_io.out)
self.assertNotIn("Tool/0.2", client.user_io.out)
self.assertIn("Tool/0.3@lasote/stable: Generating the package", client.user_io.out)
self.assertIn("ToolPath: MyToolPath", client.user_io.out)
| test_build_requires |
squeezenet.py | import torch
from torch import nn
from torch.nn import functional as F
import torchvision
def main():
|
if __name__ == '__main__':
main()
| print('cuda device count: ', torch.cuda.device_count())
net = torchvision.models.squeezenet1_1(pretrained=True)
#net.fc = nn.Linear(512, 2)
net = net.eval()
net = net.to('cuda:0')
print(net)
tmp = torch.ones(2, 3, 227, 227).to('cuda:0')
out = net(tmp)
print('squeezenet out:', out.shape)
torch.save(net, "squeezenet.pth") |
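Because `torch.save(net, ...)` above pickles the whole module rather than a `state_dict`, loading it back is a one-liner. A minimal inference sketch, assuming the `squeezenet.pth` file written above and a CUDA device:

```python
import torch

# Full pickled nn.Module, not a state_dict; newer PyTorch versions may
# require torch.load("squeezenet.pth", weights_only=False).
net = torch.load("squeezenet.pth")
net = net.eval().to("cuda:0")

with torch.no_grad():
    x = torch.ones(1, 3, 227, 227, device="cuda:0")
    out = net(x)
print(out.shape)  # torch.Size([1, 1000]) for squeezenet1_1
```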
unassociate_eip_address.go | package vpc
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// UnassociateEipAddress invokes the vpc.UnassociateEipAddress API synchronously
// api document: https://help.aliyun.com/api/vpc/unassociateeipaddress.html
func (client *Client) UnassociateEipAddress(request *UnassociateEipAddressRequest) (response *UnassociateEipAddressResponse, err error) {
response = CreateUnassociateEipAddressResponse()
err = client.DoAction(request, response)
return
}
// UnassociateEipAddressWithChan invokes the vpc.UnassociateEipAddress API asynchronously
// api document: https://help.aliyun.com/api/vpc/unassociateeipaddress.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) UnassociateEipAddressWithChan(request *UnassociateEipAddressRequest) (<-chan *UnassociateEipAddressResponse, <-chan error) {
responseChan := make(chan *UnassociateEipAddressResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.UnassociateEipAddress(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// UnassociateEipAddressWithCallback invokes the vpc.UnassociateEipAddress API asynchronously
// api document: https://help.aliyun.com/api/vpc/unassociateeipaddress.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) UnassociateEipAddressWithCallback(request *UnassociateEipAddressRequest, callback func(response *UnassociateEipAddressResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *UnassociateEipAddressResponse
var err error
defer close(result)
response, err = client.UnassociateEipAddress(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// UnassociateEipAddressRequest is the request struct for api UnassociateEipAddress
type UnassociateEipAddressRequest struct {
*requests.RpcRequest
PrivateIpAddress string `position:"Query" name:"PrivateIpAddress"`
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
InstanceId string `position:"Query" name:"InstanceId"`
ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"`
OwnerAccount string `position:"Query" name:"OwnerAccount"`
InstanceType string `position:"Query" name:"InstanceType"`
Force requests.Boolean `position:"Query" name:"Force"`
AllocationId string `position:"Query" name:"AllocationId"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
}
// UnassociateEipAddressResponse is the response struct for api UnassociateEipAddress
type UnassociateEipAddressResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
}
// CreateUnassociateEipAddressRequest creates a request to invoke UnassociateEipAddress API
func CreateUnassociateEipAddressRequest() (request *UnassociateEipAddressRequest) {
request = &UnassociateEipAddressRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Vpc", "2016-04-28", "UnassociateEipAddress", "vpc", "openAPI")
return
}
// CreateUnassociateEipAddressResponse creates a response to parse from UnassociateEipAddress response
func | () (response *UnassociateEipAddressResponse) {
response = &UnassociateEipAddressResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| CreateUnassociateEipAddressResponse |
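The `WithChan` and `WithCallback` wrappers above are the SDK's channel-based async pattern; the closest Python analogue is a `Future` from a thread pool. A sketch with a stand-in for the blocking call:

```python
from concurrent.futures import ThreadPoolExecutor

def unassociate_eip_address(request: dict) -> dict:
    """Stand-in for the blocking DoAction call in the SDK."""
    return {"RequestId": "demo-request-id"}

executor = ThreadPoolExecutor(max_workers=4)

# Like UnassociateEipAddressWithChan: start the call now, collect the
# result (or the raised exception) whenever it is convenient.
future = executor.submit(unassociate_eip_address, {"AllocationId": "eip-123"})
print(future.result())  # blocks until the call completes
```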
main.go | //
// Copyright 2020 Verizon Media
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package main
import (
"flag"
"fmt"
"github.com/AthenZ/athenz/libs/go/sia/util"
"github.com/AthenZ/athenz/provider/azure/sia-vm"
"github.com/AthenZ/athenz/provider/azure/sia-vm/data/attestation"
"github.com/AthenZ/athenz/provider/azure/sia-vm/options"
"io/ioutil"
"log"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var MetaEndPoint = "http://169.254.169.254"
var ApiVersion = "2020-06-01"
const siaMainDir = "/var/lib/sia"
const siaLinkDir = "/var/run/sia"
const siaVersion = "1.0"
func main() {
cmd := flag.String("cmd", "", "optional sub command to run")
metaEndPoint := flag.String("meta", "", "optional meta endpoint to use for debugging")
ztsEndPoint := flag.String("zts", "", "optional zts endpoint")
ztsServerName := flag.String("ztsservername", "", "zts server name for tls connections")
ztsCACert := flag.String("ztscacert", "", "zts CA certificate file")
ztsAzureDomains := flag.String("ztsazuredomain", "", "ZTS Azure Domain")
ztsResourceUri := flag.String("ztsresourceuri", "", "ZTS AD App Resource URI")
azureProvider := flag.String("azureProvider", "", "Azure Provider Service Name")
countryName := flag.String("countryname", "US", "X.509 Certificate Country Value")
pConf := flag.String("config", "/etc/sia/sia_config", "The config file to run against")
noSysLog := flag.Bool("nosyslog", false, "turn off syslog, log to stdout")
flag.Parse()
if !*noSysLog {
sysLogger, err := util.NewSysLogger()
if err == nil {
log.SetOutput(sysLogger)
} else {
log.SetFlags(log.LstdFlags)
log.Printf("Unable to create sys logger: %v\n", err)
}
} else {
log.SetFlags(log.LstdFlags)
}
if *ztsEndPoint == "" {
log.Fatalf("ztsEndPoint argument must be specified\n")
}
if *ztsAzureDomains == "" {
log.Fatalf("ztsazuredomain argument must be specified\n")
}
ztsAzureDomainList := strings.Split(*ztsAzureDomains, ",")
if *ztsResourceUri == "" {
log.Fatalf("ztsresourceuri argument must be specified\n")
}
if *metaEndPoint != "" {
MetaEndPoint = *metaEndPoint
}
identityDocument, err := attestation.GetIdentityDocument(MetaEndPoint, ApiVersion)
if err != nil {
log.Fatalf("Unable to get the instance identity document, error: %v\n", err)
}
confBytes, _ := ioutil.ReadFile(*pConf)
opts, err := options.NewOptions(confBytes, identityDocument, siaMainDir, siaVersion, *ztsCACert, *ztsServerName, ztsAzureDomainList, *countryName, *azureProvider)
if err != nil {
log.Fatalf("Unable to formulate options, error: %v\n", err)
}
log.Printf("options: %+v\n", opts)
data, err := getAttestationData(*ztsResourceUri, identityDocument, opts)
if err != nil {
log.Fatalf("Unable to formulate attestation data, error: %v\n", err)
}
// for now we're going to rotate once every day
// since our server and role certs are valid for
// 30 days by default
rotationInterval := 24 * 60 * time.Minute
ztsUrl := fmt.Sprintf("https://%s:4443/zts/v1", *ztsEndPoint)
err = util.SetupSIADirs(siaMainDir, siaLinkDir, -1, -1)
if err != nil {
log.Fatalf("Unable to setup sia directories, error: %v\n", err)
}
log.Printf("Request SSH Certificates: %t\n", opts.Ssh)
svcs := options.GetSvcNames(opts.Services)
switch *cmd {
case "rolecert":
sia.GetRoleCertificate(ztsUrl,
fmt.Sprintf("%s/%s.%s.key.pem", opts.KeyDir, opts.Domain, opts.Services[0].Name),
fmt.Sprintf("%s/%s.%s.cert.pem", opts.CertDir, opts.Domain, opts.Services[0].Name),
opts,
)
case "post":
err := sia.RegisterInstance(data, ztsUrl, identityDocument, opts)
if err != nil {
log.Fatalf("Register identity failed, err: %v\n", err)
}
log.Printf("identity registered for services: %s\n", svcs)
case "rotate":
err = sia.RefreshInstance(data, ztsUrl, identityDocument, opts)
if err != nil {
log.Fatalf("Refresh identity failed, err: %v\n", err)
}
log.Printf("Identity successfully refreshed for services: %s\n", svcs)
default:
// if we already have a cert file then we're not going to
// prove our identity since most likely it will not succeed
// due to boot time check (this could be just a regular
// service restart for any reason). Instead, we'll just skip
// over and try to rotate the certs
initialSetup := true
if files, err := ioutil.ReadDir(opts.CertDir); err != nil || len(files) <= 0 {
err := sia.RegisterInstance(data, ztsUrl, identityDocument, opts)
if err != nil {
log.Fatalf("Register identity failed, error: %v\n", err)
}
} else {
initialSetup = false
log.Println("Identity certificate file already exists. Retrieving identity details...")
}
log.Printf("Identity established for services: %s\n", svcs)
stop := make(chan bool, 1)
errors := make(chan error, 1)
go func() {
for {
log.Printf("Identity being used: %s\n", opts.Name)
// if we just did our initial setup there is no point
// to refresh the certs again. so we are going to skip
// this time around and refresh certs next time
if !initialSetup {
data, err := getAttestationData(*ztsResourceUri, identityDocument, opts)
if err != nil {
errors <- fmt.Errorf("Cannot get attestation data: %v\n", err)
return
}
err = sia.RefreshInstance(data, ztsUrl, identityDocument, opts)
if err != nil {
errors <- fmt.Errorf("refresh identity failed, error: %v", err)
return
}
log.Printf("identity successfully refreshed for services: %s\n", svcs)
} else {
initialSetup = false
}
sia.GetRoleCertificate(ztsUrl,
fmt.Sprintf("%s/%s.%s.key.pem", opts.KeyDir, opts.Domain, opts.Services[0].Name),
fmt.Sprintf("%s/%s.%s.cert.pem", opts.CertDir, opts.Domain, opts.Services[0].Name),
opts,
)
select {
case <-stop:
errors <- nil
return
case <-time.After(rotationInterval):
break
}
}
}() | signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
sig := <-signals
log.Printf("Received signal %v, stopping rotation\n", sig)
stop <- true
}()
err = <-errors
if err != nil {
log.Printf("%v\n", err)
}
}
os.Exit(0)
}
// getAttestationData fetches attestation data for all the services mentioned in the config file
func getAttestationData(resourceUri string, identityDocument *attestation.IdentityDocument, opts *options.Options) ([]*attestation.Data, error) {
var data []*attestation.Data
for _, svc := range opts.Services {
a, err := attestation.New(opts.Domain, svc.Name, MetaEndPoint, ApiVersion, resourceUri, identityDocument)
if err != nil {
return nil, err
}
data = append(data, a)
}
return data, nil
} |
go func() {
signals := make(chan os.Signal, 2) |
random number generation.py | import random
| x=random.random()
print("The Random number is",round(x,3)) |
|
setup.py | #!/usr/bin/env python
from os.path import join, dirname, abspath
from setuptools import setup
def read(rel_path):
here = abspath(dirname(__file__))
with open(join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
REQUIREMENTS = read('requirements.txt').splitlines()
DESCRIPTION = read('README.md') | setup(name='robotframework-csvlibrary',
version=get_version("CSVLibrary/__init__.py"),
description='CSV library for Robot Framework',
long_description=DESCRIPTION,
long_description_content_type='text/markdown',
author='Marcin Mierzejewski',
author_email='<[email protected]>',
url='https://github.com/s4int/robotframework-CSVLibrary',
license='Apache License 2.0',
keywords='robotframework testing csv',
platforms='any',
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Testing",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
install_requires=REQUIREMENTS,
packages=['CSVLibrary'],
) | |
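`get_version` above avoids importing the package at build time by scanning `__init__.py` for the `__version__` line; the parsing step in isolation:

```python
# Given a line like:  __version__ = "1.2.3"
line = '__version__ = "1.2.3"'
delim = '"' if '"' in line else "'"
print(line.split(delim)[1])  # -> 1.2.3
```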
predict.py | from tensorflow.keras.models import load_model
from clean import downsample_mono, envelope
from kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel
from sklearn.preprocessing import LabelEncoder
import numpy as np
from glob import glob
import argparse
import os
import pandas as pd
from tqdm import tqdm
def make_prediction(args):
# load the model
|
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Audio Classification Training')
parser.add_argument('--model_fn', type=str, default='models/lstm.h5',
help='model file to make predictions')
parser.add_argument('--pred_fn', type=str, default='y_pred',
help='fn to write predictions in logs dir')
parser.add_argument('--src_dir', type=str, default='wavfiles',
help='directory containing wavfiles to predict')
parser.add_argument('--dt', type=float, default=1.0,
help='time in seconds to sample audio')
parser.add_argument('--sr', type=int, default=16000,
help='sample rate of clean audio')
parser.add_argument('--threshold', type=int, default=20,
help='threshold magnitude for np.int16 dtype')
args, _ = parser.parse_known_args()
make_prediction(args)
| model = load_model(args.model_fn,
custom_objects={'STFT': STFT,
'Magnitude': Magnitude,
'ApplyFilterbank': ApplyFilterbank,
'MagnitudeToDecibel': MagnitudeToDecibel})
# find the sound data
wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)
wav_paths = sorted([x.replace(os.sep, '/') for x in wav_paths if '.wav' in x])
classes = sorted(os.listdir(args.src_dir))
labels = [os.path.split(x)[0].split('/')[-1] for x in wav_paths]
le = LabelEncoder()
y_true = le.fit_transform(labels)
results = []
for z, wav_fn in tqdm(enumerate(wav_paths), total=len(wav_paths)):
rate, wav = downsample_mono(wav_fn, args.sr)
mask, env = envelope(wav, rate, threshold=args.threshold)
clean_wav = wav[mask]
step = int(args.sr * args.dt)
batch = []
for i in range(0, clean_wav.shape[0], step):
sample = clean_wav[i:i + step]
sample = sample.reshape(-1, 1)
if sample.shape[0] < step:
tmp = np.zeros(shape=(step, 1), dtype=np.float32)
tmp[:sample.shape[0], :] = sample.flatten().reshape(-1, 1)
sample = tmp
batch.append(sample)
X_batch = np.array(batch, dtype=np.float32)
y_pred = model.predict(X_batch)
y_mean = np.mean(y_pred, axis=0)
y_pred = np.argmax(y_mean)
real_class = os.path.dirname(wav_fn).split('/')[-1]
print('Actual class: {}, Predicted class: {}'.format(real_class, classes[y_pred]))
results.append(y_mean)
np.save(os.path.join('logs', args.pred_fn), np.array(results)) |
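Inside the loop above, the last slice of each cleaned wav is zero-padded so every entry of `X_batch` has exactly `step` samples; the padding step in isolation:

```python
import numpy as np

step = 16000                                          # sr * dt for 1 s at 16 kHz
sample = np.random.randn(9000, 1).astype(np.float32)  # short final slice

if sample.shape[0] < step:
    tmp = np.zeros((step, 1), dtype=np.float32)
    tmp[:sample.shape[0], :] = sample                 # copy audio, zero the tail
    sample = tmp
print(sample.shape)                                   # (16000, 1)
```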
utils.py | """
Copyright (C) 2019 Interactive Brokers LLC. All rights reserved. This code is subject to the terms
and conditions of the IB API Non-Commercial License or the IB API Commercial License, as applicable.
"""
"""
Collection of misc tools
"""
import sys
import logging
import inspect
from ibapi.common import UNSET_INTEGER, UNSET_DOUBLE
logger = logging.getLogger(__name__)
# I use this just to visually emphasize it's an overridden wrapper method
def iswrapper(fn):
return fn
class BadMessage(Exception):
def __init__(self, text):
self.text = text
class LogFunction(object):
def __init__(self, text, logLevel):
self.text = text
self.logLevel = logLevel
def __call__(self, fn):
def newFn(origSelf, *args, **kwargs):
# logger is already a Logger instance, so query it directly
if logger.isEnabledFor(self.logLevel):
argNames = [argName for argName in inspect.getfullargspec(fn)[0] if argName != 'self']
logger.log(self.logLevel,
"{} {} {} kw:{}".format(self.text, fn.__name__,
[nameNarg for nameNarg in zip(argNames, args) if nameNarg[1] is not origSelf], kwargs))
fn(origSelf, *args, **kwargs)
return newFn
def current_fn_name(parent_idx = 0):
#depth is 1 bc this is already a fn, so we need the caller
return sys._getframe(1 + parent_idx).f_code.co_name
def setattr_log(self, var_name, var_value):
#import code; code.interact(local=locals())
logger.debug("%s %s %s=|%s|", self.__class__, id(self), var_name, var_value)
super(self.__class__, self).__setattr__(var_name, var_value)
SHOW_UNSET = True
def decode(the_type, fields, show_unset = False):
try:
s = next(fields)
except StopIteration:
raise BadMessage("no more fields")
logger.debug("decode %s %s", the_type, s)
if the_type is str:
if type(s) is str:
return s
elif type(s) is bytes:
return s.decode(errors='backslashreplace')
else:
raise TypeError("unsupported incoming type " + type(s) + " for desired type 'str")
orig_type = the_type
if the_type is bool:
the_type = int
if show_unset:
if s is None or len(s) == 0:
if the_type is float:
n = UNSET_DOUBLE
elif the_type is int:
n = UNSET_INTEGER
else:
raise TypeError("unsupported desired type for empty value" + the_type)
else:
n = the_type(s)
else:
|
if orig_type is bool:
n = False if n == 0 else True
return n
def ExerciseStaticMethods(klass):
import types
#import code; code.interact(local=dict(globals(), **locals()))
for (_, var) in inspect.getmembers(klass):
#print(name, var, type(var))
if type(var) == types.FunctionType:
print("Exercising: %s:" % var)
print(var())
print()
def floatToStr(val):
return str(val) if val != UNSET_DOUBLE else ""
| n = the_type(s or 0) |
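With the middle filled in, `decode` pulls the next wire field and coerces it to the requested type. A few illustrative calls, with the behavior read off the code above (assumes `decode` and `UNSET_DOUBLE` from this module are in scope):

```python
fields = iter([b"42", b"", b"1"])

print(decode(int, fields))                     # b"42" -> 42
print(decode(float, fields, show_unset=True))  # empty field -> UNSET_DOUBLE sentinel
print(decode(bool, fields))                    # b"1" -> 1 -> True
```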
lib.rs | #![recursion_limit="128"]
#![doc(html_root_url = "https://api.rocket.rs/master")]
#![doc(html_favicon_url = "https://rocket.rs/images/favicon.ico")]
#![doc(html_logo_url = "https://rocket.rs/images/logo-boxed.png")]
#![warn(rust_2018_idioms)]
//! # Rocket - Code Generation
//!
//! This crate implements the code generation portions of Rocket. This includes
//! custom derives, custom attributes, and procedural macros. The documentation
//! here is purely technical. The code generation facilities are documented
//! thoroughly in the [Rocket programming guide](https://rocket.rs/master/guide).
//!
//! # Usage
//!
//! You **_should not_** directly depend on this library. To use the macros,
//! attributes, and derives in this crate, it suffices to depend on `rocket` in
//! `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! rocket = "0.5.0-dev"
//! ```
//!
//! And to import all macros, attributes, and derives via `#[macro_use]` in the
//! crate root:
//!
//! ```rust
//! #[macro_use] extern crate rocket;
//! # #[get("/")] fn hello() { }
//! # fn main() { rocket::ignite().mount("/", routes![hello]); }
//! ```
//!
//! Or, alternatively, selectively import from the top-level scope:
//!
//! ```rust
//! # extern crate rocket;
//!
//! use rocket::{get, routes};
//! # #[get("/")] fn hello() { }
//! # fn main() { rocket::ignite().mount("/", routes![hello]); }
//! ```
//!
//! # Debugging Codegen
//!
//! When the `ROCKET_CODEGEN_DEBUG` environment variable is set, this crate
//! logs, at compile-time and to the console, the items it generates. For
//! example, you might run the following to build a Rocket application with
//! codegen debug logging enabled:
//!
//! ```sh
//! ROCKET_CODEGEN_DEBUG=1 cargo build
//! ```
#[macro_use] extern crate quote;
use rocket_http as http;
macro_rules! vars_and_mods {
($($name:ident => $path:path,)*) => {
macro_rules! define {
// Note: the `o` is to capture the input's span
$(($i:ident $name) => {
#[allow(non_snake_case)] let $i = quote!($path);
};)*
$(($span:expr => $i:ident $name) => {
#[allow(non_snake_case)] let $i = quote_spanned!($span => $path);
};)*
}
}
}
vars_and_mods! {
req => __req,
status => __status,
catcher => __catcher,
data => __data,
error => __error,
trail => __trail,
request => rocket::request,
response => rocket::response,
handler => rocket::handler,
log => rocket::logger,
Outcome => rocket::outcome::Outcome,
FromTransformedData => rocket::data::FromTransformedData,
Transform => rocket::data::Transform,
Query => rocket::request::Query,
FromFormValue => rocket::request::FromFormValue,
Request => rocket::request::Request,
Response => rocket::response::Response,
Data => rocket::data::Data,
StaticRouteInfo => rocket::StaticRouteInfo,
StaticCatcherInfo => rocket::StaticCatcherInfo,
Route => rocket::Route,
Catcher => rocket::Catcher,
SmallVec => rocket::http::private::SmallVec,
Status => rocket::http::Status,
HandlerFuture => rocket::handler::HandlerFuture,
ErrorHandlerFuture => rocket::catcher::ErrorHandlerFuture,
_Option => ::std::option::Option,
_Result => ::std::result::Result,
_Some => ::std::option::Option::Some,
_None => ::std::option::Option::None,
_Ok => ::std::result::Result::Ok,
_Err => ::std::result::Result::Err,
_Box => ::std::boxed::Box,
_Vec => ::std::vec::Vec,
}
macro_rules! define_vars_and_mods {
($($name:ident),*) => ($(define!($name $name);)*);
($span:expr => $($name:ident),*) => ($(define!($span => $name $name);)*)
}
#[macro_use]
mod proc_macro_ext;
mod derive;
mod attribute;
mod bang;
mod http_codegen;
mod syn_ext;
use crate::http::Method;
use proc_macro::TokenStream;
use devise::{proc_macro2, syn};
static URI_MACRO_PREFIX: &str = "rocket_uri_macro_";
static ROCKET_PARAM_PREFIX: &str = "__rocket_param_";
macro_rules! emit {
($tokens:expr) => ({
use devise::ext::SpanDiagnosticExt;
let mut tokens = $tokens;
if std::env::var_os("ROCKET_CODEGEN_DEBUG").is_some() {
let debug_tokens = proc_macro2::Span::call_site()
.note("emitting Rocket code generation debug output")
.note(tokens.to_string())
.emit_as_item_tokens();
tokens.extend(debug_tokens);
}
tokens.into()
})
}
macro_rules! route_attribute {
($name:ident => $method:expr) => (
/// Attribute to generate a [`Route`] and associated metadata.
///
/// This and all other route attributes can only be applied to free
/// functions:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[get("/")]
/// fn index() -> &'static str {
/// "Hello, world!"
/// }
/// ```
///
/// There are 7 method-specific route attributes:
///
/// * [`get`] - `GET` specific route
/// * [`put`] - `PUT` specific route
/// * [`post`] - `POST` specific route
/// * [`delete`] - `DELETE` specific route
/// * [`head`] - `HEAD` specific route
/// * [`options`] - `OPTIONS` specific route
/// * [`patch`] - `PATCH` specific route
///
/// Additionally, [`route`] allows the method and path to be explicitly
/// specified:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[route(GET, path = "/")]
/// fn index() -> &'static str {
/// "Hello, world!"
/// }
/// ```
///
/// [`get`]: attr.get.html
/// [`put`]: attr.put.html
/// [`post`]: attr.post.html
/// [`delete`]: attr.delete.html
/// [`head`]: attr.head.html
/// [`options`]: attr.options.html
/// [`patch`]: attr.patch.html
/// [`route`]: attr.route.html
///
/// # Grammar
///
/// The grammar for all method-specific route attributes is defined as:
///
/// ```text
/// route := '"' path ('?' query)? '"' (',' parameter)*
///
/// path := ('/' segment)*
///
/// query := segment ('&' segment)*
///
/// segment := URI_SEG
/// | SINGLE_PARAM
/// | MULTI_PARAM
///
/// parameter := 'rank' '=' INTEGER
/// | 'format' '=' '"' MEDIA_TYPE '"'
/// | 'data' '=' '"' SINGLE_PARAM '"'
///
/// SINGLE_PARAM := '<' IDENT '>'
/// MULTI_PARAM := '<' IDENT '..>'
///
/// URI_SEG := valid, non-percent-encoded HTTP URI segment
/// MEDIA_TYPE := valid HTTP media type or known shorthand
///
/// INTEGER := unsigned integer, as defined by Rust
/// IDENT := valid identifier, as defined by Rust, except `_`
/// ```
///
/// The generic route attribute is defined as:
///
/// ```text
/// generic-route := METHOD ',' 'path' '=' route
/// ```
///
/// # Typing Requirements
///
/// Every identifier that appears in a dynamic parameter (`SINGLE_PARAM`
/// or `MULTI_PARAM`) must appear as an argument to the function. For
/// example, the following route requires the decorated function to have
/// the arguments `foo`, `baz`, `msg`, `rest`, and `form`:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// # use rocket::request::Form;
/// # use std::path::PathBuf;
/// # #[derive(FromForm)] struct F { a: usize }
/// #[get("/<foo>/bar/<baz..>?<msg>&closed&<rest..>", data = "<form>")]
/// # fn f(foo: usize, baz: PathBuf, msg: String, rest: Form<F>, form: Form<F>) { }
/// ```
///
/// The type of each function argument corresponding to a dynamic
/// parameter is required to implement one of Rocket's guard traits. The
/// exact trait that is required to be implemented depends on the kind
/// of dynamic parameter (`SINGLE` or `MULTI`) and where in the route
/// attribute the parameter appears. The table below summarizes trait
/// requirements:
///
/// | position | kind | trait |
/// |----------|-------------|-------------------|
/// | path | `<ident>` | [`FromParam`] |
/// | path | `<ident..>` | [`FromSegments`] |
/// | query | `<ident>` | [`FromFormValue`] |
/// | query | `<ident..>` | [`FromQuery`] |
/// | data | `<ident>` | [`FromTransformedData`] |
///
/// The type of each function argument that _does not_ have a
/// corresponding dynamic parameter is required to implement the
/// [`FromRequest`] trait.
///
/// The return type of the decorated function must implement the
/// [`Responder`] trait.
///
/// [`FromParam`]: ../rocket/request/trait.FromParam.html
/// [`FromSegments`]: ../rocket/request/trait.FromSegments.html
/// [`FromFormValue`]: ../rocket/request/trait.FromFormValue.html
/// [`FromQuery`]: ../rocket/request/trait.FromQuery.html
/// [`FromTransformedData`]: ../rocket/data/trait.FromTransformedData.html
/// [`FromRequest`]: ../rocket/request/trait.FromRequest.html
/// [`Route`]: ../rocket/struct.Route.html
/// [`Responder`]: ../rocket/response/trait.Responder.html
///
/// # Semantics
///
/// The attribute generates three items:
///
/// 1. A route [`Handler`].
///
/// The generated handler validates and generates all arguments for
/// the generated function according to the trait that their type
/// must implement. The order in which arguments are processed is:
///
/// 1. Request guards from left to right.
///
/// If a request guard fails, the request is forwarded if the
/// [`Outcome`] is `Forward` or failed if the [`Outcome`] is
/// `Failure`. See [`FromRequest` Outcomes] for further
/// detail.
///
/// 2. Path and query parameters from left to right as declared
/// in the function argument list. | ///
/// 3. Data parameter, if any.
///
/// If a data guard fails, the request is forwarded if the
/// [`Outcome`] is `Forward` or failed if the [`Outcome`] is
/// `Failure`. See [`FromTransformedData` Outcomes] for further detail.
///
/// If all validation succeeds, the decorated function is called.
/// The returned value is used to generate a [`Response`] via the
/// type's [`Responder`] implementation.
///
/// 2. A static structure used by [`routes!`] to generate a [`Route`].
///
/// The static structure (and resulting [`Route`]) is populated
/// with the name (the function's name), path, query, rank, and
/// format from the route attribute. The handler is set to the
/// generated handler.
///
/// 3. A macro used by [`uri!`] to type-check and generate an
/// [`Origin`].
///
/// [`Handler`]: ../rocket/trait.Handler.html
/// [`routes!`]: macro.routes.html
/// [`uri!`]: macro.uri.html
/// [`Origin`]: ../rocket/http/uri/struct.Origin.html
/// [`Outcome`]: ../rocket/outcome/enum.Outcome.html
/// [`Response`]: ../rocket/struct.Response.html
/// [`FromRequest` Outcomes]: ../rocket/request/trait.FromRequest.html#outcomes
/// [`FromTransformedData` Outcomes]: ../rocket/data/trait.FromTransformedData.html#outcomes
#[proc_macro_attribute]
pub fn $name(args: TokenStream, input: TokenStream) -> TokenStream {
emit!(attribute::route::route_attribute($method, args, input))
}
)
}
route_attribute!(route => None);
route_attribute!(get => Method::Get);
route_attribute!(put => Method::Put);
route_attribute!(post => Method::Post);
route_attribute!(delete => Method::Delete);
route_attribute!(head => Method::Head);
route_attribute!(patch => Method::Patch);
route_attribute!(options => Method::Options);
/// Attribute to generate a [`Catcher`] and associated metadata.
///
/// This attribute can only be applied to free functions:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// use rocket::Request;
/// use rocket::http::Status;
///
/// #[catch(404)]
/// fn not_found(req: &Request) -> String {
/// format!("Sorry, {} does not exist.", req.uri())
/// }
///
/// #[catch(default)]
/// fn default(status: Status, req: &Request) -> String {
/// format!("{} - {} ({})", status.code, status.reason, req.uri())
/// }
/// ```
///
/// # Grammar
///
/// The grammar for the `#[catch]` attributes is defined as:
///
/// ```text
/// catch := STATUS | 'default'
///
/// STATUS := valid HTTP status code (integer in [200, 599])
/// ```
///
/// # Typing Requirements
///
/// The decorated function may take zero, one, or two arguments. Its type
/// signature must be one of the following, where `R:`[`Responder`]:
///
/// * `fn() -> R`
/// * `fn(`[`&Request`]`) -> R`
/// * `fn(`[`Status`]`, `[`&Request`]`) -> R`
///
/// # Semantics
///
/// The attribute generates two items:
///
/// 1. An [`ErrorHandler`].
///
/// The generated handler calls the decorated function, passing in the
/// [`Status`] and [`&Request`] values if requested. The returned value is
/// used to generate a [`Response`] via the type's [`Responder`]
/// implementation.
///
/// 2. A static structure used by [`catchers!`] to generate a [`Catcher`].
///
/// The static structure (and resulting [`Catcher`]) is populated with the
/// name (the function's name) and status code from the route attribute or
/// `None` if `default`. The handler is set to the generated handler.
///
/// [`&Request`]: ../rocket/struct.Request.html
/// [`Status`]: ../rocket/http/struct.Status.html
/// [`ErrorHandler`]: ../rocket/type.ErrorHandler.html
/// [`catchers!`]: macro.catchers.html
/// [`Catcher`]: ../rocket/struct.Catcher.html
/// [`Response`]: ../rocket/struct.Response.html
/// [`Responder`]: ../rocket/response/trait.Responder.html
#[proc_macro_attribute]
pub fn catch(args: TokenStream, input: TokenStream) -> TokenStream {
emit!(attribute::catch::catch_attribute(args, input))
}
/// FIXME: Document.
#[proc_macro_attribute]
pub fn async_test(args: TokenStream, input: TokenStream) -> TokenStream {
emit!(attribute::async_entry::async_test_attribute(args, input))
}
/// FIXME: Document.
#[proc_macro_attribute]
pub fn main(args: TokenStream, input: TokenStream) -> TokenStream {
emit!(attribute::async_entry::main_attribute(args, input))
}
/// FIXME: Document.
#[proc_macro_attribute]
pub fn launch(args: TokenStream, input: TokenStream) -> TokenStream {
emit!(attribute::async_entry::launch_attribute(args, input))
}
/// Derive for the [`FromFormValue`] trait.
///
/// The [`FromFormValue`] derive can be applied to enums with nullary
/// (zero-length) fields:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[derive(FromFormValue)]
/// enum MyValue {
/// First,
/// Second,
/// Third,
/// }
/// ```
///
/// The derive generates an implementation of the [`FromFormValue`] trait for
/// the decorated `enum`. The implementation returns successfully when the form
/// value matches, case insensitively, the stringified version of a variant's
/// name, returning an instance of said variant. If there is no match, an error
/// ([`FromFormValue::Error`]) of type [`&RawStr`] is returned, the value of
/// which is the raw form field value that failed to match.
///
/// As an example, for the `enum` above, the form values `"first"`, `"FIRST"`,
/// `"fiRSt"`, and so on would parse as `MyValue::First`, while `"second"` and
/// `"third"` would parse as `MyValue::Second` and `MyValue::Third`,
/// respectively.
///
/// The `form` field attribute can be used to change the string that is compared
/// against for a given variant:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[derive(FromFormValue)]
/// enum MyValue {
/// First,
/// Second,
/// #[form(value = "fourth")]
/// Third,
/// }
/// ```
///
/// The `#[form]` attribute's grammar is:
///
/// ```text
/// form := 'value' '=' STRING_LIT
///
/// STRING_LIT := any valid string literal, as defined by Rust
/// ```
///
/// The attribute accepts a single string parameter of name `value`
/// corresponding to the string to use to match against for the decorated
/// variant. In the example above, the strings `"fourth"`, `"FOUrth"` and so
/// on would parse as `MyValue::Third`.
///
/// [`FromFormValue`]: ../rocket/request/trait.FromFormValue.html
/// [`FromFormValue::Error`]: ../rocket/request/trait.FromFormValue.html#associatedtype.Error
/// [`&RawStr`]: ../rocket/http/struct.RawStr.html
// FIXME(rustdoc): We should be able to refer to items in `rocket`.
#[proc_macro_derive(FromFormValue, attributes(form))]
pub fn derive_from_form_value(input: TokenStream) -> TokenStream {
emit!(derive::from_form_value::derive_from_form_value(input))
}
/// Derive for the [`FromForm`] trait.
///
/// The [`FromForm`] derive can be applied to structures with named fields:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[derive(FromForm)]
/// struct MyStruct {
/// field: usize,
/// other: String
/// }
/// ```
///
/// Each field's type is required to implement [`FromFormValue`].
///
/// The derive generates an implementation of the [`FromForm`] trait. The
/// implementation parses a form whose field names match the field names of the
/// structure on which the derive was applied. Each field's value is parsed with
/// the [`FromFormValue`] implementation of the field's type. The `FromForm`
/// implementation succeeds only when all of the field parses succeed. If
/// parsing fails, an error ([`FromForm::Error`]) of type [`FormParseError`] is
/// returned.
///
/// The derive accepts one field attribute: `form`, with the following syntax:
///
/// ```text
/// form := 'field' '=' '"' IDENT '"'
///
/// IDENT := valid identifier, as defined by Rust
/// ```
///
/// When applied, the attribute looks as follows:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[derive(FromForm)]
/// struct MyStruct {
/// field: usize,
/// #[form(field = "renamed_field")]
/// other: String
/// }
/// ```
///
/// The field attribute directs that a different incoming field name is
/// expected, and the value of the `field` attribute is used instead of the
/// structure's actual field name when parsing a form. In the example above, the
/// value of the `MyStruct::other` struct field will be parsed from the incoming
/// form's `renamed_field` field.
///
/// [`FromForm`]: ../rocket/request/trait.FromForm.html
/// [`FromFormValue`]: ../rocket/request/trait.FromFormValue.html
/// [`FormParseError`]: ../rocket/request/enum.FormParseError.html
/// [`FromForm::Error`]: ../rocket/request/trait.FromForm.html#associatedtype.Error
#[proc_macro_derive(FromForm, attributes(form))]
pub fn derive_from_form(input: TokenStream) -> TokenStream {
emit!(derive::from_form::derive_from_form(input))
}
/// Derive for the [`Responder`] trait.
///
/// The [`Responder`] derive can be applied to enums and structs with named
/// fields. When applied to enums, variants must have at least one field. When
/// applied to structs, the struct must have at least one field.
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// # use std::fs::File;
/// # use rocket::http::ContentType;
/// # type OtherResponder = MyResponderA;
/// #
/// #[derive(Responder)]
/// enum MyResponderA {
/// A(String),
/// B(File, ContentType),
/// }
///
/// #[derive(Responder)]
/// struct MyResponderB {
/// inner: OtherResponder,
/// header: ContentType,
/// }
/// ```
///
/// The derive generates an implementation of the [`Responder`] trait for the
/// decorated enum or structure. The derive uses the _first_ field of a variant
/// or structure to generate a [`Response`]. As such, the type of the first
/// field must implement [`Responder`]. The remaining fields of a variant or
/// structure are set as headers in the produced [`Response`] using
/// [`Response::set_header()`]. As such, every other field (unless explicitly
/// ignored, explained next) must implement `Into<Header>`.
///
/// Except for the first field, fields decorated with `#[response(ignore)]` are
/// ignored by the derive:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// # use std::fs::File;
/// # use rocket::http::ContentType;
/// # use rocket::response::NamedFile;
/// # type Other = usize;
/// #
/// #[derive(Responder)]
/// enum MyResponder {
/// A(String),
/// B(File, ContentType, #[response(ignore)] Other),
/// }
///
/// #[derive(Responder)]
/// struct MyOtherResponder {
/// inner: NamedFile,
/// header: ContentType,
/// #[response(ignore)]
/// other: Other,
/// }
/// ```
///
/// Decorating the first field with `#[response(ignore)]` has no effect.
///
/// Additionally, the `response` attribute can be used on named structures and
/// enum variants to override the status and/or content-type of the [`Response`]
/// produced by the generated implementation. The `response` attribute used in
/// these positions has the following grammar:
///
/// ```text
/// response := parameter (',' parameter)?
///
/// parameter := 'status' '=' STATUS
/// | 'content_type' '=' CONTENT_TYPE
///
/// STATUS := unsigned integer >= 100 and < 600
/// CONTENT_TYPE := string literal, as defined by Rust, identifying a valid
/// Content-Type, as defined by Rocket
/// ```
///
/// It can be used as follows:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// # use rocket::http::ContentType;
/// # use rocket::response::NamedFile;
/// # type Other = usize;
/// # type InnerResponder = String;
/// #
/// #[derive(Responder)]
/// enum Error {
/// #[response(status = 500, content_type = "json")]
/// A(String),
/// #[response(status = 404)]
/// B(NamedFile, ContentType),
/// }
///
/// #[derive(Responder)]
/// #[response(status = 400)]
/// struct MyResponder {
/// inner: InnerResponder,
/// header: ContentType,
/// #[response(ignore)]
/// other: Other,
/// }
/// ```
///
/// The attribute accepts two key/value pairs: `status` and `content_type`. The
/// value of `status` must be an unsigned integer representing a valid status
/// code. The [`Response`] produced from the generated implementation will have
/// its status overridden to this value.
///
/// The value of `content_type` must be a valid media-type in `top/sub` form or
/// `shorthand` form. Examples include:
///
/// * `"text/html"`
/// * `"application/x-custom"`
/// * `"html"`
/// * `"json"`
/// * `"plain"`
/// * `"binary"`
///
/// See [`ContentType::parse_flexible()`] for a full list of available
/// shorthands. The [`Response`] produced from the generated implementation will
/// have its content-type overridden to this value.
///
/// [`Responder`]: ../rocket/response/trait.Responder.html
/// [`Response`]: ../rocket/struct.Response.html
/// [`Response::set_header()`]: ../rocket/response/struct.Response.html#method.set_header
/// [`ContentType::parse_flexible()`]: ../rocket/http/struct.ContentType.html#method.parse_flexible
#[proc_macro_derive(Responder, attributes(response))]
pub fn derive_responder(input: TokenStream) -> TokenStream {
emit!(derive::responder::derive_responder(input))
}
/// Derive for the [`UriDisplay<Query>`] trait.
///
/// The [`UriDisplay<Query>`] derive can be applied to enums and structs. When
/// applied to enums, variants must have at least one field. When applied to
/// structs, the struct must have at least one field.
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #[derive(UriDisplayQuery)]
/// enum Kind {
/// A(String),
/// B(usize),
/// }
///
/// #[derive(UriDisplayQuery)]
/// struct MyStruct {
/// name: String,
/// id: usize,
/// kind: Kind,
/// }
/// ```
///
/// Each field's type is required to implement [`UriDisplay<Query>`].
///
/// The derive generates an implementation of the [`UriDisplay<Query>`] trait.
/// The implementation calls [`Formatter::write_named_value()`] for every named
/// field, using the field's name (unless overridden, explained next) as the
/// `name` parameter, and [`Formatter::write_value()`] for every unnamed field
/// in the order the fields are declared.
///
/// The derive accepts one field attribute: `form`, with the following syntax:
///
/// ```text
/// form := 'field' '=' '"' IDENT '"'
///
/// IDENT := valid identifier, as defined by Rust
/// ```
///
/// When applied, the attribute looks as follows:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// # #[derive(UriDisplayQuery)]
/// # struct Kind(String);
/// #[derive(UriDisplayQuery)]
/// struct MyStruct {
/// name: String,
/// id: usize,
/// #[form(field = "type")]
/// kind: Kind,
/// }
/// ```
///
/// The field attribute directs that a different field name be used when calling
/// [`Formatter::write_named_value()`] for the given field. The value of the
/// `field` attribute is used instead of the structure's actual field name. In
/// the example above, the field `MyStruct::kind` is rendered with a name of
/// `type`.
///
/// [`UriDisplay<Query>`]: ../rocket/http/uri/trait.UriDisplay.html
/// [`Formatter::write_named_value()`]: ../rocket/http/uri/struct.Formatter.html#method.write_named_value
/// [`Formatter::write_value()`]: ../rocket/http/uri/struct.Formatter.html#method.write_value
#[proc_macro_derive(UriDisplayQuery, attributes(form))]
pub fn derive_uri_display_query(input: TokenStream) -> TokenStream {
emit!(derive::uri_display::derive_uri_display_query(input))
}
/// Derive for the [`UriDisplay<Path>`] trait.
///
/// The [`UriDisplay<Path>`] derive can only be applied to tuple structs with
/// one field.
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #[derive(UriDisplayPath)]
/// struct Name(String);
///
/// #[derive(UriDisplayPath)]
/// struct Age(usize);
/// ```
///
/// The field's type is required to implement [`UriDisplay<Path>`].
///
/// The derive generates an implementation of the [`UriDisplay<Path>`] trait.
/// The implementation calls [`Formatter::write_value()`] for the field.
///
/// [`UriDisplay<Path>`]: ../rocket/http/uri/trait.UriDisplay.html
/// [`Formatter::write_value()`]: ../rocket/http/uri/struct.Formatter.html#method.write_value
#[proc_macro_derive(UriDisplayPath)]
pub fn derive_uri_display_path(input: TokenStream) -> TokenStream {
emit!(derive::uri_display::derive_uri_display_path(input))
}
/// Generates a [`Vec`] of [`Route`]s from a set of route paths.
///
/// The `routes!` macro expands a list of route paths into a [`Vec`] of their
/// corresponding [`Route`] structures. For example, given the following routes:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[get("/")]
/// fn index() { /* .. */ }
///
/// mod person {
/// #[post("/hi/<person>")]
/// pub fn hello(person: String) { /* .. */ }
/// }
/// ```
///
/// The `routes!` macro can be used as:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// # use rocket::http::Method;
/// #
/// # #[get("/")] fn index() { /* .. */ }
/// # mod person {
/// # #[post("/hi/<person>")] pub fn hello(person: String) { /* .. */ }
/// # }
/// let my_routes = routes![index, person::hello];
/// assert_eq!(my_routes.len(), 2);
///
/// let index_route = &my_routes[0];
/// assert_eq!(index_route.method, Method::Get);
/// assert_eq!(index_route.name, Some("index"));
/// assert_eq!(index_route.uri.path(), "/");
///
/// let hello_route = &my_routes[1];
/// assert_eq!(hello_route.method, Method::Post);
/// assert_eq!(hello_route.name, Some("hello"));
/// assert_eq!(hello_route.uri.path(), "/hi/<person>");
/// ```
///
/// The grammar for `routes!` is defined as:
///
/// ```text
/// routes := PATH (',' PATH)*
///
/// PATH := a path, as defined by Rust
/// ```
///
/// [`Route`]: ../rocket/struct.Route.html
#[proc_macro]
pub fn routes(input: TokenStream) -> TokenStream {
emit!(bang::routes_macro(input))
}
/// Generates a [`Vec`] of [`Catcher`]s from a set of catcher paths.
///
/// The `catchers!` macro expands a list of catcher paths into a [`Vec`] of
/// their corresponding [`Catcher`] structures. For example, given the following
/// catchers:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[catch(404)]
/// fn not_found() { /* .. */ }
///
/// mod inner {
/// #[catch(400)]
/// pub fn unauthorized() { /* .. */ }
/// }
///
/// #[catch(default)]
/// fn default_catcher() { /* .. */ }
/// ```
///
/// The `catchers!` macro can be used as:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// # #[catch(404)] fn not_found() { /* .. */ }
/// # #[catch(default)] fn default_catcher() { /* .. */ }
/// # mod inner {
/// # #[catch(400)] pub fn unauthorized() { /* .. */ }
/// # }
/// let my_catchers = catchers![not_found, inner::unauthorized, default_catcher];
/// assert_eq!(my_catchers.len(), 3);
///
/// let not_found = &my_catchers[0];
/// assert_eq!(not_found.code, Some(404));
///
/// let unauthorized = &my_catchers[1];
/// assert_eq!(unauthorized.code, Some(400));
///
/// let default = &my_catchers[2];
/// assert_eq!(default.code, None);
/// ```
///
/// The grammar for `catchers!` is defined as:
///
/// ```text
/// catchers := PATH (',' PATH)*
///
/// PATH := a path, as defined by Rust
/// ```
///
/// [`Catcher`]: ../rocket/struct.Catcher.html
#[proc_macro]
pub fn catchers(input: TokenStream) -> TokenStream {
emit!(bang::catchers_macro(input))
}
/// Type-safe, URI-safe generation of an [`Origin`] URI from a route.
///
/// The `uri!` macro creates a type-safe, URI-safe URI given a route and values
/// for the route's URI parameters. The inputs to the macro are the path to a
/// route, a colon, and one argument for each dynamic parameter (parameters in
/// `<>`) in the route's path and query.
///
/// For example, for the following route:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// #[get("/person/<name>?<age>")]
/// fn person(name: String, age: Option<u8>) -> String {
/// # "".into() /*
/// ...
/// # */
/// }
/// ```
///
/// A URI can be created as follows:
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// #
/// # #[get("/person/<name>?<age>")]
/// # fn person(name: String, age: Option<u8>) { }
/// #
/// // with unnamed parameters, in route path declaration order
/// let mike = uri!(person: "Mike Smith", Some(28));
/// assert_eq!(mike.to_string(), "/person/Mike%20Smith?age=28");
///
/// // with named parameters, order irrelevant
/// let mike = uri!(person: name = "Mike", age = Some(28));
/// let mike = uri!(person: age = Some(28), name = "Mike");
/// assert_eq!(mike.to_string(), "/person/Mike?age=28");
///
/// // with a specific mount-point
/// let mike = uri!("/api", person: name = "Mike", age = Some(28));
/// assert_eq!(mike.to_string(), "/api/person/Mike?age=28");
///
/// // with unnamed values ignored
/// let mike = uri!(person: "Mike", _);
/// assert_eq!(mike.to_string(), "/person/Mike");
///
/// // with unnamed values, explicitly `None`.
/// let option: Option<u8> = None;
/// let mike = uri!(person: "Mike", option);
/// assert_eq!(mike.to_string(), "/person/Mike");
///
/// // with named values ignored
/// let mike = uri!(person: name = "Mike", age = _);
/// assert_eq!(mike.to_string(), "/person/Mike");
///
/// // with named values, explicitly `None`
/// let option: Option<u8> = None;
/// let mike = uri!(person: name = "Mike", age = option);
/// assert_eq!(mike.to_string(), "/person/Mike");
/// ```
///
/// ## Grammar
///
/// The grammar for the `uri!` macro is:
///
/// ```text
/// uri := (mount ',')? PATH (':' params)?
///
/// mount := STRING
/// params := unnamed | named
/// unnamed := expr (',' expr)*
/// named := IDENT = expr (',' named)?
/// expr := EXPR | '_'
///
/// EXPR := a valid Rust expression (examples: `foo()`, `12`, `"hey"`)
/// IDENT := a valid Rust identifier (examples: `name`, `age`)
/// STRING := an uncooked string literal, as defined by Rust (example: `"hi"`)
/// PATH := a path, as defined by Rust (examples: `route`, `my_mod::route`)
/// ```
///
/// ## Semantics
///
/// The `uri!` macro returns an [`Origin`] structure with the URI of the
/// supplied route interpolated with the given values. Note that `Origin`
/// implements `Into<Uri>` (and by extension, `TryInto<Uri>`), so it can be
/// converted into a [`Uri`] using `.into()` as needed.
///
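/// As a sketch of that conversion (reusing the `person` route from above and
/// assuming `Uri` lives at `rocket::http::uri::Uri`):
///
/// ```rust
/// # #[macro_use] extern crate rocket;
/// # use rocket::http::uri::Uri;
/// #
/// # #[get("/person/<name>?<age>")]
/// # fn person(name: String, age: Option<u8>) { }
/// #
/// let uri: Uri = uri!(person: "Mike", Some(28)).into();
/// assert_eq!(uri.to_string(), "/person/Mike?age=28");
/// ```
///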
/// A `uri!` invocation only typechecks if the type of every value in the
/// invocation matches the type declared for the parameter in the given route,
/// after conversion with [`FromUriParam`], or if a value is ignored using `_`
/// and the corresponding route type implements [`Ignorable`].
///
/// Each value passed into `uri!` is rendered in its appropriate place in the
/// URI using the [`UriDisplay`] implementation for the value's type. The
/// `UriDisplay` implementation ensures that the rendered value is URI-safe.
///
/// If a mount-point is provided, the mount-point is prepended to the route's
/// URI.
///
/// ### Conversion
///
/// The [`FromUriParam`] trait is used to typecheck and perform a conversion for
/// each value passed to `uri!`. If a `FromUriParam<P, S>` implementation exists
/// for a type `T` in URI part `P`, then a value of type `S` can be used
/// in the `uri!` macro for a route URI parameter declared with a type of `T` in
/// part `P`. For example, the following implementation, provided by Rocket,
/// allows an `&str` to be used in a `uri!` invocation for route URI parameters
/// declared as `String`:
///
/// ```rust,ignore
/// impl<P: UriPart, 'a> FromUriParam<P, &'a str> for String { .. }
/// ```
///
/// ### Ignorables
///
/// Query parameters can be ignored using `_` in place of an expression. The
/// corresponding type in the route URI must implement [`Ignorable`]. Ignored
/// parameters are not interpolated into the resulting `Origin`. Path parameters
/// are not ignorable.
///
/// [`Uri`]: ../rocket/http/uri/enum.Uri.html
/// [`Origin`]: ../rocket/http/uri/struct.Origin.html
/// [`FromUriParam`]: ../rocket/http/uri/trait.FromUriParam.html
/// [`UriDisplay`]: ../rocket/http/uri/trait.UriDisplay.html
/// [`Ignorable`]: ../rocket/http/uri/trait.Ignorable.html
#[proc_macro]
pub fn uri(input: TokenStream) -> TokenStream {
emit!(bang::uri_macro(input))
}
#[doc(hidden)]
#[proc_macro]
pub fn rocket_internal_uri(input: TokenStream) -> TokenStream {
emit!(bang::uri_internal_macro(input))
}
#[doc(hidden)]
#[proc_macro]
pub fn rocket_internal_guide_tests(input: TokenStream) -> TokenStream {
emit!(bang::guide_tests_internal(input))
} | ///
/// If a path or query parameter guard fails, the request is
/// forwarded. |
mixin.py | class TransactionHooksDatabaseWrapperMixin(object):
"""
A ``DatabaseWrapper`` mixin to implement transaction-committed hooks.
To use, create a package for your custom database backend and place a
``base.py`` module within it. Import whatever ``DatabaseWrapper`` you want
to subclass (under some other name), and then create a ``DatabaseWrapper``
class which inherits from both this mixin and the parent
``DatabaseWrapper`` (in that order).
For an example, see ``backends/postgresql_psycopg2/base.py``.
"""
def | (self, *a, **kw):
# a list of no-argument functions to run when the transaction commits;
# each entry is an (sids, func) tuple, where sids is a list of the
# active savepoint IDs when this function was registered
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
super(TransactionHooksDatabaseWrapperMixin, self).__init__(*a, **kw)
def on_commit(self, func):
if self.in_atomic_block:
# transaction in progress; save for execution on commit
self.run_on_commit.append((self.savepoint_ids[:], func))
else:
# no transaction in progress; execute immediately
func()
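# Hypothetical caller-side usage, assuming this wrapper backs the active
# Django connection:
#
#     from django.db import connection
#     connection.on_commit(lambda: send_welcome_email(user_id))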
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
try:
while self.run_on_commit:
sids, func = self.run_on_commit.pop(0)
func()
finally:
self.run_on_commit = []
def commit(self, *a, **kw):
super(TransactionHooksDatabaseWrapperMixin, self).commit(*a, **kw)
# Atomic has not had a chance yet to restore autocommit on this
# connection, so on databases that handle autocommit correctly, we need
# to wait to run the hooks until it calls set_autocommit(True)
if self.features.autocommits_when_autocommit_is_off:
self.run_and_clear_commit_hooks()
else:
self.run_commit_hooks_on_set_autocommit_on = True
def set_autocommit(self, autocommit):
super(TransactionHooksDatabaseWrapperMixin, self).set_autocommit(
autocommit)
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
def savepoint_rollback(self, sid, *a, **kw):
super(TransactionHooksDatabaseWrapperMixin, self).savepoint_rollback(
sid, *a, **kw)
# remove any callbacks registered while this savepoint was active
self.run_on_commit = list(filter(
lambda x: sid not in x[0], self.run_on_commit))
def rollback(self, *a, **kw):
super(TransactionHooksDatabaseWrapperMixin, self).rollback(*a, **kw)
self.run_on_commit = []
def connect(self, *a, **kw):
super(TransactionHooksDatabaseWrapperMixin, self).connect(*a, **kw)
self.run_on_commit = []
def close(self, *a, **kw):
super(TransactionHooksDatabaseWrapperMixin, self).close(*a, **kw)
self.run_on_commit = []
| __init__ |
views.py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def | (request):
return HttpResponse('TEST URL')
| index |
lib.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow::Result,
errors::{ffx_bail, ffx_error},
ffx_core::ffx_plugin,
ffx_pdk_lib::groups::{ArtifactStore, ArtifactStoreEntry, ArtifactStoreGroup},
ffx_pdk_lib::lock::{Lock, LockArtifact, LockArtifactStore},
ffx_pdk_lib::spec::{Spec, SpecArtifactStore, SpecArtifactStoreKind},
ffx_pdk_update_args::UpdateCommand,
fuchsia_hyper::new_https_client,
fuchsia_pkg::MetaContents,
futures_lite::io::AsyncWriteExt,
hyper::body::HttpBody,
hyper::{body, StatusCode, Uri},
serde_json::{json, Map, Value},
serde_json5,
std::cmp::Ordering,
std::fs::{read_to_string, File, OpenOptions},
std::io::BufReader,
std::path::PathBuf,
};
// Outputs artifacts to a lock file based on a general specification.
//
// Updates the artifacts by matching the available artifacts in an
// artifact store against the constraints in a specification
// (artifact_spec.json).
//
// URL path to artifact_groups.json for the TUF artifact store.
const TUF_ARTIFACT_GROUPS_PATH: &str = "targets/artifact_groups.json";
#[ffx_plugin("ffx_pdk")]
pub async fn cmd_update(cmd: UpdateCommand) -> Result<()> {
let spec: Spec = read_to_string(cmd.spec_file.clone())
.map_err(|e| ffx_error!(r#"Cannot open spec file "{}": {}"#, cmd.spec_file.display(), e))
.and_then(|contents| {
serde_json5::from_str(&contents).map_err(|e| {
ffx_error!(r#"JSON5 error from spec file "{}": {}"#, cmd.spec_file.display(), e)
})
})?;
process_spec(&spec, &cmd).await?;
println!("Spec file for product \"{}\" processed.", spec.product);
Ok(())
}
/// Struct to hold a JSON Pointer as specified in [RFC
/// 6901](https://tools.ietf.org/html/rfc6901) and a $min/$max boolean.
///
/// This struct is used for filtering artifact store groups by $min/$max.
///
struct MinMaxPointer {
pointer: String,
is_min: bool,
}
impl std::fmt::Debug for MinMaxPointer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "({}, {})", self.pointer, if self.is_min { "$min" } else { "$max" })
}
}
/// Returns a MinMaxPointer containing a JSON Pointer and "$min" or "$max" value.
///
/// No more than one $min or $max is allowed, so check and return errors.
///
fn get_min_max_pointer(json_object: &Map<String, Value>) -> Result<Option<MinMaxPointer>> {
let mut r = collect_min_max_pointers(json_object, "".to_string());
match r.len() {
0 => Ok(None),
1 => Ok(Some(r.remove(0))),
_ => ffx_bail!("More than one $min/$max found while processing spec file! {:?}", r),
}
}
/// Recursively collect JSON Pointers for keys containing the string
/// value "$min" or "$max" in the spec attributes.
///
/// JSON Pointers are used to look up values from a Value::Object for
/// filtering artifact store entries.
///
/// Return a vec of MinMaxPointer structs and the caller checks that no
/// more than 1 struct is returned.
///
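/// For example (illustrative): the attribute object `{"x": {"y": "$max"}}`
/// yields a single `MinMaxPointer { pointer: "/x/y", is_min: false }`.
///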
fn collect_min_max_pointers(json_object: &Map<String, Value>, path: String) -> Vec<MinMaxPointer> {
// Collect into a vec so the caller can catch the error of too many sort keys.
let mut result = Vec::<MinMaxPointer>::new();
for (key, value) in json_object.iter() {
match value {
Value::String(s) => {
if s == "$min" || s == "$max" {
result.push(MinMaxPointer {
pointer: format!("{}/{}", path, key),
is_min: s == "$min",
})
}
}
Value::Object(o) => {
result.append(&mut collect_min_max_pointers(o, format!("{}/{}", path, key)));
}
Value::Null | Value::Bool(_) | Value::Number(_) | Value::Array(_) => {}
}
}
result
}
/// Compare two Value::Object types using a JSON Pointer to extract the
/// comparison field.
///
/// Since this function is used as a sort comparator (e.g. by `max_by`), it
/// returns an `Option<Ordering>`. Panics if either comparison field is
/// missing or the field is not a number or string.
///
fn value_object_partial_cmp(
a_object: &Value,
b_object: &Value,
pointer: &String,
) -> Option<Ordering> {
// values must be available, otherwise fatal error
let a: &Value = a_object
.pointer(pointer)
.unwrap_or_else(|| panic!("Missing field '{}' during $min/$max", pointer));
let b: &Value = b_object
.pointer(pointer)
.unwrap_or_else(|| panic!("Missing field '{}' during $min/$max", pointer));
match (a, b) {
(Value::Number(na), Value::Number(nb)) => {
na.as_f64().unwrap().partial_cmp(&nb.as_f64().unwrap())
}
(Value::String(sa), Value::String(sb)) => sa.partial_cmp(sb),
(_, _) => panic!("$min/$max field ({}) is not Number or String: {} {}", pointer, a, b),
}
}
/// Find the $min, $max and return the index.
///
fn find_min_max(
artifact_groups: &Vec<ArtifactStoreGroup>,
matches: &Vec<usize>,
attributes: &Map<String, Value>,
) -> Result<usize> {
// The next statement returns Err() when more than 1 $min/$max is present
let min_max_pointer = get_min_max_pointer(attributes)?;
match min_max_pointer {
None => {
if artifact_groups.len() > 1 {
ffx_bail!("Multiple artifact groups (probably missing $min/$max)");
}
Ok(0)
}
Some(p) => Ok(*matches
.iter()
.max_by(|&a, &b| {
let a_attributes = &artifact_groups[*a].attributes;
let b_attributes = &artifact_groups[*b].attributes;
value_object_partial_cmp(a_attributes, b_attributes, &p.pointer)
.map(|ordering| if p.is_min { ordering.reverse() } else { ordering })
.unwrap()
})
.unwrap()),
}
}
/// Returns the artifact for an artifact store entry by name.
///
fn get_artifact(
artifact_store_group: &ArtifactStoreGroup,
name: &str,
) -> Option<ArtifactStoreEntry> {
artifact_store_group.artifacts.iter().find(|&a| a.name == name).and_then(|a| Some(a.clone()))
}
/// Return artifact_groups.json for different kinds of artifact stores.
///
async fn read_artifact_groups(
store: &SpecArtifactStore,
cmd: &UpdateCommand,
) -> Result<ArtifactStore> {
match store.r#type {
SpecArtifactStoreKind::TUF => {
if store.repo.is_none() {
ffx_bail!("Missing repo field in artifact store")
}
let repo = store.repo.as_ref().unwrap();
let uri = format!("{}/{}", repo, TUF_ARTIFACT_GROUPS_PATH)
.parse::<Uri>()
.map_err(|e| ffx_error!(r#"Parse Uri failed for "{}": {}"#, repo, e))?;
let client = new_https_client();
let response = client
.get(uri.clone())
.await
.map_err(|e| ffx_error!(r#"Failed on http get for "{}": {}"#, uri, e))?;
if response.status() != StatusCode::OK {
ffx_bail!("http get error {} {}. \n", &uri, response.status(),);
}
let bytes = body::to_bytes(response.into_body()).await?;
let body = String::from_utf8(bytes.to_vec()).expect("response was not valid utf-8");
Ok(serde_json::from_str(&body)
.map_err(|e| ffx_error!(r#"Cannot parse json from "{}": {}"#, &uri, e))?)
}
SpecArtifactStoreKind::Local => {
if store.path.is_none() {
ffx_bail!("Missing path field in store kind");
}
let path_suffix = store.path.as_ref().unwrap();
if cmd.artifact_root.is_none() {
ffx_bail!("Missing --artifact-root parameter");
}
let path = format!("{}/{}", cmd.artifact_root.as_ref().unwrap(), path_suffix);
let reader = BufReader::new(
File::open(path.clone())
.map_err(|e| ffx_error!(r#"Cannot open "{}": {}"#, &path, e))?,
);
Ok(serde_json::from_reader(reader)
.map_err(|e| ffx_error!(r#"Cannot parse json from "{}": {}"#, &path, e))?)
}
}
}
/// Recursively match the artifact group attributes against the specification pattern.
///
/// True if a match.
///
fn match_object(group_attributes: &Value, spec_pattern: &Map<String, Value>) -> bool {
if !group_attributes.is_object() {
panic!("match_object: not an object.");
}
for (key, spec_value) in spec_pattern.iter() {
if let Some(group_value) = group_attributes.get(key) {
// Do not compare $min/$max spec values
if *spec_value != json!("$min") && *spec_value != json!("$max") {
if group_value.is_object() && spec_value.is_object() {
// Compare Object types recursively
if !match_object(group_value, spec_value.as_object().unwrap()) {
return false;
}
} else if *group_value != *spec_value {
// Compare Bool, Number, String, Array
return false;
};
}
} else {
// No value for the key in the spec, probably a user error
println!("Missing value during match for key \"{}\"", key);
return false;
}
}
true
}
/// Match artifacts groups from the artifact store file and spec attribute pattern.
///
/// Returns the index of the matching group.
///
fn match_artifacts(
artifact_groups: &Vec<ArtifactStoreGroup>,
spec_attribute_pattern: &Map<String, Value>,
) -> Result<usize> {
let mut matches = Vec::<usize>::new();
for (index, artifact_group) in artifact_groups.iter().enumerate() {
if match_object(&artifact_group.attributes, spec_attribute_pattern) {
matches.push(index);
}
}
let index = find_min_max(&artifact_groups, &matches, &spec_attribute_pattern)?;
Ok(index)
}
/// Merge two Option<Map> and return a new map. Entries are cloned.
///
/// Note: a duplicate key in b overwrites the value from a.
///
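/// For example (illustrative): merging `{"a": 1}` with `{"a": 2, "b": 3}`
/// yields `{"a": 2, "b": 3}`.
///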
fn merge(a: &Option<Map<String, Value>>, b: &Option<Map<String, Value>>) -> Map<String, Value> {
let mut result = Map::new();
if let Some(map) = a {
result.extend(map.into_iter().map(|(k, v)| (k.clone(), v.clone())));
}
if let Some(map) = b {
result.extend(map.into_iter().map(|(k, v)| (k.clone(), v.clone())));
}
result
}
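/// Returns the content blobs for a package: the meta.far hash itself plus
/// every blob hash listed in the package's `meta/contents`. The meta.far is
/// read from `artifact_root` for local stores, or downloaded from
/// `content_address_storage` otherwise.
///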
async fn get_blobs(
content_address_storage: Option<String>,
hash: String,
artifact_root: Option<String>,
) -> Result<Vec<String>> {
let tempdir = tempfile::tempdir().unwrap();
let mut result = vec![hash.clone()];
let meta_far_path = if content_address_storage.is_none() {
PathBuf::from(artifact_root.unwrap()).join(hash.to_string())
} else {
let hostname = content_address_storage.unwrap();
let uri = format!("{}/{}", hostname, hash)
.parse::<Uri>() | let mut res = client
.get(uri.clone())
.await
.map_err(|e| ffx_error!(r#"Failed on http get for "{}": {}"#, uri, e))?;
let status = res.status();
if status != StatusCode::OK {
ffx_bail!("Cannot download meta.far. Status is {}. Uri is: {}.", status, &uri);
}
let meta_far_path = tempdir.path().join("meta.far");
let mut output = async_fs::File::create(&meta_far_path).await?;
while let Some(next) = res.data().await {
let chunk = next?;
output.write_all(&chunk).await?;
}
output.sync_all().await?;
meta_far_path
};
let mut archive = File::open(&meta_far_path)
.map_err(|e| ffx_error!(r#"Cannot open meta_far "{}": {}"#, meta_far_path.display(), e))?;
let mut meta_far = fuchsia_archive::Reader::new(&mut archive).map_err(|e| {
ffx_error!(r#"Cannot read fuchsia_archive "{}": {}"#, meta_far_path.display(), e)
})?;
let meta_contents = meta_far.read_file("meta/contents").map_err(|e| {
ffx_error!(r#"Cannot read "meta/contens" from "{}": {}"#, meta_far_path.display(), e)
})?;
let meta_contents = MetaContents::deserialize(meta_contents.as_slice())?.into_contents();
result.extend(meta_contents.into_iter().map(|(_, hash)| hash.to_string()));
return Ok(result);
}
/// Main processing of a spec file
///
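/// For each artifact group in the spec: read the backing artifact store,
/// match its groups against the merged spec attributes, resolve each named
/// artifact, and collect a `LockArtifact` (including its blob list). The
/// resulting `Lock` is serialized to the output file named by `cmd.out`.
///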
async fn process_spec(spec: &Spec, cmd: &UpdateCommand) -> Result<()> {
let mut lock_artifacts = Vec::<LockArtifact>::new();
for spec_artifact_group in spec.artifact_groups.iter() {
// SpecArtifactGroup has a store and list of artifacts
let spec_artifact_store = &spec_artifact_group.artifact_store;
let artifact_store_groups = read_artifact_groups(&spec_artifact_store, cmd).await?;
// find each artifact in the spec in the store
for spec_artifact in spec_artifact_group.artifacts.iter() {
let name = &spec_artifact.name;
// Merge attributes from group and spec
let attributes = merge(&spec.attributes, &spec_artifact_group.attributes);
// Select the single group that matches
let groups = &artifact_store_groups.artifact_groups;
let matching_index: usize = match_artifacts(groups, &attributes)?;
let matching_group = &groups[matching_index];
let artifact_store_group_entry =
get_artifact(matching_group, name).expect("missing artifact");
let artifact_output = LockArtifact {
name: name.to_owned(),
r#type: artifact_store_group_entry.r#type,
artifact_store: LockArtifactStore {
name: spec_artifact_store.name.to_string(),
artifact_group_name: matching_group.name.to_string(),
r#type: spec_artifact_store.r#type.clone(),
repo: spec_artifact_store.repo.clone(),
content_address_storage: matching_group.content_address_storage.clone(),
},
attributes: matching_group.attributes.as_object().unwrap().clone(),
// todo: rename to hash
merkle: artifact_store_group_entry.hash.clone(),
blobs: get_blobs(
matching_group.content_address_storage.clone(),
artifact_store_group_entry.hash,
cmd.artifact_root.clone(),
)
.await?,
};
lock_artifacts.push(artifact_output);
}
}
let lock = Lock { artifacts: lock_artifacts };
let file = OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(cmd.out.clone())
.map_err(|e| ffx_error!(r#"Cannot create lock file "{}": {}"#, cmd.out.display(), e))?;
// write file
serde_json::to_writer_pretty(&file, &lock)?;
Ok(())
}
// tests
#[cfg(test)]
mod test {
use super::*;
use fuchsia_async as fasync;
use fuchsia_pkg::MetaPackage;
use fuchsia_pkg::{build_with_file_system, CreationManifest, FileSystem};
use maplit::{btreemap, hashmap};
use pkg::{
manager::RepositoryManager, server::RepositoryServer,
test_utils::make_writable_empty_repository,
};
use serde_json::json;
use serde_json5;
use std::collections::HashMap;
use std::convert::TryInto;
use std::fs;
use std::io;
use std::io::Write;
use std::net::Ipv4Addr;
use std::path::PathBuf;
use std::sync::Arc;
/// Test artifact hash
#[test]
fn test_get_hash() {
// Test data in json5 format for cleaner look
let data = r#"
{
name: "1361ee2a-e384-4eda-9f25-694affdeb30e",
content_address_storage: "fuchsia-blobs.googleusercontent.com",
type: "tuf",
attributes: {version: "63"},
artifacts: [
{ name: "one", merkle: "hash_1", sha256: "2", type: "package" },
{ name: "two", merkle: "hash_2", sha256: "3", type: "package" },
],
}"#;
// Parse the test data
let v: ArtifactStoreGroup = serde_json5::from_str(data).unwrap();
assert_eq!(get_artifact(&v, "one").unwrap().hash, "hash_1");
}
// For testing comparisons
impl PartialEq for MinMaxPointer {
fn eq(&self, other: &MinMaxPointer) -> bool {
self.is_min == other.is_min && self.pointer == other.pointer
}
}
#[test]
fn test_get_min_max_pointer() {
let object = json!({
"name": "John",
"age": {
"human": "$max",
"dog": 49,
}
});
let ptr = get_min_max_pointer(&object.as_object().unwrap());
// A Result containing an Option containing a tuple
assert_eq!(
ptr.unwrap().unwrap(),
MinMaxPointer { pointer: "/age/human".to_string(), is_min: false }
)
}
// Tests the filtering of artifact store groups by $min/$max
//
#[test]
fn test_find_min_max() {
let store: ArtifactStore = serde_json::from_str(
r#"
{
"schema_version": "v1",
"artifact_groups": [
{
"artifacts": [ ],
"attributes": {
"creation_time": "2021-09-06T11:37:36.054280"
},
"name": "group_a"
}, {
"artifacts": [ ],
"attributes": {
"creation_time": "2021-09-06T11:37:36.054281"
},
"name": "group_b"
}
]
}"#,
)
.unwrap();
assert_eq!(store.artifact_groups.len(), 2);
// The spec attributes for the $min case
let json_min = json!({
"creation_time": "$min"
});
// Convert to Map<String,Value> instead of Value.
let spec_attributes_min = json_min.as_object().unwrap();
let matches: Vec<usize> = (0..store.artifact_groups.len()).collect();
assert_eq!(
find_min_max(&store.artifact_groups, &matches, &spec_attributes_min).unwrap(),
0
);
// max
let json_max = json!({
"creation_time": "$max"
});
let spec_attributes_max = json_max.as_object().unwrap();
assert_eq!(
find_min_max(&store.artifact_groups, &matches, &spec_attributes_max).unwrap(),
1
);
}
// Test match_object cases
// - ignores $min/$max fields
// - fails on top level object
// - fails on recursive object
#[test]
fn test_match_object() {
let spec_json = json!({"a": "$max", "b": 1, "c": {"d": true}});
let spec = spec_json.as_object().unwrap();
let group_1 = json!({"a": 1, "b": 1, "c": {"d": true}});
assert!(match_object(&group_1, &spec));
let group_2 = json!({"a": 1, "b": 2, "c": {"d": true}});
assert!(!match_object(&group_2, &spec));
let group_3 = json!({"a": 1, "b": 1, "c": {"d": false}});
assert!(!match_object(&group_3, &spec));
let group_4 = json!({"a": 1, "c": {"d": false}});
assert!(!match_object(&group_4, &spec));
}
#[test]
fn test_value_object_partial_cmp() {
let a = json!({"w": {"x": 1}});
let b = json!({"w": {"x": 2}});
let ordering = value_object_partial_cmp(&a, &b, &"/w/x".to_string());
assert_eq!(ordering, Some(Ordering::Less));
}
struct FakeFileSystem {
content_map: HashMap<String, Vec<u8>>,
}
impl<'a> FileSystem<'a> for FakeFileSystem {
type File = &'a [u8];
fn open(&'a self, path: &str) -> Result<Self::File, io::Error> {
Ok(self.content_map.get(path).unwrap().as_slice())
}
fn len(&self, path: &str) -> Result<u64, io::Error> {
Ok(self.content_map.get(path).unwrap().len() as u64)
}
fn read(&self, path: &str) -> Result<Vec<u8>, io::Error> {
Ok(self.content_map.get(path).unwrap().clone())
}
}
fn create_meta_far(path: PathBuf) {
let creation_manifest = CreationManifest::from_external_and_far_contents(
btreemap! {
"lib/mylib.so".to_string() => "host/mylib.so".to_string()
},
btreemap! {
"meta/my_component.cmx".to_string() => "host/my_component.cmx".to_string(),
"meta/package".to_string() => "host/meta/package".to_string()
},
)
.unwrap();
let component_manifest_contents = "my_component.cmx contents";
let mut v = vec![];
let meta_package = MetaPackage::from_name("my-package-name".parse().unwrap());
meta_package.serialize(&mut v).unwrap();
let file_system = FakeFileSystem {
content_map: hashmap! {
"host/mylib.so".to_string() => Vec::new(),
"host/my_component.cmx".to_string() => component_manifest_contents.as_bytes().to_vec(),
"host/meta/package".to_string() => v
},
};
build_with_file_system(&creation_manifest, &path, "my-package-name", &file_system).unwrap();
}
fn write_file(path: PathBuf, body: &[u8]) {
let mut tmp = tempfile::NamedTempFile::new().unwrap();
tmp.write_all(body).unwrap();
tmp.persist(path).unwrap();
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_end_to_end_local() {
let tempdir = tempfile::tempdir().unwrap();
let root = tempdir.path();
let out_filename = root.join("artifact_lock.json");
// recreate the test_data directory
for (filename, data) in [
("artifact_spec.json", include_str!("../test_data/artifact_spec.json")),
("artifact_groups.json", include_str!("../test_data/artifact_groups.json")),
("artifact_groups2.json", include_str!("../test_data/artifact_groups2.json")),
] {
fs::write(root.join(filename), data).expect("Unable to write file");
}
let meta_far_path =
root.join("0000000000000000000000000000000000000000000000000000000000000000");
create_meta_far(meta_far_path);
let blob_path =
root.join("15ec7bf0b50732b49f8228e07d24365338f9e3ab994b00af08e5a3bffe55fd8b");
write_file(blob_path, "".as_bytes());
let cmd = UpdateCommand {
spec_file: PathBuf::from(root.join("artifact_spec.json")),
out: out_filename.clone(),
artifact_root: Some(root.display().to_string()),
};
let r = cmd_update(cmd).await;
assert!(r.is_ok());
let new_artifact_lock: Lock = File::open(&out_filename)
.map(BufReader::new)
.map(serde_json::from_reader)
.unwrap()
.unwrap();
let golden_artifact_lock: Lock =
serde_json::from_str(include_str!("../test_data/golden_artifact_lock.json")).unwrap();
assert_eq!(new_artifact_lock, golden_artifact_lock);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn test_end_to_end_tuf() {
let manager = RepositoryManager::new();
let tempdir = tempfile::tempdir().unwrap();
let root = tempdir.path().join("artifact-store");
let repo =
make_writable_empty_repository("artifact-store", root.clone().try_into().unwrap())
.await
.unwrap();
let out_filename = tempdir.path().join("artifact_lock.json");
let meta_far_path = root
.join("repository")
.join("0000000000000000000000000000000000000000000000000000000000000000");
create_meta_far(meta_far_path);
let blob_path = root
.join("repository")
.join("15ec7bf0b50732b49f8228e07d24365338f9e3ab994b00af08e5a3bffe55fd8b");
write_file(blob_path, "".as_bytes());
manager.add(Arc::new(repo));
let addr = (Ipv4Addr::LOCALHOST, 0).into();
let (server_fut, _, server) =
RepositoryServer::builder(addr, Arc::clone(&manager)).start().await.unwrap();
// Run the server in the background.
let task = fasync::Task::local(server_fut);
let tuf_repo_url = server.local_url() + "/artifact-store";
// write artifact_groups.json to server.
let tuf_dir = root.join("repository").join("targets/");
fs::create_dir(&tuf_dir).unwrap();
let artifact_group_path = tuf_dir.join("artifact_groups.json");
fs::write(
artifact_group_path,
include_str!("../test_data/tuf_artifact_groups.json")
.replace("tuf_repo_url", &tuf_repo_url),
)
.unwrap();
// write spec file.
let spec_file_path = tempdir.path().join("artifact_spec.json");
fs::write(
&spec_file_path,
include_str!("../test_data/tuf_artifact_spec.json")
.replace("tuf_repo_url", &tuf_repo_url),
)
.unwrap();
let cmd = UpdateCommand {
spec_file: spec_file_path,
out: out_filename.clone(),
artifact_root: None,
};
cmd_update(cmd).await.unwrap();
let new_artifact_lock: Lock = File::open(&out_filename)
.map(BufReader::new)
.map(serde_json::from_reader)
.unwrap()
.unwrap();
let golden_artifact_lock: Lock = serde_json::from_str(
include_str!("../test_data/golden_tuf_artifact_lock.json")
.replace("tuf_repo_url", &tuf_repo_url)
.as_str(),
)
.unwrap();
assert_eq!(new_artifact_lock, golden_artifact_lock);
// Signal the server to shutdown.
server.stop();
// Wait for the server to actually shut down.
task.await;
}
} | .map_err(|e| ffx_error!(r#"Parse Uri failed for "{}": {}"#, hostname, e))?;
let client = new_https_client(); |
a_bit_of_everything.go | package server
import (
"context"
"fmt"
"io"
"strings"
"sync"
"github.com/golang/glog"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/empty"
examples "github.com/grpc-ecosystem/grpc-gateway/examples/proto/examplepb"
"github.com/grpc-ecosystem/grpc-gateway/examples/proto/sub"
"github.com/grpc-ecosystem/grpc-gateway/examples/proto/sub2"
"github.com/rogpeppe/fastuuid"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// Implementation of ABitOfEverythingServiceServer
var uuidgen = fastuuid.MustNewGenerator()
type _ABitOfEverythingServer struct {
v map[string]*examples.ABitOfEverything
m sync.Mutex
}
type ABitOfEverythingServer interface {
examples.ABitOfEverythingServiceServer
examples.StreamServiceServer
}
// EDIT @moul
func NewHandler() ABitOfEverythingServer {
return newABitOfEverythingServer()
}
// END OF EDIT @moul
func | () ABitOfEverythingServer {
return &_ABitOfEverythingServer{
v: make(map[string]*examples.ABitOfEverything),
}
}
func (s *_ABitOfEverythingServer) Create(ctx context.Context, msg *examples.ABitOfEverything) (*examples.ABitOfEverything, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
var uuid string
for {
uuid = fmt.Sprintf("%x", uuidgen.Next())
if _, ok := s.v[uuid]; !ok {
break
}
}
s.v[uuid] = msg
s.v[uuid].Uuid = uuid
glog.Infof("%v", s.v[uuid])
return s.v[uuid], nil
}
func (s *_ABitOfEverythingServer) CreateBody(ctx context.Context, msg *examples.ABitOfEverything) (*examples.ABitOfEverything, error) {
return s.Create(ctx, msg)
}
func (s *_ABitOfEverythingServer) BulkCreate(stream examples.StreamService_BulkCreateServer) error {
count := 0
ctx := stream.Context()
for {
msg, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
count++
glog.Error(msg)
if _, err = s.Create(ctx, msg); err != nil {
return err
}
}
err := stream.SendHeader(metadata.New(map[string]string{
"count": fmt.Sprintf("%d", count),
}))
if err != nil {
return err
}
stream.SetTrailer(metadata.New(map[string]string{
"foo": "foo2",
"bar": "bar2",
}))
return stream.SendAndClose(new(empty.Empty))
}
func (s *_ABitOfEverythingServer) Lookup(ctx context.Context, msg *sub2.IdMessage) (*examples.ABitOfEverything, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
err := grpc.SendHeader(ctx, metadata.New(map[string]string{
"uuid": msg.Uuid,
}))
if err != nil {
return nil, err
}
if a, ok := s.v[msg.Uuid]; ok {
return a, nil
}
grpc.SetTrailer(ctx, metadata.New(map[string]string{
"foo": "foo2",
"bar": "bar2",
}))
return nil, status.Errorf(codes.NotFound, "not found")
}
func (s *_ABitOfEverythingServer) List(_ *empty.Empty, stream examples.StreamService_ListServer) error {
s.m.Lock()
defer s.m.Unlock()
err := stream.SendHeader(metadata.New(map[string]string{
"count": fmt.Sprintf("%d", len(s.v)),
}))
if err != nil {
return err
}
for _, msg := range s.v {
if err := stream.Send(msg); err != nil {
return err
}
}
// return error when metadata includes error header
if header, ok := metadata.FromIncomingContext(stream.Context()); ok {
if v, ok := header["error"]; ok {
stream.SetTrailer(metadata.New(map[string]string{
"foo": "foo2",
"bar": "bar2",
}))
return status.Errorf(codes.InvalidArgument, "error metadata: %v", v)
}
}
return nil
}
func (s *_ABitOfEverythingServer) Update(ctx context.Context, msg *examples.ABitOfEverything) (*empty.Empty, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
if _, ok := s.v[msg.Uuid]; ok {
s.v[msg.Uuid] = msg
} else {
return nil, status.Errorf(codes.NotFound, "not found")
}
return new(empty.Empty), nil
}
func (s *_ABitOfEverythingServer) UpdateV2(ctx context.Context, msg *examples.UpdateV2Request) (*empty.Empty, error) {
glog.Info(msg)
// If there is no update mask do a regular update
if msg.UpdateMask == nil || len(msg.UpdateMask.GetPaths()) == 0 {
return s.Update(ctx, msg.Abe)
}
s.m.Lock()
defer s.m.Unlock()
if a, ok := s.v[msg.Abe.Uuid]; ok {
applyFieldMask(a, msg.Abe, msg.UpdateMask)
} else {
return nil, status.Errorf(codes.NotFound, "not found")
}
return new(empty.Empty), nil
}
// PatchWithFieldMaskInBody differs from UpdateV2 only in that this method exposes the field mask in the request body,
// so that clients can specify their mask explicitly
func (s *_ABitOfEverythingServer) PatchWithFieldMaskInBody(ctx context.Context, request *examples.UpdateV2Request) (*empty.Empty, error) {
// A low-effort attempt to modify the field mask to only include paths for the ABE struct. Since this is only for the
// integration tests, this narrow implementation is fine.
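// For instance (illustrative), a mask path of "Abe.string_value" becomes
// "string_value" before delegating to UpdateV2 below.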
if request.UpdateMask != nil {
var shifted []string
for _, path := range request.UpdateMask.GetPaths() {
shifted = append(shifted, strings.TrimPrefix(path, "Abe."))
}
request.UpdateMask.Paths = shifted
}
return s.UpdateV2(ctx, request)
}
func (s *_ABitOfEverythingServer) Delete(ctx context.Context, msg *sub2.IdMessage) (*empty.Empty, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
if _, ok := s.v[msg.Uuid]; ok {
delete(s.v, msg.Uuid)
} else {
return nil, status.Errorf(codes.NotFound, "not found")
}
return new(empty.Empty), nil
}
func (s *_ABitOfEverythingServer) GetQuery(ctx context.Context, msg *examples.ABitOfEverything) (*empty.Empty, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
if _, ok := s.v[msg.Uuid]; ok {
s.v[msg.Uuid] = msg
} else {
return nil, status.Errorf(codes.NotFound, "not found")
}
return new(empty.Empty), nil
}
func (s *_ABitOfEverythingServer) GetRepeatedQuery(ctx context.Context, msg *examples.ABitOfEverythingRepeated) (*examples.ABitOfEverythingRepeated, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
return msg, nil
}
func (s *_ABitOfEverythingServer) Echo(ctx context.Context, msg *sub.StringMessage) (*sub.StringMessage, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
return msg, nil
}
func (s *_ABitOfEverythingServer) BulkEcho(stream examples.StreamService_BulkEchoServer) error {
var msgs []*sub.StringMessage
for {
msg, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
msgs = append(msgs, msg)
}
hmd := metadata.New(map[string]string{
"foo": "foo1",
"bar": "bar1",
})
if err := stream.SendHeader(hmd); err != nil {
return err
}
for _, msg := range msgs {
glog.Info(msg)
if err := stream.Send(msg); err != nil {
return err
}
}
stream.SetTrailer(metadata.New(map[string]string{
"foo": "foo2",
"bar": "bar2",
}))
return nil
}
func (s *_ABitOfEverythingServer) DeepPathEcho(ctx context.Context, msg *examples.ABitOfEverything) (*examples.ABitOfEverything, error) {
s.m.Lock()
defer s.m.Unlock()
glog.Info(msg)
return msg, nil
}
func (s *_ABitOfEverythingServer) NoBindings(ctx context.Context, msg *duration.Duration) (*empty.Empty, error) {
return nil, nil
}
func (s *_ABitOfEverythingServer) Timeout(ctx context.Context, msg *empty.Empty) (*empty.Empty, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (s *_ABitOfEverythingServer) ErrorWithDetails(ctx context.Context, msg *empty.Empty) (*empty.Empty, error) {
stat, err := status.New(codes.Unknown, "with details").
WithDetails(proto.Message(
&errdetails.DebugInfo{
StackEntries: []string{"foo:1"},
Detail: "error debug details",
},
))
if err != nil {
return nil, status.Errorf(codes.Internal, "unexpected error adding details: %s", err)
}
return nil, stat.Err()
}
func (s *_ABitOfEverythingServer) GetMessageWithBody(ctx context.Context, msg *examples.MessageWithBody) (*empty.Empty, error) {
return &empty.Empty{}, nil
}
func (s *_ABitOfEverythingServer) PostWithEmptyBody(ctx context.Context, msg *examples.Body) (*empty.Empty, error) {
return &empty.Empty{}, nil
}
func (s *_ABitOfEverythingServer) CheckGetQueryParams(ctx context.Context, msg *examples.ABitOfEverything) (*examples.ABitOfEverything, error) {
return msg, nil
}
func (s *_ABitOfEverythingServer) CheckPostQueryParams(ctx context.Context, msg *examples.ABitOfEverything) (*examples.ABitOfEverything, error) {
return msg, nil
}
| newABitOfEverythingServer |
bitcoin_es.ts | <?xml version="1.0" ?><!DOCTYPE TS><TS language="es" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Mazebits</source>
<translation>Acerca de Mazebits</translation>
</message>
<message>
<location line="+39"/>
<source><b>Mazebits</b> version</source>
<translation>Versión de <b>Mazebits</b></translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Este es un software experimental.
Distribuido bajo la licencia MIT/X11, vea el archivo adjunto
COPYING o http://www.opensource.org/licenses/mit-license.php.
Este producto incluye software desarrollado por OpenSSL Project para su uso en
el OpenSSL Toolkit (http://www.openssl.org/) y software criptográfico escrito por
Eric Young ([email protected]) y el software UPnP escrito por Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Copyright</translation>
</message>
<message>
<location line="+0"/>
<source>The Mazebits developers</source>
<translation>Los programadores Mazebits</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Libreta de direcciones</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Haga doble clic para editar una dirección o etiqueta</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Crear una nueva dirección</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copiar la dirección seleccionada al portapapeles del sistema</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Añadir dirección</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Mazebits addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Estas son sus direcciones Mazebits para recibir pagos. Puede utilizar una diferente por cada persona emisora para saber quién le está pagando.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Copiar dirección</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Mostrar código &QR </translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Mazebits address</source>
<translation>Firmar un mensaje para demostrar que se posee una dirección Mazebits</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>&Firmar mensaje</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Borrar de la lista la dirección seleccionada</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Exportar a un archivo los datos de esta pestaña</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&Exportar</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Mazebits address</source>
<translation>Verificar un mensaje para comprobar que fue firmado con la dirección Mazebits indicada</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Verificar mensaje</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Eliminar</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Mazebits addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Estas son sus direcciones Mazebits para enviar pagos. Compruebe siempre la cantidad y la dirección receptora antes de transferir monedas.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Copiar &etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Editar</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Enviar &monedas</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Exportar datos de la libreta de direcciones</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivos de columnas separadas por coma (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Error al exportar</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>No se pudo escribir en el archivo %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(sin etiqueta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Diálogo de contraseña</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Introducir contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nueva contraseña</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Repita la nueva contraseña</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Introduzca la nueva contraseña del monedero.<br/>Por favor elija una con <b>10 o más caracteres aleatorios</b> u <b>ocho o más palabras</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Cifrar el monedero</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Esta operación requiere su contraseña para desbloquear el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Desbloquear monedero</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Esta operación requiere su contraseña para descifrar el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Descifrar el monedero</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Cambiar contraseña</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Introduzca la contraseña anterior del monedero y la nueva. </translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Confirmar cifrado del monedero</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LITECOINS</b>!</source>
<translation>Atención: ¡Si cifra su monedero y pierde la contraseña perderá <b>TODOS SUS LITECOINS</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>¿Seguro que desea cifrar su monedero?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANTE: Cualquier copia de seguridad que haya realizado previamente de su archivo de monedero debe reemplazarse con el nuevo archivo de monedero cifrado. Por razones de seguridad, las copias de seguridad previas del archivo de monedero no cifradas serán inservibles en cuanto comience a usar el nuevo monedero cifrado.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Aviso: ¡La tecla de bloqueo de mayúsculas está activada!</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Monedero cifrado</translation>
</message>
<message>
<location line="-56"/>
<source>Mazebits will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your mazebitss from being stolen by malware infecting your computer.</source>
<translation>Mazebits se cerrará para finalizar el proceso de cifrado. Recuerde que el cifrado de su monedero no puede proteger totalmente sus mazebits del robo por malware que infecte su sistema.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Ha fallado el cifrado del monedero</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Ha fallado el cifrado del monedero debido a un error interno. El monedero no ha sido cifrado.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Las contraseñas no coinciden.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Ha fallado el desbloqueo del monedero</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La contraseña introducida para descifrar el monedero es incorrecta.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Ha fallado el descifrado del monedero</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Se ha cambiado correctamente la contraseña del monedero.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Firmar &mensaje...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Sincronizando con la red...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Vista general</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Mostrar vista general del monedero</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Transacciones</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Examinar el historial de transacciones</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Editar la lista de las direcciones y etiquetas almacenadas</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Mostrar la lista de direcciones utilizadas para recibir pagos</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>&Salir</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Salir de la aplicación</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Mazebits</source>
<translation>Mostrar información acerca de Mazebits</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Acerca de &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Mostrar información acerca de Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opciones...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Cifrar monedero...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>Copia de &respaldo del monedero...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Cambiar la contraseña...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>Importando bloques de disco...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Reindexando bloques en disco...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Mazebits address</source>
<translation>Enviar monedas a una dirección Mazebits</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Mazebits</source>
<translation>Modificar las opciones de configuración de Mazebits</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Copia de seguridad del monedero en otra ubicación</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Cambiar la contraseña utilizada para el cifrado del monedero</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>Ventana de &depuración</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Abrir la consola de depuración y diagnóstico</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Verificar mensaje...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Mazebits</source>
<translation>Mazebits</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Enviar</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Recibir</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Direcciones</translation>
</message>
<message>
<location line="+22"/>
<source>&About Mazebits</source>
<translation>&Acerca de Mazebits</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>Mo&strar/ocultar</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Mostrar u ocultar la ventana principal</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Cifrar las claves privadas de su monedero</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Mazebits addresses to prove you own them</source>
<translation>Firmar mensajes con sus direcciones Mazebits para demostrar la propiedad</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Mazebits addresses</source>
<translation>Verificar mensajes comprobando que están firmados con direcciones Mazebits concretas</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Archivo</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Configuración</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>A&yuda</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Barra de pestañas</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Mazebits client</source>
<translation>Cliente Mazebits</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Mazebits network</source>
<translation><numerusform>%n conexión activa hacia la red Mazebits</numerusform><numerusform>%n conexiones activas hacia la red Mazebits</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation>Ninguna fuente de bloques disponible...</translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>Se han procesado %1 de %2 bloques (estimados) del historial de transacciones.</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>Procesados %1 bloques del historial de transacciones.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n día</numerusform><numerusform>%n días</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n semana</numerusform><numerusform>%n semanas</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>%1 atrás</translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>El último bloque recibido fue generado hace %1.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>Las transacciones posteriores a esta aún no están visibles.</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Aviso</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Información</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Esta transacción supera el límite de tamaño. Puede enviarla con una comisión de %1, destinada a los nodos que procesen su transacción para contribuir al mantenimiento de la red. ¿Desea pagar esta comisión?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Actualizado</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Actualizando...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Confirme la comisión de transacción</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Transacción enviada</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Transacción entrante</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Fecha: %1
Cantidad: %2
Tipo: %3
Dirección: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>Gestión de URI</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Mazebits address or malformed URI parameters.</source>
<translation>¡No se puede interpretar la URI! Esto puede deberse a una dirección Mazebits inválida o a parámetros de URI mal formados.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>desbloqueado</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>El monedero está <b>cifrado</b> y actualmente <b>bloqueado</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Mazebits can no longer continue safely and will quit.</source>
<translation>Ha ocurrido un error crítico. Mazebits ya no puede continuar con seguridad y se cerrará.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Alerta de red</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Editar Dirección</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etiqueta</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>La etiqueta asociada con esta entrada de la libreta de direcciones</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Dirección</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>La dirección asociada con esta entrada de la libreta de direcciones. Solo puede modificarse para las direcciones de envío.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Nueva dirección para recibir</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nueva dirección para enviar</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Editar dirección de recepción</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Editar dirección de envío</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>La dirección introducida "%1" ya está presente en la libreta de direcciones.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Mazebits address.</source>
<translation>La dirección introducida "%1" no es una dirección Mazebits válida.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>No se pudo desbloquear el monedero.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Ha fallado la generación de la nueva clave.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Mazebits-Qt</source>
<translation>Mazebits-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>versión</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>opciones de la línea de órdenes</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>Opciones GUI</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Establecer el idioma, por ejemplo, "es_ES" (predeterminado: configuración regional del sistema)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Arrancar minimizado</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Mostrar pantalla de bienvenida en el inicio (predeterminado: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opciones</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Principal</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation>Comisión de transacción opcional por kB que ayuda a asegurar que sus transacciones se procesen rápidamente. La mayoría de las transacciones son de 1 kB.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Comisión de &transacciones</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Mazebits after logging in to the system.</source>
<translation>Iniciar Mazebits automáticamente al iniciar sesión en el sistema.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Mazebits on system login</source>
<translation>&Iniciar Mazebits al iniciar el sistema</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>Restablecer todas las opciones del cliente a las predeterminadas.</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>&Restablecer opciones</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&Red</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Mazebits client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Abrir automáticamente el puerto del cliente Mazebits en el router. Esta opción solo funciona si el router admite UPnP y está activado.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Mapear el puerto usando &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Mazebits network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Conectar a la red Mazebits a través de un proxy SOCKS (ej. al conectarse a través de Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Conectar a través de un proxy SOCKS:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Dirección &IP del proxy:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Dirección IP del proxy (ej. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Puerto:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Puerto del servidor proxy (ej. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>&Versión SOCKS:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Versión del proxy SOCKS (ej. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Ventana</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Mostrar solo un icono en la bandeja tras minimizar la ventana.</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimizar a la bandeja en vez de a la barra de tareas</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimizar en lugar de salir de la aplicación al cerrar la ventana. Cuando esta opción está activa, la aplicación solo se puede cerrar seleccionando Salir desde el menú.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimizar al cerrar</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Interfaz</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>I&dioma de la interfaz de usuario:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Mazebits.</source>
<translation>El idioma de la interfaz de usuario puede establecerse aquí. Este ajuste se aplicará cuando se reinicie Mazebits.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>Mostrar las cantidades en la &unidad:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Elegir la subdivisión predeterminada para mostrar cantidades en la interfaz y cuando se envían monedas.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Mazebits addresses in the transaction list or not.</source>
<translation>Mostrar o no las direcciones Mazebits en la lista de transacciones.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Mostrar las direcciones en la lista de transacciones</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&Aceptar</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Cancelar</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Aplicar</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>predeterminado</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>Confirme el restablecimiento de las opciones</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>Algunas configuraciones pueden requerir un reinicio del cliente para que sean efectivas.</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>¿Quiere proceder?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Aviso</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Mazebits.</source>
<translation>Esta configuración tendrá efecto tras reiniciar Mazebits.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>La dirección proxy indicada es inválida.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulario</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Mazebits network after a connection is established, but this process has not completed yet.</source>
<translation>La información mostrada puede estar desactualizada. Su monedero se sincroniza automáticamente con la red Mazebits después de que se haya establecido una conexión, pero este proceso aún no se ha completado.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>No confirmado(s):</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Monedero</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>No disponible:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Saldo recién minado que aún no está disponible</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Transacciones recientes</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Su saldo actual</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Total de las transacciones que faltan por confirmar y que no contribuyen al saldo actual</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>desincronizado</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start mazebits: click-to-pay handler</source>
<translation>No se pudo iniciar el manejador de clic-para-pagar mazebits:</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Diálogo de códigos QR</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Solicitud de pago</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Cantidad:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Etiqueta:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Mensaje:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Guardar como...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Error al codificar la URI en el código QR.</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>La cantidad introducida es inválida. Compruébela, por favor.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>URI resultante demasiado larga. Intente reducir el texto de la etiqueta / mensaje.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Guardar código QR</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>Imágenes PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Nombre del cliente</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>N/D</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Versión del cliente</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Información</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Utilizando la versión OpenSSL</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Hora de inicio</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Red</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Número de conexiones</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>En la red de pruebas</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Cadena de bloques</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Número actual de bloques</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Bloques totales estimados</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Hora del último bloque</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Abrir</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Opciones de la línea de órdenes</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Mazebits-Qt help message to get a list with possible Mazebits command-line options.</source>
<translation>Mostrar el mensaje de ayuda de Mazebits-Qt que enumera las opciones disponibles de línea de órdenes para Mazebits.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Mostrar</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Consola</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Fecha de compilación</translation>
</message>
<message>
<location line="-104"/>
<source>Mazebits - Debug window</source>
<translation>Mazebits - Ventana de depuración</translation>
</message>
<message>
<location line="+25"/>
<source>Mazebits Core</source>
<translation>Núcleo de Mazebits</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Archivo de registro de depuración</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Mazebits debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Abrir el archivo de registro de depuración de Mazebits desde el directorio de datos actual. Esto puede tardar varios segundos con archivos de registro grandes.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Borrar consola</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Mazebits RPC console.</source>
<translation>Bienvenido a la consola RPC de Mazebits.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Use las flechas arriba y abajo para navegar por el historial y <b>Control+L</b> para limpiar la pantalla.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Escriba <b>help</b> para ver un resumen de los comandos disponibles.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Enviar monedas</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Enviar a múltiples destinatarios de una vez</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Añadir &destinatario</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Eliminar todos los campos de las transacciones</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Limpiar &todo</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123.456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Confirmar el envío</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Enviar</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> a %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Confirmar el envío de monedas</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>¿Está seguro de que desea enviar %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> y </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>La dirección de recepción no es válida, compruébela de nuevo.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>La cantidad por pagar tiene que ser mayor de 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>La cantidad sobrepasa su saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>El total sobrepasa su saldo cuando se incluye la comisión de transacción de %1.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Se ha encontrado una dirección duplicada. Solo se puede enviar a cada dirección una vez por operación de envío.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Error: ¡Ha fallado la creación de la transacción!</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Error: transacción rechazada. Puede haber ocurrido si alguna de las monedas ya estaba gastada o si ha usado una copia de wallet.dat y las monedas se gastaron en la copia pero no se han marcado así aquí.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Envío</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Ca&ntidad:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>&Pagar a:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>La dirección a la que enviar el pago (p. ej. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Etiquete esta dirección para añadirla a la libreta</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Etiqueta:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Elija una dirección de la libreta de direcciones</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Eliminar destinatario</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Mazebits address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Introduzca una dirección Mazebits (ej. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Firmas - Firmar / verificar un mensaje</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Firmar mensaje</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Puede firmar mensajes con sus direcciones para demostrar que las posee. Tenga cuidado de no firmar cualquier cosa vaga, ya que los ataques de phishing pueden tratar de engañarle para suplantar su identidad. Firme solo declaraciones totalmente detalladas con las que usted esté de acuerdo.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>La dirección con la que firmar el mensaje (ej. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Elija una dirección de la libreta de direcciones</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Pegar dirección desde portapapeles</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Introduzca el mensaje que desea firmar aquí</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Firma</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copiar la firma actual al portapapeles del sistema</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Mazebits address</source>
<translation>Firmar el mensaje para demostrar que se posee esta dirección Mazebits</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Firmar &mensaje</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Limpiar todos los campos de la firma de mensaje</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Limpiar &todo</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Verificar mensaje</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Introduzca la dirección para la firma, el mensaje (asegurándose de copiar tal cual los saltos de línea, espacios, tabulaciones, etc.) y la firma a continuación para verificar el mensaje. Tenga cuidado de no asumir más información de lo que dice el propio mensaje firmado para evitar fraudes basados en ataques de tipo man-in-the-middle.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>La dirección con la que se firmó el mensaje (ej. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Mazebits address</source>
<translation>Verificar el mensaje para comprobar que fue firmado con la dirección Mazebits indicada</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>Verificar &mensaje</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Limpiar todos los campos de la verificación de mensaje</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Mazebits address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Introduzca una dirección Mazebits (ej. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Haga clic en "Firmar mensaje" para generar la firma</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Mazebits signature</source>
<translation>Introduzca una firma Mazebits</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>La dirección introducida es inválida.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Verifique la dirección e inténtelo de nuevo.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>La dirección introducida no corresponde a una clave.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Se ha cancelado el desbloqueo del monedero.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>No se dispone de la clave privada para la dirección introducida.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Ha fallado la firma del mensaje.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Mensaje firmado.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>No se pudo decodificar la firma.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Compruebe la firma e inténtelo de nuevo.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>La firma no coincide con el resumen del mensaje.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>La verificación del mensaje ha fallado.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Mensaje verificado.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Mazebits developers</source>
<translation>Los programadores Mazebits</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/fuera de línea</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/no confirmado</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 confirmaciones</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Estado</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, difundido a través de %n nodo</numerusform><numerusform>, difundido a través de %n nodos</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Fuente</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generado</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>De</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Para</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>dirección propia</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>etiqueta</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Crédito</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>disponible en %n bloque más</numerusform><numerusform>disponible en %n bloques más</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>no aceptada</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Débito</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Comisión de transacción</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Cantidad neta</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Mensaje</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Comentario</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Identificador de transacción</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Las monedas generadas deben esperar 120 bloques antes de que se puedan gastar. Cuando se generó este bloque, se emitió a la red para ser agregado a la cadena de bloques. Si no consigue incorporarse a la cadena, su estado cambiará a "no aceptado" y las monedas no se podrán gastar. Esto puede ocurrir ocasionalmente si otro nodo genera un bloque casi al mismo tiempo que el suyo.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Información de depuración</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transacción</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Entradas</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>verdadero</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falso</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, todavía no ha sido difundido satisfactoriamente</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Abierto durante %n bloque más</numerusform><numerusform>Abierto durante %n bloques más</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>desconocido</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Detalles de transacción</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Esta ventana muestra información detallada sobre la transacción</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Abierto durante %n bloque más</numerusform><numerusform>Abierto durante %n bloques más</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Abierto hasta %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Fuera de línea (%1 confirmaciones)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>No confirmado (%1 de %2 confirmaciones)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confirmado (%1 confirmaciones)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>El saldo recién minado estará disponible cuando venza el plazo en %n bloque más</numerusform><numerusform>El saldo recién minado estará disponible cuando venza el plazo en %n bloques más</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>¡Este bloque no ha sido recibido por ningún otro nodo y probablemente no será aceptado!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generado pero no aceptado</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Recibido de</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pago propio</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/d)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Estado de la transacción. Pase el ratón sobre este campo para ver el número de confirmaciones.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Fecha y hora en que se recibió la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tipo de transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Dirección de destino de la transacción.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Cantidad retirada o añadida al saldo.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Todo</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Hoy</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Esta semana</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Este mes</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Mes pasado</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Este año</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Rango...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Recibido con</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Enviado a</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>A usted mismo</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Minado</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Otra</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Introduzca una dirección o etiqueta que buscar</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Cantidad mínima</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copiar dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copiar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copiar cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Copiar identificador de transacción</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Editar etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Mostrar detalles de la transacción</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Exportar datos de la transacción</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Archivo de valores separados por comas (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confirmado</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Fecha</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etiqueta</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Dirección</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Cantidad</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Error exportando</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>No se pudo escribir en el archivo %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Rango:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>a</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Enviar monedas</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation>&Exportar</translation>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Exportar a un archivo los datos de esta pestaña</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>Respaldo de monedero</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Datos de monedero (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Ha fallado el respaldo</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Se ha producido un error al intentar guardar los datos del monedero en la nueva ubicación.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Se ha completado con éxito la copia de respaldo</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Los datos del monedero se han guardado con éxito en la nueva ubicación.</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Mazebits version</source>
<translation>Versión de Mazebits</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Uso:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or mazebitsd</source>
<translation>Enviar comando a -server o mazebitsd</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Muestra comandos
</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Recibir ayuda para un comando
</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Opciones:
</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: mazebits.conf)</source>
<translation>Especificar archivo de configuración (predeterminado: mazebits.conf)
</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: mazebitsd.pid)</source>
<translation>Especificar archivo pid (predeterminado: mazebitsd.pid)
</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Especificar directorio para los datos</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Establecer el tamaño de caché de la base de datos en megabytes (predeterminado: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9333 or testnet: 19333)</source>
<translation>Escuchar conexiones en <puerto> (predeterminado: 9333 o testnet: 19333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Mantener como máximo <n> conexiones a pares (predeterminado: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Conectar a un nodo para obtener direcciones de pares y desconectar</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Especifique su propia dirección pública</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Umbral para la desconexión de pares con mal comportamiento (predeterminado: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Número de segundos en que se evita la reconexión de pares con mal comportamiento (predeterminado: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Ha ocurrido un error al configurar el puerto RPC %u para escucha en IPv4: %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation>Escuchar conexiones JSON-RPC en <puerto> (predeterminado: 9332 o testnet: 19332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Aceptar comandos de consola y JSON-RPC
</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Ejecutar en segundo plano como demonio y aceptar comandos
</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Usar la red de pruebas
</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Aceptar conexiones desde el exterior (predeterminado: 1 si no -proxy o -connect)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=mazebitsrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Mazebits Alert" [email protected]
</source>
<translation>%s, debe establecer un valor rpcpassword en el archivo de configuración:
%s
Se recomienda utilizar la siguiente contraseña aleatoria:
rpcuser=mazebitsrpc
rpcpassword=%s
(no es necesario recordar esta contraseña)
El nombre de usuario y la contraseña NO DEBEN ser iguales.
Si el archivo no existe, créelo con permisos de lectura solo para el propietario.
Se recomienda también establecer alertnotify para recibir notificaciones de problemas.
Por ejemplo: alertnotify=echo %%s | mail -s "Mazebits Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Ha ocurrido un error al configurar el puerto RPC %u para escuchar mediante IPv6. Recurriendo a IPv4: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Vincular a la dirección dada y escuchar siempre en ella. Utilice la notación [host]:port para IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Mazebits is probably already running.</source>
<translation>No se puede bloquear el directorio de datos %s. Probablemente Mazebits ya se está ejecutando.</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>¡Error: se ha rechazado la transacción! Esto puede ocurrir si ya se han gastado algunas de las monedas del monedero, como ocurriría si hubiera hecho una copia de wallet.dat y se hubieran gastado monedas a partir de la copia, con lo que no se habrían marcado aquí como gastadas.</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>¡Error: Esta transacción requiere una comisión de al menos %s debido a su monto, complejidad, o al uso de fondos recién recibidos!</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Ejecutar orden cuando se reciba un aviso relevante (%s en cmd se reemplazará por el mensaje)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Ejecutar comando cuando una transacción del monedero cambia (%s en cmd se remplazará por TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Establecer el tamaño máximo de las transacciones de alta prioridad/comisión baja en bytes (predeterminado: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Esta es una versión de prueba preliminar - utilícela bajo su propio riesgo - no la utilice para aplicaciones de minería o comerciales</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Aviso: ¡-paytxfee tiene un valor muy alto! Esta es la comisión que pagará si envía una transacción.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Aviso: ¡Las transacciones mostradas pueden no ser correctas! Puede necesitar una actualización o bien otros nodos necesitan actualizarse.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Mazebits will not work properly.</source>
<translation>Aviso: Por favor, ¡revise que la fecha y hora de su ordenador sean correctas! Si su reloj está mal, Mazebits no funcionará correctamente.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Aviso: ¡Error al leer wallet.dat! Todas las claves se han leído correctamente, pero podrían faltar o ser incorrectos los datos de transacciones o las entradas de la libreta de direcciones.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Aviso: ¡Recuperados datos de wallet.dat corrupto! El wallet.dat original se ha guardado como wallet.{timestamp}.bak en %s; si hubiera errores en su saldo o transacciones, deberá restaurar una copia de seguridad.</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Intentar recuperar las claves privadas de un wallet.dat corrupto</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Opciones de creación de bloques:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Conectar sólo a los nodos (o nodo) especificados</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Se ha detectado una base de datos de bloques dañada</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Descubrir dirección IP propia (predeterminado: 1 al escuchar sin -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>¿Desea reconstruir la base de datos de bloques ahora?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>Error al inicializar la base de datos de bloques</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Error al inicializar el entorno de la base de datos del monedero %s</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>Error al cargar la base de datos de bloques</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Error al abrir base de datos de bloques.</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Error: ¡Espacio en disco bajo!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Error: ¡El monedero está bloqueado; no se puede crear la transacción!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Error: error de sistema: </translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Ha fallado la escucha en todos los puertos. Use -listen=0 si desea esto.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>No se ha podido leer la información de bloque</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>No se ha podido leer el bloque</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>No se ha podido sincronizar el índice de bloques</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>No se ha podido escribir en el índice de bloques</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>No se ha podido escribir la información de bloques</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>No se ha podido escribir el bloque</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>No se ha podido escribir la información de archivo</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>No se ha podido escribir en la base de datos de monedas</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>No se ha podido escribir en el índice de transacciones</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>No se han podido escribir los datos de deshacer</translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Encontrar pares mediante búsqueda de DNS (predeterminado: 1 salvo con -connect)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation>Generar monedas (predeterminado: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>Cuántos bloques comprobar al iniciar (predeterminado: 288, 0 = todos)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>Nivel de exhaustividad de la verificación de bloques (0-4, predeterminado: 3)</translation>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation>No hay suficientes descriptores de archivo disponibles.</translation>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Reconstruir el índice de la cadena de bloques a partir de los archivos blk000??.dat actuales</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>Establecer el número de hilos para atender las llamadas RPC (predeterminado: 4)</translation>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>Verificando bloques...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Verificando monedero...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Importa los bloques desde un archivo blk000??.dat externo</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation>Establecer el número de hilos de verificación de scripts (hasta 16, 0 = automático, <0 = dejar libres esa cantidad de núcleos, predeterminado: 0)</translation>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>Información</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Dirección -tor inválida: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation>Cantidad inválida para -minrelaytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation>Cantidad inválida para -mintxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>Mantener índice de transacciones completo (predeterminado: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Búfer de recepción máximo por conexión, <n>*1000 bytes (predeterminado: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Búfer de envío máximo por conexión, <n>*1000 bytes (predeterminado: 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>Aceptar solamente cadena de bloques que concuerde con los puntos de control internos (predeterminado: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Conectarse solo a nodos de la red <net> (IPv4, IPv6 o Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Mostrar información de depuración adicional. Implica todas las demás opciones -debug*</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Mostrar información adicional de depuración de red</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Anteponer marca temporal a la información de depuración</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Mazebits Wiki for SSL setup instructions)</source>
<translation>Opciones SSL: (consulte la wiki de Mazebits para las instrucciones de configuración SSL)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Elija la versión del proxy socks a usar (4-5, predeterminado: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Enviar información de trazas/depuración a la consola en lugar de al archivo debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Enviar información de trazas/depuración al depurador</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Establecer tamaño máximo de bloque en bytes (predeterminado: 250000)</translation> | <translation>Establecer tamaño mínimo de bloque en bytes (predeterminado: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Reducir el archivo debug.log al iniciar el cliente (predeterminado: 1 sin -debug)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation>Ha fallado la firma de la transacción</translation>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Especificar el tiempo máximo de conexión en milisegundos (predeterminado: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>Error de sistema: </translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation>Cantidad de la transacción demasiado pequeña</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation>Las cantidades de las transacciones deben ser positivas</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation>Transacción demasiado grande</translation>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Usar UPnP para asignar el puerto de escucha (predeterminado: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Usar UPnP para asignar el puerto de escucha (predeterminado: 1 al escuchar)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Utilizar proxy para alcanzar los servicios ocultos de Tor (predeterminado: igual que -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Nombre de usuario para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>Aviso</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Aviso: Esta versión está obsoleta, ¡es necesario actualizar!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation>Necesita reconstruir las bases de datos con la opción -reindex para modificar -txindex</translation>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrupto. Ha fallado la recuperación.</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Contraseña para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Permitir conexiones JSON-RPC desde la dirección IP especificada
</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Enviar comandos al nodo en ejecución en <ip> (predeterminado: 127.0.0.1)
</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Ejecutar un comando cuando cambia el mejor bloque (%s en cmd se sustituye por el hash de bloque)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Actualizar el monedero al último formato</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Ajustar el número de claves en reserva a <n> (predeterminado: 100)
</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Volver a examinar la cadena de bloques en busca de transacciones del monedero perdidas</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Usar OpenSSL (https) para las conexiones JSON-RPC
</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Certificado del servidor (predeterminado: server.cert)
</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Clave privada del servidor (predeterminado: server.pem)
</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Cifrados aceptados (predeterminado: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)
</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Este mensaje de ayuda
</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>No es posible vincular a %s en este equipo (bind ha devuelto el error %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Conectar mediante proxy socks</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Permitir búsquedas DNS para -addnode, -seednode y -connect</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Cargando direcciones...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Error al cargar wallet.dat: el monedero está dañado</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Mazebits</source>
<translation>Error al cargar wallet.dat: El monedero requiere una versión más reciente de Mazebits</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Mazebits to complete</source>
<translation>El monedero ha necesitado ser reescrito. Reinicie Mazebits para completar el proceso</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Error al cargar wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Dirección -proxy inválida: '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>La red especificada en -onlynet '%s' es desconocida</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Solicitada versión de proxy -socks desconocida: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>No se puede resolver la dirección de -bind: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>No se puede resolver la dirección de -externalip: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Cantidad inválida para -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Cantidad no válida</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Fondos insuficientes</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Cargando el índice de bloques...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Añadir un nodo al que conectarse y tratar de mantener la conexión abierta</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Mazebits is probably already running.</source>
<translation>No es posible vincular a %s en este equipo. Probablemente Mazebits ya se está ejecutando.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Tarifa por KB que añadir a las transacciones que envíe</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Cargando monedero...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>No se puede convertir el monedero a una versión anterior</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>No se puede escribir la dirección predeterminada</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Reexplorando...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Carga finalizada</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Para utilizar la opción %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Error</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Tiene que establecer rpcpassword=<contraseña> en el fichero de configuración:
%s
Si el archivo no existe, créelo con permiso de lectura solamente del propietario.</translation>
</message>
</context>
</TS> | </message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source> |
data.rs | use Void;
pub(crate) extern "C" fn data_new(d: *mut Void, size: u32) -> *const Void {
// use libc
|
}
| {
extern "C" {
fn realloc(d: *mut Void, bytes: usize) -> *mut Void;
}
unsafe { realloc(d, size as usize) }
} |
brief.js | import predefinedPropTypes from '../../constants/prop-types/body'
import PropTypes from 'prop-types'
import React, { PureComponent } from 'react'
import SeparationCurve from '../separation-curve'
import styled from 'styled-components'
import styles from '../../constants/css'
import themeConst from '../../constants/theme'
import typography from '../../constants/typography'
// lodash
import get from 'lodash/get'
import map from 'lodash/map'
const _ = {
get,
map,
}
const Content = styled.div`
/* ff-tisa-web-pro is for english text */
font-family: ff-tisa-web-pro, source-han-serif-tc, serif;
p {
color: ${props => {
switch (props.theme.name) {
case themeConst.article.v2.photo:
return 'rgba(255, 255, 255, 0.9)'
case themeConst.article.v2.pink:
case themeConst.article.v2.default:
default:
return '#808080'
}
}};
line-height: 1.7;
letter-spacing: 0.7px;
font-weight: ${typography.font.weight.semiBold};
font-size: ${props => props.theme.fontSizeOffset + 20}px;
margin: 0 0 1em 0;
&:last-child {
margin: 0;
}
}
${styles.linkChildren}
`
export default class | extends PureComponent {
static propTypes = {
className: PropTypes.string,
data: PropTypes.arrayOf(predefinedPropTypes.elementData),
}
static defaultProps = {
className: '',
data: [],
}
_buildContentElement = (data, index) => {
switch (data.type) {
case 'unstyled': {
// Braced case block scopes the lexical declaration to this case only
const htmlString = _.get(data, ['content', 0])
if (!htmlString) return null
return (
<p
key={_.get(data, 'id', `p-${index}`)}
dangerouslySetInnerHTML={{ __html: htmlString }}
/>
)
}
default:
return null
}
}
render() {
const { className, data } = this.props
const elements = _.map(data, this._buildContentElement).filter(Boolean)
return elements.length > 0 ? (
<div className={className}>
<Content>{elements}</Content>
<SeparationCurve />
</div>
) : null
}
}
| Brief |
experimentparam.py | import spacy
from spacy.lang.en import English
from spacy.util import minibatch, compounding
from spacy.util import decaying
class ExperimentParam:
def __init__(self, TRAIN_DATA: list, max_batch_sizes: dict, model_type='ner',
dropout_start: float = 0.6, dropout_end: float = 0.2, interval: float = 1e-4):
self.TRAIN_DATA = TRAIN_DATA
self.max_batch_sizes = max_batch_sizes
self.model_type = model_type
self.dropout_start = dropout_start
self.dropout_end = dropout_end
self.interval = interval
def | (self):
"""
max_batch_sizes =
Initialize with batch size 1, and compound to a maximum determined by your data size and problem type.
{"tagger": 32, "parser": 16, "ner": 16, "textcat": 64}
"""
max_batch_size = self.max_batch_sizes[self.model_type]
if len(self.TRAIN_DATA) < 1000:
max_batch_size /= 2
if len(self.TRAIN_DATA) < 500:
max_batch_size /= 2
batch_size = compounding(1, max_batch_size, 1.001)
batches = minibatch(self.TRAIN_DATA, size=batch_size)
return batches
@property
def determine_dropout(self):
"""
For small datasets, it’s useful to set a high dropout rate at first, and decay it down towards a more reasonable value. This helps avoid the network immediately overfitting, while still encouraging it to learn some of the more interesting things in your data.
"""
dropout = decaying(self.dropout_start, self.dropout_end, self.interval)
return dropout
| get_batches |
newton_cg.rs | // Copyright 2018 Stefan Kroboth
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
extern crate argmin;
extern crate ndarray;
use argmin::prelude::*;
use argmin::solver::linesearch::MoreThuenteLineSearch;
use argmin::solver::newton::NewtonCG;
use argmin::testfunctions::{rosenbrock_2d, rosenbrock_2d_derivative, rosenbrock_2d_hessian};
use ndarray::{Array, Array1, Array2};
use serde::{Deserialize, Serialize};
#[derive(Clone, Default, Serialize, Deserialize)]
struct Rosenbrock {
a: f64,
b: f64,
}
impl ArgminOp for Rosenbrock {
type Param = Array1<f64>;
type Output = f64;
type Hessian = Array2<f64>;
fn apply(&self, p: &Self::Param) -> Result<Self::Output, Error> {
Ok(rosenbrock_2d(&p.to_vec(), self.a, self.b))
}
fn gradient(&self, p: &Self::Param) -> Result<Self::Param, Error> {
Ok(Array1::from_vec(rosenbrock_2d_derivative(
&p.to_vec(),
self.a,
self.b,
)))
}
fn hessian(&self, p: &Self::Param) -> Result<Self::Hessian, Error> |
}
fn run() -> Result<(), Error> {
// Define cost function
let cost = Rosenbrock { a: 1.0, b: 100.0 };
// Define initial parameter vector
// let init_param: Array1<f64> = Array1::from_vec(vec![1.2, 1.2]);
let init_param: Array1<f64> = Array1::from_vec(vec![-1.2, 1.0]);
// set up line search
let linesearch = MoreThuenteLineSearch::new();
// Set up solver
let solver = NewtonCG::new(linesearch);
// Run solver
let res = Executor::new(cost, solver, init_param)
.add_observer(ArgminSlogLogger::term(), ObserverMode::Always)
.max_iters(100)
.run()?;
// Wait a second (lets the logger flush everything before printing again)
std::thread::sleep(std::time::Duration::from_secs(1));
// Print result
println!("{}", res);
Ok(())
}
fn main() {
if let Err(ref e) = run() {
println!("{} {}", e.as_fail(), e.backtrace());
std::process::exit(1);
}
}
| {
let h = rosenbrock_2d_hessian(&p.to_vec(), self.a, self.b);
Ok(Array::from_shape_vec((2, 2), h)?)
} |
dynamic.rs | //! Helper module which defines the [`Dynamic`] data type and the
//! [`Any`] trait to allow custom type handling.
use crate::func::native::SendSync;
use crate::{reify, ExclusiveRange, FnPtr, ImmutableString, InclusiveRange, INT};
#[cfg(feature = "no_std")]
use std::prelude::v1::*;
use std::{
any::{type_name, Any, TypeId},
fmt,
hash::{Hash, Hasher},
mem,
ops::{Deref, DerefMut},
str::FromStr,
};
#[cfg(not(feature = "no_std"))]
#[cfg(not(target_family = "wasm"))]
pub use std::time::Instant;
#[cfg(not(feature = "no_std"))]
#[cfg(target_family = "wasm")]
pub use instant::Instant;
/// The message: data type was checked
const CHECKED: &str = "data type was checked";
mod private {
use crate::func::native::SendSync;
use std::any::Any;
/// A sealed trait that prevents other crates from implementing [`Variant`].
pub trait Sealed {}
impl<T: Any + Clone + SendSync> Sealed for T {}
}
/// _(internals)_ Trait to represent any type.
/// Exported under the `internals` feature only.
///
/// This trait is sealed and cannot be implemented.
///
/// Currently, [`Variant`] is not [`Send`] nor [`Sync`], so it can practically be any type.
/// Turn on the `sync` feature to restrict it to only types that implement [`Send`] `+` [`Sync`].
#[cfg(not(feature = "sync"))]
pub trait Variant: Any + private::Sealed {
/// Convert this [`Variant`] trait object to [`&dyn Any`][Any].
#[must_use]
fn as_any(&self) -> &dyn Any;
/// Convert this [`Variant`] trait object to [`&mut dyn Any`][Any].
#[must_use]
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Convert this [`Variant`] trait object to [`Box<dyn Any>`].
#[must_use]
fn as_boxed_any(self: Box<Self>) -> Box<dyn Any>;
/// Get the name of this type.
#[must_use]
fn type_name(&self) -> &'static str;
/// Clone this [`Variant`] trait object.
#[must_use]
fn clone_object(&self) -> Box<dyn Variant>;
}
/// _(internals)_ Trait to represent any type.
/// Exported under the `internals` feature only.
///
/// This trait is sealed and cannot be implemented.
#[cfg(feature = "sync")]
pub trait Variant: Any + Send + Sync + private::Sealed {
/// Convert this [`Variant`] trait object to [`&dyn Any`][Any].
#[must_use]
fn as_any(&self) -> &dyn Any;
/// Convert this [`Variant`] trait object to [`&mut dyn Any`][Any].
#[must_use]
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Convert this [`Variant`] trait object to [`Box<dyn Any>`].
#[must_use]
fn as_boxed_any(self: Box<Self>) -> Box<dyn Any>;
/// Get the name of this type.
#[must_use]
fn type_name(&self) -> &'static str;
/// Clone this [`Variant`] trait object.
#[must_use]
fn clone_object(&self) -> Box<dyn Variant>;
}
impl<T: Any + Clone + SendSync> Variant for T {
#[inline(always)]
fn as_any(&self) -> &dyn Any {
self
}
#[inline(always)]
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
#[inline(always)]
fn as_boxed_any(self: Box<Self>) -> Box<dyn Any> {
self
}
#[inline(always)]
fn type_name(&self) -> &'static str {
type_name::<T>()
}
#[inline(always)]
fn clone_object(&self) -> Box<dyn Variant> {
Box::new(self.clone()) as Box<dyn Variant>
}
}
impl dyn Variant {
/// Is this [`Variant`] a specific type?
#[inline(always)]
#[must_use]
pub fn is<T: Any>(&self) -> bool {
TypeId::of::<T>() == self.type_id()
}
}
/// _(internals)_ Modes of access.
/// Exported under the `internals` feature only.
#[derive(Debug, Eq, PartialEq, Hash, Copy, Clone)]
pub enum AccessMode {
/// Mutable.
ReadWrite,
/// Immutable.
ReadOnly,
}
/// Arbitrary data attached to a [`Dynamic`] value.
#[cfg(target_pointer_width = "64")]
pub type Tag = i32;
/// Arbitrary data attached to a [`Dynamic`] value.
#[cfg(target_pointer_width = "32")]
pub type Tag = i16;
/// Default tag value for [`Dynamic`].
const DEFAULT_TAG_VALUE: Tag = 0;
/// Dynamic type containing any value.
pub struct Dynamic(pub(crate) Union);
/// Internal [`Dynamic`] representation.
///
/// Most variants are boxed to reduce the size.
pub enum Union {
/// The Unit value - ().
Unit((), Tag, AccessMode),
/// A boolean value.
Bool(bool, Tag, AccessMode),
/// An [`ImmutableString`] value.
Str(ImmutableString, Tag, AccessMode),
/// A character value.
Char(char, Tag, AccessMode),
/// An integer value.
Int(INT, Tag, AccessMode),
/// A floating-point value.
#[cfg(not(feature = "no_float"))]
Float(crate::ast::FloatWrapper<crate::FLOAT>, Tag, AccessMode),
/// _(decimal)_ A fixed-precision decimal value.
/// Exported under the `decimal` feature only.
#[cfg(feature = "decimal")]
Decimal(Box<rust_decimal::Decimal>, Tag, AccessMode),
/// An array value.
#[cfg(not(feature = "no_index"))]
Array(Box<crate::Array>, Tag, AccessMode),
/// An blob (byte array).
#[cfg(not(feature = "no_index"))]
Blob(Box<crate::Blob>, Tag, AccessMode),
/// An object map value.
#[cfg(not(feature = "no_object"))]
Map(Box<crate::Map>, Tag, AccessMode),
/// A function pointer.
FnPtr(Box<FnPtr>, Tag, AccessMode),
/// A timestamp value.
#[cfg(not(feature = "no_std"))]
TimeStamp(Box<Instant>, Tag, AccessMode),
/// Any type as a trait object.
#[allow(clippy::redundant_allocation)]
Variant(Box<Box<dyn Variant>>, Tag, AccessMode),
/// A _shared_ value of any type.
#[cfg(not(feature = "no_closure"))]
Shared(crate::Shared<crate::Locked<Dynamic>>, Tag, AccessMode),
}
/// _(internals)_ Lock guard for reading a [`Dynamic`].
/// Exported under the `internals` feature only.
///
/// This type provides transparent interoperability between normal [`Dynamic`] and shared
/// [`Dynamic`] values.
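///
/// A minimal usage sketch (hedged; it assumes `Dynamic::read_lock`, the
/// accessor on [`Dynamic`] that produces this guard, which is not shown in
/// this excerpt):
///
/// ```
/// use rhai::Dynamic;
///
/// let x = Dynamic::from(42_i64);
/// // The same call works whether or not `x` is shared.
/// let guard = x.read_lock::<i64>().unwrap();
/// assert_eq!(*guard, 42);
/// ```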
#[derive(Debug)]
pub struct DynamicReadLock<'d, T: Clone>(DynamicReadLockInner<'d, T>);
/// Different types of read guards for [`DynamicReadLock`].
#[derive(Debug)]
enum DynamicReadLockInner<'d, T: Clone> {
/// A simple reference to a non-shared value.
Reference(&'d T),
/// A read guard to a shared value.
#[cfg(not(feature = "no_closure"))]
Guard(crate::func::native::LockGuard<'d, Dynamic>),
}
impl<'d, T: Any + Clone> Deref for DynamicReadLock<'d, T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
match self.0 {
DynamicReadLockInner::Reference(ref reference) => *reference,
#[cfg(not(feature = "no_closure"))]
DynamicReadLockInner::Guard(ref guard) => guard.downcast_ref().expect(CHECKED),
}
}
}
/// _(internals)_ Lock guard for writing a [`Dynamic`].
/// Exported under the `internals` feature only.
///
/// This type provides transparent interoperability between normal [`Dynamic`] and shared
/// [`Dynamic`] values.
#[derive(Debug)]
pub struct DynamicWriteLock<'d, T: Clone>(DynamicWriteLockInner<'d, T>);
/// Different types of write guards for [`DynamicWriteLock`].
#[derive(Debug)]
enum DynamicWriteLockInner<'d, T: Clone> {
/// A simple mutable reference to a non-shared value.
Reference(&'d mut T),
/// A write guard to a shared value.
#[cfg(not(feature = "no_closure"))]
Guard(crate::func::native::LockGuardMut<'d, Dynamic>),
}
impl<'d, T: Any + Clone> Deref for DynamicWriteLock<'d, T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
match self.0 {
DynamicWriteLockInner::Reference(ref reference) => *reference,
#[cfg(not(feature = "no_closure"))]
DynamicWriteLockInner::Guard(ref guard) => guard.downcast_ref().expect(CHECKED),
}
}
}
impl<'d, T: Any + Clone> DerefMut for DynamicWriteLock<'d, T> {
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
match self.0 {
DynamicWriteLockInner::Reference(ref mut reference) => *reference,
#[cfg(not(feature = "no_closure"))]
DynamicWriteLockInner::Guard(ref mut guard) => guard.downcast_mut().expect(CHECKED),
}
}
}
impl Dynamic {
/// Get the arbitrary data attached to this [`Dynamic`].
#[must_use]
pub const fn tag(&self) -> Tag {
match self.0 {
Union::Unit(_, tag, _)
| Union::Bool(_, tag, _)
| Union::Str(_, tag, _)
| Union::Char(_, tag, _)
| Union::Int(_, tag, _)
| Union::FnPtr(_, tag, _)
| Union::Variant(_, tag, _) => tag,
#[cfg(not(feature = "no_float"))]
Union::Float(_, tag, _) => tag,
#[cfg(feature = "decimal")]
Union::Decimal(_, tag, _) => tag,
#[cfg(not(feature = "no_index"))]
Union::Array(_, tag, _) | Union::Blob(_, tag, _) => tag,
#[cfg(not(feature = "no_object"))]
Union::Map(_, tag, _) => tag,
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(_, tag, _) => tag,
#[cfg(not(feature = "no_closure"))]
Union::Shared(_, tag, _) => tag,
}
}
/// Attach arbitrary data to this [`Dynamic`].
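///
/// A short sketch of tagging, using the [`tag`][Dynamic::tag] accessor
/// defined above:
///
/// ```
/// use rhai::Dynamic;
///
/// let mut x = Dynamic::from(42_i64);
/// x.set_tag(7);
/// assert_eq!(x.tag(), 7);
/// ```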
pub fn set_tag(&mut self, value: Tag) -> &mut Self {
match self.0 {
Union::Unit(_, ref mut tag, _)
| Union::Bool(_, ref mut tag, _)
| Union::Str(_, ref mut tag, _)
| Union::Char(_, ref mut tag, _)
| Union::Int(_, ref mut tag, _)
| Union::FnPtr(_, ref mut tag, _)
| Union::Variant(_, ref mut tag, _) => *tag = value,
#[cfg(not(feature = "no_float"))]
Union::Float(_, ref mut tag, _) => *tag = value,
#[cfg(feature = "decimal")]
Union::Decimal(_, ref mut tag, _) => *tag = value,
#[cfg(not(feature = "no_index"))]
Union::Array(_, ref mut tag, _) | Union::Blob(_, ref mut tag, _) => *tag = value,
#[cfg(not(feature = "no_object"))]
Union::Map(_, ref mut tag, _) => *tag = value,
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(_, ref mut tag, _) => *tag = value,
#[cfg(not(feature = "no_closure"))]
Union::Shared(_, ref mut tag, _) => *tag = value,
}
self
}
/// Does this [`Dynamic`] hold a variant data type instead of one of the supported system
/// primitive types?
#[inline(always)]
#[must_use]
pub const fn is_variant(&self) -> bool {
matches!(self.0, Union::Variant(..))
}
/// Is the value held by this [`Dynamic`] shared?
///
/// Not available under `no_closure`.
#[cfg(not(feature = "no_closure"))]
#[inline(always)]
#[must_use]
pub const fn is_shared(&self) -> bool {
#[cfg(not(feature = "no_closure"))]
return matches!(self.0, Union::Shared(..));
#[cfg(feature = "no_closure")]
return false;
}
/// Is the value held by this [`Dynamic`] a particular type?
///
/// If the [`Dynamic`] is a shared variant, the check is performed on its internal value.
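///
/// A short illustration (a sketch assuming the crate's public
/// `Dynamic::from` constructor):
///
/// ```
/// use rhai::Dynamic;
///
/// let s = Dynamic::from("hello".to_string());
/// // `String` is matched against the internal `ImmutableString` representation.
/// assert!(s.is::<String>());
/// assert!(!s.is::<i64>());
/// ```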
#[inline]
#[must_use]
pub fn is<T: Any + Clone>(&self) -> bool {
if TypeId::of::<T>() == TypeId::of::<String>() {
self.type_id() == TypeId::of::<ImmutableString>()
} else {
self.type_id() == TypeId::of::<T>()
}
}
/// Get the [`TypeId`] of the value held by this [`Dynamic`].
///
/// # Panics or Deadlocks When Value is Shared
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
#[must_use]
pub fn type_id(&self) -> TypeId {
match self.0 {
Union::Unit(..) => TypeId::of::<()>(),
Union::Bool(..) => TypeId::of::<bool>(),
Union::Str(..) => TypeId::of::<ImmutableString>(),
Union::Char(..) => TypeId::of::<char>(),
Union::Int(..) => TypeId::of::<INT>(),
#[cfg(not(feature = "no_float"))]
Union::Float(..) => TypeId::of::<crate::FLOAT>(),
#[cfg(feature = "decimal")]
Union::Decimal(..) => TypeId::of::<rust_decimal::Decimal>(),
#[cfg(not(feature = "no_index"))]
Union::Array(..) => TypeId::of::<crate::Array>(),
#[cfg(not(feature = "no_index"))]
Union::Blob(..) => TypeId::of::<crate::Blob>(),
#[cfg(not(feature = "no_object"))]
Union::Map(..) => TypeId::of::<crate::Map>(),
Union::FnPtr(..) => TypeId::of::<FnPtr>(),
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(..) => TypeId::of::<Instant>(),
Union::Variant(ref v, ..) => (***v).type_id(),
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => (*cell.borrow()).type_id(),
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => (*cell.read().unwrap()).type_id(),
}
}
/// Get the name of the type of the value held by this [`Dynamic`].
///
/// # Panics or Deadlocks When Value is Shared
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
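///
/// A small sketch of the names returned for built-in types, matching the
/// arms below:
///
/// ```
/// use rhai::Dynamic;
///
/// assert_eq!(Dynamic::from(true).type_name(), "bool");
/// assert_eq!(Dynamic::from('x').type_name(), "char");
/// assert_eq!(Dynamic::from("hi".to_string()).type_name(), "string");
/// ```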
#[must_use]
pub fn type_name(&self) -> &'static str {
match self.0 {
Union::Unit(..) => "()",
Union::Bool(..) => "bool",
Union::Str(..) => "string",
Union::Char(..) => "char",
Union::Int(..) => type_name::<INT>(),
#[cfg(not(feature = "no_float"))]
Union::Float(..) => type_name::<crate::FLOAT>(),
#[cfg(feature = "decimal")]
Union::Decimal(..) => "decimal",
#[cfg(not(feature = "no_index"))]
Union::Array(..) => "array",
#[cfg(not(feature = "no_index"))]
Union::Blob(..) => "blob",
#[cfg(not(feature = "no_object"))]
Union::Map(..) => "map",
Union::FnPtr(..) => "Fn",
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(..) => "timestamp",
Union::Variant(ref v, ..) => (***v).type_name(),
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => cell
.try_borrow()
.map(|v| (*v).type_name())
.unwrap_or("<shared>"),
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => (*cell.read().unwrap()).type_name(),
}
}
}
impl Hash for Dynamic {
/// Hash the [`Dynamic`] value.
///
/// # Panics
///
/// Panics if the [`Dynamic`] value contains an unrecognized trait object.
fn hash<H: Hasher>(&self, state: &mut H) {
mem::discriminant(&self.0).hash(state);
match self.0 {
Union::Unit(..) => ().hash(state),
Union::Bool(ref b, ..) => b.hash(state),
Union::Str(ref s, ..) => s.hash(state),
Union::Char(ref c, ..) => c.hash(state),
Union::Int(ref i, ..) => i.hash(state),
#[cfg(not(feature = "no_float"))]
Union::Float(ref f, ..) => f.hash(state),
#[cfg(feature = "decimal")]
Union::Decimal(ref d, ..) => d.hash(state),
#[cfg(not(feature = "no_index"))]
Union::Array(ref a, ..) => a.as_ref().hash(state),
#[cfg(not(feature = "no_index"))]
Union::Blob(ref a, ..) => a.as_ref().hash(state),
#[cfg(not(feature = "no_object"))]
Union::Map(ref m, ..) => m.as_ref().hash(state),
Union::FnPtr(ref f, ..) => f.hash(state),
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => (*cell.borrow()).hash(state),
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => (*cell.read().unwrap()).hash(state),
Union::Variant(ref _v, ..) => {
#[cfg(not(feature = "only_i32"))]
#[cfg(not(feature = "only_i64"))]
{
let value_any = (***_v).as_any();
let type_id = value_any.type_id();
// Standard integer types stored as custom variants are hashed here.
// Each branch returns early; without the returns, control would fall
// through to the `unimplemented!` call below even after hashing.
if type_id == TypeId::of::<u8>() {
TypeId::of::<u8>().hash(state);
value_any.downcast_ref::<u8>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<u16>() {
TypeId::of::<u16>().hash(state);
value_any.downcast_ref::<u16>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<u32>() {
TypeId::of::<u32>().hash(state);
value_any.downcast_ref::<u32>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<u64>() {
TypeId::of::<u64>().hash(state);
value_any.downcast_ref::<u64>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<i8>() {
TypeId::of::<i8>().hash(state);
value_any.downcast_ref::<i8>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<i16>() {
TypeId::of::<i16>().hash(state);
value_any.downcast_ref::<i16>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<i32>() {
TypeId::of::<i32>().hash(state);
value_any.downcast_ref::<i32>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<i64>() {
TypeId::of::<i64>().hash(state);
value_any.downcast_ref::<i64>().expect(CHECKED).hash(state);
return;
}
#[cfg(not(target_family = "wasm"))]
if type_id == TypeId::of::<u128>() {
TypeId::of::<u128>().hash(state);
value_any.downcast_ref::<u128>().expect(CHECKED).hash(state);
return;
} else if type_id == TypeId::of::<i128>() {
TypeId::of::<i128>().hash(state);
value_any.downcast_ref::<i128>().expect(CHECKED).hash(state);
return;
}
}
unimplemented!("a custom type cannot be hashed")
}
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(..) => unimplemented!("{} cannot be hashed", self.type_name()),
}
}
}
impl fmt::Display for Dynamic {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
Union::Unit(..) => write!(f, ""),
Union::Bool(ref v, ..) => fmt::Display::fmt(v, f),
Union::Str(ref v, ..) => fmt::Display::fmt(v, f),
Union::Char(ref v, ..) => fmt::Display::fmt(v, f),
Union::Int(ref v, ..) => fmt::Display::fmt(v, f),
#[cfg(not(feature = "no_float"))]
Union::Float(ref v, ..) => fmt::Display::fmt(v, f),
#[cfg(feature = "decimal")]
Union::Decimal(ref v, ..) => fmt::Display::fmt(v, f),
#[cfg(not(feature = "no_index"))]
Union::Array(..) => fmt::Debug::fmt(self, f),
#[cfg(not(feature = "no_index"))]
Union::Blob(..) => fmt::Debug::fmt(self, f),
#[cfg(not(feature = "no_object"))]
Union::Map(..) => fmt::Debug::fmt(self, f),
Union::FnPtr(ref v, ..) => fmt::Display::fmt(v, f),
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(..) => f.write_str("<timestamp>"),
Union::Variant(ref v, ..) => {
let _value_any = (***v).as_any();
let _type_id = _value_any.type_id();
#[cfg(not(feature = "only_i32"))]
#[cfg(not(feature = "only_i64"))]
if _type_id == TypeId::of::<u8>() {
return fmt::Display::fmt(_value_any.downcast_ref::<u8>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<u16>() {
return fmt::Display::fmt(_value_any.downcast_ref::<u16>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<u32>() {
return fmt::Display::fmt(_value_any.downcast_ref::<u32>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<u64>() {
return fmt::Display::fmt(_value_any.downcast_ref::<u64>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i8>() {
return fmt::Display::fmt(_value_any.downcast_ref::<i8>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i16>() {
return fmt::Display::fmt(_value_any.downcast_ref::<i16>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i32>() {
return fmt::Display::fmt(_value_any.downcast_ref::<i32>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i64>() {
return fmt::Display::fmt(_value_any.downcast_ref::<i64>().expect(CHECKED), f);
}
#[cfg(not(feature = "no_float"))]
#[cfg(not(feature = "f32_float"))]
if _type_id == TypeId::of::<f32>() {
return fmt::Display::fmt(_value_any.downcast_ref::<f32>().expect(CHECKED), f);
}
#[cfg(not(feature = "no_float"))]
#[cfg(feature = "f32_float")]
if _type_id == TypeId::of::<f64>() {
return fmt::Display::fmt(_value_any.downcast_ref::<f64>().expect(CHECKED), f);
}
#[cfg(not(feature = "only_i32"))]
#[cfg(not(feature = "only_i64"))]
#[cfg(not(target_family = "wasm"))]
if _type_id == TypeId::of::<u128>() {
return fmt::Display::fmt(_value_any.downcast_ref::<u128>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i128>() {
return fmt::Display::fmt(_value_any.downcast_ref::<i128>().expect(CHECKED), f);
}
if _type_id == TypeId::of::<ExclusiveRange>() {
let range = _value_any.downcast_ref::<ExclusiveRange>().expect(CHECKED);
return write!(f, "{}..{}", range.start, range.end);
} else if _type_id == TypeId::of::<InclusiveRange>() {
let range = _value_any.downcast_ref::<InclusiveRange>().expect(CHECKED);
return write!(f, "{}..={}", range.start(), range.end());
}
f.write_str((***v).type_name())
}
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => {
if let Ok(v) = cell.try_borrow() {
fmt::Display::fmt(&*v, f)
} else {
f.write_str("<shared>")
}
}
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => fmt::Display::fmt(&*cell.read().unwrap(), f),
}
}
}
impl fmt::Debug for Dynamic {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
Union::Unit(ref v, ..) => fmt::Debug::fmt(v, f),
Union::Bool(ref v, ..) => fmt::Debug::fmt(v, f),
Union::Str(ref v, ..) => fmt::Debug::fmt(v, f),
Union::Char(ref v, ..) => fmt::Debug::fmt(v, f),
Union::Int(ref v, ..) => fmt::Debug::fmt(v, f),
#[cfg(not(feature = "no_float"))]
Union::Float(ref v, ..) => fmt::Debug::fmt(v, f),
#[cfg(feature = "decimal")]
Union::Decimal(ref v, ..) => fmt::Debug::fmt(v, f),
#[cfg(not(feature = "no_index"))]
Union::Array(ref v, ..) => fmt::Debug::fmt(v, f),
#[cfg(not(feature = "no_index"))]
Union::Blob(ref v, ..) => {
f.write_str("[")?;
v.iter().enumerate().try_for_each(|(i, v)| {
if i > 0 && i % 8 == 0 {
f.write_str(" ")?;
}
write!(f, "{:02x}", v)
})?;
f.write_str("]")
}
#[cfg(not(feature = "no_object"))]
Union::Map(ref v, ..) => {
f.write_str("#")?;
fmt::Debug::fmt(v, f)
}
Union::FnPtr(ref v, ..) => fmt::Debug::fmt(v, f),
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(..) => write!(f, "<timestamp>"),
Union::Variant(ref v, ..) => {
let _value_any = (***v).as_any();
let _type_id = _value_any.type_id();
#[cfg(not(feature = "only_i32"))]
#[cfg(not(feature = "only_i64"))]
if _type_id == TypeId::of::<u8>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<u8>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<u16>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<u16>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<u32>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<u32>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<u64>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<u64>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i8>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<i8>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i16>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<i16>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i32>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<i32>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i64>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<i64>().expect(CHECKED), f);
}
#[cfg(not(feature = "no_float"))]
#[cfg(not(feature = "f32_float"))]
if _type_id == TypeId::of::<f32>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<f32>().expect(CHECKED), f);
}
#[cfg(not(feature = "no_float"))]
#[cfg(feature = "f32_float")]
if _type_id == TypeId::of::<f64>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<f64>().expect(CHECKED), f);
}
#[cfg(not(feature = "only_i32"))]
#[cfg(not(feature = "only_i64"))]
#[cfg(not(target_family = "wasm"))]
if _type_id == TypeId::of::<u128>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<u128>().expect(CHECKED), f);
} else if _type_id == TypeId::of::<i128>() {
return fmt::Debug::fmt(_value_any.downcast_ref::<i128>().expect(CHECKED), f);
}
if _type_id == TypeId::of::<ExclusiveRange>() {
let range = _value_any.downcast_ref::<ExclusiveRange>().expect(CHECKED);
return write!(f, "{}..{}", range.start, range.end);
} else if _type_id == TypeId::of::<InclusiveRange>() {
let range = _value_any.downcast_ref::<InclusiveRange>().expect(CHECKED);
return write!(f, "{}..={}", range.start(), range.end());
}
f.write_str((***v).type_name())
}
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => {
if let Ok(v) = cell.try_borrow() {
write!(f, "{:?} (shared)", *v)
} else {
f.write_str("<shared>")
}
}
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => fmt::Debug::fmt(&*cell.read().unwrap(), f),
}
}
}
use AccessMode::*;
impl Clone for Dynamic {
/// Clone the [`Dynamic`] value.
///
/// # WARNING
///
/// The cloned copy is marked read-write even if the original is read-only.
fn clone(&self) -> Self {
match self.0 {
Union::Unit(v, tag, ..) => Self(Union::Unit(v, tag, ReadWrite)),
Union::Bool(v, tag, ..) => Self(Union::Bool(v, tag, ReadWrite)),
Union::Str(ref v, tag, ..) => Self(Union::Str(v.clone(), tag, ReadWrite)),
Union::Char(v, tag, ..) => Self(Union::Char(v, tag, ReadWrite)),
Union::Int(v, tag, ..) => Self(Union::Int(v, tag, ReadWrite)),
#[cfg(not(feature = "no_float"))]
Union::Float(v, tag, ..) => Self(Union::Float(v, tag, ReadWrite)),
#[cfg(feature = "decimal")]
Union::Decimal(ref v, tag, ..) => Self(Union::Decimal(v.clone(), tag, ReadWrite)),
#[cfg(not(feature = "no_index"))]
Union::Array(ref v, tag, ..) => Self(Union::Array(v.clone(), tag, ReadWrite)),
#[cfg(not(feature = "no_index"))]
Union::Blob(ref v, tag, ..) => Self(Union::Blob(v.clone(), tag, ReadWrite)),
#[cfg(not(feature = "no_object"))]
Union::Map(ref v, tag, ..) => Self(Union::Map(v.clone(), tag, ReadWrite)),
Union::FnPtr(ref v, tag, ..) => Self(Union::FnPtr(v.clone(), tag, ReadWrite)),
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(ref v, tag, ..) => Self(Union::TimeStamp(v.clone(), tag, ReadWrite)),
Union::Variant(ref v, tag, ..) => Self(Union::Variant(
v.as_ref().as_ref().clone_object().into(),
tag,
ReadWrite,
)),
#[cfg(not(feature = "no_closure"))]
Union::Shared(ref cell, tag, ..) => Self(Union::Shared(cell.clone(), tag, ReadWrite)),
}
}
}
impl Default for Dynamic {
#[inline(always)]
#[must_use]
fn default() -> Self {
Self::UNIT
}
}
#[cfg(not(feature = "no_float"))]
#[cfg(feature = "f32_float")]
use std::f32::consts as FloatConstants;
#[cfg(not(feature = "no_float"))]
#[cfg(not(feature = "f32_float"))]
use std::f64::consts as FloatConstants;
impl Dynamic {
/// A [`Dynamic`] containing a `()`.
pub const UNIT: Self = Self(Union::Unit((), DEFAULT_TAG_VALUE, ReadWrite));
/// A [`Dynamic`] containing a `true`.
pub const TRUE: Self = Self::from_bool(true);
/// A [`Dynamic`] containing a [`false`].
pub const FALSE: Self = Self::from_bool(false);
/// A [`Dynamic`] containing the integer zero.
pub const ZERO: Self = Self::from_int(0);
/// A [`Dynamic`] containing the integer 1.
pub const ONE: Self = Self::from_int(1);
/// A [`Dynamic`] containing the integer 2.
pub const TWO: Self = Self::from_int(2);
/// A [`Dynamic`] containing the integer 3.
pub const THREE: Self = Self::from_int(3);
/// A [`Dynamic`] containing the integer 10.
pub const TEN: Self = Self::from_int(10);
/// A [`Dynamic`] containing the integer 100.
pub const HUNDRED: Self = Self::from_int(100);
/// A [`Dynamic`] containing the integer 1,000.
pub const THOUSAND: Self = Self::from_int(1000);
/// A [`Dynamic`] containing the integer 1,000,000.
pub const MILLION: Self = Self::from_int(1000000);
/// A [`Dynamic`] containing the integer -1.
pub const NEGATIVE_ONE: Self = Self::from_int(-1);
/// A [`Dynamic`] containing the integer -2.
pub const NEGATIVE_TWO: Self = Self::from_int(-2);
/// A [`Dynamic`] containing `0.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_ZERO: Self = Self::from_float(0.0);
/// A [`Dynamic`] containing `1.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_ONE: Self = Self::from_float(1.0);
/// A [`Dynamic`] containing `2.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_TWO: Self = Self::from_float(2.0);
/// A [`Dynamic`] containing `10.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_TEN: Self = Self::from_float(10.0);
/// A [`Dynamic`] containing `100.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_HUNDRED: Self = Self::from_float(100.0);
/// A [`Dynamic`] containing `1000.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_THOUSAND: Self = Self::from_float(1000.0);
/// A [`Dynamic`] containing `1000000.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_MILLION: Self = Self::from_float(1000000.0);
/// A [`Dynamic`] containing `-1.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_NEGATIVE_ONE: Self = Self::from_float(-1.0);
/// A [`Dynamic`] containing `-2.0`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_NEGATIVE_TWO: Self = Self::from_float(-2.0);
/// A [`Dynamic`] containing `0.5`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_HALF: Self = Self::from_float(0.5);
/// A [`Dynamic`] containing `0.25`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_QUARTER: Self = Self::from_float(0.25);
/// A [`Dynamic`] containing `0.2`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_FIFTH: Self = Self::from_float(0.2);
/// A [`Dynamic`] containing `0.1`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_TENTH: Self = Self::from_float(0.1);
/// A [`Dynamic`] containing `0.01`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_HUNDREDTH: Self = Self::from_float(0.01);
/// A [`Dynamic`] containing `0.001`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_THOUSANDTH: Self = Self::from_float(0.001);
/// A [`Dynamic`] containing `0.000001`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_MILLIONTH: Self = Self::from_float(0.000001);
/// A [`Dynamic`] containing π.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_PI: Self = Self::from_float(FloatConstants::PI);
/// A [`Dynamic`] containing π/2.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_HALF_PI: Self = Self::from_float(FloatConstants::FRAC_PI_2);
/// A [`Dynamic`] containing π/4.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_QUARTER_PI: Self = Self::from_float(FloatConstants::FRAC_PI_4);
/// A [`Dynamic`] containing 2π.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_TWO_PI: Self = Self::from_float(FloatConstants::TAU);
/// A [`Dynamic`] containing 1/π.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_INVERSE_PI: Self = Self::from_float(FloatConstants::FRAC_1_PI);
/// A [`Dynamic`] containing _e_.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_E: Self = Self::from_float(FloatConstants::E);
/// A [`Dynamic`] containing `log` _e_.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_LOG_E: Self = Self::from_float(FloatConstants::LOG10_E);
/// A [`Dynamic`] containing `ln 10`.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
pub const FLOAT_LN_10: Self = Self::from_float(FloatConstants::LN_10);
/// Create a new [`Dynamic`] from a [`bool`].
#[inline(always)]
pub const fn from_bool(value: bool) -> Self {
Self(Union::Bool(value, DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a new [`Dynamic`] from an [`INT`].
#[inline(always)]
pub const fn from_int(value: INT) -> Self {
Self(Union::Int(value, DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a new [`Dynamic`] from a [`char`].
#[inline(always)]
pub const fn from_char(value: char) -> Self {
Self(Union::Char(value, DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a new [`Dynamic`] from a [`FLOAT`][crate::FLOAT].
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
#[inline(always)]
pub const fn from_float(value: crate::FLOAT) -> Self {
Self(Union::Float(
crate::ast::FloatWrapper::new_const(value),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
/// Create a new [`Dynamic`] from a [`Decimal`](https://docs.rs/rust_decimal).
///
/// Exported under the `decimal` feature only.
#[cfg(feature = "decimal")]
#[inline(always)]
pub fn from_decimal(value: rust_decimal::Decimal) -> Self {
Self(Union::Decimal(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a [`Dynamic`] from an [`Array`][crate::Array].
#[cfg(not(feature = "no_index"))]
#[inline(always)]
pub fn from_array(array: crate::Array) -> Self {
Self(Union::Array(array.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a [`Dynamic`] from a [`Blob`][crate::Blob].
#[cfg(not(feature = "no_index"))]
#[inline(always)]
pub fn from_blob(blob: crate::Blob) -> Self {
Self(Union::Blob(blob.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a [`Dynamic`] from a [`Map`][crate::Map].
#[cfg(not(feature = "no_object"))]
#[inline(always)]
pub fn from_map(map: crate::Map) -> Self {
Self(Union::Map(map.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
/// Create a new [`Dynamic`] from an [`Instant`].
///
    /// Not available under `no_std`.
#[cfg(not(feature = "no_std"))]
#[inline(always)]
pub fn from_timestamp(value: Instant) -> Self {
Self(Union::TimeStamp(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
/// Get the [`AccessMode`] for this [`Dynamic`].
#[must_use]
pub(crate) const fn access_mode(&self) -> AccessMode {
match self.0 {
Union::Unit(.., access)
| Union::Bool(.., access)
| Union::Str(.., access)
| Union::Char(.., access)
| Union::Int(.., access)
| Union::FnPtr(.., access)
| Union::Variant(.., access) => access,
#[cfg(not(feature = "no_float"))]
Union::Float(.., access) => access,
#[cfg(feature = "decimal")]
Union::Decimal(.., access) => access,
#[cfg(not(feature = "no_index"))]
Union::Array(.., access) | Union::Blob(.., access) => access,
#[cfg(not(feature = "no_object"))]
Union::Map(.., access) => access,
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(.., access) => access,
#[cfg(not(feature = "no_closure"))]
Union::Shared(.., access) => access,
}
}
/// Set the [`AccessMode`] for this [`Dynamic`].
pub(crate) fn set_access_mode(&mut self, typ: AccessMode) -> &mut Self {
match self.0 {
Union::Unit(.., ref mut access)
| Union::Bool(.., ref mut access)
| Union::Str(.., ref mut access)
| Union::Char(.., ref mut access)
| Union::Int(.., ref mut access)
| Union::FnPtr(.., ref mut access)
| Union::Variant(.., ref mut access) => *access = typ,
#[cfg(not(feature = "no_float"))]
Union::Float(.., ref mut access) => *access = typ,
#[cfg(feature = "decimal")]
Union::Decimal(.., ref mut access) => *access = typ,
#[cfg(not(feature = "no_index"))]
Union::Array(ref mut a, _, ref mut access) => {
*access = typ;
for v in a.iter_mut() {
v.set_access_mode(typ);
}
}
#[cfg(not(feature = "no_index"))]
Union::Blob(.., ref mut access) => *access = typ,
#[cfg(not(feature = "no_object"))]
Union::Map(ref mut m, _, ref mut access) => {
*access = typ;
for v in m.values_mut() {
v.set_access_mode(typ);
}
}
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(.., ref mut access) => *access = typ,
#[cfg(not(feature = "no_closure"))]
Union::Shared(.., ref mut access) => *access = typ,
}
self
}
/// Make this [`Dynamic`] read-only (i.e. a constant).
#[inline(always)]
pub fn into_read_only(self) -> Self {
let mut value = self;
value.set_access_mode(AccessMode::ReadOnly);
value
}
/// Is this [`Dynamic`] read-only?
///
/// Constant [`Dynamic`] values are read-only.
///
/// If a [`&mut Dynamic`][Dynamic] to such a constant is passed to a Rust function, the function
/// can use this information to return an error of
/// [`ErrorAssignmentToConstant`][crate::EvalAltResult::ErrorAssignmentToConstant] if its value
/// is going to be modified.
///
/// This safe-guards constant values from being modified from within Rust functions.
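    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; it relies on the
    /// `into_read_only` constructor defined above):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let x = Dynamic::from(42_i64).into_read_only();
    /// assert!(x.is_read_only());
    ///
    /// let y = Dynamic::from(42_i64);
    /// assert!(!y.is_read_only());
    /// ```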
#[must_use]
pub fn is_read_only(&self) -> bool {
#[cfg(not(feature = "no_closure"))]
match self.0 {
Union::Shared(.., ReadOnly) => return true,
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => {
return match cell.borrow().access_mode() {
ReadWrite => false,
ReadOnly => true,
}
}
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => {
return match cell.read().unwrap().access_mode() {
ReadWrite => false,
ReadOnly => true,
}
}
_ => (),
}
match self.access_mode() {
ReadWrite => false,
ReadOnly => true,
}
}
/// Can this [`Dynamic`] be hashed?
#[must_use]
pub(crate) fn is_hashable(&self) -> bool {
match self.0 {
Union::Unit(..)
| Union::Bool(..)
| Union::Str(..)
| Union::Char(..)
| Union::Int(..) => true,
#[cfg(not(feature = "no_float"))]
Union::Float(..) => true,
#[cfg(not(feature = "no_index"))]
Union::Array(..) => true,
#[cfg(not(feature = "no_object"))]
Union::Map(..) => true,
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => cell.borrow().is_hashable(),
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => cell.read().unwrap().is_hashable(),
_ => false,
}
}
/// Create a [`Dynamic`] from any type. A [`Dynamic`] value is simply returned as is.
///
/// # Notes
///
/// Beware that you need to pass in an [`Array`][crate::Array] type for it to be recognized as
/// an [`Array`][crate::Array]. A [`Vec<T>`][Vec] does not get automatically converted to an
/// [`Array`][crate::Array], but will be a custom type instead (stored as a trait object). Use
/// `Into<Dynamic>` to convert a [`Vec<T>`][Vec] into a [`Dynamic`] as an
/// [`Array`][crate::Array] value.
///
/// Similarly, passing in a [`HashMap<String, T>`][std::collections::HashMap] or
/// [`BTreeMap<String, T>`][std::collections::BTreeMap] will not get a [`Map`][crate::Map] but a
/// custom type. Again, use `Into<Dynamic>` to get a [`Dynamic`] with a [`Map`][crate::Map]
/// value.
///
/// # Examples
///
/// ```
/// use rhai::Dynamic;
///
/// let result = Dynamic::from(42_i64);
/// assert_eq!(result.type_name(), "i64");
/// assert_eq!(result.to_string(), "42");
///
/// let result = Dynamic::from("hello");
/// assert_eq!(result.type_name(), "string");
/// assert_eq!(result.to_string(), "hello");
///
/// let new_result = Dynamic::from(result);
/// assert_eq!(new_result.type_name(), "string");
/// assert_eq!(new_result.to_string(), "hello");
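    ///
    /// // Illustrative addition (not in the original docs): a `Vec<T>` must go
    /// // through `Into<Dynamic>` to become an `Array`, as explained above.
    /// let list: Dynamic = vec![1_i64, 2, 3].into();
    /// assert_eq!(list.type_name(), "array");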
/// ```
#[inline]
#[must_use]
pub fn from<T: Variant + Clone>(value: T) -> Self {
// Coded this way in order to maximally leverage potentials for dead-code removal.
reify!(value, |v: Dynamic| return v);
reify!(value, |v: INT| return v.into());
#[cfg(not(feature = "no_float"))]
reify!(value, |v: crate::FLOAT| return v.into());
#[cfg(feature = "decimal")]
reify!(value, |v: rust_decimal::Decimal| return v.into());
reify!(value, |v: bool| return v.into());
reify!(value, |v: char| return v.into());
reify!(value, |v: ImmutableString| return v.into());
reify!(value, |v: String| return v.into());
reify!(value, |v: &str| return v.into());
reify!(value, |v: ()| return v.into());
#[cfg(not(feature = "no_index"))]
reify!(value, |v: crate::Array| return v.into());
#[cfg(not(feature = "no_index"))]
reify!(value, |v: crate::Blob| {
// don't use blob.into() because it'll be converted into an Array
return Dynamic::from_blob(v);
});
#[cfg(not(feature = "no_object"))]
reify!(value, |v: crate::Map| return v.into());
reify!(value, |v: FnPtr| return v.into());
#[cfg(not(feature = "no_std"))]
reify!(value, |v: Instant| return v.into());
#[cfg(not(feature = "no_closure"))]
reify!(value, |v: crate::Shared<crate::Locked<Dynamic>>| return v
.into());
Self(Union::Variant(
Box::new(Box::new(value)),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
/// Turn the [`Dynamic`] value into a shared [`Dynamic`] value backed by an
/// [`Rc<RefCell<Dynamic>>`][std::rc::Rc] or [`Arc<RwLock<Dynamic>>`][std::sync::Arc]
/// depending on the `sync` feature.
///
/// Not available under `no_closure`.
///
/// Shared [`Dynamic`] values are relatively cheap to clone as they simply increment the
/// reference counts.
///
/// Shared [`Dynamic`] values can be converted seamlessly to and from ordinary [`Dynamic`]
/// values.
///
/// If the [`Dynamic`] value is already shared, this method returns itself.
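    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; assumes default features,
    /// i.e. `no_closure` is disabled):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let x = Dynamic::from(42_i64).into_shared();
    /// assert!(x.is_shared());
    /// assert_eq!(x.as_int(), Ok(42));
    /// ```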
#[cfg(not(feature = "no_closure"))]
#[inline]
#[must_use]
pub fn into_shared(self) -> Self {
let _access = self.access_mode();
match self.0 {
Union::Shared(..) => self,
_ => Self(Union::Shared(
crate::Locked::new(self).into(),
DEFAULT_TAG_VALUE,
_access,
)),
}
}
    /// Convert the [`Dynamic`] value into a specific type.
///
/// Casting to a [`Dynamic`] just returns as is, but if it contains a shared value,
/// it is cloned into a [`Dynamic`] with a normal value.
///
/// Returns [`None`] if types mismatched.
///
/// # Panics or Deadlocks
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
///
    /// These normally shouldn't occur since most operations in Rhai are single-threaded.
///
/// # Example
///
/// ```
/// use rhai::Dynamic;
///
/// let x = Dynamic::from(42_u32);
///
/// assert_eq!(x.try_cast::<u32>().expect("x should be u32"), 42);
/// ```
#[inline]
#[must_use]
pub fn try_cast<T: Any>(self) -> Option<T> {
// Coded this way in order to maximally leverage potentials for dead-code removal.
#[cfg(not(feature = "no_closure"))]
if let Union::Shared(..) = self.0 {
return self.flatten().try_cast::<T>();
}
reify!(self, |v: T| return Some(v));
match self.0 {
Union::Int(v, ..) => reify!(v => Option<T>),
#[cfg(not(feature = "no_float"))]
Union::Float(v, ..) => reify!(*v => Option<T>),
#[cfg(feature = "decimal")]
Union::Decimal(v, ..) => reify!(*v => Option<T>),
Union::Bool(v, ..) => reify!(v => Option<T>),
Union::Str(v, ..) => {
reify!(v, |v: T| Some(v), || reify!(v.to_string() => Option<T>))
}
Union::Char(v, ..) => reify!(v => Option<T>),
#[cfg(not(feature = "no_index"))]
Union::Array(v, ..) => reify!(*v => Option<T>),
#[cfg(not(feature = "no_index"))]
Union::Blob(v, ..) => reify!(*v => Option<T>),
#[cfg(not(feature = "no_object"))]
Union::Map(v, ..) => reify!(*v => Option<T>),
Union::FnPtr(v, ..) => reify!(*v => Option<T>),
#[cfg(not(feature = "no_std"))]
Union::TimeStamp(v, ..) => reify!(*v => Option<T>),
Union::Unit(v, ..) => reify!(v => Option<T>),
Union::Variant(v, ..) => (*v).as_boxed_any().downcast().ok().map(|x| *x),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => unreachable!("Union::Shared case should be already handled"),
}
}
/// Convert the [`Dynamic`] value into a specific type.
///
/// Casting to a [`Dynamic`] just returns as is, but if it contains a shared value,
/// it is cloned into a [`Dynamic`] with a normal value.
///
/// # Panics or Deadlocks
///
/// Panics if the cast fails (e.g. the type of the actual value is not the same as the specified type).
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
///
    /// These normally shouldn't occur since most operations in Rhai are single-threaded.
///
/// # Example
///
/// ```
/// use rhai::Dynamic;
///
/// let x = Dynamic::from(42_u32);
///
/// assert_eq!(x.cast::<u32>(), 42);
/// ```
#[inline]
#[must_use]
pub fn cast<T: Any + Clone>(self) -> T {
#[cfg(not(feature = "no_closure"))]
let self_type_name = if self.is_shared() {
// Avoid panics/deadlocks with shared values
"<shared>"
} else {
self.type_name()
};
#[cfg(feature = "no_closure")]
let self_type_name = self.type_name();
self.try_cast::<T>()
.unwrap_or_else(|| panic!("cannot cast {} to {}", self_type_name, type_name::<T>()))
}
/// Clone the [`Dynamic`] value and convert it into a specific type.
///
/// Casting to a [`Dynamic`] just returns as is, but if it contains a shared value,
/// it is cloned into a [`Dynamic`] with a normal value.
///
/// # Panics or Deadlocks
///
/// Panics if the cast fails (e.g. the type of the actual value is not the
/// same as the specified type).
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
///
    /// These normally shouldn't occur since most operations in Rhai are single-threaded.
///
/// # Example
///
/// ```
/// use rhai::Dynamic;
///
/// let x = Dynamic::from(42_u32);
/// let y = &x;
///
/// assert_eq!(y.clone_cast::<u32>(), 42);
/// ```
#[inline(always)]
#[must_use]
pub fn clone_cast<T: Any + Clone>(&self) -> T {
self.flatten_clone().cast::<T>()
}
/// Flatten the [`Dynamic`] and clone it.
///
/// If the [`Dynamic`] is not a shared value, it returns a cloned copy.
///
/// If the [`Dynamic`] is a shared value, it returns a cloned copy of the shared value.
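    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; assumes `no_closure` is
    /// disabled so that `into_shared` is available):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let shared = Dynamic::from(42_i64).into_shared();
    /// let flat = shared.flatten_clone();
    /// assert!(!flat.is_shared());
    /// assert_eq!(flat.as_int(), Ok(42));
    /// ```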
#[inline]
#[must_use]
pub fn flatten_clone(&self) -> Self {
match self.0 {
#[cfg(not(feature = "no_closure"))]
#[cfg(not(feature = "sync"))]
Union::Shared(ref cell, ..) => cell.borrow().clone(),
#[cfg(not(feature = "no_closure"))]
#[cfg(feature = "sync")]
Union::Shared(ref cell, ..) => cell.read().unwrap().clone(),
_ => self.clone(),
}
}
/// Flatten the [`Dynamic`].
///
/// If the [`Dynamic`] is not a shared value, it returns itself.
///
/// If the [`Dynamic`] is a shared value, it returns the shared value if there are no
/// outstanding references, or a cloned copy.
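    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; assumes `no_closure` is
    /// disabled):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let shared = Dynamic::from(42_i64).into_shared();
    /// // `shared` holds the only reference, so the inner value is taken back out.
    /// let plain = shared.flatten();
    /// assert!(!plain.is_shared());
    /// assert_eq!(plain.as_int(), Ok(42));
    /// ```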
#[inline]
#[must_use]
    pub fn flatten(self) -> Self {
match self.0 {
#[cfg(not(feature = "no_closure"))]
Union::Shared(cell, ..) => crate::func::native::shared_try_take(cell).map_or_else(
#[cfg(not(feature = "sync"))]
|cell| cell.borrow().clone(),
#[cfg(feature = "sync")]
|cell| cell.read().unwrap().clone(),
#[cfg(not(feature = "sync"))]
|value| value.into_inner(),
#[cfg(feature = "sync")]
|value| value.into_inner().unwrap(),
),
_ => self,
}
}
/// Flatten the [`Dynamic`] in place.
///
/// If the [`Dynamic`] is not a shared value, it does nothing.
///
/// If the [`Dynamic`] is a shared value, it is set to the shared value if there are no
/// outstanding references, or a cloned copy otherwise.
#[inline]
pub(crate) fn flatten_in_place(&mut self) -> &mut Self {
match self.0 {
#[cfg(not(feature = "no_closure"))]
Union::Shared(ref mut cell, ..) => {
let cell = mem::take(cell);
*self = crate::func::native::shared_try_take(cell).map_or_else(
#[cfg(not(feature = "sync"))]
|cell| cell.borrow().clone(),
#[cfg(feature = "sync")]
|cell| cell.read().unwrap().clone(),
#[cfg(not(feature = "sync"))]
|value| value.into_inner(),
#[cfg(feature = "sync")]
|value| value.into_inner().unwrap(),
);
}
_ => (),
}
self
}
/// Is the [`Dynamic`] a shared value that is locked?
///
/// Not available under `no_closure`.
///
/// ## Note
///
/// Under the `sync` feature, shared values use [`RwLock`][std::sync::RwLock] and they are never locked.
/// Access just waits until the [`RwLock`][std::sync::RwLock] is released.
    /// So this method always returns [`false`] under the `sync` feature.
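    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let x = Dynamic::from(42_i64).into_shared();
    /// // Nothing is borrowing the shared value at this point.
    /// assert!(!x.is_locked());
    /// ```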
#[cfg(not(feature = "no_closure"))]
#[inline]
#[must_use]
pub fn is_locked(&self) -> bool {
#[cfg(not(feature = "no_closure"))]
match self.0 {
Union::Shared(ref _cell, ..) => {
#[cfg(not(feature = "sync"))]
return _cell.try_borrow().is_err();
#[cfg(feature = "sync")]
return false;
}
_ => (),
}
false
}
/// Get a reference of a specific type to the [`Dynamic`].
/// Casting to [`Dynamic`] just returns a reference to it.
///
/// Returns [`None`] if the cast fails.
///
/// # Panics or Deadlocks When Value is Shared
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
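    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; `i64` is assumed to be the
    /// default `INT` type):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let x = Dynamic::from(42_i64);
    /// let guard = x.read_lock::<i64>().expect("x should contain an i64");
    /// assert_eq!(*guard, 42);
    /// ```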
#[inline]
#[must_use]
pub fn read_lock<T: Any + Clone>(&self) -> Option<DynamicReadLock<T>> {
match self.0 {
#[cfg(not(feature = "no_closure"))]
Union::Shared(ref cell, ..) => {
#[cfg(not(feature = "sync"))]
let value = cell.borrow();
#[cfg(feature = "sync")]
let value = cell.read().unwrap();
if (*value).type_id() != TypeId::of::<T>()
&& TypeId::of::<Dynamic>() != TypeId::of::<T>()
{
return None;
} else {
return Some(DynamicReadLock(DynamicReadLockInner::Guard(value)));
}
}
_ => (),
}
self.downcast_ref()
.map(DynamicReadLockInner::Reference)
.map(DynamicReadLock)
}
/// Get a mutable reference of a specific type to the [`Dynamic`].
/// Casting to [`Dynamic`] just returns a mutable reference to it.
///
/// Returns [`None`] if the cast fails.
///
/// # Panics or Deadlocks When Value is Shared
///
/// Under the `sync` feature, this call may deadlock, or [panic](https://doc.rust-lang.org/std/sync/struct.RwLock.html#panics-1).
/// Otherwise, this call panics if the data is currently borrowed for write.
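    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; `i64` is assumed to be the
    /// default `INT` type):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let mut x = Dynamic::from(42_i64);
    /// *x.write_lock::<i64>().expect("x should contain an i64") = 123;
    /// assert_eq!(x.as_int(), Ok(123));
    /// ```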
#[inline]
#[must_use]
pub fn write_lock<T: Any + Clone>(&mut self) -> Option<DynamicWriteLock<T>> {
match self.0 {
#[cfg(not(feature = "no_closure"))]
Union::Shared(ref cell, ..) => {
let guard = crate::func::native::locked_write(cell);
if (*guard).type_id() != TypeId::of::<T>()
&& TypeId::of::<Dynamic>() != TypeId::of::<T>()
{
return None;
} else {
return Some(DynamicWriteLock(DynamicWriteLockInner::Guard(guard)));
}
}
_ => (),
}
self.downcast_mut()
.map(DynamicWriteLockInner::Reference)
.map(DynamicWriteLock)
}
/// Get a reference of a specific type to the [`Dynamic`].
/// Casting to [`Dynamic`] just returns a reference to it.
///
/// Returns [`None`] if the cast fails, or if the value is shared.
#[inline]
#[must_use]
pub(crate) fn downcast_ref<T: Any + Clone + ?Sized>(&self) -> Option<&T> {
// Coded this way in order to maximally leverage potentials for dead-code removal.
if TypeId::of::<T>() == TypeId::of::<INT>() {
return match self.0 {
Union::Int(ref v, ..) => v.as_any().downcast_ref::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_float"))]
if TypeId::of::<T>() == TypeId::of::<crate::FLOAT>() {
return match self.0 {
Union::Float(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
#[cfg(feature = "decimal")]
if TypeId::of::<T>() == TypeId::of::<rust_decimal::Decimal>() {
return match self.0 {
Union::Decimal(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<bool>() {
return match self.0 {
Union::Bool(ref v, ..) => v.as_any().downcast_ref::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<ImmutableString>() {
return match self.0 {
Union::Str(ref v, ..) => v.as_any().downcast_ref::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<char>() {
return match self.0 {
Union::Char(ref v, ..) => v.as_any().downcast_ref::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_index"))]
if TypeId::of::<T>() == TypeId::of::<crate::Array>() {
return match self.0 {
Union::Array(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_index"))]
if TypeId::of::<T>() == TypeId::of::<crate::Blob>() {
return match self.0 {
Union::Blob(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_object"))]
if TypeId::of::<T>() == TypeId::of::<crate::Map>() {
return match self.0 {
Union::Map(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<FnPtr>() {
return match self.0 {
Union::FnPtr(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_std"))]
if TypeId::of::<T>() == TypeId::of::<Instant>() {
return match self.0 {
Union::TimeStamp(ref v, ..) => v.as_ref().as_any().downcast_ref::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<()>() {
return match self.0 {
Union::Unit(ref v, ..) => v.as_any().downcast_ref::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<Dynamic>() {
return self.as_any().downcast_ref::<T>();
}
match self.0 {
Union::Variant(ref v, ..) => (***v).as_any().downcast_ref::<T>(),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => None,
_ => None,
}
}
/// Get a mutable reference of a specific type to the [`Dynamic`].
/// Casting to [`Dynamic`] just returns a mutable reference to it.
///
/// Returns [`None`] if the cast fails, or if the value is shared.
#[inline]
#[must_use]
pub(crate) fn downcast_mut<T: Any + Clone>(&mut self) -> Option<&mut T> {
// Coded this way in order to maximally leverage potentials for dead-code removal.
if TypeId::of::<T>() == TypeId::of::<INT>() {
return match self.0 {
Union::Int(ref mut v, ..) => v.as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_float"))]
if TypeId::of::<T>() == TypeId::of::<crate::FLOAT>() {
return match self.0 {
Union::Float(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
#[cfg(feature = "decimal")]
if TypeId::of::<T>() == TypeId::of::<rust_decimal::Decimal>() {
return match self.0 {
Union::Decimal(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<bool>() {
return match self.0 {
Union::Bool(ref mut v, ..) => v.as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<ImmutableString>() {
return match self.0 {
Union::Str(ref mut v, ..) => v.as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<char>() {
return match self.0 {
Union::Char(ref mut v, ..) => v.as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_index"))]
if TypeId::of::<T>() == TypeId::of::<crate::Array>() {
return match self.0 {
Union::Array(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_index"))]
if TypeId::of::<T>() == TypeId::of::<crate::Blob>() {
return match self.0 {
Union::Blob(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_object"))]
if TypeId::of::<T>() == TypeId::of::<crate::Map>() {
return match self.0 {
Union::Map(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<FnPtr>() {
return match self.0 {
Union::FnPtr(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
#[cfg(not(feature = "no_std"))]
if TypeId::of::<T>() == TypeId::of::<Instant>() {
return match self.0 {
Union::TimeStamp(ref mut v, ..) => v.as_mut().as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<()>() {
return match self.0 {
Union::Unit(ref mut v, ..) => v.as_any_mut().downcast_mut::<T>(),
_ => None,
};
}
if TypeId::of::<T>() == TypeId::of::<Dynamic>() {
return self.as_any_mut().downcast_mut::<T>();
}
match self.0 {
Union::Variant(ref mut v, ..) => (***v).as_any_mut().downcast_mut::<T>(),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => None,
_ => None,
}
}
/// Cast the [`Dynamic`] as a unit `()`.
/// Returns the name of the actual type if the cast fails.
#[inline]
pub fn as_unit(&self) -> Result<(), &'static str> {
match self.0 {
Union::Unit(v, ..) => Ok(v),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => self.read_lock().map(|v| *v).ok_or_else(|| self.type_name()),
_ => Err(self.type_name()),
}
}
/// Cast the [`Dynamic`] as the system integer type [`INT`].
/// Returns the name of the actual type if the cast fails.
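    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; `"string"` is assumed to be
    /// the type name this crate reports for string values):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// assert_eq!(Dynamic::from(42_i64).as_int(), Ok(42));
    /// assert_eq!(Dynamic::from("hello").as_int(), Err("string"));
    /// ```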
#[inline]
pub fn as_int(&self) -> Result<INT, &'static str> {
match self.0 {
Union::Int(n, ..) => Ok(n),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => self.read_lock().map(|v| *v).ok_or_else(|| self.type_name()),
_ => Err(self.type_name()),
}
}
/// Cast the [`Dynamic`] as the system floating-point type [`FLOAT`][crate::FLOAT].
/// Returns the name of the actual type if the cast fails.
///
/// Not available under `no_float`.
#[cfg(not(feature = "no_float"))]
#[inline]
pub fn as_float(&self) -> Result<crate::FLOAT, &'static str> {
match self.0 {
Union::Float(n, ..) => Ok(*n),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => self.read_lock().map(|v| *v).ok_or_else(|| self.type_name()),
_ => Err(self.type_name()),
}
}
/// _(decimal)_ Cast the [`Dynamic`] as a [`Decimal`][rust_decimal::Decimal].
/// Returns the name of the actual type if the cast fails.
///
/// Exported under the `decimal` feature only.
#[cfg(feature = "decimal")]
#[inline]
pub fn as_decimal(&self) -> Result<rust_decimal::Decimal, &'static str> {
match self.0 {
Union::Decimal(ref n, ..) => Ok(**n),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => self.read_lock().map(|v| *v).ok_or_else(|| self.type_name()),
_ => Err(self.type_name()),
}
}
/// Cast the [`Dynamic`] as a [`bool`].
/// Returns the name of the actual type if the cast fails.
#[inline]
pub fn as_bool(&self) -> Result<bool, &'static str> {
match self.0 {
Union::Bool(b, ..) => Ok(b),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => self.read_lock().map(|v| *v).ok_or_else(|| self.type_name()),
_ => Err(self.type_name()),
}
}
/// Cast the [`Dynamic`] as a [`char`].
/// Returns the name of the actual type if the cast fails.
#[inline]
pub fn as_char(&self) -> Result<char, &'static str> {
match self.0 {
Union::Char(n, ..) => Ok(n),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => self.read_lock().map(|v| *v).ok_or_else(|| self.type_name()),
_ => Err(self.type_name()),
}
}
/// Cast the [`Dynamic`] as a string slice.
/// Returns the name of the actual type if the cast fails.
///
/// # Panics
///
/// Panics if the value is shared.
#[inline]
pub(crate) fn as_str_ref(&self) -> Result<&str, &'static str> {
match self.0 {
Union::Str(ref s, ..) => Ok(s),
#[cfg(not(feature = "no_closure"))]
Union::Shared(..) => panic!("as_str_ref() cannot be called on shared values"),
_ => Err(self.type_name()),
}
}
/// Convert the [`Dynamic`] into a [`String`].
/// If there are other references to the same string, a cloned copy is returned.
/// Returns the name of the actual type if the cast fails.
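    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let x = Dynamic::from("hello");
    /// assert_eq!(x.into_string(), Ok("hello".to_string()));
    /// ```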
#[inline]
pub fn into_string(self) -> Result<String, &'static str> {
self.into_immutable_string()
.map(ImmutableString::into_owned)
}
/// Convert the [`Dynamic`] into an [`ImmutableString`].
/// Returns the name of the actual type if the cast fails.
#[inline]
pub fn into_immutable_string(self) -> Result<ImmutableString, &'static str> {
match self.0 {
Union::Str(s, ..) => Ok(s),
#[cfg(not(feature = "no_closure"))]
Union::Shared(cell, ..) => {
#[cfg(not(feature = "sync"))]
let value = cell.borrow();
#[cfg(feature = "sync")]
let value = cell.read().unwrap();
match value.0 {
Union::Str(ref s, ..) => Ok(s.clone()),
_ => Err((*value).type_name()),
}
}
_ => Err(self.type_name()),
}
}
/// Convert the [`Dynamic`] into an [`Array`][crate::Array].
/// Returns the name of the actual type if the cast fails.
#[cfg(not(feature = "no_index"))]
#[inline(always)]
pub fn into_array(self) -> Result<crate::Array, &'static str> {
match self.0 {
Union::Array(a, ..) => Ok(*a),
#[cfg(not(feature = "no_closure"))]
Union::Shared(cell, ..) => {
#[cfg(not(feature = "sync"))]
let value = cell.borrow();
#[cfg(feature = "sync")]
let value = cell.read().unwrap();
match value.0 {
Union::Array(ref a, ..) => Ok(a.as_ref().clone()),
_ => Err((*value).type_name()),
}
}
_ => Err(self.type_name()),
}
}
/// Convert the [`Dynamic`] into a [`Vec`].
/// Returns the name of the actual type if any cast fails.
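    ///
    /// # Example
    ///
    /// A minimal sketch (not in the original docs; it builds the array through
    /// the `From<Vec<T>>` conversion defined later in this file):
    ///
    /// ```
    /// use rhai::Dynamic;
    ///
    /// let x: Dynamic = vec![1_i64, 2, 3].into();
    /// assert_eq!(x.into_typed_array::<i64>(), Ok(vec![1, 2, 3]));
    /// ```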
#[cfg(not(feature = "no_index"))]
#[inline(always)]
pub fn into_typed_array<T: Variant + Clone>(self) -> Result<Vec<T>, &'static str> {
match self.0 {
Union::Array(a, ..) => a
.into_iter()
.map(|v| {
#[cfg(not(feature = "no_closure"))]
let typ = if v.is_shared() {
// Avoid panics/deadlocks with shared values
"<shared>"
} else {
v.type_name()
};
#[cfg(feature = "no_closure")]
let typ = v.type_name();
v.try_cast::<T>().ok_or_else(|| typ)
})
.collect(),
Union::Blob(..) if TypeId::of::<T>() == TypeId::of::<u8>() => Ok(self.cast::<Vec<T>>()),
#[cfg(not(feature = "no_closure"))]
Union::Shared(cell, ..) => {
#[cfg(not(feature = "sync"))]
let value = cell.borrow();
#[cfg(feature = "sync")]
let value = cell.read().unwrap();
match value.0 {
Union::Array(ref a, ..) => {
a.iter()
.map(|v| {
#[cfg(not(feature = "no_closure"))]
let typ = if v.is_shared() {
// Avoid panics/deadlocks with shared values
"<shared>"
} else {
v.type_name()
};
#[cfg(feature = "no_closure")]
let typ = v.type_name();
v.read_lock::<T>().ok_or_else(|| typ).map(|v| v.clone())
})
.collect()
}
Union::Blob(..) if TypeId::of::<T>() == TypeId::of::<u8>() => {
Ok((*value).clone().cast::<Vec<T>>())
}
_ => Err((*value).type_name()),
}
}
_ => Err(self.type_name()),
}
}
/// Convert the [`Dynamic`] into a [`Blob`][crate::Blob].
/// Returns the name of the actual type if the cast fails.
#[cfg(not(feature = "no_index"))]
#[inline(always)]
pub fn into_blob(self) -> Result<crate::Blob, &'static str> {
match self.0 {
Union::Blob(a, ..) => Ok(*a),
#[cfg(not(feature = "no_closure"))]
Union::Shared(cell, ..) => {
#[cfg(not(feature = "sync"))]
let value = cell.borrow();
#[cfg(feature = "sync")]
let value = cell.read().unwrap();
match value.0 {
Union::Blob(ref a, ..) => Ok(a.as_ref().clone()),
_ => Err((*value).type_name()),
}
}
_ => Err(self.type_name()),
}
}
}
impl From<()> for Dynamic {
#[inline(always)]
fn from(value: ()) -> Self {
Self(Union::Unit(value, DEFAULT_TAG_VALUE, ReadWrite))
}
}
impl From<bool> for Dynamic {
#[inline(always)]
fn from(value: bool) -> Self {
Self(Union::Bool(value, DEFAULT_TAG_VALUE, ReadWrite))
}
}
impl From<INT> for Dynamic {
#[inline(always)]
fn from(value: INT) -> Self {
Self(Union::Int(value, DEFAULT_TAG_VALUE, ReadWrite))
}
}
#[cfg(not(feature = "no_float"))]
impl From<crate::FLOAT> for Dynamic {
#[inline(always)]
fn from(value: crate::FLOAT) -> Self {
Self(Union::Float(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
}
#[cfg(not(feature = "no_float"))]
impl From<crate::ast::FloatWrapper<crate::FLOAT>> for Dynamic {
#[inline(always)]
fn from(value: crate::ast::FloatWrapper<crate::FLOAT>) -> Self {
Self(Union::Float(value, DEFAULT_TAG_VALUE, ReadWrite))
}
}
#[cfg(feature = "decimal")]
impl From<rust_decimal::Decimal> for Dynamic {
#[inline(always)]
fn from(value: rust_decimal::Decimal) -> Self {
Self(Union::Decimal(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
}
impl From<char> for Dynamic {
#[inline(always)]
fn from(value: char) -> Self {
Self(Union::Char(value, DEFAULT_TAG_VALUE, ReadWrite))
}
}
impl<S: Into<ImmutableString>> From<S> for Dynamic {
#[inline(always)]
fn from(value: S) -> Self {
Self(Union::Str(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
}
impl From<&ImmutableString> for Dynamic {
#[inline(always)]
fn from(value: &ImmutableString) -> Self {
value.clone().into()
}
}
impl FromStr for Dynamic {
type Err = ();
fn from_str(value: &str) -> Result<Self, Self::Err> {
Ok(Self(Union::Str(value.into(), DEFAULT_TAG_VALUE, ReadWrite)))
}
}
#[cfg(not(feature = "no_index"))]
impl<T: Variant + Clone> From<Vec<T>> for Dynamic {
#[inline]
fn from(value: Vec<T>) -> Self {
Self(Union::Array(
Box::new(value.into_iter().map(Dynamic::from).collect()),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
#[cfg(not(feature = "no_index"))]
impl<T: Variant + Clone> From<&[T]> for Dynamic {
#[inline]
fn from(value: &[T]) -> Self {
Self(Union::Array(
Box::new(value.iter().cloned().map(Dynamic::from).collect()),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
#[cfg(not(feature = "no_index"))]
impl<T: Variant + Clone> std::iter::FromIterator<T> for Dynamic {
#[inline]
fn from_iter<X: IntoIterator<Item = T>>(iter: X) -> Self {
Self(Union::Array(
Box::new(iter.into_iter().map(Dynamic::from).collect()),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
#[cfg(not(feature = "no_object"))]
#[cfg(not(feature = "no_std"))]
impl<K: Into<crate::Identifier>, T: Variant + Clone> From<std::collections::HashMap<K, T>>
for Dynamic
{
#[inline]
fn from(value: std::collections::HashMap<K, T>) -> Self {
Self(Union::Map(
Box::new(
value
.into_iter()
.map(|(k, v)| (k.into(), Dynamic::from(v)))
.collect(),
),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
#[cfg(not(feature = "no_object"))]
#[cfg(not(feature = "no_std"))]
impl<K: Into<crate::Identifier>> From<std::collections::HashSet<K>> for Dynamic {
#[inline]
fn from(value: std::collections::HashSet<K>) -> Self {
Self(Union::Map(
Box::new(
value
.into_iter()
.map(|k| (k.into(), Dynamic::UNIT))
.collect(),
),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
#[cfg(not(feature = "no_object"))]
impl<K: Into<crate::Identifier>, T: Variant + Clone> From<std::collections::BTreeMap<K, T>>
for Dynamic
{
#[inline]
fn from(value: std::collections::BTreeMap<K, T>) -> Self {
Self(Union::Map(
Box::new(
value
.into_iter()
.map(|(k, v)| (k.into(), Dynamic::from(v)))
.collect(),
),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
#[cfg(not(feature = "no_object"))]
impl<K: Into<crate::Identifier>> From<std::collections::BTreeSet<K>> for Dynamic {
#[inline]
fn from(value: std::collections::BTreeSet<K>) -> Self {
Self(Union::Map(
Box::new(
value
.into_iter()
.map(|k| (k.into(), Dynamic::UNIT))
.collect(),
),
DEFAULT_TAG_VALUE,
ReadWrite,
))
}
}
impl From<FnPtr> for Dynamic {
#[inline(always)]
fn from(value: FnPtr) -> Self {
Self(Union::FnPtr(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
}
#[cfg(not(feature = "no_std"))]
impl From<Instant> for Dynamic {
#[inline(always)]
fn from(value: Instant) -> Self {
Self(Union::TimeStamp(value.into(), DEFAULT_TAG_VALUE, ReadWrite))
}
}
#[cfg(not(feature = "no_closure"))]
impl From<crate::Shared<crate::Locked<Dynamic>>> for Dynamic {
#[inline(always)]
fn from(value: crate::Shared<crate::Locked<Self>>) -> Self {
Self(Union::Shared(value, DEFAULT_TAG_VALUE, ReadWrite))
}
}
impl From<ExclusiveRange> for Dynamic {
#[inline(always)]
fn from(value: ExclusiveRange) -> Self {
Dynamic::from(value)
}
}
impl From<InclusiveRange> for Dynamic {
#[inline(always)]
fn from(value: InclusiveRange) -> Self {
Dynamic::from(value)
}
}
FileWord.ts
/**
 * @file FileWord file-word
* @author Auto Generated by IconPark
*/
/* tslint:disable: max-line-length */
/* eslint-disable max-len */
import {ISvgIconProps, IconWrapper} from '../runtime';
export default IconWrapper('file-word', (props: ISvgIconProps) => (
'<?xml version="1.0" encoding="UTF-8"?>'
+ '<svg width="' + props.size + '" height="' + props.size + '" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">'
+ '<rect width="48" height="48" fill="white" fill-opacity="0.01"/>'
+ '<path d="M48 0H0V48H48V0Z" fill="white" fill-opacity="0.01"/>'
    + '<path d="M10 4H30L40 14V42C40 43.1046 39.1046 44 38 44H10C8.89543 44 8 43.1046 8 42V6C8 4.89543 8.89543 4 10 4Z" fill="' + props.colors[1] + '" stroke="' + props.colors[0] + '" stroke-width="' + props.strokeWidth + '" stroke-linejoin="' + props.strokeLinejoin + '"/>'
    + '<path d="M16.0083 20L19.0083 34L24.0083 24L29.0083 34L32.0083 20" stroke="' + props.colors[2] + '" stroke-width="' + props.strokeWidth + '" stroke-linecap="' + props.strokeLinecap + '" stroke-linejoin="' + props.strokeLinejoin + '"/>'
    + '</svg>'
));
compressed.py
"""Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
csr_sample_values, csr_row_index, csr_row_slice,
csr_column_index1, csr_column_index2)
from ._index import IndexMixin
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, isintlike, get_index_dtype,
downcast_intp_index, get_sum_dtype, check_shape,
matrix, asmatrix, is_pydata_spmatrix)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row- and column-oriented matrices"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self(arg1)
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self._shape = check_shape(arg1)
M, N = self.shape
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
idx_dtype = get_index_dtype(maxval=max(M, N))
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, idx_dtype)
self.indptr = np.zeros(self._swap((M, N))[0] + 1,
dtype=idx_dtype)
else:
if len(arg1) == 2:
# (data, ij) format
from .coo import coo_matrix
other = self.__class__(coo_matrix(arg1, shape=shape))
self._set_self(other)
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
# Select index dtype large enough to pass array and
# scalar parameters to sparsetools
maxval = None
if shape is not None:
maxval = max(shape)
idx_dtype = get_index_dtype((indices, indptr),
maxval=maxval,
check_contents=True)
self.indices = np.array(indices, copy=copy,
dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=dtype)
else:
raise ValueError("unrecognized {}_matrix "
"constructor usage".format(self.format))
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except Exception:
raise ValueError("unrecognized {}_matrix constructor usage"
"".format(self.format))
from .coo import coo_matrix
self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
# Read matrix dimensions given, if any
if shape is not None:
self._shape = check_shape(shape)
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except Exception:
raise ValueError('unable to infer matrix dimensions')
else:
self._shape = check_shape(self._swap((major_dim,
minor_dim)))
if dtype is not None:
self.data = self.data.astype(dtype, copy=False)
self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self._shape = check_shape(other.shape)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
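
        Examples
        --------
        An illustrative sketch (not part of the original docstring):

        >>> from scipy.sparse import csr_matrix
        >>> A = csr_matrix((3, 4))  # an empty but structurally valid matrix
        >>> A.check_format(full_check=True)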
"""
# use _swap to determine proper bounds
major_name, minor_name = self._swap(('row', 'column'))
major_dim, minor_dim = self._swap(self.shape)
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype ({})"
"".format(self.indptr.dtype.name), stacklevel=3)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype ({})"
"".format(self.indices.dtype.name), stacklevel=3)
idx_dtype = get_index_dtype((self.indptr, self.indices))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
if x != 1:
raise ValueError('data, indices, and indptr should be 1-D')
# check index pointer
if (len(self.indptr) != major_dim + 1):
raise ValueError("index pointer size ({}) should be ({})"
"".format(len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should not exceed "
                             "the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
raise ValueError("{} index values must be < {}"
"".format(minor_name, minor_dim))
if self.indices.min() < 0:
raise ValueError("{} index values must be >= 0"
"".format(minor_name))
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices()
# assert(self.has_sorted_indices())
# TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return self.__class__(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning,
stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.todense() == other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning, stacklevel=3)
# TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other, '_ne_')
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true - res
else:
            return False
    def __ne__(self, other):
        # Scalar other.
        if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is"
" inefficient", SparseEfficiencyWarning, stacklevel=3)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.",
SparseEfficiencyWarning, stacklevel=3)
                all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Pydata sparse other.
elif is_pydata_spmatrix(other):
return NotImplemented
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other, '_ne_')
else:
return True
def _inequality(self, other, op, op_name, bad_scalar_msg):
# Scalar other.
if isscalarlike(other):
if 0 == other and op_name in ('_le_', '_ge_'):
raise NotImplementedError(" >= and <= don't work with 0.")
elif op(0, other):
warn(bad_scalar_msg, SparseEfficiencyWarning)
other_arr = np.empty(self.shape, dtype=np.result_type(other))
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
return self._scalar_binopt(other, op)
# Dense other.
elif isdense(other):
return op(self.todense(), other)
# Sparse other.
elif isspmatrix(other):
# TODO sparse broadcasting
if self.shape != other.shape:
raise ValueError("inconsistent shapes")
elif self.format != other.format:
other = other.asformat(self.format)
if op_name not in ('_ge_', '_le_'):
return self._binopt(other, op_name)
            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "try using <, >, or != instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
return all_true - res
else:
raise ValueError("Operands could not be compared.")
def __lt__(self, other):
return self._inequality(other, operator.lt, '_lt_',
"Comparing a sparse matrix with a scalar "
"greater than zero using < is inefficient, "
"try using >= instead.")
def __gt__(self, other):
return self._inequality(other, operator.gt, '_gt_',
"Comparing a sparse matrix with a scalar "
"less than zero using > is inefficient, "
"try using <= instead.")
def __le__(self, other):
return self._inequality(other, operator.le, '_le_',
"Comparing a sparse matrix with a scalar "
"greater than zero using <= is inefficient, "
"try using > instead.")
def __ge__(self, other):
return self._inequality(other, operator.ge, '_ge_',
"Comparing a sparse matrix with a scalar "
"less than zero using >= is inefficient, "
"try using < instead.")
#################################
# Arithmetic operator overrides #
#################################
def _add_dense(self, other):
if other.shape != self.shape:
raise ValueError('Incompatible shapes.')
dtype = upcast_char(self.dtype.char, other.dtype.char)
order = self._swap('CF')[0]
result = np.array(other, dtype=dtype, order=order, copy=True)
M, N = self._swap(self.shape)
y = result if result.flags.c_contiguous else result.T
csr_todense(M, N, self.indptr, self.indices, self.data, y)
return matrix(result, copy=False)
def _add_sparse(self, other):
return self._binopt(other, '_plus_')
def _sub_sparse(self, other):
return self._binopt(other, '_minus_')
def multiply(self, other):
"""Point-wise multiplication by another matrix, vector, or
scalar.
"""
# Scalar multiplication.
if isscalarlike(other):
return self._mul_scalar(other)
# Sparse matrix or vector.
if isspmatrix(other):
if self.shape == other.shape:
other = self.__class__(other)
return self._binopt(other, '_elmul_')
# Single element.
elif other.shape == (1, 1):
return self._mul_scalar(other.toarray()[0, 0])
elif self.shape == (1, 1):
return other._mul_scalar(self.toarray()[0, 0])
# A row times a column.
elif self.shape[1] == 1 and other.shape[0] == 1:
return self._mul_sparse_matrix(other.tocsc())
elif self.shape[0] == 1 and other.shape[1] == 1:
return other._mul_sparse_matrix(self.tocsc())
# Row vector times matrix. other is a row.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[1], other.shape[1]))
return self._mul_sparse_matrix(other)
# self is a row.
elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[1], self.shape[1]))
return other._mul_sparse_matrix(copy)
# Column vector times matrix. other is a column.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
other = dia_matrix((other.toarray().ravel(), [0]),
shape=(other.shape[0], other.shape[0]))
return other._mul_sparse_matrix(self)
# self is a column.
elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
copy = dia_matrix((self.toarray().ravel(), [0]),
shape=(self.shape[0], self.shape[0]))
return copy._mul_sparse_matrix(other)
else:
raise ValueError("inconsistent shapes")
# Assume other is a dense matrix/array, which produces a single-item
# object array if other isn't convertible to ndarray.
other = np.atleast_2d(other)
if other.ndim != 2:
return np.multiply(self.toarray(), other)
# Single element / wrapped object.
if other.size == 1:
return self._mul_scalar(other.flat[0])
# Fast case for trivial sparse matrix.
elif self.shape == (1, 1):
return np.multiply(self.toarray()[0, 0], other)
from .coo import coo_matrix
ret = self.tocoo()
# Matching shapes.
if self.shape == other.shape:
data = np.multiply(ret.data, other[ret.row, ret.col])
# Sparse row vector times...
elif self.shape[0] == 1:
if other.shape[1] == 1: # Dense column vector.
data = np.multiply(ret.data, other)
elif other.shape[1] == self.shape[1]: # Dense matrix.
data = np.multiply(ret.data, other[:, ret.col])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(np.arange(other.shape[0]), len(ret.row))
col = np.tile(ret.col, other.shape[0])
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(other.shape[0], self.shape[1]),
copy=False)
# Sparse column vector times...
elif self.shape[1] == 1:
if other.shape[0] == 1: # Dense row vector.
data = np.multiply(ret.data[:, None], other)
elif other.shape[0] == self.shape[0]: # Dense matrix.
data = np.multiply(ret.data[:, None], other[ret.row])
else:
raise ValueError("inconsistent shapes")
row = np.repeat(ret.row, other.shape[1])
col = np.tile(np.arange(other.shape[1]), len(ret.col))
return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
shape=(self.shape[0], other.shape[1]),
copy=False)
# Sparse matrix times dense row vector.
elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
data = np.multiply(ret.data, other[:, ret.col].ravel())
# Sparse matrix times dense column vector.
elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
data = np.multiply(ret.data, other[ret.row].ravel())
else:
raise ValueError("inconsistent shapes")
ret.data = data.view(np.ndarray).ravel()
return ret
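# Hedged sketch of the broadcasting cases handled above (added illustration,
# not original scipy source): a dense row vector broadcasts column-wise and
# the result stays sparse (COO).
# >>> import numpy as np
# >>> from scipy.sparse import csr_matrix
# >>> A = csr_matrix(np.array([[1, 0], [0, 2]]))
# >>> A.multiply(np.array([[10, 100]])).toarray()
# array([[ 10,   0],
#        [  0, 200]])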
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
M, N = self.shape
# output array
result = np.zeros(M, dtype=upcast_char(self.dtype.char,
other.dtype.char))
# csr_matvec or csc_matvec
fn = getattr(_sparsetools, self.format + '_matvec')
fn(M, N, self.indptr, self.indices, self.data, other, result)
return result
def _mul_multivector(self, other):
M, N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M, n_vecs),
dtype=upcast_char(self.dtype.char, other.dtype.char))
# csr_matvecs or csc_matvecs
fn = getattr(_sparsetools, self.format + '_matvecs')
fn(M, N, n_vecs, self.indptr, self.indices, self.data,
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
major_axis = self._swap((M, N))[0]
other = self.__class__(other) # convert to this format
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices))
fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
nnz = fn(M, N,
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=nnz)
indptr = np.empty(major_axis + 1, dtype=idx_dtype)
indices = np.empty(nnz, dtype=idx_dtype)
data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
fn = getattr(_sparsetools, self.format + '_matmat')
fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
return self.__class__((data, indices, indptr), shape=(M, N))
def diagonal(self, k=0):
rows, cols = self.shape
if k <= -rows or k >= cols:
raise ValueError("k exceeds matrix dimensions")
fn = getattr(_sparsetools, self.format + "_diagonal")
y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
dtype=upcast(self.dtype))
fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
self.data, y)
return y
diagonal.__doc__ = spmatrix.diagonal.__doc__
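# Hedged usage sketch for diagonal(k) (added illustration):
# >>> from scipy.sparse import csr_matrix
# >>> A = csr_matrix([[1, 2, 0], [0, 3, 4]])
# >>> A.diagonal()     # main diagonal
# array([1, 3])
# >>> A.diagonal(k=1)  # first super-diagonal
# array([2, 4])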
#####################
# Other binary ops #
#####################
def _maximum_minimum(self, other, npop, op_name, dense_check):
if isscalarlike(other):
if dense_check(other):
warn("Taking maximum (minimum) with > 0 (< 0) number results"
" to a dense matrix.", SparseEfficiencyWarning,
stacklevel=3)
other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
other_arr.fill(other)
other_arr = self.__class__(other_arr)
return self._binopt(other_arr, op_name)
else:
self.sum_duplicates()
new_data = npop(self.data, np.asarray(other))
mat = self.__class__((new_data, self.indices, self.indptr),
dtype=new_data.dtype, shape=self.shape)
return mat
elif isdense(other):
return npop(self.todense(), other)
elif isspmatrix(other):
return self._binopt(other, op_name)
else:
raise ValueError("Operands not compatible.")
def maximum(self, other):
return self._maximum_minimum(other, np.maximum,
'_maximum_', lambda x: np.asarray(x) > 0)
maximum.__doc__ = spmatrix.maximum.__doc__
def minimum(self, other):
return self._maximum_minimum(other, np.minimum,
'_minimum_', lambda x: np.asarray(x) < 0)
minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
def sum(self, axis=None, dtype=None, out=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if (not hasattr(self, 'blocksize') and
axis in self._swap(((1, -1), (0, 2)))[0]):
# faster than multiplication for large minor axis in CSC/CSR
res_dtype = get_sum_dtype(self.dtype)
ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
major_index, value = self._minor_reduce(np.add)
ret[major_index] = value
ret = asmatrix(ret)
if axis % 2 == 1:
ret = ret.T
if out is not None and out.shape != ret.shape:
raise ValueError('dimensions do not match')
return ret.sum(axis=(), dtype=dtype, out=out)
# spmatrix will handle the remaining situations when axis
# is in {None, -1, 0, 1}
else:
return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
sum.__doc__ = spmatrix.sum.__doc__
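# Hedged usage sketch (added illustration): for CSR, axis=1 reductions take
# the _minor_reduce fast path below; axis=None falls back to spmatrix.sum.
# >>> from scipy.sparse import csr_matrix
# >>> A = csr_matrix([[1, 0, 2], [0, 3, 0]])
# >>> A.sum()
# 6
# >>> A.sum(axis=1)
# matrix([[3],
#         [3]])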
def _minor_reduce(self, ufunc, data=None):
"""Reduce nonzeros with a ufunc over the minor axis when non-empty
Can be applied to a function of self.data by supplying data parameter.
Warning: this does not call sum_duplicates()
Returns
-------
major_index : array of ints
Major indices where nonzero
value : array of self.dtype
Reduce result for nonzeros in each major_index
"""
if data is None:
data = self.data
major_index = np.flatnonzero(np.diff(self.indptr))
value = ufunc.reduceat(data,
downcast_intp_index(self.indptr[major_index]))
return major_index, value
#######################
# Getting and Setting #
#######################
def _get_intXint(self, row, col):
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data,
major, major + 1, minor, minor + 1)
return data.sum(dtype=self.dtype)
def _get_sliceXslice(self, row, col):
major, minor = self._swap((row, col))
if major.step in (1, None) and minor.step in (1, None):
return self._get_submatrix(major, minor, copy=True)
return self._major_slice(major)._minor_slice(minor)
def _get_arrayXarray(self, row, col):
# inner indexing
idx_dtype = self.indices.dtype
M, N = self._swap(self.shape)
major, minor = self._swap((row, col))
major = np.asarray(major, dtype=idx_dtype)
minor = np.asarray(minor, dtype=idx_dtype)
val = np.empty(major.size, dtype=self.dtype)
csr_sample_values(M, N, self.indptr, self.indices, self.data,
major.size, major.ravel(), minor.ravel(), val)
if major.ndim == 1:
return asmatrix(val)
return self.__class__(val.reshape(major.shape))
def _get_columnXarray(self, row, col):
# outer indexing
major, minor = self._swap((row, col))
return self._major_index_fancy(major)._minor_index_fancy(minor)
def _major_index_fancy(self, idx):
"""Index along the major axis where idx is an array of ints.
"""
idx_dtype = self.indices.dtype
indices = np.asarray(idx, dtype=idx_dtype).ravel()
_, N = self._swap(self.shape)
M = len(indices)
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_index(M, indices, self.indptr, self.indices, self.data,
res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _major_slice(self, idx, copy=False):
"""Index along the major axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(M)
M = len(range(start, stop, step))
new_shape = self._swap((M, N))
if M == 0:
return self.__class__(new_shape)
row_nnz = np.diff(self.indptr)
idx_dtype = self.indices.dtype
res_indptr = np.zeros(M+1, dtype=idx_dtype)
np.cumsum(row_nnz[idx], out=res_indptr[1:])
if step == 1:
all_idx = slice(self.indptr[start], self.indptr[stop])
res_indices = np.array(self.indices[all_idx], copy=copy)
res_data = np.array(self.data[all_idx], copy=copy)
else:
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_row_slice(start, stop, step, self.indptr, self.indices,
self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_index_fancy(self, idx):
"""Index along the minor axis where idx is an array of ints.
"""
idx_dtype = self.indices.dtype
idx = np.asarray(idx, dtype=idx_dtype).ravel()
M, N = self._swap(self.shape)
k = len(idx)
new_shape = self._swap((M, k))
if k == 0:
return self.__class__(new_shape)
# pass 1: count idx entries and compute new indptr
col_offsets = np.zeros(N, dtype=idx_dtype)
res_indptr = np.empty_like(self.indptr)
csr_column_index1(k, idx, M, N, self.indptr, self.indices,
col_offsets, res_indptr)
# pass 2: copy indices/data for selected idxs
col_order = np.argsort(idx).astype(idx_dtype, copy=False)
nnz = res_indptr[-1]
res_indices = np.empty(nnz, dtype=idx_dtype)
res_data = np.empty(nnz, dtype=self.dtype)
csr_column_index2(col_order, col_offsets, len(self.indices),
self.indices, self.data, res_indices, res_data)
return self.__class__((res_data, res_indices, res_indptr),
shape=new_shape, copy=False)
def _minor_slice(self, idx, copy=False):
"""Index along the minor axis where idx is a slice object.
"""
if idx == slice(None):
return self.copy() if copy else self
M, N = self._swap(self.shape)
start, stop, step = idx.indices(N)
N = len(range(start, stop, step))
if N == 0:
return self.__class__(self._swap((M, N)))
if step == 1:
return self._get_submatrix(minor=idx, copy=copy)
# TODO: don't fall back to fancy indexing here
return self._minor_index_fancy(np.arange(start, stop, step))
def _get_submatrix(self, major=None, minor=None, copy=False):
"""Return a submatrix of this matrix.
major, minor: None, int, or slice with step 1
"""
M, N = self._swap(self.shape)
i0, i1 = _process_slice(major, M)
j0, j1 = _process_slice(minor, N)
if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
return self.copy() if copy else self
indptr, indices, data = get_csr_submatrix(
M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
shape = self._swap((i1 - i0, j1 - j0))
return self.__class__((data, indices, indptr), shape=shape,
dtype=self.dtype, copy=False)
def _set_intXint(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray(self, row, col, x):
i, j = self._swap((row, col))
self._set_many(i, j, x)
def _set_arrayXarray_sparse(self, row, col, x):
# clear entries that will be overwritten
self._zero_many(*self._swap((row, col)))
M, N = row.shape # matches col.shape
broadcast_row = M != 1 and x.shape[0] == 1
broadcast_col = N != 1 and x.shape[1] == 1
r, c = x.row, x.col
x = np.asarray(x.data, dtype=self.dtype)
if broadcast_row:
r = np.repeat(np.arange(M), len(r))
c = np.tile(c, M)
x = np.tile(x, M)
if broadcast_col:
r = np.repeat(r, N)
c = np.tile(np.arange(N), len(c))
x = np.repeat(x, N)
# only assign entries in the new sparsity structure
i, j = self._swap((row[r, c], col[r, c]))
self._set_many(i, j, x)
def _setdiag(self, values, k):
if 0 in self.shape:
return
M, N = self.shape
broadcast = (values.ndim == 0)
if k < 0:
if broadcast:
max_index = min(M + k, N)
else:
max_index = min(M + k, N, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
i -= k
else:
if broadcast:
max_index = min(M, N - k)
else:
max_index = min(M, N - k, len(values))
i = np.arange(max_index, dtype=self.indices.dtype)
j = np.arange(max_index, dtype=self.indices.dtype)
j += k
if not broadcast:
values = values[:len(i)]
self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
i = np.array(i, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
j = np.array(j, dtype=self.indices.dtype, copy=False, ndmin=1).ravel()
check_bounds(i, M)
check_bounds(j, N)
return i, j, M, N
def _set_many(self, i, j, x):
"""Sets value at each (i, j) to x
Here (i,j) index major and minor respectively, and must not contain
duplicate entries.
"""
i, j, M, N = self._prepare_indices(i, j)
x = np.array(x, dtype=self.dtype, copy=False, ndmin=1).ravel()
n_samples = x.size
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if -1 not in offsets:
# only affects existing non-zero cells
self.data[offsets] = x
return
else:
warn("Changing the sparsity structure of a {}_matrix is expensive."
" lil_matrix is more efficient.".format(self.format),
SparseEfficiencyWarning, stacklevel=3)
# replace where possible
mask = offsets > -1
self.data[offsets[mask]] = x[mask]
# only insertions remain
mask = ~mask
i = i[mask]
i[i < 0] += M
j = j[mask]
j[j < 0] += N
self._insert_many(i, j, x[mask])
def _zero_many(self, i, j):
"""Sets value at each (i, j) to zero, preserving sparsity structure.
Here (i,j) index major and minor respectively.
"""
i, j, M, N = self._prepare_indices(i, j)
n_samples = len(i)
offsets = np.empty(n_samples, dtype=self.indices.dtype)
ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
if ret == 1:
# rinse and repeat
self.sum_duplicates()
csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
i, j, offsets)
# only assign zeros to the existing sparsity structure
self.data[offsets[offsets > -1]] = 0
def _insert_many(self, i, j, x):
"""Inserts new nonzero at each (i, j) with value x
Here (i,j) index major and minor respectively.
i, j and x must be non-empty, 1d arrays.
Inserts one major group (e.g. all entries in a single row) at a time.
Maintains has_sorted_indices property.
Modifies i, j, x in place.
"""
order = np.argsort(i, kind='mergesort') # stable for duplicates
i = i.take(order, mode='clip')
j = j.take(order, mode='clip')
x = x.take(order, mode='clip')
do_sort = self.has_sorted_indices
# Update index data type
idx_dtype = get_index_dtype((self.indices, self.indptr),
maxval=(self.indptr[-1] + x.size))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
i = np.asarray(i, dtype=idx_dtype)
j = np.asarray(j, dtype=idx_dtype)
# Collate old and new in chunks by major index
indices_parts = []
data_parts = []
ui, ui_indptr = np.unique(i, return_index=True)
ui_indptr = np.append(ui_indptr, len(j))
new_nnzs = np.diff(ui_indptr)
prev = 0
for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
# old entries
start = self.indptr[prev]
stop = self.indptr[ii]
indices_parts.append(self.indices[start:stop])
data_parts.append(self.data[start:stop])
# handle duplicate j: keep last setting
uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
if len(uj) == je - js:
indices_parts.append(j[js:je])
data_parts.append(x[js:je])
else:
indices_parts.append(j[js:je][::-1][uj_indptr])
data_parts.append(x[js:je][::-1][uj_indptr])
new_nnzs[c] = len(uj)
prev = ii
# remaining old entries
start = self.indptr[ii]
indices_parts.append(self.indices[start:])
data_parts.append(self.data[start:])
# update attributes
self.indices = np.concatenate(indices_parts)
self.data = np.concatenate(data_parts)
nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
nnzs[0] = idx_dtype(0)
indptr_diff = np.diff(self.indptr)
indptr_diff[ui] += new_nnzs
nnzs[1:] = indptr_diff
self.indptr = np.cumsum(nnzs, out=nnzs)
if do_sort:
# TODO: only sort where necessary
self.has_sorted_indices = False
self.sort_indices()
self.check_format(full_check=False)
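# Hedged illustration of the insertion path above (added, not original scipy
# source): assigning outside the current sparsity structure goes through
# _insert_many and emits a SparseEfficiencyWarning, while overwriting an
# existing nonzero is a cheap in-place data update.
# >>> from scipy.sparse import csr_matrix
# >>> A = csr_matrix([[1, 0], [0, 2]])
# >>> A[0, 1] = 5   # new nonzero: warning + structural insert
# >>> A[0, 0] = 9   # existing nonzero: no warning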
######################
# Conversion methods #
######################
def tocoo(self, copy=True):
major_dim, minor_dim = self._swap(self.shape)
minor_indices = self.indices
major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
_sparsetools.expandptr(major_dim, self.indptr, major_indices)
row, col = self._swap((major_indices, minor_indices))
from .coo import coo_matrix
return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
dtype=self.dtype)
tocoo.__doc__ = spmatrix.tocoo.__doc__
def toarray(self, order=None, out=None):
if out is None and order is None:
order = self._swap('cf')[0]
out = self._process_toarray_args(order, out)
if not (out.flags.c_contiguous or out.flags.f_contiguous):
raise ValueError('Output array must be C or F contiguous')
# align ideal order with output array order
if out.flags.c_contiguous:
x = self.tocsr()
y = out
else:
x = self.tocsc()
y = out.T
M, N = x._swap(x.shape)
csr_todense(M, N, x.indptr, x.indices, x.data, y)
return out
toarray.__doc__ = spmatrix.toarray.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
M, N = self._swap(self.shape)
_sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
def __get_has_canonical_format(self):
"""Determine whether the matrix has sorted indices and no duplicates
Returns
- True: if the above applies
- False: otherwise
has_canonical_format implies has_sorted_indices, so if the latter flag
is False, so will the former be; if the former is found True, the
latter flag is also set.
"""
# first check to see if result was cached
if not getattr(self, '_has_sorted_indices', True):
# not sorted => not canonical
self._has_canonical_format = False
elif not hasattr(self, '_has_canonical_format'):
self.has_canonical_format = _sparsetools.csr_has_canonical_format(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_canonical_format
def __set_has_canonical_format(self, val):
self._has_canonical_format = bool(val)
if val:
self.has_sorted_indices = True
has_canonical_format = property(fget=__get_has_canonical_format,
fset=__set_has_canonical_format)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format:
return
self.sort_indices()
M, N = self._swap(self.shape)
_sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
self.data)
self.prune() # nnz may have changed
self.has_canonical_format = True
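# Hedged illustration (added, not original source): duplicates typically
# come from COO input; note that tocsr() already sums them during
# conversion, so the explicit call below is a no-op kept for emphasis.
# >>> import numpy as np
# >>> from scipy.sparse import coo_matrix
# >>> A = coo_matrix((np.array([1, 2]), (np.array([0, 0]), np.array([0, 0]))),
# ...                shape=(1, 1)).tocsr()
# >>> A.sum_duplicates()
# >>> A.toarray()
# array([[3]])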
def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
# first check to see if result was cached
if not hasattr(self, '_has_sorted_indices'):
self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
len(self.indptr) - 1, self.indptr, self.indices)
return self._has_sorted_indices
def __set_sorted(self, val):
self._has_sorted_indices = bool(val)
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
# return self.toother().toother()
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if not self.has_sorted_indices:
_sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
self.indices, self.data)
self.has_sorted_indices = True
def prune(self):
"""Remove empty space after all non-zero elements.
"""
major_dim = self._swap(self.shape)[0]
if len(self.indptr) != major_dim + 1:
raise ValueError('index pointer has invalid length')
if len(self.indices) < self.nnz:
raise ValueError('indices array has fewer than nnz elements')
if len(self.data) < self.nnz:
raise ValueError('data array has fewer than nnz elements')
self.indices = _prune_array(self.indices[:self.nnz])
self.data = _prune_array(self.data[:self.nnz])
def resize(self, *shape):
shape = check_shape(shape)
if hasattr(self, 'blocksize'):
bm, bn = self.blocksize
new_M, rm = divmod(shape[0], bm)
new_N, rn = divmod(shape[1], bn)
if rm or rn:
raise ValueError("shape must be divisible into %s blocks. "
"Got %s" % (self.blocksize, shape))
M, N = self.shape[0] // bm, self.shape[1] // bn
else:
new_M, new_N = self._swap(shape)
M, N = self._swap(self.shape)
if new_M < M:
self.indices = self.indices[:self.indptr[new_M]]
self.data = self.data[:self.indptr[new_M]]
self.indptr = self.indptr[:new_M + 1]
elif new_M > M:
self.indptr = np.resize(self.indptr, new_M + 1)
self.indptr[M + 1:].fill(self.indptr[M])
if new_N < N:
mask = self.indices < new_N
if not np.all(mask):
self.indices = self.indices[mask]
self.data = self.data[mask]
major_index, val = self._minor_reduce(np.add, mask)
self.indptr.fill(0)
self.indptr[1:][major_index] = val
np.cumsum(self.indptr, out=self.indptr)
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
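# Hedged usage sketch for resize (added illustration): shrinking truncates
# stored entries in place, growing pads with implicit zeros.
# >>> from scipy.sparse import csr_matrix
# >>> A = csr_matrix([[1, 2], [3, 4]])
# >>> A.resize(1, 2)
# >>> A.toarray()
# array([[1, 2]])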
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self, data, copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data, self.indices.copy(),
self.indptr.copy()),
shape=self.shape,
dtype=data.dtype)
else:
return self.__class__((data, self.indices, self.indptr),
shape=self.shape, dtype=data.dtype)
def _binopt(self, other, op):
"""apply the binary operation fn to two sparse matrices."""
other = self.__class__(other)
# e.g. csr_plus_csr, csr_minus_csr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
maxnnz = self.nnz + other.nnz
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=maxnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(maxnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(maxnnz, dtype=np.bool_)
else:
data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
fn(self.shape[0], self.shape[1],
np.asarray(self.indptr, dtype=idx_dtype),
np.asarray(self.indices, dtype=idx_dtype),
self.data,
np.asarray(other.indptr, dtype=idx_dtype),
np.asarray(other.indices, dtype=idx_dtype),
other.data,
indptr, indices, data)
A = self.__class__((data, indices, indptr), shape=self.shape)
A.prune()
return A
def _divide_sparse(self, other):
"""
Divide this matrix by a second sparse matrix.
"""
if other.shape != self.shape:
raise ValueError('inconsistent shapes')
r = self._binopt(other, '_eldiv_')
if np.issubdtype(r.dtype, np.inexact):
# Eldiv leaves entries outside the combined sparsity
# pattern empty, so they must be filled manually.
# Everything outside of other's sparsity is NaN, and everything
# inside it is either zero or defined by eldiv.
out = np.empty(self.shape, dtype=self.dtype)
out.fill(np.nan)
row, col = other.nonzero()
out[row, col] = 0
r = r.tocoo()
out[r.row, r.col] = r.data
out = matrix(out)
else:
# integer types go with nan <-> 0
out = r
return out
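# Hedged illustration of the NaN fill implemented above (added, not original
# source): entries of self that fall outside other's sparsity divide by an
# implicit zero and become nan.
# >>> from scipy.sparse import csr_matrix
# >>> A = csr_matrix([[1., 0.], [0., 2.]])
# >>> B = csr_matrix([[2., 0.], [0., 0.]])
# >>> A / B
# matrix([[0.5, nan],
#         [nan, nan]])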
def _process_slice(sl, num):
if sl is None:
i0, i1 = 0, num
elif isinstance(sl, slice):
i0, i1, stride = sl.indices(num)
if stride != 1:
raise ValueError('slicing with step != 1 not supported')
i0 = min(i0, i1) # give an empty slice when i0 > i1
elif isintlike(sl):
if sl < 0:
sl += num
i0, i1 = sl, sl + 1
if i0 < 0 or i1 > num:
raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
(i0, i1, num))
else:
raise TypeError('expected slice or scalar')
return i0, i1 | def __ne__(self, other):
# Scalar other. |
integ.job-poller.ts | import * as sfn from '@aws-cdk/aws-stepfunctions';
import * as cdk from '@aws-cdk/core';
import * as tasks from '../lib';
class JobPollerStack extends cdk.Stack {
constructor(scope: cdk.App, id: string, props: cdk.StackProps = {}) {
super(scope, id, props);
const submitJobActivity = new sfn.Activity(this, 'SubmitJob');
const checkJobActivity = new sfn.Activity(this, 'CheckJob');
const submitJob = new sfn.Task(this, 'Submit Job', {
task: new tasks.InvokeActivity(submitJobActivity),
resultPath: '$.guid',
});
const waitX = new sfn.Wait(this, 'Wait X Seconds', { time: sfn.WaitTime.secondsPath('$.wait_time') });
const getStatus = new sfn.Task(this, 'Get Job Status', {
task: new tasks.InvokeActivity(checkJobActivity),
inputPath: '$.guid',
resultPath: '$.status',
});
const isComplete = new sfn.Choice(this, 'Job Complete?');
const jobFailed = new sfn.Fail(this, 'Job Failed', {
cause: 'AWS Batch Job Failed',
error: 'DescribeJob returned FAILED',
});
const finalStatus = new sfn.Task(this, 'Get Final Job Status', {
task: new tasks.InvokeActivity(checkJobActivity),
inputPath: '$.guid',
});
const chain = sfn.Chain
.start(submitJob)
.next(waitX)
.next(getStatus)
.next(isComplete
.when(sfn.Condition.stringEquals('$.status', 'FAILED'), jobFailed)
.when(sfn.Condition.stringEquals('$.status', 'SUCCEEDED'), finalStatus) |
new sfn.StateMachine(this, 'StateMachine', {
definition: chain,
timeout: cdk.Duration.seconds(30),
});
}
}
const app = new cdk.App();
new JobPollerStack(app, 'aws-stepfunctions-integ');
app.synth(); | .otherwise(waitX)); |
udac_example_dag.py | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
StageToRedshiftOperator,
LoadFactOperator,
LoadDimensionOperator,
DataQualityOperator,
)
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from helpers import SqlQueries
REDSHIFT_CONN_ID = 'redshift'
AWS_CREDENTIALS_ID = 'aws_credentials'
INPUT_BUCKET = 'udacity-dend'
default_args = {
'owner': 'udacity',
'start_date': datetime(2018, 11, 1),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(seconds=300),
}
fact_table_name_and_query = ('songplays', SqlQueries.songplay_table_insert)
dim_tables_name_to_query = {
'users': SqlQueries.user_table_insert,
'songs': SqlQueries.song_table_insert,
'artists': SqlQueries.artist_table_insert,
'time': SqlQueries.time_table_insert,
}
dag = DAG(
'udac_example_dag',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='@hourly',
catchup=False,
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_tables = PostgresOperator(
task_id='Create_tables',
dag=dag,
postgres_conn_id=REDSHIFT_CONN_ID,
sql='/create_tables.sql',
)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='log_data/{execution_date.year}/{execution_date.month}/',
table='staging_events',
file_format="JSON 's3://udacity-dend/log_json_path.json'",
provide_context=True,
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='song_data',
table='staging_songs',
file_format="JSON 'auto'",
provide_context=True,
)
load_songplays_table = LoadFactOperator(
task_id=f'Load_{fact_table_name_and_query[0]}_fact_table',
dag=dag,
table=fact_table_name_and_query[0],
conn_id=REDSHIFT_CONN_ID,
sql=fact_table_name_and_query[1],
)
dim_operators = [
LoadDimensionOperator(
task_id=f'Load_{dim_table_name}_dim_table',
dag=dag,
table=dim_table_name,
conn_id=REDSHIFT_CONN_ID,
sql=dim_query,
)
for dim_table_name, dim_query in dim_tables_name_to_query.items()
]
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
tables=list(dim_tables_name_to_query) + [fact_table_name_and_query[0]],
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> create_tables
create_tables >> [stage_events_to_redshift, stage_songs_to_redshift]
[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table | load_songplays_table >> dim_operators
dim_operators + [load_songplays_table] >> run_quality_checks
run_quality_checks >> end_operator |
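# Hedged equivalence sketch (added illustration): the list-based >> fan-out
# above is shorthand for wiring each dimension task individually, i.e.:
# for dim_op in dim_operators:
#     load_songplays_table >> dim_op
#     dim_op >> run_quality_checks
# load_songplays_table >> run_quality_checks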
|
trace_service_grpc_transport.py | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.trace_v2.proto import tracing_pb2_grpc
class TraceServiceGrpcTransport(object):
"""gRPC transport class providing stubs for
google.devtools.cloudtrace.v2 TraceService API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/trace.append",
)
def __init__(
self, channel=None, credentials=None, address="cloudtrace.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(address=address, credentials=credentials)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {"trace_service_stub": tracing_pb2_grpc.TraceServiceStub(channel)}
@classmethod
def create_channel(cls, address="cloudtrace.googleapis.com:443", credentials=None):
|
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def batch_write_spans(self):
"""Return the gRPC stub for :meth:`TraceServiceClient.batch_write_spans`.
Sends new spans to new or existing traces. You cannot update
existing spans.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["trace_service_stub"].BatchWriteSpans
@property
def create_span(self):
"""Return the gRPC stub for :meth:`TraceServiceClient.create_span`.
Creates a new span.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["trace_service_stub"].CreateSpan
| """Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES
) |
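# Hedged usage sketch (added, not part of the original module): the transport
# is normally built indirectly by TraceServiceClient, but it can be
# instantiated directly when a custom channel or address is needed.
# >>> transport = TraceServiceGrpcTransport(
# ...     address="cloudtrace.googleapis.com:443")
# >>> transport.batch_write_spans  # bound gRPC stub method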
contact.helper.ts | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance | * with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import {ContactDetail} from '../services/domain/contact/contact-detail.model';
export function getContactDetailValueByType(contactDetails: ContactDetail[], type: string): string {
const items = contactDetails.filter(contact => contact.type === type);
return items.length ? items[0].value : '';
} | |
Button.tsx | import { FC, MouseEventHandler } from 'react'
import styles from './Button.module.scss'
type ButtonPropsType = {
action?: MouseEventHandler<HTMLButtonElement>
}
const Button: FC<ButtonPropsType> = ({ children, action }) => {
return (
<button onClick={action} className={styles.btn}>
{children}
</button>
) | }
export default Button |
|
__init__.py | """
This module provides data loaders and transformers for popular vision datasets.
"""
from .mscoco import COCOSegmentation
from .cityscapes import CitySegmentation
from .ade import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .sbu_shadow import SBUSegmentation
from .ycb import YCBSegmentation
from .robocup import RobocupSegmentation
datasets = {
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation, | 'robocup': RobocupSegmentation,
}
def get_segmentation_dataset(name, **kwargs):
"""Segmentation Datasets"""
return datasets[name.lower()](**kwargs) | 'coco': COCOSegmentation,
'citys': CitySegmentation,
'sbu': SBUSegmentation,
'ycb': YCBSegmentation, |
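# Hedged usage sketch (added illustration; the keyword arguments depend on
# each dataset class and are assumptions here):
# >>> train_set = get_segmentation_dataset('pascal_voc', split='train')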
Shape and Reshape.py | import numpy as np
class Main:
def __init__(self):
self.li = list(map(int, input().split()))
self.np_li = np.array(self.li)
| if __name__ == '__main__':
obj = Main()
obj.output() | def output(self):
print(np.reshape(self.np_li, (3,3)))
|
kendo.culture.sms-FI.min.js | /**
* Kendo UI v2018.1.117 (http://www.telerik.com/kendo-ui)
* Copyright 2018 Telerik AD. All rights reserved.
*
* Kendo UI commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-complete
* If you do not own a commercial license, this file shall be governed by the trial license terms.
|
*/
!function(n){"function"==typeof define&&define.amd?define(["kendo.core.min"],n):n()}(function(){!function(n,m){kendo.cultures["sms-FI"]={name:"sms-FI",numberFormat:{pattern:["-n"],decimals:2,",":" ",".":",",groupSize:[3],percent:{pattern:["-n %","n %"],decimals:2,",":" ",".":",",groupSize:[3],symbol:"%"},currency:{name:"Euro",abbr:"EUR",pattern:["-n $","n $"],decimals:2,",":" ",".":",",groupSize:[3],symbol:"€"}},calendars:{standard:{days:{names:["pâ´sspei´vv","vuõssargg","mââibargg","seärad","neljdpei´vv","piâtnâc","sue´vet"],namesAbbr:["pâ","vu","mâ","se","ne","pi","su"],namesShort:["pâ","v","m","s","n","pi","s"]},months:{names:["ođđee´jjmään","tä´lvvmään","pâ´zzlâšttam-mään","njuhččmään","vue´ssmään","ǩie´ssmään","suei´nnmään","på´rǧǧmään","čõhččmään","kålggmään","skamm-mään","rosttovmään"],namesAbbr:["ođđee´jjmään","tä´lvvmään","pâ´zzlâšttam-mään","njuhččmään","vue´ssmään","ǩie´ssmään","suei´nnmään","på´rǧǧmään","čõhččmään","kålggmään","skamm-mään","rosttovmään"]},AM:[""],PM:[""],patterns:{d:"d.M.yyyy",D:"MMMM d'. p. 'yyyy",F:"MMMM d'. p. 'yyyy H:mm:ss",g:"d.M.yyyy H:mm",G:"d.M.yyyy H:mm:ss",m:"MMMM d'. p. '",M:"MMMM d'. p. '",s:"yyyy'-'MM'-'dd'T'HH':'mm':'ss",t:"H:mm",T:"H:mm:ss",u:"yyyy'-'MM'-'dd HH':'mm':'ss'Z'",y:"MMMM yyyy",Y:"MMMM yyyy"},"/":".",":":":",firstDay:1}}}}(this)});
//# sourceMappingURL=kendo.culture.sms-FI.min.js.map | |
studentController.js | const Student = require("../Models/student");
const {
createStudentSchema,
createEnquirySchema,
createEnrollmentSchema,
updateStudentSchema,
} = require("../validators/studentValidator");
// @route POST api/admin/students
// @desc Create a student
// @access Private
exports.createStudent = async (req, res) => {
try {
const { error: validationError } = createStudentSchema.validate(req.body);
if (validationError) {
return res.status(400).json({ err: validationError.details[0].message });
}
const { email, phoneNumber, enquiryStatus, enrollmentStatus } = req.body;
if (await Student.findOne({ phoneNumber })) {
return res.status(400).json({ err: "Student already exists" });
}
if (email) {
if (await Student.findOne({ email })) {
return res.status(400).json({ err: "Student already exists" });
}
}
let options = { ...req.body };
if (enquiryStatus) {
options = {
...options,
isEnquiryActive: true,
enquiryDate: new Date(),
};
}
if (enrollmentStatus) {
options = {
...options,
isEnrollmentActive: true,
enrollmentDate: new Date(),
};
}
const student = new Student(options);
await student.save();
return res.status(200).json(student);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route POST api/admin/students
// @desc Create a student enquiry
// @access Private
exports.createEnquiry = async (req, res) => {
try {
const { error: validationError } = createEnquirySchema.validate(req.body);
if (validationError) {
console.log("inside validation error");
return res.status(400).json({ err: validationError.details[0].message });
}
const { email, phoneNumber } = req.body;
if (await Student.findOne({ phoneNumber })) {
return res.status(400).json({ err: "Student already exists" });
}
if (email) {
if (await Student.findOne({ email })) {
return res.status(400).json({ err: "Student already exists" });
}
}
const student = new Student({
...req.body,
enquiryStatus: true,
isEnquiryActive: true,
enquiryDate: new Date(),
});
await student.save();
return res.status(200).json(student);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route POST api/admin/students
// @desc Create enrollment for a student
// @access Private
exports.createEnrollment = async (req, res) => {
try {
const { error: validationError } = createEnrollmentSchema.validate(
req.body
);
if (validationError) {
return res.status(400).json({ err: validationError.details[0].message });
}
const { email, phoneNumber } = req.body;
if (await Student.findOne({ phoneNumber })) {
return res.status(400).json({ err: "Student already exists" });
}
if (email) {
if (await Student.findOne({ email })) {
return res.status(400).json({ err: "Student already exists" });
}
}
const student = new Student({
...req.body,
enrollmentStatus: true,
isEnrollmentActive: true,
enrollmentDate: new Date(),
});
await student.save();
return res.status(200).json(student);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc Find a student by phone number
// @access Private
exports.findStudent = async (req, res) => {
try {
const student = await Student.findOne({
phoneNumber: req.params.phoneNumber,
});
if (!student) {
return res.status(404).json({ err: "Student not found" });
}
return res.status(200).json(student);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route PUT api/admin/students
// @desc update the details of a student
// @access Private
exports.updateStudent = async (req, res) => {
try {
const { error: validationError } = updateStudentSchema.validate(req.body);
if (validationError) {
return res.status(400).json({ err: validationError.details[0].message });
}
const {
name,
email,
phoneNumber,
enquiryStatus,
isEnquiryActive,
enquiryDate,
enrollmentStatus,
isEnrollmentActive,
enrollmentDate,
year,
course,
college,
offeredFees,
actualFees,
} = req.body;
const student = await Student.findOne({ _id: req.params.id });
if (!student) {
return res.status(404).json({ err: "Student not found" });
}
if (name) student.name = name;
if (email) student.email = email;
if (phoneNumber) student.phoneNumber = phoneNumber;
if (req.body.hasOwnProperty("enquiryStatus")) {
student.enquiryStatus = enquiryStatus;
if (enquiryStatus === true) {
console.log("Inside condition");
student.isEnquiryActive = true;
student.enquiryDate = new Date();
} else {
student.isEnquiryActive = false;
}
}
if (req.body.hasOwnProperty("enrollmentStatus")) {
student.enrollmentStatus = enrollmentStatus;
if (enrollmentStatus === true) {
student.isEnrollmentActive = true;
student.enrollmentDate = new Date();
} else {
student.isEnrollmentActive = false;
}
}
if (isEnquiryActive) student.isEnquiryActive = isEnquiryActive;
if (isEnrollmentActive) student.isEnrollmentActive = isEnrollmentActive;
if (enquiryDate) student.enquiryDate = enquiryDate;
if (enrollmentDate) student.enrollmentDate = enrollmentDate;
if (year) student.year = year;
if (course) student.course = course;
if (college) student.college = college;
if (offeredFees) student.offeredFees = offeredFees;
if (actualFees) student.actualFees = actualFees;
await student.save();
return res.json(student);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route DELETE api/admin/students
// @desc delete a student entry
// @access Private
exports.deleteStudent = async (req, res) => {
try {
await Student.findOneAndDelete(
{ phoneNumber: req.params.phoneNumber }, | return res.status(200).json(deletedStudent);
} else {
return res.status(400).json({ err: "Something went wrong" });
}
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get list of all the students
// @access Private
exports.listStudents = async (req, res) => {
try {
const listOfStudents = await Student.find((err) => {
if (err) {
console.log(err);
return res.status(400).json({ err: "Something went wrong" });
}
}).sort({ enquiryDate: -1, enrollmentDate: -1 });
return res.status(200).json(listOfStudents);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get list of all the students enquiries
// @access Private
exports.listEnquiries = async (req, res) => {
try {
const enquiryFilter = { enquiryStatus: true };
const listOfEnquiries = await Student.find(enquiryFilter, (err) => {
if (err) {
return res.status(400).json({
err: "Something went wrong",
});
}
}).sort({ enquiryDate: -1 });
return res.status(200).json(listOfEnquiries);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get list of all the enrolled students
// @access Private
exports.listEnrollments = async (req, res) => {
try {
const enrollmentFilter = { enrollmentStatus: true };
const listOfEnrollments = await Student.find(enrollmentFilter, (err) => {
if (err) {
return res.status(400).json({
err: "Something went wrong",
});
}
}).sort({ enrollmentDate: -1 });
return res.status(200).json(listOfEnrollments);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires of last 30 days
// @access Private
exports.listEnquiriesInThirtyDays = async (req, res) => {
try {
const currentDate = new Date();
let thirtyDaysBefore = new Date();
thirtyDaysBefore = thirtyDaysBefore.setDate(
thirtyDaysBefore.getDate() - 30
);
const fetchQuery = {
enquiryDate: { $gte: thirtyDaysBefore, $lte: currentDate },
enquiryStatus: true,
};
const listOfEnquiriesInThirtyDays = await Student.find(
fetchQuery,
(err) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
}
).sort({ enquiryDate: -1 });
return res.status(200).json(listOfEnquiriesInThirtyDays);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires of last 90 days
// @access Private
exports.listEnquiriesInNinetyDays = async (req, res) => {
try {
const currentDate = new Date();
let ninetyDaysBefore = new Date();
ninetyDaysBefore = ninetyDaysBefore.setDate(
ninetyDaysBefore.getDate() - 90
);
const fetchQuery = {
enquiryDate: { $gte: ninetyDaysBefore, $lte: currentDate },
enquiryStatus: true,
};
const listOfEnquiriesInNinetyDays = await Student.find(
fetchQuery,
(err) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
}
).sort({ enquiryDate: -1 });
return res.status(200).json(listOfEnquiriesInNinetyDays);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires in last year
// @access Private
exports.listEnquiriesInLastYear = async (req, res) => {
try {
const currentDate = new Date();
let oneYearBefore = new Date();
oneYearBefore = oneYearBefore.setDate(oneYearBefore.getDate() - 365);
const fetchQuery = {
enquiryDate: { $gte: oneYearBefore, $lte: currentDate },
enquiryStatus: true,
};
const listOfEnquiriesInLastYear = await Student.find(fetchQuery, (err) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
}).sort({ enquiryDate: -1 });
return res.status(200).json(listOfEnquiriesInLastYear);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enrollments of last 30 days
// @access Private
exports.listEnrollmentsInThirtyDays = async (req, res) => {
try {
const currentDate = new Date();
let thirtyDaysBefore = new Date();
thirtyDaysBefore = thirtyDaysBefore.setDate(
thirtyDaysBefore.getDate() - 30
);
const fetchQuery = {
enrollmentDate: { $gte: thirtyDaysBefore, $lte: currentDate },
enrollmentStatus: true,
};
const listOfEnrollmentsInThirtyDays = await Student.find(
fetchQuery,
(err) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
}
).sort({ enrollmentDate: -1 });
return res.status(200).json(listOfEnrollmentsInThirtyDays);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires of last 90 days
// @access Private
exports.listEnrollmentsInNinetyDays = async (req, res) => {
try {
const currentDate = new Date();
let ninetyDaysBefore = new Date();
ninetyDaysBefore = ninetyDaysBefore.setDate(
ninetyDaysBefore.getDate() - 90
);
const fetchQuery = {
enrollmentDate: { $gte: ninetyDaysBefore, $lte: currentDate },
enrollmentStatus: true,
};
const listOfEnrollmentsInNinetyDays = await Student.find(
fetchQuery,
(err) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
}
).sort({ enrollmentDate: -1 });
return res.status(200).json(listOfEnrollmentsInNinetyDays);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires in last year
// @access Private
exports.listEnrollmentsInLastYear = async (req, res) => {
try {
const currentDate = new Date();
let oneYearBefore = new Date();
oneYearBefore = oneYearBefore.setDate(oneYearBefore.getDate() - 365);
const fetchQuery = {
enrollmentDate: { $gte: oneYearBefore, $lte: currentDate },
enrollmentStatus: true,
};
const listOfEnrollmentsInLastYear = await Student.find(
fetchQuery,
(err) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
}
).sort({ enrollmentDate: -1 });
return res.status(200).json(listOfEnrollmentsInLastYear);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get number of all the students
// @access Private
exports.totalStudents = async (req, res) => {
try {
await Student.countDocuments((err, numberOfStudents) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfStudents);
});
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get number of all the enquiries
// @access Private
exports.totalEnquiries = async (req, res) => {
try {
await Student.countDocuments(
{ enquiryStatus: true },
(err, numberOfEnquiries) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnquiries);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get number of all the enrollments
// @access Private
exports.totalEnrollments = async (req, res) => {
try {
await Student.countDocuments(
{ enrollmentStatus: true },
(err, numberOfEnrollments) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnrollments);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires of last 30 days
// @access Private
exports.totalEnquiriesInThirtyDays = async (req, res) => {
try {
const currentDate = new Date();
let thirtyDaysBefore = new Date();
thirtyDaysBefore = thirtyDaysBefore.setDate(
thirtyDaysBefore.getDate() - 30
);
const fetchQuery = {
enquiryDate: { $gte: thirtyDaysBefore, $lte: currentDate },
enquiryStatus: true,
};
await Student.countDocuments(
fetchQuery,
(err, numberOfEnquiriesInThirtyDays) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnquiriesInThirtyDays);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enquires of last 90 days
// @access Private
exports.totalEnquiriesInNinetyDays = async (req, res) => {
try {
const currentDate = new Date();
let ninetyDaysBefore = new Date();
ninetyDaysBefore = ninetyDaysBefore.setDate(
ninetyDaysBefore.getDate() - 90
);
const fetchQuery = {
enquiryDate: { $gte: ninetyDaysBefore, $lte: currentDate },
enquiryStatus: true,
};
await Student.countDocuments(
fetchQuery,
(err, numberOfEnquiriesInNinetyDays) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnquiriesInNinetyDays);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route POST api/admin/students
// @desc get all the enquires of year
// @access Private
exports.totalEnquiriesInLastYear = async (req, res) => {
try {
const currentDate = new Date();
let oneYearBefore = new Date();
oneYearBefore = oneYearBefore.setDate(oneYearBefore.getDate() - 365);
const fetchQuery = {
enquiryDate: { $gte: oneYearBefore, $lte: currentDate },
enquiryStatus: true,
};
await Student.countDocuments(
fetchQuery,
(err, numberOfEnquiriesInLastYear) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnquiriesInLastYear);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enrollments in last 30 days
// @access Private
exports.totalEnrollmentsInThirtyDays = async (req, res) => {
try {
const currentDate = new Date();
let thirtyDaysBefore = new Date();
thirtyDaysBefore = thirtyDaysBefore.setDate(
thirtyDaysBefore.getDate() - 30
);
const fetchQuery = {
enrollmentDate: { $gte: thirtyDaysBefore, $lte: currentDate },
enrollmentStatus: true,
};
await Student.countDocuments(
fetchQuery,
(err, numberOfEnrollmentsInLastThirtyDays) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnrollmentsInLastThirtyDays);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enrollments in last 90 days
// @access Private
exports.totalEnrollmentsInNinetyDays = async (req, res) => {
try {
const currentDate = new Date();
let ninetyDaysBefore = new Date();
ninetyDaysBefore = ninetyDaysBefore.setDate(
ninetyDaysBefore.getDate() - 90
);
const fetchQuery = {
enrollmentDate: { $gte: ninetyDaysBefore, $lte: currentDate },
enrollmentStatus: true,
};
await Student.countDocuments(
fetchQuery,
(err, numberOfEnrollmentsInLastNinetyDays) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnrollmentsInLastNinetyDays);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
};
// @route GET api/admin/students
// @desc get all the enrollments in last one year
// @access Private
exports.totalEnrollmentsInLastYear = async (req, res) => {
try {
const currentDate = new Date();
let oneYearBefore = new Date();
oneYearBefore = oneYearBefore.setDate(oneYearBefore.getDate() - 365);
const fetchQuery = {
enrollmentDate: { $gte: oneYearBefore, $lte: currentDate },
enrollmentStatus: true,
};
await Student.countDocuments(
fetchQuery,
(err, numberOfEnrollmentsInLastYear) => {
if (err) {
return res.status(400).json({ err: "Something went wrong" });
}
return res.status(200).json(numberOfEnrollmentsInLastYear);
}
);
} catch (err) {
return res.status(500).json({ err: "Internal Server Error" });
}
}; | (err, deletedStudent) => {
if (!err || null !== deletedStudent) { |
gunicorn_application.py | """Top-level gunicorn application for `routemaster serve`."""
from typing import Callable
import gunicorn.app.base
from routemaster.utils import WSGICallable
class GunicornWSGIApplication(gunicorn.app.base.BaseApplication):
"""gunicorn application for routemaster."""
def __init__(
self,
app: WSGICallable,
*,
bind: str,
debug: bool,
workers: int,
post_fork: Callable[[], None],
) -> None:
|
def load_config(self) -> None:
"""
Load gunicorn configuration settings.
Rather than grab these from a file we instead just set them to their
known values inline.
"""
self.cfg.set('bind', self.bind)
self.cfg.set('workers', self.workers)
self.cfg.set('post_fork', lambda server, workers: self.post_fork())
if self.debug:
self.cfg.set('reload', True)
self.cfg.set('accesslog', '-')
def load(self) -> WSGICallable:
"""
Load gunicorn WSGI callable.
Luckily little loading is needed since this is available inline.
"""
if self.debug:
# Inline import so we don't depend on this in production.
import werkzeug.debug
return werkzeug.debug.DebuggedApplication(
self.application,
evalex=False,
)
return self.application
| self.application = app
self.bind = bind
self.debug = debug
self.workers = workers
self.post_fork = post_fork
super().__init__() |
unionWith.js | var convert = require('./convert'),
func = convert('unionWith', require('../unionWith'));
func.placeholder = require('./placeholder'); | module.exports = func; |
|
gerrit_test.go | // +build unit
package gits_test
import (
"fmt"
"net/http"
"net/http/httptest"
"sort"
"testing"
"github.com/jenkins-x/jx/pkg/auth"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/util"
"github.com/stretchr/testify/suite"
)
type GerritProviderTestSuite struct {
suite.Suite
mux *http.ServeMux
server *httptest.Server
provider *gits.GerritProvider
}
var gerritRouter = util.Router{
"/a/projects/": util.MethodMap{
"GET": "list-projects.json",
},
"/a/projects/test-org%2Ftest-user/": util.MethodMap{
"PUT": "create-project.json",
},
}
func (suite *GerritProviderTestSuite) SetupSuite() {
suite.mux = http.NewServeMux()
suite.server = httptest.NewServer(suite.mux)
suite.Require().NotNil(suite.server)
for path, methodMap := range gerritRouter {
suite.mux.HandleFunc(path, util.GetMockAPIResponseFromFile("test_data/gerrit", methodMap))
}
as := auth.AuthServer{
URL: suite.server.URL,
Name: "Test Server",
Kind: "Oauth2",
CurrentUser: "test-user",
}
ua := auth.UserAuth{
Username: "test-user",
ApiToken: "0123456789abdef",
}
gitter := gits.NewGitCLI()
provider, err := gits.NewGerritProvider(&as, &ua, gitter)
suite.Require().NotNil(provider)
suite.Require().Nil(err)
var ok bool
suite.provider, ok = provider.(*gits.GerritProvider)
suite.Require().True(ok)
suite.Require().NotNil(suite.provider)
suite.Require().NotNil(suite.provider.Client)
}
func (suite *GerritProviderTestSuite) TestListRepositories() {
repos, err := suite.provider.ListRepositories("")
suite.Require().NotNil(repos)
suite.Require().Nil(err)
suite.Require().Equal(4, len(repos))
var repoNames []string
for _, repo := range repos {
repoNames = append(repoNames, repo.Name)
}
sort.Strings(repoNames)
suite.Require().Equal("All-Projects", repoNames[0])
suite.Require().Equal("All-Users", repoNames[1])
suite.Require().Equal("RecipeBook", repoNames[2])
suite.Require().Equal("testing", repoNames[3])
}
func (suite *GerritProviderTestSuite) TestCreateRepository() {
repo, err := suite.provider.CreateRepository("test-org", "test-user", false)
suite.T().Log(err)
suite.Require().NotNil(repo)
suite.Require().Nil(err)
suite.Require().Equal("test-org/test-repo", repo.Name)
suite.Require().Equal(fmt.Sprintf("%s/test-org/test-repo", suite.server.URL), repo.CloneURL)
suite.Require().Equal(fmt.Sprintf("%s:test-org/test-repo", suite.server.URL), repo.SSHURL)
}
func TestGerritProviderTestSuite(t *testing.T) |
func (suite *GerritProviderTestSuite) TearDownSuite() {
suite.server.Close()
}
| {
if testing.Short() {
t.Skip("skipping GerritProviderTestSuite in short mode")
} else {
suite.Run(t, new(GerritProviderTestSuite))
}
} |
app-routing.module.ts | import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { PageNotFoundComponent } from './components/page-not-found/page-not-found.component';
import { ProfileComponent } from './components/profile/profile.component';
const routes: Routes = [
{ path: '', redirectTo: 'home', pathMatch: 'full'},
{ path: 'home', component: ProfileComponent },
{ path: '**', component: PageNotFoundComponent},
];
@NgModule({
imports: [RouterModule.forRoot(routes)],
exports: [RouterModule]
})
export class | { }
| AppRoutingModule |
cmd.go | package cli
import (
"github.com/filecoin-project/boost/cli/ctxutil"
cliutil "github.com/filecoin-project/boost/cli/util"
lcliutil "github.com/filecoin-project/lotus/cli/util"
)
| var DaemonContext = ctxutil.DaemonContext | var GetBoostAPI = cliutil.GetBoostAPI
var GetFullNodeAPI = lcliutil.GetFullNodeAPI
var ReqContext = ctxutil.ReqContext |
client.go | package client
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
)
type Client struct {
Host string
url *url.URL
}
type Job struct {
Id int `json:"id,omitempty"`
QueueName string `json:"queue_name,omitempty"`
Category string `json:"category,omitempty"`
Url string `json:"url"`
Payload interface{} `json:"payload"`
RunAfter int `json:"run_after"`
MaxRetries int `json:"max_retries"`
RetryDelay int `json:"retry_delay"`
Timeout int `json:"timeout"`
}
type Queue struct {
PollingInterval int `json:"polling_interval"`
MaxWorkers int `json:"max_workers"`
}
type Routing struct {
QueueName string `json:"queue_name"`
JobCategory string `json:"job_category,omitempty"`
}
func New(host string, concurrent int) *Client {
u, err := url.Parse(fmt.Sprintf("http://%s", host))
if err != nil {
return nil
}
http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = concurrent
return &Client{Host: host, url: u}
}
func (c *Client) CreateJobIfNotExist(name string, maxWorkers int) error {
c.url.Path = fmt.Sprintf("/queue/%s", name)
req, err := http.NewRequest(http.MethodGet, c.url.String(), nil)
if err != nil {
return err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound |
c.url.Path = fmt.Sprintf("/routing/%s", name)
req, err = http.NewRequest(http.MethodGet, c.url.String(), nil)
if err != nil {
return err
}
res, err = http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
if err := c.createRouting(name); err != nil {
return err
}
}
return nil
}
func (c *Client) Enqueue(name string, j *Job) (*Job, error) {
var result Job
if err := c.call(http.MethodPost, fmt.Sprintf("/job/%s", name), j, &result); err != nil {
return nil, err
}
return &result, nil
}
func (c *Client) createQueue(name string, q *Queue) error {
return c.call(http.MethodPut, fmt.Sprintf("/queue/%s", name), q, &Queue{})
}
func (c *Client) createRouting(name string) error {
return c.call(http.MethodPut, fmt.Sprintf("/routing/%s", name), &Routing{QueueName: name}, &Routing{})
}
func (c *Client) call(method string, path string, body interface{}, result interface{}) error {
c.url.Path = path
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(body); err != nil {
return err
}
req, err := http.NewRequest(method, c.url.String(), &buf)
if err != nil {
return err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode == http.StatusBadRequest {
return errors.New("failed create queue")
}
if err := json.NewDecoder(res.Body).Decode(result); err != nil {
return err
}
return nil
}
| {
if err := c.createQueue(name, &Queue{PollingInterval: 100, MaxWorkers: maxWorkers}); err != nil {
return err
}
} |
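For orientation, a minimal, hypothetical usage sketch of the Client above; the import path, host, queue name, and worker URL are assumptions for illustration, not part of the source:

package main

import (
	"log"

	client "example.com/jobqueue/client" // hypothetical import path for the package above
)

func main() {
	// Assumed server address; New returns nil if the host cannot be parsed.
	c := client.New("localhost:8080", 10)
	if c == nil {
		log.Fatal("invalid host")
	}
	// Ensure the queue and its routing exist before enqueueing.
	if err := c.CreateJobIfNotExist("mailer", 4); err != nil {
		log.Fatal(err)
	}
	// Enqueue a job; the payload is delivered to the worker at Url.
	job, err := c.Enqueue("mailer", &client.Job{
		Url:        "http://worker.local/send",
		Payload:    map[string]string{"to": "[email protected]"},
		MaxRetries: 3,
		RetryDelay: 60,
		Timeout:    30,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("enqueued job %d on queue %s", job.Id, job.QueueName)
}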
system.schema.js | const Ajv = require("../../wrapper/ajv-wrapper");
const PLANET_SCHEMA = {
type: "object",
properties: {
localeName: { type: "string" },
resources: { type: "integer" },
influence: { type: "integer" },
destroyed: { type: "boolean", default: false },
trait: {
type: "array",
items: { enum: ["cultural", "hazardous", "industrial"] },
},
tech: {
type: "array",
items: { enum: ["yellow", "red", "blue", "green"] },
},
legendary: { type: "boolean" },
legendaryCard: { type: "string" },
// Override default position/size.
position: {
type: "object",
properties: {
x: { type: "number" },
y: { type: "number" },
},
},
radius: { type: "number" },
},
required: ["localeName", "resources", "influence"],
};
const HYPERLANE_SCHEMA = {
type: "array",
items: {
type: "array",
items: { enum: [0, 1, 2, 3, 4, 5] },
uniqueItems: true,
},
maxItems: 6,
minItems: 6,
};
const WORMHOLE_SCHEMA = {
type: "array",
items: { enum: ["alpha", "beta", "gamma", "delta"] },
};
const SYSTEM_SCHEMA = {
type: "object",
properties: {
tile: { type: "integer" },
source: { type: "string" },
home: { type: "boolean" },
planets: { type: "array", items: PLANET_SCHEMA },
wormholes: WORMHOLE_SCHEMA,
wormholesFaceDown: WORMHOLE_SCHEMA,
anomalies: {
type: "array",
items: {
enum: ["asteroid field", "supernova", "nebula", "gravity rift"],
},
},
offMap: { type: "boolean" },
hyperlane: { type: "boolean" },
hyperlaneFaceUp: HYPERLANE_SCHEMA,
hyperlaneFaceDown: HYPERLANE_SCHEMA,
},
required: ["tile", "source"],
};
// Lazy instantiate on first use.
let _systemValidator = false;
/**
* Static class for validating raw system against schema.
*/
class | {
constructor() {
throw new Error("Static only");
}
/**
* Validate the schema; reports errors through the callback rather than throwing.
*
* @param {object} system attributes
* @param {function} onError - takes the error as single argument
* @returns {boolean} true if valid
*/
static validate(system, onError) {
if (!_systemValidator) {
_systemValidator = new Ajv({ useDefaults: true }).compile(
SYSTEM_SCHEMA
);
}
if (!_systemValidator(system)) {
(onError ? onError : console.error)(_systemValidator.errors);
return false;
}
return true;
}
}
module.exports = {
SystemSchema,
};
| SystemSchema |
useHashTable.ts | import * as anchor from "@project-serum/anchor";
import { useState } from "react";
import toast from "react-hot-toast";
import { MetadataProgram, Metadata } from "@metaplex/js";
const rpcHost = "https://still-solitary-paper.solana-mainnet.quiknode.pro/3556f36b7113ada207f0bc78ef72f446f1f3ecdf/";
const connection = new anchor.web3.Connection(rpcHost);
export const MAX_NAME_LENGTH = 32;
export const MAX_URI_LENGTH = 200;
export const MAX_SYMBOL_LENGTH = 10;
export const MAX_CREATOR_LEN = 32 + 1 + 1;
export async function | (
hash: string,
metadataEnabled?: boolean
): Promise<any[]> {
const metadataAccounts = await MetadataProgram.getProgramAccounts(
connection,
{
filters: [
{
memcmp: {
offset:
1 +
32 +
32 +
4 +
MAX_NAME_LENGTH +
4 +
MAX_URI_LENGTH +
4 +
MAX_SYMBOL_LENGTH +
2 +
1 +
4 +
0 * MAX_CREATOR_LEN,
bytes: hash,
},
},
],
}
);
const mintHashes: any = [];
for (let index = 0; index < metadataAccounts.length; index++) {
const account = metadataAccounts[index];
const accountInfo: any = await connection.getParsedAccountInfo(
account.pubkey
);
const metadata: any = new Metadata(hash.toString(), accountInfo.value);
if (metadataEnabled) mintHashes.push(metadata.data);
else mintHashes.push(metadata.data.mint);
}
return mintHashes;
}
export function useHashTable(candyMachineId: string, metadataEnabled: boolean) {
const [hashTable, setHashTable] = useState<any[]>([]);
const [isLoading, setIsLoading] = useState(false);
const getHashTable = async () => {
if (!candyMachineId || !candyMachineId.length) {
toast.error("Please type the Candy Machine ID in the input box.");
return;
}
try {
setIsLoading(true);
const data = await fetchHashTable(candyMachineId, metadataEnabled);
setHashTable(data);
if (data.length === 0)
toast.success(
"Zero mint hashes have been found so far for this candy machine."
);
} catch (error) {
console.error(error);
toast.error("An error happened! Please try again later!");
}
setIsLoading(false);
};
return { hashTable, isLoading, getHashTable };
}
| fetchHashTable |
docker_cli_logs_test.go | package main
import (
"fmt"
"io"
"os/exec"
"regexp"
"strings"
"time"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/go-check/check"
"github.com/gotestyourself/gotestyourself/icmd"
)
// This used to work; it tests a log of PageSize-1 (gh#4851)
func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) {
testLogsContainerPagination(c, 32767)
}
// Regression test: When going over the PageSize, it used to panic (gh#4851)
func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) {
testLogsContainerPagination(c, 32768)
}
// Regression test: When going much over the PageSize, it used to block (gh#4851)
func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) {
testLogsContainerPagination(c, 33000)
}
func testLogsContainerPagination(c *check.C, testLen int) {
out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen))
id := strings.TrimSpace(out)
dockerCmd(c, "wait", id)
out, _ = dockerCmd(c, "logs", id)
c.Assert(out, checker.HasLen, testLen+1)
}
func (s *DockerSuite) TestLogsTimestamps(c *check.C) {
testLen := 100
out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen))
id := strings.TrimSpace(out)
dockerCmd(c, "wait", id)
out, _ = dockerCmd(c, "logs", "-t", id)
lines := strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, testLen+1)
ts := regexp.MustCompile(`^.* `)
for _, l := range lines {
if l != "" {
_, err := time.Parse(jsonmessage.RFC3339NanoFixed+" ", ts.FindString(l))
c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l))
// ensure we have padded 0's
c.Assert(l[29], checker.Equals, uint8('Z'))
}
}
}
func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) {
msg := "stderr_log"
out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined()
id := strings.TrimSpace(out)
cli.DockerCmd(c, "wait", id)
cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{
Out: "",
Err: msg,
})
}
func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) {
// TODO Windows: Needs investigation why this fails. Obtained string includes
// a bunch of ANSI escape sequences before the "stderr_log" message.
testRequires(c, DaemonIsLinux)
msg := "stderr_log"
out := cli.DockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)).Combined()
id := strings.TrimSpace(out)
cli.DockerCmd(c, "wait", id)
cli.DockerCmd(c, "logs", id).Assert(c, icmd.Expected{
Out: msg,
Err: "",
})
}
func (s *DockerSuite) TestLogsTail(c *check.C) {
testLen := 100
out := cli.DockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)).Combined()
id := strings.TrimSpace(out)
cli.DockerCmd(c, "wait", id)
out = cli.DockerCmd(c, "logs", "--tail", "0", id).Combined()
lines := strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, 1)
out = cli.DockerCmd(c, "logs", "--tail", "5", id).Combined()
lines = strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, 6)
out = cli.DockerCmd(c, "logs", "--tail", "99", id).Combined()
lines = strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, 100)
out = cli.DockerCmd(c, "logs", "--tail", "all", id).Combined()
lines = strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, testLen+1)
out = cli.DockerCmd(c, "logs", "--tail", "-1", id).Combined()
lines = strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, testLen+1)
out = cli.DockerCmd(c, "logs", "--tail", "random", id).Combined()
lines = strings.Split(out, "\n")
c.Assert(lines, checker.HasLen, testLen+1)
}
func (s *DockerSuite) TestLogsFollowStopped(c *check.C) {
dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello")
id := getIDByName(c, "test")
logsCmd := exec.Command(dockerBinary, "logs", "-f", id)
c.Assert(logsCmd.Start(), checker.IsNil)
errChan := make(chan error)
go func() {
errChan <- logsCmd.Wait()
close(errChan)
}()
select {
case err := <-errChan:
c.Assert(err, checker.IsNil)
case <-time.After(30 * time.Second):
c.Fatal("Following logs is hanged")
}
}
func (s *DockerSuite) TestLogsSince(c *check.C) {
name := "testlogssince"
dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done")
out, _ := dockerCmd(c, "logs", "-t", name)
log2Line := strings.Split(strings.Split(out, "\n")[1], " ")
t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp log2 is written
c.Assert(err, checker.IsNil)
since := t.Unix() + 1 // add 1s so log1 & log2 doesn't show up
out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name)
// Skip 2 seconds
unexpected := []string{"log1", "log2"}
for _, v := range unexpected {
c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since))
}
// Test to make sure a bad since format is caught by the client
out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name)
c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server"))
// Test with default value specified and parameter omitted
expected := []string{"log1", "log2", "log3"}
for _, cmd := range [][]string{
{"logs", "-t", name},
{"logs", "-t", "--since=0", name},
} {
result := icmd.RunCommand(dockerBinary, cmd...)
result.Assert(c, icmd.Success)
for _, v := range expected {
c.Assert(result.Combined(), checker.Contains, v)
}
}
}
func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) {
// TODO Windows TP5 - Figure out why this test is so flakey. Disabled for now.
testRequires(c, DaemonIsLinux)
name := "testlogssincefuturefollow"
out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`)
// Extract one timestamp from the log file to give us a starting point for
// our `--since` argument. Because the log producer runs in the background,
// we need to check repeatedly for some output to be produced.
var timestamp string
for i := 0; i != 100 && timestamp == ""; i++ {
if out, _ = dockerCmd(c, "logs", "-t", name); out == "" {
time.Sleep(time.Millisecond * 100) // Retry
} else {
timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0]
}
}
c.Assert(timestamp, checker.Not(checker.Equals), "")
t, err := time.Parse(time.RFC3339Nano, timestamp)
c.Assert(err, check.IsNil)
since := t.Unix() + 2
out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name)
c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log"))
lines := strings.Split(strings.TrimSpace(out), "\n")
for _, v := range lines {
ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0])
c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v))
c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. since=%v logdate=%v", since, ts))
}
}
// Regression test for #8832
func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) {
// TODO Windows: Fix this test for TP5.
testRequires(c, DaemonIsLinux)
expected := 150000
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", fmt.Sprintf("usleep 600000; yes X | head -c %d", expected))
id := strings.TrimSpace(out)
stopSlowRead := make(chan bool)
go func() {
dockerCmd(c, "wait", id)
stopSlowRead <- true
}()
logCmd := exec.Command(dockerBinary, "logs", "-f", id)
stdout, err := logCmd.StdoutPipe()
c.Assert(err, checker.IsNil)
c.Assert(logCmd.Start(), checker.IsNil)
// First read slowly
bytes1, err := ConsumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead)
c.Assert(err, checker.IsNil)
// After the container has finished we can continue reading fast
bytes2, err := ConsumeWithSpeed(stdout, 32*1024, 0, nil)
c.Assert(err, checker.IsNil)
c.Assert(logCmd.Wait(), checker.IsNil)
actual := bytes1 + bytes2
c.Assert(actual, checker.Equals, expected)
}
// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping
// for interval duration. Returns total read bytes. Send true to the
// stop channel to return before reading to EOF on the reader.
func | (reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
buffer := make([]byte, chunkSize)
for {
var readBytes int
readBytes, err = reader.Read(buffer)
n += readBytes
if err != nil {
if err == io.EOF {
err = nil
}
return
}
select {
case <-stop:
return
case <-time.After(interval):
}
}
}
func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done")
id := strings.TrimSpace(out)
c.Assert(waitRun(id), checker.IsNil)
nroutines, err := getGoroutineNumber()
c.Assert(err, checker.IsNil)
cmd := exec.Command(dockerBinary, "logs", "-f", id)
r, w := io.Pipe()
cmd.Stdout = w
c.Assert(cmd.Start(), checker.IsNil)
// Make sure pipe is written to
chErr := make(chan error)
go func() {
b := make([]byte, 1)
_, err := r.Read(b)
chErr <- err
}()
c.Assert(<-chErr, checker.IsNil)
c.Assert(cmd.Process.Kill(), checker.IsNil)
r.Close()
cmd.Wait()
// NGoroutines is not updated right away, so we need to wait before failing
c.Assert(waitForGoroutines(nroutines), checker.IsNil)
}
func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) {
out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done")
id := strings.TrimSpace(out)
c.Assert(waitRun(id), checker.IsNil)
nroutines, err := getGoroutineNumber()
c.Assert(err, checker.IsNil)
cmd := exec.Command(dockerBinary, "logs", "-f", id)
c.Assert(cmd.Start(), checker.IsNil)
time.Sleep(200 * time.Millisecond)
c.Assert(cmd.Process.Kill(), checker.IsNil)
cmd.Wait()
// NGoroutines is not updated right away, so we need to wait before failing
c.Assert(waitForGoroutines(nroutines), checker.IsNil)
}
func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) {
name := "testlogsnocontainer"
out, _, _ := dockerCmdWithError("logs", name)
message := fmt.Sprintf("No such container: %s\n", name)
c.Assert(out, checker.Contains, message)
}
func (s *DockerSuite) TestLogsWithDetails(c *check.C) {
dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello")
out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test")
logFields := strings.Fields(strings.TrimSpace(out))
c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out))
details := strings.Split(logFields[1], ",")
c.Assert(details, checker.HasLen, 2)
c.Assert(details[0], checker.Equals, "baz=qux")
c.Assert(details[1], checker.Equals, "foo=bar")
}
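As a quick, hypothetical illustration of how ConsumeWithSpeed throttles reads (the input and rates below are assumptions, and the helper above is assumed to be in the same package):

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// 1000 bytes of input, read 100 bytes per chunk with a 10ms pause between
	// chunks; a nil stop channel means ConsumeWithSpeed reads until EOF.
	r := strings.NewReader(strings.Repeat("x", 1000))
	n, err := ConsumeWithSpeed(r, 100, 10*time.Millisecond, nil)
	fmt.Println(n, err) // prints: 1000 <nil>
}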
| ConsumeWithSpeed |
types.go | package types
import (
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/app/internal"
"github.com/docker/app/types/metadata"
"github.com/docker/app/types/settings"
)
// SingleFileSeparator is the separator used in single-file app
const SingleFileSeparator = "\n---\n"
// AppSourceKind represents what format the app was in when read
type AppSourceKind int
const (
// AppSourceSplit represents an Application in multiple file format
AppSourceSplit AppSourceKind = iota
// AppSourceMerged represents an Application in single file format
AppSourceMerged
// AppSourceImage represents an Application pulled from an image
AppSourceImage
// AppSourceURL represents an Application fetched from an URL
AppSourceURL
// AppSourceArchive represents an Application in an archive format
AppSourceArchive
)
// ShouldRunInsideDirectory returns whether the package is run from a directory on disk
func (a AppSourceKind) ShouldRunInsideDirectory() bool {
return a == AppSourceSplit || a == AppSourceImage || a == AppSourceArchive
}
// App represents an app
type App struct {
Name string
Path string
Cleanup func()
Source AppSourceKind
composesContent [][]byte
settingsContent [][]byte
settings settings.Settings
metadataContent []byte
metadata metadata.AppMetadata
attachments []Attachment
}
// Attachment is a summary of an attachment (attached file) stored in the app definition
type Attachment struct {
path string
size int64
}
// Path returns the local file path
func (f *Attachment) Path() string {
return f.path
}
// Size returns the file size in bytes
func (f *Attachment) Size() int64 {
return f.size
}
// Composes returns compose files content
func (a *App) Composes() [][]byte {
return a.composesContent
}
// SettingsRaw returns setting files content
func (a *App) SettingsRaw() [][]byte {
return a.settingsContent
}
// Settings returns map of settings
func (a *App) Settings() settings.Settings {
return a.settings
}
// MetadataRaw returns metadata file content
func (a *App) MetadataRaw() []byte {
return a.metadataContent
}
// Metadata returns the metadata struct
func (a *App) Metadata() metadata.AppMetadata {
return a.metadata
}
// Attachments returns the external files list
func (a *App) Attachments() []Attachment {
return a.attachments
}
// Extract writes the app in the specified folder
func (a *App) Extract(path string) error {
if err := ioutil.WriteFile(filepath.Join(path, internal.MetadataFileName), a.MetadataRaw(), 0644); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(path, internal.ComposeFileName), a.Composes()[0], 0644); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(path, internal.SettingsFileName), a.SettingsRaw()[0], 0644); err != nil {
return err
}
return nil
}
func noop() {}
// NewApp creates a new docker app with the specified path and struct modifiers
func NewApp(path string, ops ...func(*App) error) (*App, error) {
app := &App{
Name: path,
Path: path,
Cleanup: noop,
composesContent: [][]byte{},
settingsContent: [][]byte{},
metadataContent: []byte{},
}
for _, op := range ops {
if err := op(app); err != nil {
return nil, err
}
}
return app, nil
}
// NewAppFromDefaultFiles creates a new docker app using the default files in the specified path.
// If one of those files doesn't exist, it will error out.
func NewAppFromDefaultFiles(path string, ops ...func(*App) error) (*App, error) {
appOps := append([]func(*App) error{
MetadataFile(filepath.Join(path, internal.MetadataFileName)),
WithComposeFiles(filepath.Join(path, internal.ComposeFileName)),
WithSettingsFiles(filepath.Join(path, internal.SettingsFileName)),
WithAttachments(path),
}, ops...)
return NewApp(path, appOps...)
}
// WithName sets the application name
func WithName(name string) func(*App) error {
return func(app *App) error {
app.Name = name
return nil
}
}
// WithPath sets the original path of the app
func WithPath(path string) func(*App) error {
return func(app *App) error {
app.Path = path
return nil
}
}
// WithCleanup sets the cleanup function of the app
func WithCleanup(f func()) func(*App) error {
return func(app *App) error {
app.Cleanup = f
return nil
}
}
// WithSource sets the source of the app
func WithSource(source AppSourceKind) func(*App) error {
return func(app *App) error {
app.Source = source
return nil
}
}
// WithSettingsFiles adds the specified settings files to the app
func WithSettingsFiles(files ...string) func(*App) error {
return settingsLoader(func() ([][]byte, error) { return readFiles(files...) })
}
// WithAttachments adds all local files (exc. main files) to the app
func WithAttachments(rootAppDir string) func(*App) error {
return func(app *App) error {
return filepath.Walk(rootAppDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
localFilePath, err := filepath.Rel(rootAppDir, path)
if err != nil {
return err
}
switch localFilePath {
case internal.ComposeFileName:
case internal.MetadataFileName:
case internal.SettingsFileName:
default:
externalFile := Attachment{
// Standardise on forward slashes for windows boxes
path: filepath.ToSlash(localFilePath),
size: info.Size(),
}
app.attachments = append(app.attachments, externalFile)
}
return nil
})
}
}
// WithSettings adds the specified settings readers to the app
func WithSettings(readers ...io.Reader) func(*App) error {
return settingsLoader(func() ([][]byte, error) { return readReaders(readers...) })
}
func settingsLoader(f func() ([][]byte, error)) func(*App) error {
return func(app *App) error {
settingsContent, err := f()
if err != nil {
return err
}
settingsContents := append(app.settingsContent, settingsContent...)
loaded, err := settings.LoadMultiple(settingsContents)
if err != nil {
return err
}
app.settings = loaded
app.settingsContent = settingsContents
return nil
}
}
// MetadataFile adds the specified metadata file to the app
func MetadataFile(file string) func(*App) error {
return metadataLoader(func() ([]byte, error) { return ioutil.ReadFile(file) })
}
// Metadata adds the specified metadata reader to the app
func Metadata(r io.Reader) func(*App) error {
return metadataLoader(func() ([]byte, error) { return ioutil.ReadAll(r) })
}
func metadataLoader(f func() ([]byte, error)) func(app *App) error {
return func(app *App) error {
d, err := f()
if err != nil {
return err
}
loaded, err := metadata.Load(d)
if err != nil {
return err
}
app.metadata = loaded
app.metadataContent = d
return nil
}
}
// WithComposeFiles adds the specified compose files to the app
func WithComposeFiles(files ...string) func(*App) error {
return composeLoader(func() ([][]byte, error) { return readFiles(files...) })
}
// WithComposes adds the specified compose readers to the app
func WithComposes(readers ...io.Reader) func(*App) error {
return composeLoader(func() ([][]byte, error) { return readReaders(readers...) })
}
func composeLoader(f func() ([][]byte, error)) func(app *App) error {
return func(app *App) error {
composesContent, err := f()
if err != nil {
return err
}
app.composesContent = append(app.composesContent, composesContent...)
return nil
}
}
func readReaders(readers ...io.Reader) ([][]byte, error) {
content := make([][]byte, len(readers))
var errs []string
for i, r := range readers {
d, err := ioutil.ReadAll(r)
if err != nil {
errs = append(errs, err.Error())
continue | }
content[i] = d
}
return content, newErrGroup(errs)
}
func readFiles(files ...string) ([][]byte, error) {
content := make([][]byte, len(files))
var errs []string
for i, file := range files {
d, err := ioutil.ReadFile(file)
if err != nil {
errs = append(errs, err.Error())
continue
}
content[i] = d
}
return content, newErrGroup(errs)
}
func newErrGroup(errs []string) error {
if len(errs) == 0 {
return nil
}
return errors.New(strings.Join(errs, "\n"))
} | |
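A minimal, hypothetical usage sketch of the loaders above; the directory layout and paths are assumptions for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/docker/app/types"
)

func main() {
	// Load an app from a directory containing the default metadata,
	// compose, and settings files.
	app, err := types.NewAppFromDefaultFiles("myapp.dockerapp",
		types.WithSource(types.AppSourceSplit),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer app.Cleanup()
	fmt.Println(app.Name, len(app.Attachments()), "attachments")
	// Write the three main files back out; the target directory is
	// assumed to already exist.
	if err := app.Extract("/tmp/myapp-out"); err != nil {
		log.Fatal(err)
	}
}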
package.js | Package.describe({
name: 'templ:drop',
version: '0.3.3',
summary: 'Really fully customizable and reactive drops, dropdowns, tooltips and dropmenus for Meteor.',
git: 'https://github.com/meteor-templ/drop',
documentation: 'README.md'
});
Package.onUse(function(api) {
api.versionsFrom('1.2.1');
api.use('mongo');
api.use('ecmascript');
api.use('templating');
api.use('less');
api.use('random');
api.use('stevezhu:[email protected]');
api.use('aldeed:[email protected]');
api.use('matb33:[email protected]');
api.use('dburles:[email protected]');
api.use('templ:[email protected]');
api.use('shuttler:[email protected]');
| api.addFiles('watch.js', 'client');
api.addFiles('Drop.html', 'client');
api.addFiles('Drop.js', 'client');
api.addFiles('Drops.html', 'client');
api.addFiles('Drops.js', 'client');
api.addFiles('body.html', 'client');
api.addFiles('helpers.js', 'client');
api.addFiles('Data.js', 'client');
api.addFiles('nesting.js', 'client');
api.addFiles('DropDefault.html', 'client');
api.addFiles('DropDefault.less', 'client');
api.addFiles('DropBootstrap.html', 'client');
api.addFiles('DropDefaultTemplate.html', 'client');
api.addFiles('Drop.less', 'client');
api.addFiles('DropHide.html', 'client');
api.addFiles('DropHide.js', 'client');
api.export('Drop');
}); | api.addFiles('class.js', 'client');
api.addFiles('triggers.js', 'client');
api.addFiles('instances.js', 'client'); |
exporter-spec.js | "use strict";
import { flushIframes, makeRSDoc, makeStandardOps } from "../SpecHelper.js";
describe("Core - exporter", () => {
afterAll(flushIframes);
async function getExportedDoc(ops) {
const doc = await makeRSDoc(ops);
const dataURL = await new Promise(resolve => {
doc.defaultView.require(["core/exporter"], ({ rsDocToDataURL }) =>
resolve(rsDocToDataURL("text/html", doc))
);
});
const docString = decodeURIComponent(dataURL).replace(
"data:text/html;charset=utf-8,",
""
); | it("removes .removeOnSave elements", async () => {
const ops = makeStandardOps();
ops.body = `<div class="removeOnSave" id="this-should-be-removed">this should be removed</div>`;
const doc = await getExportedDoc(ops);
expect(doc.getElementById("this-should-be-removed")).toBeFalsy();
expect(doc.querySelectorAll(".removeOnSave")).toHaveSize(0);
});
it("removes all comments", async () => {
const ops = makeStandardOps();
ops.body = `<div><!-- remove -->PASS <span><!-- remove --></span></div>`;
const doc = await getExportedDoc(ops);
const walker = document.createTreeWalker(doc.body, NodeFilter.SHOW_COMMENT);
const comments = [];
while (walker.nextNode()) {
comments.push(walker.currentNode);
}
expect(comments).toHaveSize(0);
});
it("removes temporary element attributes", async () => {
const body = `
<a
id="ANCHOR"
data-keep-me="FOO"
data-cite="rfc6454#section-3.2"
data-xref-type="dfn"
>origin</a
>
<dfn
id="DFN"
data-keep-me="BAR"
data-cite="?rfc6454"
data-cite-frag="section-3.2"
>origin</dfn
>
`;
const ops = makeStandardOps(null, body);
const doc = await getExportedDoc(ops);
const anchor = doc.getElementById("ANCHOR");
expect(anchor.hasAttribute("data-cite")).toBeFalse();
expect(anchor.hasAttribute("data-cite-frag")).toBeFalse();
expect(anchor.hasAttribute("data-cite-path")).toBeFalse();
expect(anchor.hasAttribute("data-xref-type")).toBeFalse();
expect(anchor.hasAttribute("data-keep-me")).toBeTrue();
const dfn = doc.getElementById("DFN");
expect(dfn.hasAttribute("data-cite")).toBeFalse();
expect(dfn.hasAttribute("data-cite-frag")).toBeFalse();
expect(dfn.hasAttribute("data-cite-path")).toBeFalse();
expect(dfn.hasAttribute("data-keep-me")).toBeTrue();
});
it("moves the W3C style sheet to be last thing in documents head", async () => {
const ops = makeStandardOps();
ops.body = `
<!-- add WebIDL style -->
<pre class="idl">
interface Foo {};
</pre>
<!-- add examples and hljs styles -->
<pre class="example js">
function Foo(){};
</pre>`;
const doc = await getExportedDoc(ops);
const { lastElementChild } = doc.head;
expect(lastElementChild.href).toBe(
"https://www.w3.org/StyleSheets/TR/2016/W3C-ED"
);
});
}); | return new DOMParser().parseFromString(docString, "text/html");
}
|
top.ts | import I18n = require('../interfaces.d');
declare var top:I18n.IFingerprint;
top = { rank: -1,
iso: 'top',
name: 'Totonac, Papantla',
trigrams:
{ akg: 0,
'an ': 1,
lak: 2,
ata: 3,
ama: 4,
aka: 5,
' na': 6,
ala: 7,
kan: 8,
' ka': 9,
ini: 10,
' la': 11,
' ta': 12,
mak: 13,
'ni ': 14,
tam: 15,
'a t': 16,
kat: 17,
tla: 18,
chu: 19,
'ch\'': 20,
'in ': 21,
'n n': 22,
' ch': 23,
kga: 24,
'la ': 25,
lan: 26,
lat: 27,
'\'ak': 28,
ixk: 29,
'h\'i': 30,
kam: 31,
' ni': 32,
' tl': 33,
'\'ix': 34,
nin: 35,
xla: 36,
tal: 37,
xku: 38,
' li': 39,
wak: 40,
win: 41,
' xl': 42,
' ix': 43,
uwi: 44,
awa: 45,
' wa': 46,
'ma ': 47,
kuw: 48,
'hu ': 49,
akc: 50,
min: 51,
kch: 52,
kgs: 53,
'a n': 54,
alh: 55,
'p\'a': 56,
'ap\'': 57,
nak: 58,
gsi: 59,
ika: 60,
'ta ': 61,
ach: 62,
ima: 63,
'tu ': 64,
ami: 65,
map: 66,
wan: 67,
lim: 68,
' tu': 69,
tum: 70,
'na ': 71,
'ak ': 72,
nac: 73,
ani: 74,
'n t': 75,
'kg ': 76,
sin: 77,
ali: 78,
nit: 79,
man: 80,
pul: 81,
'ka ': 82,
'it ': 83,
nka: 84,
nat: 85,
ask: 86,
'n, ': 87,
' ma': 88,
ank: 89,
'n k': 90,
pal: 91,
' pa': 92,
una: 93,
hun: 94,
' pu': 95,
'n l': 96,
'ta\'': 97,
ema: 98,
ula: 99,
ilh: 100,
' ne': 101,
tak: 102,
kak: 103,
nem: 104,
'k\'a': 105, | ila: 108,
nan: 109,
', n': 110,
akx: 111,
'n c': 112,
'um ': 113,
'ti ': 114,
'i l': 115,
'. l': 116,
'u n': 117,
kal: 118,
apa: 119,
'n x': 120,
'g l': 121,
ana: 122,
' ti': 123,
tat: 124,
'u k': 125,
'i t': 126,
tap: 127,
'i n': 128,
'h\'a': 129,
'an,': 130,
'a k': 131,
'u t': 132,
atu: 133,
xta: 134,
'a l': 135,
nal: 136,
ina: 137,
atl: 138,
nil: 139,
tun: 140,
ats: 141,
nik: 142,
mat: 143,
lht: 144,
tsi: 145,
aya: 146,
'a\' ': 147,
gal: 148,
' ak': 149,
'lh ': 150,
'n. ': 151,
uwa: 152,
'ku ': 153,
'k\'i': 154,
'a, ': 155,
ixt: 156,
hta: 157,
' am': 158,
'a\'a': 159,
' xa': 160,
'a c': 161,
kgt: 162,
apu: 163,
lit: 164,
'\'in': 165,
'i, ': 166,
tay: 167,
law: 168,
'a p': 169,
kas: 170,
xtu: 171,
hka: 172,
okg: 173,
lhk: 174,
'wa ': 175,
'u l': 176,
'nu ': 177,
'\'al': 178,
'an.': 179,
taw: 180,
'sk\'': 181,
utu: 182,
gta: 183,
kil: 184,
'a m': 185,
ixl: 186,
'a w': 187,
lhi: 188,
uni: 189,
'\'an': 190,
'\' t': 191,
aku: 192,
' k\'': 193,
puw: 194,
sit: 195,
wal: 196,
lik: 197,
xni: 198,
'a x': 199,
unu: 200,
chi: 201,
'p\'u': 202,
gch: 203,
'ak\'': 204,
kgc: 205,
'k i': 206,
kxt: 207,
'kg\'': 208,
kxn: 209,
glh: 210,
'ka\'': 211,
sku: 212,
iti: 213,
kuj: 214,
kgl: 215,
umi: 216,
kla: 217,
'a. ': 218,
'g\'a': 219,
lip: 220,
mal: 221,
akn: 222,
sta: 223,
akl: 224,
kgo: 225,
xli: 226,
'u x': 227,
'\'ut': 228,
'i i': 229,
ast: 230,
', t': 231,
axt: 232,
ixp: 233,
't, ': 234,
pas: 235,
', p': 236,
'u p': 237,
kgk: 238,
aks: 239,
itu: 240,
nam: 241,
gan: 242,
tas: 243,
'n a': 244,
't\'a': 245,
aki: 246,
'hi ': 247,
'in,': 248,
gap: 249,
kap: 250,
xpu: 251,
paw: 252,
'un ': 253,
mas: 254,
'i p': 255,
't t': 256,
put: 257,
', k': 258,
'a a': 259,
ion: 260,
liw: 261,
uka: 262,
tan: 263,
ita: 264,
'si ': 265,
kni: 266,
lha: 267,
cha: 268,
ekg: 269,
'i x': 270,
'\'un': 271,
'at ': 272,
tka: 273,
yak: 274,
't x': 275,
'k p': 276,
'n p': 277,
ati: 278,
tik: 279,
gax: 280,
xal: 281,
't n': 282,
', l': 283,
sik: 284,
iwa: 285,
atk: 286,
lin: 287,
'. w': 288,
'i a': 289,
lam: 290,
'n w': 291,
ipa: 292,
'i. ': 293,
tac: 294,
one: 295,
iku: 296,
wek: 297,
' un': 298,
'k k': 299,
til: 300 } };
export = top; | 'i k': 106,
'\'at': 107, |
get_status.go | package cmd
import (
"fmt"
"net/http"
"strings"
"github.com/spf13/cobra"
)
var (
getStatusCmd = &cobra.Command{
Use: fmt.Sprintf("%s [%s]", STATUS, strings.ToUpper(SERVICE)),
Short: fmt.Sprintf("Retrieve current %s for a %s", STATUS, SERVICE),
Long: fmt.Sprintf("Retrieve the current %s of a %s in MariaDB SkySQL", STATUS, SERVICE),
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
serviceID := args[0]
var res *http.Response
var err error
res, err = client.ReadStatus(cmd.Context(), serviceID)
checkAndPrint(res, err, STATUS)
},
}
)
func init() {
getCmd.AddCommand(getStatusCmd)
} | ||
cxr.rs | /*!
One-line description.
More detailed description, with
# Example
*/
use crate::forms::library::LibraryName;
use crate::scheme::ID_LIB_SCHEME;
use schemer_lang::eval::environment::Exports;
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Private Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
library_name!(ID_LIB_SCHEME_CXR, "cxr", ID_LIB_SCHEME, scheme_cxr_name);
pub fn | () -> Exports {
Exports::default()
}
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Private Functions
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Modules
// ------------------------------------------------------------------------------------------------
| scheme_cxr_exports |
Ad.js | const mongoose = require('mongoose')
const schema = new mongoose.Schema({
name: { type: String },
items: [{ image: { type: String }, url: { type: String } }],
})
| module.exports = mongoose.model('Ad', schema) |
|
app.go | package failover
import (
"errors"
"github.com/gorilla/mux"
"github.com/siddontang/go/log"
"net"
"net/http"
"sync"
"time"
)
var (
// If failover handler return this error, we will give up future handling.
ErrGiveupFailover = errors.New("Give up failover handling")
)
type BeforeFailoverHandler func(downMaster string) error
type AfterFailoverHandler func(downMaster, newMaster string) error
type App struct {
c *Config
l net.Listener
cluster Cluster
masters *masterFSM
gMutex sync.Mutex
groups map[string]*Group
quit chan struct{}
wg sync.WaitGroup
hMutex sync.Mutex
beforeHandlers []BeforeFailoverHandler
afterHandlers []AfterFailoverHandler
}
func NewApp(c *Config) (*App, error) {
var err error
a := new(App)
a.c = c
a.quit = make(chan struct{})
a.groups = make(map[string]*Group)
a.masters = newMasterFSM()
if c.MaxDownTime <= 0 {
c.MaxDownTime = 3
}
if a.c.CheckInterval <= 0 {
a.c.CheckInterval = 1000
}
if len(c.Addr) > 0 {
a.l, err = net.Listen("tcp", c.Addr)
if err != nil {
return nil, err
}
}
switch c.Broker {
case "raft":
a.cluster, err = newRaft(c, a.masters)
case "zk":
a.cluster, err = newZk(c, a.masters)
default:
log.Infof("unsupported broker %s, use no cluster", c.Broker)
a.cluster = nil
}
if err != nil {
return nil, err
}
return a, nil
}
func (a *App) Close() {
select {
case <-a.quit:
return
default:
break
}
if a.l != nil {
a.l.Close()
}
if a.cluster != nil {
a.cluster.Close()
}
close(a.quit)
a.wg.Wait()
}
func (a *App) Run() {
if a.cluster != nil {
// wait 5s to determine whether we are the leader or not
select {
case <-a.cluster.LeaderCh():
case <-time.After(5 * time.Second):
}
}
if a.c.MastersState == MastersStateNew {
a.setMasters(a.c.Masters)
} else {
a.addMasters(a.c.Masters)
}
go a.startHTTP()
a.wg.Add(1)
t := time.NewTicker(time.Duration(a.c.CheckInterval) * time.Millisecond)
defer func() {
t.Stop()
a.wg.Done()
}()
for {
select {
case <-t.C:
a.check()
case <-a.quit:
return
}
}
}
func (a *App) check() {
if a.cluster != nil && !a.cluster.IsLeader() {
// is not leader, not check
return
}
masters := a.masters.GetMasters()
var wg sync.WaitGroup
for _, master := range masters {
a.gMutex.Lock()
g, ok := a.groups[master]
if !ok {
g = newGroup(master)
a.groups[master] = g
}
a.gMutex.Unlock()
wg.Add(1)
go a.checkMaster(&wg, g)
}
// wait all check done | for master, g := range a.groups {
if !a.masters.IsMaster(master) {
delete(a.groups, master)
g.Close()
}
}
a.gMutex.Unlock()
}
func (a *App) checkMaster(wg *sync.WaitGroup, g *Group) {
defer wg.Done()
// later, add a check strategy, e.g. fail over only after n failed checks within n seconds, etc.
// now only check once.
err := g.Check()
if err == nil {
return
}
oldMaster := g.Master.Addr
if err == ErrNodeType {
log.Errorf("server %s is not master now, we will skip it", oldMaster)
// server is not master, we will not check it.
a.delMasters([]string{oldMaster})
return
}
errNum := time.Duration(g.CheckErrNum.Get())
downTime := errNum * time.Duration(a.c.CheckInterval) * time.Millisecond
if downTime < time.Duration(a.c.MaxDownTime)*time.Second {
log.Warnf("check master %s err %v, down time: %0.2fs, retry check", oldMaster, err, downTime.Seconds())
return
}
// If the check fails, we remove the master from the saved masters and stop checking it.
// This avoids compounding errors if the failover below also fails; at that point,
// handling it manually seems the better way.
// If you want to recheck it, please add it again.
a.delMasters([]string{oldMaster})
log.Errorf("check master %s err %v, do failover", oldMaster, err)
if err := a.onBeforeFailover(oldMaster); err != nil {
//give up failover
return
}
// first elect a candidate
newMaster, err := g.Elect()
if err != nil {
// elect error
return
}
log.Errorf("master is down, elect %s as new master, do failover", newMaster)
// promote the candidate to master
err = g.Promote(newMaster)
if err != nil {
log.Fatalf("do master %s failover err: %v", oldMaster, err)
return
}
a.addMasters([]string{newMaster})
a.onAfterFailover(oldMaster, newMaster)
}
func (a *App) startHTTP() {
if a.l == nil {
return
}
m := mux.NewRouter()
m.Handle("/master", &masterHandler{a})
s := http.Server{
Handler: m,
}
s.Serve(a.l)
}
func (a *App) addMasters(addrs []string) error {
if len(addrs) == 0 {
return nil
}
if a.cluster != nil {
if a.cluster.IsLeader() {
return a.cluster.AddMasters(addrs, 10*time.Second)
} else {
log.Infof("%s is not leader, skip", a.c.Addr)
}
} else {
a.masters.AddMasters(addrs)
}
return nil
}
func (a *App) delMasters(addrs []string) error {
if len(addrs) == 0 {
return nil
}
if a.cluster != nil {
if a.cluster.IsLeader() {
return a.cluster.DelMasters(addrs, 10*time.Second)
} else {
log.Infof("%s is not leader, skip", a.c.Addr)
}
} else {
a.masters.DelMasters(addrs)
}
return nil
}
func (a *App) setMasters(addrs []string) error {
if a.cluster != nil {
if a.cluster.IsLeader() {
return a.cluster.SetMasters(addrs, 10*time.Second)
} else {
log.Infof("%s is not leader, skip", a.c.Addr)
}
} else {
a.masters.SetMasters(addrs)
}
return nil
}
func (a *App) AddBeforeFailoverHandler(f BeforeFailoverHandler) {
a.hMutex.Lock()
a.beforeHandlers = append(a.beforeHandlers, f)
a.hMutex.Unlock()
}
func (a *App) AddAfterFailoverHandler(f AfterFailoverHandler) {
a.hMutex.Lock()
a.afterHandlers = append(a.afterHandlers, f)
a.hMutex.Unlock()
}
func (a *App) onBeforeFailover(downMaster string) error {
a.hMutex.Lock()
defer a.hMutex.Unlock()
for _, h := range a.beforeHandlers {
if err := h(downMaster); err != nil {
log.Errorf("do before failover handler for %s err: %v", downMaster, err)
if err == ErrGiveupFailover {
return ErrGiveupFailover
}
}
}
return nil
}
func (a *App) onAfterFailover(downMaster string, newMaster string) error {
a.hMutex.Lock()
defer a.hMutex.Unlock()
for _, h := range a.afterHandlers {
if err := h(downMaster, newMaster); err != nil {
log.Errorf("do after failover handler for %s -> %s err: %v", downMaster, newMaster, err)
if err == ErrGiveupFailover {
return ErrGiveupFailover
}
}
}
return nil
} | wg.Wait()
a.gMutex.Lock() |
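To show how the pieces fit together, a hypothetical wiring sketch; the import path, addresses, and handler bodies are assumptions, not part of the source:

package main

import (
	"fmt"
	"log"

	failover "example.com/failover" // hypothetical import path for the package above
)

func main() {
	c := &failover.Config{
		Addr:          "127.0.0.1:11000",
		Broker:        "", // empty/unsupported broker means no cluster, local masters only
		Masters:       []string{"127.0.0.1:6379"},
		MastersState:  failover.MastersStateNew,
		CheckInterval: 1000, // milliseconds between health checks
		MaxDownTime:   3,    // seconds a master may be down before failover
	}
	app, err := failover.NewApp(c)
	if err != nil {
		log.Fatal(err)
	}
	app.AddBeforeFailoverHandler(func(down string) error {
		fmt.Println("master down:", down)
		return nil // return failover.ErrGiveupFailover to abort the failover
	})
	app.AddAfterFailoverHandler(func(down, newMaster string) error {
		fmt.Println("promoted", newMaster, "to replace", down)
		return nil
	})
	app.Run() // blocks, checking masters every CheckInterval
}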
expr.rs | use super::diagnostics::SnapshotParser;
use super::pat::{CommaRecoveryMode, RecoverColon, RecoverComma, PARAM_EXPECTED};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
use super::{
AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions,
SemiColonMode, SeqSep, TokenExpectType, TokenType, TrailingToken,
};
use crate::maybe_recover_from_interpolated_ty_qpath;
use ast::token::DelimToken;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Token, TokenKind};
use rustc_ast::tokenstream::Spacing;
use rustc_ast::util::classify;
use rustc_ast::util::literal::LitError;
use rustc_ast::util::parser::{prec_let_scrutinee_needs_par, AssocOp, Fixity};
use rustc_ast::{self as ast, AttrStyle, AttrVec, CaptureBy, ExprField, Lit, UnOp, DUMMY_NODE_ID};
use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty, TyKind};
use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits};
use rustc_ast_pretty::pprust;
use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, PResult};
use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP;
use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_span::source_map::{self, Span, Spanned};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{BytePos, Pos};
use std::mem;
/// Possibly accepts an `token::Interpolated` expression (a pre-parsed expression
/// dropped into the token stream, which happens while parsing the result of
/// macro expansion). Placement of these is not as complex as I feared it would
/// be. The important thing is to make sure that lookahead doesn't balk at
/// `token::Interpolated` tokens.
macro_rules! maybe_whole_expr {
($p:expr) => {
if let token::Interpolated(nt) = &$p.token.kind {
match &**nt {
token::NtExpr(e) | token::NtLiteral(e) => {
let e = e.clone();
$p.bump();
return Ok(e);
}
token::NtPath(path) => {
let path = path.clone();
$p.bump();
return Ok($p.mk_expr(
$p.prev_token.span,
ExprKind::Path(None, path),
AttrVec::new(),
));
}
token::NtBlock(block) => {
let block = block.clone();
$p.bump();
return Ok($p.mk_expr(
$p.prev_token.span,
ExprKind::Block(block, None),
AttrVec::new(),
));
}
_ => {}
};
}
};
}
#[derive(Debug)]
pub(super) enum LhsExpr {
NotYetParsed,
AttributesParsed(AttrWrapper),
AlreadyParsed(P<Expr>),
}
impl From<Option<AttrWrapper>> for LhsExpr {
/// Converts `Some(attrs)` into `LhsExpr::AttributesParsed(attrs)`
/// and `None` into `LhsExpr::NotYetParsed`.
///
/// This conversion does not allocate.
fn from(o: Option<AttrWrapper>) -> Self {
if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed }
}
} | impl From<P<Expr>> for LhsExpr {
/// Converts the `expr: P<Expr>` into `LhsExpr::AlreadyParsed(expr)`.
///
/// This conversion does not allocate.
fn from(expr: P<Expr>) -> Self {
LhsExpr::AlreadyParsed(expr)
}
}
impl<'a> Parser<'a> {
/// Parses an expression.
#[inline]
pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> {
self.current_closure.take();
self.parse_expr_res(Restrictions::empty(), None)
}
/// Parses an expression, forcing tokens to be collected
pub fn parse_expr_force_collect(&mut self) -> PResult<'a, P<Expr>> {
self.collect_tokens_no_attrs(|this| this.parse_expr())
}
pub fn parse_anon_const_expr(&mut self) -> PResult<'a, AnonConst> {
self.parse_expr().map(|value| AnonConst { id: DUMMY_NODE_ID, value })
}
fn parse_expr_catch_underscore(&mut self) -> PResult<'a, P<Expr>> {
match self.parse_expr() {
Ok(expr) => Ok(expr),
Err(mut err) => match self.token.ident() {
Some((Ident { name: kw::Underscore, .. }, false))
if self.look_ahead(1, |t| t == &token::Comma) =>
{
// Special-case handling of `foo(_, _, _)`
err.emit();
self.bump();
Ok(self.mk_expr(self.prev_token.span, ExprKind::Err, AttrVec::new()))
}
_ => Err(err),
},
}
}
/// Parses a sequence of expressions delimited by parentheses.
fn parse_paren_expr_seq(&mut self) -> PResult<'a, Vec<P<Expr>>> {
self.parse_paren_comma_seq(|p| p.parse_expr_catch_underscore()).map(|(r, _)| r)
}
/// Parses an expression, subject to the given restrictions.
#[inline]
pub(super) fn parse_expr_res(
&mut self,
r: Restrictions,
already_parsed_attrs: Option<AttrWrapper>,
) -> PResult<'a, P<Expr>> {
self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs))
}
/// Parses an associative expression.
///
/// This parses an expression accounting for associativity and precedence of the operators in
/// the expression.
#[inline]
fn parse_assoc_expr(
&mut self,
already_parsed_attrs: Option<AttrWrapper>,
) -> PResult<'a, P<Expr>> {
self.parse_assoc_expr_with(0, already_parsed_attrs.into())
}
/// Parses an associative expression with operators of at least `min_prec` precedence.
pub(super) fn parse_assoc_expr_with(
&mut self,
min_prec: usize,
lhs: LhsExpr,
) -> PResult<'a, P<Expr>> {
let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs {
expr
} else {
let attrs = match lhs {
LhsExpr::AttributesParsed(attrs) => Some(attrs),
_ => None,
};
if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) {
return self.parse_prefix_range_expr(attrs);
} else {
self.parse_prefix_expr(attrs)?
}
};
let last_type_ascription_set = self.last_type_ascription.is_some();
if !self.should_continue_as_assoc_expr(&lhs) {
self.last_type_ascription = None;
return Ok(lhs);
}
self.expected_tokens.push(TokenType::Operator);
while let Some(op) = self.check_assoc_op() {
// Adjust the span for interpolated LHS to point to the `$lhs` token
// and not to what it refers to.
let lhs_span = match self.prev_token.kind {
TokenKind::Interpolated(..) => self.prev_token.span,
_ => lhs.span,
};
let cur_op_span = self.token.span;
let restrictions = if op.node.is_assign_like() {
self.restrictions & Restrictions::NO_STRUCT_LITERAL
} else {
self.restrictions
};
let prec = op.node.precedence();
if prec < min_prec {
break;
}
// Check for deprecated `...` syntax
if self.token == token::DotDotDot && op.node == AssocOp::DotDotEq {
self.err_dotdotdot_syntax(self.token.span);
}
if self.token == token::LArrow {
self.err_larrow_operator(self.token.span);
}
self.bump();
if op.node.is_comparison() {
if let Some(expr) = self.check_no_chained_comparison(&lhs, &op)? {
return Ok(expr);
}
}
// Look for JS' `===` and `!==` and recover
if (op.node == AssocOp::Equal || op.node == AssocOp::NotEqual)
&& self.token.kind == token::Eq
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
let sugg = match op.node {
AssocOp::Equal => "==",
AssocOp::NotEqual => "!=",
_ => unreachable!(),
};
self.struct_span_err(sp, &format!("invalid comparison operator `{sugg}=`"))
.span_suggestion_short(
sp,
&format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg),
sugg.to_string(),
Applicability::MachineApplicable,
)
.emit();
self.bump();
}
// Look for PHP's `<>` and recover
if op.node == AssocOp::Less
&& self.token.kind == token::Gt
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
self.struct_span_err(sp, "invalid comparison operator `<>`")
.span_suggestion_short(
sp,
"`<>` is not a valid comparison operator, use `!=`",
"!=".to_string(),
Applicability::MachineApplicable,
)
.emit();
self.bump();
}
// Look for C++'s `<=>` and recover
if op.node == AssocOp::LessEqual
&& self.token.kind == token::Gt
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
self.struct_span_err(sp, "invalid comparison operator `<=>`")
.span_label(
sp,
"`<=>` is not a valid comparison operator, use `std::cmp::Ordering`",
)
.emit();
self.bump();
}
if self.prev_token == token::BinOp(token::Plus)
&& self.token == token::BinOp(token::Plus)
&& self.prev_token.span.between(self.token.span).is_empty()
{
let op_span = self.prev_token.span.to(self.token.span);
// Eat the second `+`
self.bump();
lhs = self.recover_from_postfix_increment(lhs, op_span)?;
continue;
}
let op = op.node;
// Special cases:
if op == AssocOp::As {
lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?;
continue;
} else if op == AssocOp::Colon {
lhs = self.parse_assoc_op_ascribe(lhs, lhs_span)?;
continue;
} else if op == AssocOp::DotDot || op == AssocOp::DotDotEq {
// If we didn't have to handle `x..`/`x..=`, it would be pretty easy to
// generalise it to the Fixity::None code.
lhs = self.parse_range_expr(prec, lhs, op, cur_op_span)?;
break;
}
let fixity = op.fixity();
let prec_adjustment = match fixity {
Fixity::Right => 0,
Fixity::Left => 1,
// We currently have no non-associative operators that are not handled above by
// the special cases. The code is here only for future convenience.
Fixity::None => 1,
};
let rhs = self.with_res(restrictions - Restrictions::STMT_EXPR, |this| {
this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed)
})?;
let span = self.mk_expr_sp(&lhs, lhs_span, rhs.span);
lhs = match op {
AssocOp::Add
| AssocOp::Subtract
| AssocOp::Multiply
| AssocOp::Divide
| AssocOp::Modulus
| AssocOp::LAnd
| AssocOp::LOr
| AssocOp::BitXor
| AssocOp::BitAnd
| AssocOp::BitOr
| AssocOp::ShiftLeft
| AssocOp::ShiftRight
| AssocOp::Equal
| AssocOp::Less
| AssocOp::LessEqual
| AssocOp::NotEqual
| AssocOp::Greater
| AssocOp::GreaterEqual => {
let ast_op = op.to_ast_binop().unwrap();
let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs);
self.mk_expr(span, binary, AttrVec::new())
}
AssocOp::Assign => {
self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span), AttrVec::new())
}
AssocOp::AssignOp(k) => {
let aop = match k {
token::Plus => BinOpKind::Add,
token::Minus => BinOpKind::Sub,
token::Star => BinOpKind::Mul,
token::Slash => BinOpKind::Div,
token::Percent => BinOpKind::Rem,
token::Caret => BinOpKind::BitXor,
token::And => BinOpKind::BitAnd,
token::Or => BinOpKind::BitOr,
token::Shl => BinOpKind::Shl,
token::Shr => BinOpKind::Shr,
};
let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs);
self.mk_expr(span, aopexpr, AttrVec::new())
}
AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => {
self.span_bug(span, "AssocOp should have been handled by special case")
}
};
if let Fixity::None = fixity {
break;
}
}
if last_type_ascription_set {
self.last_type_ascription = None;
}
Ok(lhs)
}
fn should_continue_as_assoc_expr(&mut self, lhs: &Expr) -> bool {
match (self.expr_is_complete(lhs), AssocOp::from_token(&self.token)) {
// Semi-statement forms are odd:
// See https://github.com/rust-lang/rust/issues/29071
(true, None) => false,
(false, _) => true, // Continue parsing the expression.
// An exhaustive check is done in the following block, but these are checked first
// because they *are* ambiguous but also reasonable-looking incorrect syntax, so we
// want to keep their span info to improve diagnostics in these cases in a later stage.
(true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3`
(true, Some(AssocOp::Subtract)) | // `{ 42 } -5`
(true, Some(AssocOp::Add)) // `{ 42 } + 42`
// If the next token is a keyword, then the tokens above *are* unambiguously incorrect:
// `if x { a } else { b } && if y { c } else { d }`
if !self.look_ahead(1, |t| t.is_used_keyword()) => {
// These cases are ambiguous and can't be identified in the parser alone.
let sp = self.sess.source_map().start_point(self.token.span);
self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
false
}
(true, Some(AssocOp::LAnd)) |
(true, Some(AssocOp::LOr)) |
(true, Some(AssocOp::BitOr)) => {
// `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`. Separated from the
// above due to #74233.
// These cases are ambiguous and can't be identified in the parser alone.
//
// Bitwise AND is left out because guessing intent is hard. We can make
// suggestions based on the assumption that double-refs are rarely intentional,
// and closures are distinct enough that they don't get mixed up with their
// return value.
let sp = self.sess.source_map().start_point(self.token.span);
self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
false
}
(true, Some(ref op)) if !op.can_continue_expr_unambiguously() => false,
(true, Some(_)) => {
self.error_found_expr_would_be_stmt(lhs);
true
}
}
}
/// We've found an expression that would be parsed as a statement,
/// but the next token implies this should be parsed as an expression.
/// For example: `if let Some(x) = x { x } else { 0 } / 2`.
fn error_found_expr_would_be_stmt(&self, lhs: &Expr) {
let mut err = self.struct_span_err(
self.token.span,
&format!("expected expression, found `{}`", pprust::token_to_string(&self.token),),
);
err.span_label(self.token.span, "expected expression");
self.sess.expr_parentheses_needed(&mut err, lhs.span);
err.emit();
}
/// Possibly translate the current token to an associative operator.
/// The method does not advance the current token.
///
/// Also performs recovery for `and` / `or` which are mistaken for `&&` and `||` respectively.
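/// An illustrative sketch of that recovery (not exhaustive):
/// ```text
/// if a and b {}   // error: `and` is not a logical operator; recovered as `a && b`
/// if a or b {}    // error: `or` is not a logical operator; recovered as `a || b`
/// ```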
fn check_assoc_op(&self) -> Option<Spanned<AssocOp>> {
let (op, span) = match (AssocOp::from_token(&self.token), self.token.ident()) {
// When parsing const expressions, stop parsing when encountering `>`.
(
Some(
AssocOp::ShiftRight
| AssocOp::Greater
| AssocOp::GreaterEqual
| AssocOp::AssignOp(token::BinOpToken::Shr),
),
_,
) if self.restrictions.contains(Restrictions::CONST_EXPR) => {
return None;
}
(Some(op), _) => (op, self.token.span),
(None, Some((Ident { name: sym::and, span }, false))) => {
self.error_bad_logical_op("and", "&&", "conjunction");
(AssocOp::LAnd, span)
}
(None, Some((Ident { name: sym::or, span }, false))) => {
self.error_bad_logical_op("or", "||", "disjunction");
(AssocOp::LOr, span)
}
_ => return None,
};
Some(source_map::respan(span, op))
}
/// Error on `and` and `or` suggesting `&&` and `||` respectively.
fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) {
self.struct_span_err(self.token.span, &format!("`{bad}` is not a logical operator"))
.span_suggestion_short(
self.token.span,
&format!("use `{good}` to perform logical {english}"),
good.to_string(),
Applicability::MachineApplicable,
)
.note("unlike in e.g., python and PHP, `&&` and `||` are used for logical operators")
.emit();
}
/// Checks if this expression is a successfully parsed statement.
fn expr_is_complete(&self, e: &Expr) -> bool {
self.restrictions.contains(Restrictions::STMT_EXPR)
&& !classify::expr_requires_semi_to_be_stmt(e)
}
/// Parses `x..y`, `x..=y`, and `x..`/`x..=`.
/// The other two variants are handled in `parse_prefix_range_expr` below.
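/// Illustrative inputs (not exhaustive):
/// ```text
/// a..b     // `RangeLimits::HalfOpen`, RHS parsed
/// a..=b    // `RangeLimits::Closed`, RHS parsed
/// a..      // no RHS follows, so `rhs` is `None`
/// ```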
fn parse_range_expr(
&mut self,
prec: usize,
lhs: P<Expr>,
op: AssocOp,
cur_op_span: Span,
) -> PResult<'a, P<Expr>> {
let rhs = if self.is_at_start_of_range_notation_rhs() {
Some(self.parse_assoc_expr_with(prec + 1, LhsExpr::NotYetParsed)?)
} else {
None
};
let rhs_span = rhs.as_ref().map_or(cur_op_span, |x| x.span);
let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span);
let limits =
if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed };
let range = self.mk_range(Some(lhs), rhs, limits);
Ok(self.mk_expr(span, range, AttrVec::new()))
}
fn is_at_start_of_range_notation_rhs(&self) -> bool {
if self.token.can_begin_expr() {
// Parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`.
if self.token == token::OpenDelim(token::Brace) {
return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
}
true
} else {
false
}
}
/// Parses prefix-forms of range notation: `..expr`, `..`, `..=expr`.
fn parse_prefix_range_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
// Check for deprecated `...` syntax.
if self.token == token::DotDotDot {
self.err_dotdotdot_syntax(self.token.span);
}
debug_assert!(
[token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind),
"parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq",
self.token
);
let limits = match self.token.kind {
token::DotDot => RangeLimits::HalfOpen,
_ => RangeLimits::Closed,
};
let op = AssocOp::from_token(&self.token);
// FIXME: `parse_prefix_range_expr` is called when the current
// token is `DotDot`, `DotDotDot`, or `DotDotEq`. If we haven't already
// parsed attributes, then trying to parse them here will always fail.
// We should figure out how we want attributes on range expressions to work.
let attrs = self.parse_or_use_outer_attributes(attrs)?;
self.collect_tokens_for_expr(attrs, |this, attrs| {
let lo = this.token.span;
this.bump();
let (span, opt_end) = if this.is_at_start_of_range_notation_rhs() {
// The RHS must be parsed with higher precedence than the dots so that it binds more tightly.
this.parse_assoc_expr_with(op.unwrap().precedence() + 1, LhsExpr::NotYetParsed)
.map(|x| (lo.to(x.span), Some(x)))?
} else {
(lo, None)
};
let range = this.mk_range(None, opt_end, limits);
Ok(this.mk_expr(span, range, attrs.into()))
})
}
/// Parses a prefix-unary-operator expr.
fn parse_prefix_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
let lo = self.token.span;
macro_rules! make_it {
($this:ident, $attrs:expr, |this, _| $body:expr) => {
$this.collect_tokens_for_expr($attrs, |$this, attrs| {
let (hi, ex) = $body?;
Ok($this.mk_expr(lo.to(hi), ex, attrs.into()))
})
};
}
let this = self;
// Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr()
match this.token.uninterpolate().kind {
token::Not => make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Not)), // `!expr`
token::Tilde => make_it!(this, attrs, |this, _| this.recover_tilde_expr(lo)), // `~expr`
token::BinOp(token::Minus) => {
make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Neg))
} // `-expr`
token::BinOp(token::Star) => {
make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Deref))
} // `*expr`
token::BinOp(token::And) | token::AndAnd => {
make_it!(this, attrs, |this, _| this.parse_borrow_expr(lo))
}
token::BinOp(token::Plus) if this.look_ahead(1, |tok| tok.is_numeric_lit()) => {
let mut err = this.struct_span_err(lo, "leading `+` is not supported");
err.span_label(lo, "unexpected `+`");
// a block on the LHS might have been intended to be an expression instead
if let Some(sp) = this.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
this.sess.expr_parentheses_needed(&mut err, *sp);
} else {
err.span_suggestion_verbose(
lo,
"try removing the `+`",
"".to_string(),
Applicability::MachineApplicable,
);
}
err.emit();
this.bump();
this.parse_prefix_expr(None)
} // `+expr`
// Recover from `++x`:
token::BinOp(token::Plus)
if this.look_ahead(1, |t| *t == token::BinOp(token::Plus)) =>
{
let prev_is_semi = this.prev_token == token::Semi;
let pre_span = this.token.span.to(this.look_ahead(1, |t| t.span));
// Eat both `+`s.
this.bump();
this.bump();
let operand_expr = this.parse_dot_or_call_expr(Default::default())?;
this.recover_from_prefix_increment(operand_expr, pre_span, prev_is_semi)
}
token::Ident(..) if this.token.is_keyword(kw::Box) => {
make_it!(this, attrs, |this, _| this.parse_box_expr(lo))
}
token::Ident(..) if this.is_mistaken_not_ident_negation() => {
make_it!(this, attrs, |this, _| this.recover_not_expr(lo))
}
_ => return this.parse_dot_or_call_expr(Some(attrs)),
}
}
fn parse_prefix_expr_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> {
self.bump();
let expr = self.parse_prefix_expr(None);
let (span, expr) = self.interpolated_or_expr_span(expr)?;
Ok((lo.to(span), expr))
}
fn parse_unary_expr(&mut self, lo: Span, op: UnOp) -> PResult<'a, (Span, ExprKind)> {
let (span, expr) = self.parse_prefix_expr_common(lo)?;
Ok((span, self.mk_unary(op, expr)))
}
// Recover on `~expr`, suggesting `!` for bitwise negation instead.
fn recover_tilde_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
self.struct_span_err(lo, "`~` cannot be used as a unary operator")
.span_suggestion_short(
lo,
"use `!` to perform bitwise not",
"!".to_owned(),
Applicability::MachineApplicable,
)
.emit();
self.parse_unary_expr(lo, UnOp::Not)
}
/// Parse `box expr`.
fn parse_box_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
let (span, expr) = self.parse_prefix_expr_common(lo)?;
self.sess.gated_spans.gate(sym::box_syntax, span);
Ok((span, ExprKind::Box(expr)))
}
fn is_mistaken_not_ident_negation(&self) -> bool {
let token_cannot_continue_expr = |t: &Token| match t.uninterpolate().kind {
// These tokens can start an expression after `!`, but
// can't continue an expression after an ident
token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw),
token::Literal(..) | token::Pound => true,
_ => t.is_whole_expr(),
};
self.token.is_ident_named(sym::not) && self.look_ahead(1, token_cannot_continue_expr)
}
/// Recover on `not expr` in favor of `!expr`.
fn recover_not_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
// Emit the error...
let not_token = self.look_ahead(1, |t| t.clone());
self.struct_span_err(
not_token.span,
&format!("unexpected {} after identifier", super::token_descr(¬_token)),
)
.span_suggestion_short(
// Span the `not` plus trailing whitespace to avoid
// trailing whitespace after the `!` in our suggestion
self.sess.source_map().span_until_non_whitespace(lo.to(not_token.span)),
"use `!` to perform logical negation",
"!".to_owned(),
Applicability::MachineApplicable,
)
.emit();
// ...and recover!
self.parse_unary_expr(lo, UnOp::Not)
}
/// Returns the span of expr, if it was not interpolated or the span of the interpolated token.
fn interpolated_or_expr_span(
&self,
expr: PResult<'a, P<Expr>>,
) -> PResult<'a, (Span, P<Expr>)> {
expr.map(|e| {
(
match self.prev_token.kind {
TokenKind::Interpolated(..) => self.prev_token.span,
_ => e.span,
},
e,
)
})
}
fn parse_assoc_op_cast(
&mut self,
lhs: P<Expr>,
lhs_span: Span,
expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind,
) -> PResult<'a, P<Expr>> {
let mk_expr = |this: &mut Self, lhs: P<Expr>, rhs: P<Ty>| {
this.mk_expr(
this.mk_expr_sp(&lhs, lhs_span, rhs.span),
expr_kind(lhs, rhs),
AttrVec::new(),
)
};
// Save the state of the parser before parsing type normally, in case there is a
// LessThan comparison after this cast.
let parser_snapshot_before_type = self.clone();
let cast_expr = match self.parse_as_cast_ty() {
Ok(rhs) => mk_expr(self, lhs, rhs),
Err(type_err) => {
// Rewind to before attempting to parse the type with generics, to recover
// from situations like `x as usize < y` in which we first tried to parse
// `usize < y` as a type with generic arguments.
let parser_snapshot_after_type = mem::replace(self, parser_snapshot_before_type);
// Check for typo of `'a: loop { break 'a }` with a missing `'`.
match (&lhs.kind, &self.token.kind) {
(
// `foo: `
ExprKind::Path(None, ast::Path { segments, .. }),
TokenKind::Ident(kw::For | kw::Loop | kw::While, false),
) if segments.len() == 1 => {
let snapshot = self.create_snapshot_for_diagnostic();
let label = Label {
ident: Ident::from_str_and_span(
&format!("'{}", segments[0].ident),
segments[0].ident.span,
),
};
match self.parse_labeled_expr(label, AttrVec::new(), false) {
Ok(expr) => {
type_err.cancel();
self.struct_span_err(label.ident.span, "malformed loop label")
.span_suggestion(
label.ident.span,
"use the correct loop label format",
label.ident.to_string(),
Applicability::MachineApplicable,
)
.emit();
return Ok(expr);
}
Err(err) => {
err.cancel();
self.restore_snapshot(snapshot);
}
}
}
_ => {}
}
match self.parse_path(PathStyle::Expr) {
Ok(path) => {
let (op_noun, op_verb) = match self.token.kind {
token::Lt => ("comparison", "comparing"),
token::BinOp(token::Shl) => ("shift", "shifting"),
_ => {
// We can end up here even without `<` being the next token, for
// example because `parse_ty_no_plus` returns `Err` on keywords,
// but `parse_path` returns `Ok` on them due to error recovery.
// Return original error and parser state.
*self = parser_snapshot_after_type;
return Err(type_err);
}
};
// Successfully parsed the type path leaving a `<` yet to parse.
type_err.cancel();
// Report non-fatal diagnostics, keep `x as usize` as an expression
// in AST and continue parsing.
let msg = format!(
"`<` is interpreted as a start of generic arguments for `{}`, not a {}",
pprust::path_to_string(&path),
op_noun,
);
let span_after_type = parser_snapshot_after_type.token.span;
let expr =
mk_expr(self, lhs, self.mk_ty(path.span, TyKind::Path(None, path)));
self.struct_span_err(self.token.span, &msg)
.span_label(
self.look_ahead(1, |t| t.span).to(span_after_type),
"interpreted as generic arguments",
)
.span_label(self.token.span, format!("not interpreted as {op_noun}"))
.multipart_suggestion(
&format!("try {op_verb} the cast value"),
vec![
(expr.span.shrink_to_lo(), "(".to_string()),
(expr.span.shrink_to_hi(), ")".to_string()),
],
Applicability::MachineApplicable,
)
.emit();
expr
}
Err(path_err) => {
// Couldn't parse as a path, return original error and parser state.
path_err.cancel();
*self = parser_snapshot_after_type;
return Err(type_err);
}
}
}
};
self.parse_and_disallow_postfix_after_cast(cast_expr)
}
/// Parses a postfix operators such as `.`, `?`, or index (`[]`) after a cast,
/// then emits an error and returns the newly parsed tree.
/// The resulting parse tree for `&x as T[0]` is grouped as `((&x) as T)[0]`.
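/// For example (an illustrative case), `x as u32.max(0)` is parsed here as
/// `(x as u32).max(0)` and then rejected, suggesting to surround the cast in
/// parentheses.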
fn parse_and_disallow_postfix_after_cast(
&mut self,
cast_expr: P<Expr>,
) -> PResult<'a, P<Expr>> {
let span = cast_expr.span;
let maybe_ascription_span = if let ExprKind::Type(ascripted_expr, _) = &cast_expr.kind {
Some(ascripted_expr.span.shrink_to_hi().with_hi(span.hi()))
} else {
None
};
// Save the memory location of expr before parsing any following postfix operators.
// This will be compared with the memory location of the output expression.
// If they differ, we can assume we parsed another expression because the existing expression is not reallocated.
let addr_before = &*cast_expr as *const _ as usize;
let with_postfix = self.parse_dot_or_call_expr_with_(cast_expr, span)?;
let changed = addr_before != &*with_postfix as *const _ as usize;
// Check if an illegal postfix operator has been added after the cast.
// If the resulting expression is not a cast, or has a different memory location, it is an illegal postfix operator.
if !matches!(with_postfix.kind, ExprKind::Cast(_, _) | ExprKind::Type(_, _)) || changed {
let msg = format!(
"casts cannot be followed by {}",
match with_postfix.kind {
ExprKind::Index(_, _) => "indexing",
ExprKind::Try(_) => "`?`",
ExprKind::Field(_, _) => "a field access",
ExprKind::MethodCall(_, _, _) => "a method call",
ExprKind::Call(_, _) => "a function call",
ExprKind::Await(_) => "`.await`",
ExprKind::Err => return Ok(with_postfix),
_ => unreachable!("parse_dot_or_call_expr_with_ shouldn't produce this"),
}
);
let mut err = self.struct_span_err(span, &msg);
let suggest_parens = |err: &mut DiagnosticBuilder<'_, _>| {
let suggestions = vec![
(span.shrink_to_lo(), "(".to_string()),
(span.shrink_to_hi(), ")".to_string()),
];
err.multipart_suggestion(
"try surrounding the expression in parentheses",
suggestions,
Applicability::MachineApplicable,
);
};
// If type ascription is "likely an error", the user will already be getting a useful
// help message, and doesn't need a second.
if self.last_type_ascription.map_or(false, |last_ascription| last_ascription.1) {
self.maybe_annotate_with_ascription(&mut err, false);
} else if let Some(ascription_span) = maybe_ascription_span {
let is_nightly = self.sess.unstable_features.is_nightly_build();
if is_nightly {
suggest_parens(&mut err);
}
err.span_suggestion(
ascription_span,
&format!(
"{}remove the type ascription",
if is_nightly { "alternatively, " } else { "" }
),
String::new(),
if is_nightly {
Applicability::MaybeIncorrect
} else {
Applicability::MachineApplicable
},
);
} else {
suggest_parens(&mut err);
}
err.emit();
};
Ok(with_postfix)
}
fn parse_assoc_op_ascribe(&mut self, lhs: P<Expr>, lhs_span: Span) -> PResult<'a, P<Expr>> {
let maybe_path = self.could_ascription_be_path(&lhs.kind);
self.last_type_ascription = Some((self.prev_token.span, maybe_path));
let lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type)?;
self.sess.gated_spans.gate(sym::type_ascription, lhs.span);
Ok(lhs)
}
/// Parse `& mut? <expr>` or `& raw [ const | mut ] <expr>`.
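/// Illustrative forms accepted here (not exhaustive):
/// ```text
/// &expr    &mut expr    &raw const expr    &raw mut expr
/// ```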
fn parse_borrow_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
self.expect_and()?;
let has_lifetime = self.token.is_lifetime() && self.look_ahead(1, |t| t != &token::Colon);
let lifetime = has_lifetime.then(|| self.expect_lifetime()); // For recovery, see below.
let (borrow_kind, mutbl) = self.parse_borrow_modifiers(lo);
let expr = self.parse_prefix_expr(None);
let (hi, expr) = self.interpolated_or_expr_span(expr)?;
let span = lo.to(hi);
if let Some(lt) = lifetime {
self.error_remove_borrow_lifetime(span, lt.ident.span);
}
Ok((span, ExprKind::AddrOf(borrow_kind, mutbl, expr)))
}
fn error_remove_borrow_lifetime(&self, span: Span, lt_span: Span) {
self.struct_span_err(span, "borrow expressions cannot be annotated with lifetimes")
.span_label(lt_span, "annotated with lifetime here")
.span_suggestion(
lt_span,
"remove the lifetime annotation",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
/// Parse `mut?` or `raw [ const | mut ]`.
fn parse_borrow_modifiers(&mut self, lo: Span) -> (ast::BorrowKind, ast::Mutability) {
if self.check_keyword(kw::Raw) && self.look_ahead(1, Token::is_mutability) {
// `raw [ const | mut ]`.
let found_raw = self.eat_keyword(kw::Raw);
assert!(found_raw);
let mutability = self.parse_const_or_mut().unwrap();
self.sess.gated_spans.gate(sym::raw_ref_op, lo.to(self.prev_token.span));
(ast::BorrowKind::Raw, mutability)
} else {
// `mut?`
(ast::BorrowKind::Ref, self.parse_mutability())
}
}
/// Parses `a.b` or `a(13)` or `a[4]` or just `a`.
fn parse_dot_or_call_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
self.collect_tokens_for_expr(attrs, |this, attrs| {
let base = this.parse_bottom_expr();
let (span, base) = this.interpolated_or_expr_span(base)?;
this.parse_dot_or_call_expr_with(base, span, attrs)
})
}
pub(super) fn parse_dot_or_call_expr_with(
&mut self,
e0: P<Expr>,
lo: Span,
mut attrs: Vec<ast::Attribute>,
) -> PResult<'a, P<Expr>> {
// Stitch the list of outer attributes onto the return value.
// A little bit ugly, but the best way given the current code
// structure
self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| {
expr.map(|mut expr| {
attrs.extend::<Vec<_>>(expr.attrs.into());
expr.attrs = attrs.into();
expr
})
})
}
fn parse_dot_or_call_expr_with_(&mut self, mut e: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
loop {
if self.eat(&token::Question) {
// `expr?`
e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e), AttrVec::new());
continue;
}
if self.eat(&token::Dot) {
// expr.f
e = self.parse_dot_suffix_expr(lo, e)?;
continue;
}
if self.expr_is_complete(&e) {
return Ok(e);
}
e = match self.token.kind {
token::OpenDelim(token::Paren) => self.parse_fn_call_expr(lo, e),
token::OpenDelim(token::Bracket) => self.parse_index_expr(lo, e)?,
_ => return Ok(e),
}
}
}
fn look_ahead_type_ascription_as_field(&mut self) -> bool {
self.look_ahead(1, |t| t.is_ident())
&& self.look_ahead(2, |t| t == &token::Colon)
&& self.look_ahead(3, |t| t.can_begin_expr())
}
fn parse_dot_suffix_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
match self.token.uninterpolate().kind {
token::Ident(..) => self.parse_dot_suffix(base, lo),
token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => {
Ok(self.parse_tuple_field_access_expr(lo, base, symbol, suffix, None))
}
token::Literal(token::Lit { kind: token::Float, symbol, suffix }) => {
Ok(self.parse_tuple_field_access_expr_float(lo, base, symbol, suffix))
}
_ => {
self.error_unexpected_after_dot();
Ok(base)
}
}
}
fn error_unexpected_after_dot(&self) {
// FIXME Could factor this out into non_fatal_unexpected or something.
let actual = pprust::token_to_string(&self.token);
self.struct_span_err(self.token.span, &format!("unexpected token: `{actual}`")).emit();
}
// We need an identifier or integer, but the next token is a float.
// Break the float into components to extract the identifier or integer.
// FIXME: With current `TokenCursor` it's hard to break tokens into more than 2
// parts unless those parts are processed immediately. `TokenCursor` should either
// support pushing "future tokens" (would be also helpful to `break_and_eat`), or
// we should break everything including floats into more basic proc-macro style
// tokens in the lexer (probably preferable).
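// As an illustrative case: in `x.1.2`, the lexer produces the single float token
// `1.2`, which is split here into `1`, `.`, `2` so the expression parses like `(x.1).2`.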
fn parse_tuple_field_access_expr_float(
&mut self,
lo: Span,
base: P<Expr>,
float: Symbol,
suffix: Option<Symbol>,
) -> P<Expr> {
#[derive(Debug)]
enum FloatComponent {
IdentLike(String),
Punct(char),
}
use FloatComponent::*;
let float_str = float.as_str();
let mut components = Vec::new();
let mut ident_like = String::new();
for c in float_str.chars() {
if c == '_' || c.is_ascii_alphanumeric() {
ident_like.push(c);
} else if matches!(c, '.' | '+' | '-') {
if !ident_like.is_empty() {
components.push(IdentLike(mem::take(&mut ident_like)));
}
components.push(Punct(c));
} else {
panic!("unexpected character in a float token: {:?}", c)
}
}
if !ident_like.is_empty() {
components.push(IdentLike(ident_like));
}
// With proc macros the span can refer to anything, the source may be too short,
// or too long, or non-ASCII. It only makes sense to break our span into components
// if its underlying text is identical to our float literal.
let span = self.token.span;
let can_take_span_apart =
|| self.span_to_snippet(span).as_deref() == Ok(float_str).as_deref();
match &*components {
// 1e2
[IdentLike(i)] => {
self.parse_tuple_field_access_expr(lo, base, Symbol::intern(&i), suffix, None)
}
// 1.
[IdentLike(i), Punct('.')] => {
let (ident_span, dot_span) = if can_take_span_apart() {
let (span, ident_len) = (span.data(), BytePos::from_usize(i.len()));
let ident_span = span.with_hi(span.lo + ident_len);
let dot_span = span.with_lo(span.lo + ident_len);
(ident_span, dot_span)
} else {
(span, span)
};
assert!(suffix.is_none());
let symbol = Symbol::intern(&i);
self.token = Token::new(token::Ident(symbol, false), ident_span);
let next_token = (Token::new(token::Dot, dot_span), self.token_spacing);
self.parse_tuple_field_access_expr(lo, base, symbol, None, Some(next_token))
}
// 1.2 | 1.2e3
[IdentLike(i1), Punct('.'), IdentLike(i2)] => {
let (ident1_span, dot_span, ident2_span) = if can_take_span_apart() {
let (span, ident1_len) = (span.data(), BytePos::from_usize(i1.len()));
let ident1_span = span.with_hi(span.lo + ident1_len);
let dot_span = span
.with_lo(span.lo + ident1_len)
.with_hi(span.lo + ident1_len + BytePos(1));
let ident2_span = self.token.span.with_lo(span.lo + ident1_len + BytePos(1));
(ident1_span, dot_span, ident2_span)
} else {
(span, span, span)
};
let symbol1 = Symbol::intern(&i1);
self.token = Token::new(token::Ident(symbol1, false), ident1_span);
// This needs to be `Spacing::Alone` to prevent regressions.
// See issue #76399 and PR #76285 for more details
let next_token1 = (Token::new(token::Dot, dot_span), Spacing::Alone);
let base1 =
self.parse_tuple_field_access_expr(lo, base, symbol1, None, Some(next_token1));
let symbol2 = Symbol::intern(&i2);
let next_token2 = Token::new(token::Ident(symbol2, false), ident2_span);
self.bump_with((next_token2, self.token_spacing)); // `.`
self.parse_tuple_field_access_expr(lo, base1, symbol2, suffix, None)
}
// 1e+ | 1e- (recovered)
[IdentLike(_), Punct('+' | '-')] |
// 1e+2 | 1e-2
[IdentLike(_), Punct('+' | '-'), IdentLike(_)] |
// 1.2e+ | 1.2e-
[IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-')] |
// 1.2e+3 | 1.2e-3
[IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-'), IdentLike(_)] => {
// See the FIXME about `TokenCursor` above.
self.error_unexpected_after_dot();
base
}
_ => panic!("unexpected components in a float token: {:?}", components),
}
}
fn parse_tuple_field_access_expr(
&mut self,
lo: Span,
base: P<Expr>,
field: Symbol,
suffix: Option<Symbol>,
next_token: Option<(Token, Spacing)>,
) -> P<Expr> {
match next_token {
Some(next_token) => self.bump_with(next_token),
None => self.bump(),
}
let span = self.prev_token.span;
let field = ExprKind::Field(base, Ident::new(field, span));
self.expect_no_suffix(span, "a tuple index", suffix);
self.mk_expr(lo.to(span), field, AttrVec::new())
}
/// Parse a function call expression, `expr(...)`.
fn parse_fn_call_expr(&mut self, lo: Span, fun: P<Expr>) -> P<Expr> {
let snapshot = if self.token.kind == token::OpenDelim(token::Paren)
&& self.look_ahead_type_ascription_as_field()
{
Some((self.create_snapshot_for_diagnostic(), fun.kind.clone()))
} else {
None
};
let open_paren = self.token.span;
let mut seq = self.parse_paren_expr_seq().map(|args| {
self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args), AttrVec::new())
});
if let Some(expr) =
self.maybe_recover_struct_lit_bad_delims(lo, open_paren, &mut seq, snapshot)
{
return expr;
}
self.recover_seq_parse_error(token::Paren, lo, seq)
}
/// If we encounter a parser state that looks like the user has written a `struct` literal with
/// parentheses instead of braces, recover the parser state and provide suggestions.
#[instrument(skip(self, seq, snapshot), level = "trace")]
fn maybe_recover_struct_lit_bad_delims(
&mut self,
lo: Span,
open_paren: Span,
seq: &mut PResult<'a, P<Expr>>,
snapshot: Option<(SnapshotParser<'a>, ExprKind)>,
) -> Option<P<Expr>> {
match (seq.as_mut(), snapshot) {
(Err(err), Some((mut snapshot, ExprKind::Path(None, path)))) => {
let name = pprust::path_to_string(&path);
snapshot.bump(); // `(`
match snapshot.parse_struct_fields(path, false, token::Paren) {
Ok((fields, ..)) if snapshot.eat(&token::CloseDelim(token::Paren)) => {
// We are certain we have `Enum::Foo(a: 3, b: 4)`, suggest
// `Enum::Foo { a: 3, b: 4 }` or `Enum::Foo(3, 4)`.
self.restore_snapshot(snapshot);
let close_paren = self.prev_token.span;
let span = lo.to(self.prev_token.span);
if !fields.is_empty() {
let replacement_err = self.struct_span_err(
span,
"invalid `struct` delimiters or `fn` call arguments",
);
mem::replace(err, replacement_err).cancel();
err.multipart_suggestion(
&format!("if `{name}` is a struct, use braces as delimiters"),
vec![
(open_paren, " { ".to_string()),
(close_paren, " }".to_string()),
],
Applicability::MaybeIncorrect,
);
err.multipart_suggestion(
&format!("if `{name}` is a function, use the arguments directly"),
fields
.into_iter()
.map(|field| (field.span.until(field.expr.span), String::new()))
.collect(),
Applicability::MaybeIncorrect,
);
err.emit();
} else {
err.emit();
}
return Some(self.mk_expr_err(span));
}
Ok(_) => {}
Err(mut err) => {
err.emit();
}
}
}
_ => {}
}
None
}
/// Parse an indexing expression `expr[...]`.
fn parse_index_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
self.bump(); // `[`
let index = self.parse_expr()?;
self.expect(&token::CloseDelim(token::Bracket))?;
Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index), AttrVec::new()))
}
/// Assuming we have just parsed `.`, continue parsing into an expression.
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
if self.token.uninterpolated_span().rust_2018() && self.eat_keyword(kw::Await) {
return Ok(self.mk_await_expr(self_arg, lo));
}
let fn_span_lo = self.token.span;
let mut segment = self.parse_path_segment(PathStyle::Expr, None)?;
self.check_trailing_angle_brackets(&segment, &[&token::OpenDelim(token::Paren)]);
self.check_turbofish_missing_angle_brackets(&mut segment);
if self.check(&token::OpenDelim(token::Paren)) {
// Method call `expr.f()`
let mut args = self.parse_paren_expr_seq()?;
args.insert(0, self_arg);
let fn_span = fn_span_lo.to(self.prev_token.span);
let span = lo.to(self.prev_token.span);
Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span), AttrVec::new()))
} else {
// Field access `expr.f`
if let Some(args) = segment.args {
self.struct_span_err(
args.span(),
"field expressions cannot have generic arguments",
)
.emit();
}
let span = lo.to(self.prev_token.span);
Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), AttrVec::new()))
}
}
/// At the bottom (top?) of the precedence hierarchy,
/// parses things like parenthesized exprs, macros, `return`, etc.
///
/// N.B., this does not parse outer attributes, and is private because it only works
/// correctly if called from `parse_dot_or_call_expr()`.
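///
/// Some of the forms dispatched here (an illustrative list; see the body for all cases):
/// literals, `(a, b)`, `[x; n]`, blocks, closures, paths, `if`, `for`, `while`, `loop`,
/// `match`, `unsafe { .. }`, `return`, `break`, `yield`, and `let`.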
fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
maybe_recover_from_interpolated_ty_qpath!(self, true);
maybe_whole_expr!(self);
// Outer attributes are already parsed and will be
// added to the return value after the fact.
//
// Therefore, prevent sub-parser from parsing
// attributes by giving them an empty "already-parsed" list.
let attrs = AttrVec::new();
// Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`.
let lo = self.token.span;
if let token::Literal(_) = self.token.kind {
// This match arm is a special-case of the `_` match arm below and
// could be removed without changing functionality, but it's faster
// to have it here, especially for programs with large constants.
self.parse_lit_expr(attrs)
} else if self.check(&token::OpenDelim(token::Paren)) {
self.parse_tuple_parens_expr(attrs)
} else if self.check(&token::OpenDelim(token::Brace)) {
self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs)
} else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) {
self.parse_closure_expr(attrs).map_err(|mut err| {
// If the input is something like `if a { 1 } else { 2 } | if a { 3 } else { 4 }`
// then suggest parens around the lhs.
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
self.sess.expr_parentheses_needed(&mut err, *sp);
}
err
})
} else if self.check(&token::OpenDelim(token::Bracket)) {
self.parse_array_or_repeat_expr(attrs, token::Bracket)
} else if self.check_path() {
self.parse_path_start_expr(attrs)
} else if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) {
self.parse_closure_expr(attrs)
} else if self.eat_keyword(kw::If) {
self.parse_if_expr(attrs)
} else if self.check_keyword(kw::For) {
if self.choose_generics_over_qpath(1) {
// NOTE(Centril, eddyb): DO NOT REMOVE! Beyond providing parser recovery,
// this is an insurance policy in case we allow qpaths in (tuple-)struct patterns.
// When `for <Foo as Bar>::Proj in $expr $block` is wanted,
// you can disambiguate in favor of a pattern with `(...)`.
self.recover_quantified_closure_expr(attrs)
} else {
assert!(self.eat_keyword(kw::For));
self.parse_for_expr(None, self.prev_token.span, attrs)
}
} else if self.eat_keyword(kw::While) {
self.parse_while_expr(None, self.prev_token.span, attrs)
} else if let Some(label) = self.eat_label() {
self.parse_labeled_expr(label, attrs, true)
} else if self.eat_keyword(kw::Loop) {
let sp = self.prev_token.span;
self.parse_loop_expr(None, self.prev_token.span, attrs).map_err(|mut err| {
err.span_label(sp, "while parsing this `loop` expression");
err
})
} else if self.eat_keyword(kw::Continue) {
let kind = ExprKind::Continue(self.eat_label());
Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
} else if self.eat_keyword(kw::Match) {
let match_sp = self.prev_token.span;
self.parse_match_expr(attrs).map_err(|mut err| {
err.span_label(match_sp, "while parsing this `match` expression");
err
})
} else if self.eat_keyword(kw::Unsafe) {
let sp = self.prev_token.span;
self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs)
.map_err(|mut err| {
err.span_label(sp, "while parsing this `unsafe` expression");
err
})
} else if self.check_inline_const(0) {
self.parse_const_block(lo.to(self.token.span), false)
} else if self.is_do_catch_block() {
self.recover_do_catch(attrs)
} else if self.is_try_block() {
self.expect_keyword(kw::Try)?;
self.parse_try_block(lo, attrs)
} else if self.eat_keyword(kw::Return) {
self.parse_return_expr(attrs)
} else if self.eat_keyword(kw::Break) {
self.parse_break_expr(attrs)
} else if self.eat_keyword(kw::Yield) {
self.parse_yield_expr(attrs)
} else if self.eat_keyword(kw::Let) {
self.parse_let_expr(attrs)
} else if self.eat_keyword(kw::Underscore) {
Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs))
} else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) {
// Don't complain about bare semicolons after unclosed braces
// recovery in order to keep the error count down. Fixing the
// delimiters will possibly also fix the bare semicolon found in
// expression context. For example, silence the following error:
//
// error: expected expression, found `;`
// --> file.rs:2:13
// |
// 2 | foo(bar(;
// | ^ expected expression
self.bump();
Ok(self.mk_expr_err(self.token.span))
} else if self.token.uninterpolated_span().rust_2018() {
// `Span::rust_2018()` is somewhat expensive; don't get it repeatedly.
if self.check_keyword(kw::Async) {
if self.is_async_block() {
// Check for `async {` and `async move {`.
self.parse_async_block(attrs)
} else {
self.parse_closure_expr(attrs)
}
} else if self.eat_keyword(kw::Await) {
self.recover_incorrect_await_syntax(lo, self.prev_token.span, attrs)
} else {
self.parse_lit_expr(attrs)
}
} else {
self.parse_lit_expr(attrs)
}
}
fn parse_lit_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
match self.parse_opt_lit() {
Some(literal) => {
let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
None => self.try_macro_suggestion(),
}
}
fn parse_tuple_parens_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect(&token::OpenDelim(token::Paren))?;
let (es, trailing_comma) = match self.parse_seq_to_end(
&token::CloseDelim(token::Paren),
SeqSep::trailing_allowed(token::Comma),
|p| p.parse_expr_catch_underscore(),
) {
Ok(x) => x,
Err(err) => return Ok(self.recover_seq_parse_error(token::Paren, lo, Err(err))),
};
let kind = if es.len() == 1 && !trailing_comma {
// `(e)` is parenthesized `e`.
ExprKind::Paren(es.into_iter().next().unwrap())
} else {
// `(e,)` is a tuple with only one field, `e`.
ExprKind::Tup(es)
};
let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
fn parse_array_or_repeat_expr(
&mut self,
attrs: AttrVec,
close_delim: token::DelimToken,
) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `[` or other open delim
let close = &token::CloseDelim(close_delim);
let kind = if self.eat(close) {
// Empty vector
ExprKind::Array(Vec::new())
} else {
// Non-empty vector
let first_expr = self.parse_expr()?;
if self.eat(&token::Semi) {
// Repeating array syntax: `[ 0; 512 ]`
let count = self.parse_anon_const_expr()?;
self.expect(close)?;
ExprKind::Repeat(first_expr, count)
} else if self.eat(&token::Comma) {
// Vector with two or more elements.
let sep = SeqSep::trailing_allowed(token::Comma);
let (remaining_exprs, _) = self.parse_seq_to_end(close, sep, |p| p.parse_expr())?;
let mut exprs = vec![first_expr];
exprs.extend(remaining_exprs);
ExprKind::Array(exprs)
} else {
// Vector with one element
self.expect(close)?;
ExprKind::Array(vec![first_expr])
}
};
let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
fn parse_path_start_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let (qself, path) = if self.eat_lt() {
let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
(Some(qself), path)
} else {
(None, self.parse_path(PathStyle::Expr)?)
};
let lo = path.span;
// `!`, as an operator, is prefix, so we know this isn't that.
let (hi, kind) = if self.eat(&token::Not) {
// MACRO INVOCATION expression
if qself.is_some() {
self.struct_span_err(path.span, "macros cannot use qualified paths").emit();
}
let mac = MacCall {
path,
args: self.parse_mac_args()?,
prior_type_ascription: self.last_type_ascription,
};
(self.prev_token.span, ExprKind::MacCall(mac))
} else if self.check(&token::OpenDelim(token::Brace)) {
if let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path, &attrs) {
if qself.is_some() {
self.sess.gated_spans.gate(sym::more_qualified_paths, path.span);
}
return expr;
} else {
(path.span, ExprKind::Path(qself, path))
}
} else {
(path.span, ExprKind::Path(qself, path))
};
let expr = self.mk_expr(lo.to(hi), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Parse `'label: $expr`. The label is already parsed.
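/// Illustrative inputs (not exhaustive): `'outer: loop { .. }`, `'blk: { .. }`,
/// `'a: while cond { .. }`, `'a: for x in xs { .. }`.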
fn parse_labeled_expr(
&mut self,
label: Label,
attrs: AttrVec,
mut consume_colon: bool,
) -> PResult<'a, P<Expr>> {
let lo = label.ident.span;
let label = Some(label);
let ate_colon = self.eat(&token::Colon);
let expr = if self.eat_keyword(kw::While) {
self.parse_while_expr(label, lo, attrs)
} else if self.eat_keyword(kw::For) {
self.parse_for_expr(label, lo, attrs)
} else if self.eat_keyword(kw::Loop) {
self.parse_loop_expr(label, lo, attrs)
} else if self.check(&token::OpenDelim(token::Brace)) || self.token.is_whole_block() {
self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs)
} else if !ate_colon && (self.check(&TokenKind::Comma) || self.check(&TokenKind::Gt)) {
// We're probably inside of a `Path<'a>` that needs a turbofish
let msg = "expected `while`, `for`, `loop` or `{` after a label";
self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit();
consume_colon = false;
Ok(self.mk_expr_err(lo))
} else {
let msg = "expected `while`, `for`, `loop` or `{` after a label";
self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit();
// Continue as an expression in an effort to recover on `'label: non_block_expr`.
self.parse_expr()
}?;
if !ate_colon && consume_colon {
self.error_labeled_expr_must_be_followed_by_colon(lo, expr.span);
}
Ok(expr)
}
fn error_labeled_expr_must_be_followed_by_colon(&self, lo: Span, span: Span) {
self.struct_span_err(span, "labeled expression must be followed by `:`")
.span_label(lo, "the label")
.span_suggestion_short(
lo.shrink_to_hi(),
"add `:` after the label",
": ".to_string(),
Applicability::MachineApplicable,
)
.note("labels are used before loops and blocks, allowing e.g., `break 'label` to them")
.emit();
}
/// Recover on the syntax `do catch { ... }` suggesting `try { ... }` instead.
fn recover_do_catch(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `do`
self.bump(); // `catch`
let span_dc = lo.to(self.prev_token.span);
self.struct_span_err(span_dc, "found removed `do catch` syntax")
.span_suggestion(
span_dc,
"replace with the new syntax",
"try".to_string(),
Applicability::MachineApplicable,
)
.note("following RFC #2388, the new non-placeholder syntax is `try`")
.emit();
self.parse_try_block(lo, attrs)
}
/// Parse an expression if the token can begin one.
fn parse_expr_opt(&mut self) -> PResult<'a, Option<P<Expr>>> {
Ok(if self.token.can_begin_expr() { Some(self.parse_expr()?) } else { None })
}
/// Parse `"return" expr?`.
fn parse_return_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let kind = ExprKind::Ret(self.parse_expr_opt()?);
let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Parse `"break" (('label (:? expr)?) | expr?)` with `"break"` token already eaten.
/// If the label is followed immediately by a `:` token, the label and `:` are
/// parsed as part of the expression (i.e. a labeled loop). The language team has
/// decided in #87026 to require parentheses as a visual aid to avoid confusion if
/// the break expression of an unlabeled break is a labeled loop (as in
/// `break 'lbl: loop {}`); a labeled break with an unlabeled loop as its value
/// expression only gets a warning for compatibility reasons; and a labeled break
/// with a labeled loop does not even get a warning because there is no ambiguity.
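///
/// An illustrative sketch of the forms involved (not exhaustive):
/// ```text
/// break;                    // unlabeled, no value
/// break 'outer;             // labeled, no value
/// break 42;                 // unlabeled, with value expression
/// break 'outer 42;          // labeled, with value expression
/// break ('lbl: loop {});    // value is a labeled loop: parentheses required
/// ```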
fn parse_break_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let mut label = self.eat_label();
let kind = if label.is_some() && self.token == token::Colon {
// The value expression can be a labeled loop, see issue #86948, e.g.:
// `loop { break 'label: loop { break 'label 42; }; }`
let lexpr = self.parse_labeled_expr(label.take().unwrap(), AttrVec::new(), true)?;
self.struct_span_err(
lexpr.span,
"parentheses are required around this expression to avoid confusion with a labeled break expression",
)
.multipart_suggestion(
"wrap the expression in parentheses",
vec![
(lexpr.span.shrink_to_lo(), "(".to_string()),
(lexpr.span.shrink_to_hi(), ")".to_string()),
],
Applicability::MachineApplicable,
)
.emit();
Some(lexpr)
} else if self.token != token::OpenDelim(token::Brace)
|| !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
{
let expr = self.parse_expr_opt()?;
if let Some(ref expr) = expr {
if label.is_some()
&& matches!(
expr.kind,
ExprKind::While(_, _, None)
| ExprKind::ForLoop(_, _, _, None)
| ExprKind::Loop(_, None)
| ExprKind::Block(_, None)
)
{
self.sess.buffer_lint_with_diagnostic(
BREAK_WITH_LABEL_AND_LOOP,
lo.to(expr.span),
ast::CRATE_NODE_ID,
"this labeled break expression is easy to confuse with an unlabeled break with a labeled value expression",
BuiltinLintDiagnostics::BreakWithLabelAndLoop(expr.span),
);
}
}
expr
} else {
None
};
let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind), attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Parse `"yield" expr?`.
fn parse_yield_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let kind = ExprKind::Yield(self.parse_expr_opt()?);
let span = lo.to(self.prev_token.span);
self.sess.gated_spans.gate(sym::generators, span);
let expr = self.mk_expr(span, kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Returns a string literal if the next token is a string literal.
/// In case of error, returns `Err(Some(lit))` if the next token is a literal of the wrong kind,
/// and `Err(None)` if the next token is not a literal at all.
pub fn parse_str_lit(&mut self) -> Result<ast::StrLit, Option<Lit>> {
match self.parse_opt_lit() {
Some(lit) => match lit.kind {
ast::LitKind::Str(symbol_unescaped, style) => Ok(ast::StrLit {
style,
symbol: lit.token.symbol,
suffix: lit.token.suffix,
span: lit.span,
symbol_unescaped,
}),
_ => Err(Some(lit)),
},
None => Err(None),
}
}
pub(super) fn parse_lit(&mut self) -> PResult<'a, Lit> {
self.parse_opt_lit().ok_or_else(|| {
if let token::Interpolated(inner) = &self.token.kind {
let expr = match inner.as_ref() {
token::NtExpr(expr) => Some(expr),
token::NtLiteral(expr) => Some(expr),
_ => None,
};
if let Some(expr) = expr {
if matches!(expr.kind, ExprKind::Err) {
let mut err = self
.diagnostic()
.struct_span_err(self.token.span, "invalid interpolated expression");
err.downgrade_to_delayed_bug();
return err;
}
}
}
let msg = format!("unexpected token: {}", super::token_descr(&self.token));
self.struct_span_err(self.token.span, &msg)
})
}
/// Matches `lit = true | false | token_lit`.
/// Returns `None` if the next token is not a literal.
pub(super) fn parse_opt_lit(&mut self) -> Option<Lit> {
let mut recovered = None;
if self.token == token::Dot {
// Attempt to recover `.4` as `0.4`. We don't currently have any syntax where
// dot would follow an optional literal, so we do this unconditionally.
recovered = self.look_ahead(1, |next_token| {
if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) =
next_token.kind
{
if self.token.span.hi() == next_token.span.lo() {
let s = String::from("0.") + symbol.as_str();
let kind = TokenKind::lit(token::Float, Symbol::intern(&s), suffix);
return Some(Token::new(kind, self.token.span.to(next_token.span)));
}
}
None
});
if let Some(token) = &recovered {
self.bump();
self.error_float_lits_must_have_int_part(&token);
}
}
let token = recovered.as_ref().unwrap_or(&self.token);
match Lit::from_token(token) {
Ok(lit) => {
self.bump();
Some(lit)
}
Err(LitError::NotLiteral) => None,
Err(err) => {
let span = token.span;
let token::Literal(lit) = token.kind else {
unreachable!();
};
self.bump();
self.report_lit_error(err, lit, span);
// Pack possible quotes and prefixes from the original literal into
// the error literal's symbol so they can be pretty-printed faithfully.
let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None);
let symbol = Symbol::intern(&suffixless_lit.to_string());
let lit = token::Lit::new(token::Err, symbol, lit.suffix);
Some(Lit::from_lit_token(lit, span).unwrap_or_else(|_| unreachable!()))
}
}
}
fn error_float_lits_must_have_int_part(&self, token: &Token) {
self.struct_span_err(token.span, "float literals must have an integer part")
.span_suggestion(
token.span,
"must have an integer part",
pprust::token_to_string(token).into(),
Applicability::MachineApplicable,
)
.emit();
}
fn report_lit_error(&self, err: LitError, lit: token::Lit, span: Span) {
// Checks if `s` looks like i32 or u1234 etc.
fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool {
s.len() > 1 && s.starts_with(first_chars) && s[1..].chars().all(|c| c.is_ascii_digit())
}
// Try to lowercase the prefix if it's a valid base prefix.
fn fix_base_capitalisation(s: &str) -> Option<String> {
if let Some(stripped) = s.strip_prefix('B') {
Some(format!("0b{stripped}"))
} else if let Some(stripped) = s.strip_prefix('O') {
Some(format!("0o{stripped}"))
} else if let Some(stripped) = s.strip_prefix('X') {
Some(format!("0x{stripped}"))
} else {
None
}
}
let token::Lit { kind, suffix, .. } = lit;
match err {
// `NotLiteral` is not an error by itself, so we don't report
// it and give the parser opportunity to try something else.
LitError::NotLiteral => {}
// `LexerError` *is* an error, but it was already reported
// by lexer, so here we don't report it the second time.
LitError::LexerError => {}
LitError::InvalidSuffix => {
self.expect_no_suffix(
span,
&format!("{} {} literal", kind.article(), kind.descr()),
suffix,
);
}
LitError::InvalidIntSuffix => {
let suf = suffix.expect("suffix error with no suffix");
let suf = suf.as_str();
if looks_like_width_suffix(&['i', 'u'], &suf) {
// If it looks like a width, try to be helpful.
let msg = format!("invalid width `{}` for integer literal", &suf[1..]);
self.struct_span_err(span, &msg)
.help("valid widths are 8, 16, 32, 64 and 128")
.emit();
} else if let Some(fixed) = fix_base_capitalisation(suf) {
let msg = "invalid base prefix for number literal";
self.struct_span_err(span, msg)
.note("base prefixes (`0xff`, `0b1010`, `0o755`) are lowercase")
.span_suggestion(
span,
"try making the prefix lowercase",
fixed,
Applicability::MaybeIncorrect,
)
.emit();
} else {
let msg = format!("invalid suffix `{suf}` for number literal");
self.struct_span_err(span, &msg)
.span_label(span, format!("invalid suffix `{suf}`"))
.help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)")
.emit();
}
}
LitError::InvalidFloatSuffix => {
let suf = suffix.expect("suffix error with no suffix");
let suf = suf.as_str();
if looks_like_width_suffix(&['f'], suf) {
// If it looks like a width, try to be helpful.
let msg = format!("invalid width `{}` for float literal", &suf[1..]);
self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit();
} else {
let msg = format!("invalid suffix `{suf}` for float literal");
self.struct_span_err(span, &msg)
.span_label(span, format!("invalid suffix `{suf}`"))
.help("valid suffixes are `f32` and `f64`")
.emit();
}
}
LitError::NonDecimalFloat(base) => {
let descr = match base {
16 => "hexadecimal",
8 => "octal",
2 => "binary",
_ => unreachable!(),
};
self.struct_span_err(span, &format!("{descr} float literal is not supported"))
.span_label(span, "not supported")
.emit();
}
LitError::IntTooLarge => {
self.struct_span_err(span, "integer literal is too large").emit();
}
}
}
pub(super) fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<Symbol>) {
if let Some(suf) = suffix {
let mut err = if kind == "a tuple index"
&& [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suf)
{
// #59553: warn instead of reject out of hand to allow the fix to percolate
// through the ecosystem when people fix their macros
let mut err = self
.sess
.span_diagnostic
.struct_span_warn(sp, &format!("suffixes on {kind} are invalid"));
err.note(&format!(
"`{}` is *temporarily* accepted on tuple index fields as it was \
incorrectly accepted on stable for a few releases",
suf,
));
err.help(
"on proc macros, you'll want to use `syn::Index::from` or \
`proc_macro::Literal::*_unsuffixed` for code that will desugar \
to tuple field access",
);
err.note(
"see issue #60210 <https://github.com/rust-lang/rust/issues/60210> \
for more information",
);
err
} else {
self.struct_span_err(sp, &format!("suffixes on {kind} are invalid"))
.forget_guarantee()
};
err.span_label(sp, format!("invalid suffix `{suf}`"));
err.emit();
}
}
/// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`).
/// Keep this in sync with `Token::can_begin_literal_maybe_minus`.
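/// Illustrative matches (not exhaustive): `-1`, `1`, `-1.5`, `"foo"`, `true`.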
pub fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> {
maybe_whole_expr!(self);
let lo = self.token.span;
let minus_present = self.eat(&token::BinOp(token::Minus));
let lit = self.parse_lit()?;
let expr = self.mk_expr(lit.span, ExprKind::Lit(lit), AttrVec::new());
if minus_present {
Ok(self.mk_expr(
lo.to(self.prev_token.span),
self.mk_unary(UnOp::Neg, expr),
AttrVec::new(),
))
} else {
Ok(expr)
}
}
fn is_array_like_block(&mut self) -> bool {
self.look_ahead(1, |t| matches!(t.kind, TokenKind::Ident(..) | TokenKind::Literal(_)))
&& self.look_ahead(2, |t| t == &token::Comma)
&& self.look_ahead(3, |t| t.can_begin_expr())
}
/// Emits a suggestion if it looks like the user meant an array but
/// accidentally used braces, causing the code to be interpreted as a block
/// expression.
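///
/// For example (illustrative), `{ 1, 2, 3 }` parses as a block expression, but the
/// user most likely meant the array `[1, 2, 3]`.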
fn maybe_suggest_brackets_instead_of_braces(
&mut self,
lo: Span,
attrs: AttrVec,
) -> Option<P<Expr>> {
let mut snapshot = self.create_snapshot_for_diagnostic();
match snapshot.parse_array_or_repeat_expr(attrs, token::Brace) {
Ok(arr) => {
let hi = snapshot.prev_token.span;
self.struct_span_err(arr.span, "this is a block expression, not an array")
.multipart_suggestion(
"to make an array, use square brackets instead of curly braces",
vec![(lo, "[".to_owned()), (hi, "]".to_owned())],
Applicability::MaybeIncorrect,
)
.emit();
self.restore_snapshot(snapshot);
Some(self.mk_expr_err(arr.span))
}
Err(e) => {
e.cancel();
None
}
}
}
/// Parses a block or unsafe block.
pub(super) fn parse_block_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
blk_mode: BlockCheckMode,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
if self.is_array_like_block() {
if let Some(arr) = self.maybe_suggest_brackets_instead_of_braces(lo, attrs.clone()) {
return Ok(arr);
}
}
if let Some(label) = opt_label {
self.sess.gated_spans.gate(sym::label_break_value, label.ident.span);
}
if self.token.is_whole_block() {
self.struct_span_err(self.token.span, "cannot use a `block` macro fragment here")
.span_label(lo.to(self.token.span), "the `block` fragment is within this context")
.emit();
}
let (inner_attrs, blk) = self.parse_block_common(lo, blk_mode)?;
attrs.extend(inner_attrs);
Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs))
}
/// Recover on an explicitly quantified closure expression, e.g., `for<'a> |x: &'a u8| *x + 1`.
fn recover_quantified_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
let _ = self.parse_late_bound_lifetime_defs()?;
let span_for = lo.to(self.prev_token.span);
let closure = self.parse_closure_expr(attrs)?;
self.struct_span_err(span_for, "cannot introduce explicit parameters for a closure")
.span_label(closure.span, "the parameters are attached to this closure")
.span_suggestion(
span_for,
"remove the parameters",
String::new(),
Applicability::MachineApplicable,
)
.emit();
Ok(self.mk_expr_err(lo.to(closure.span)))
}
/// Parses a closure expression (e.g., `move |args| expr`).
fn parse_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
let movability =
if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable };
let asyncness = if self.token.uninterpolated_span().rust_2018() {
self.parse_asyncness()
} else {
Async::No
};
let capture_clause = self.parse_capture_clause()?;
let decl = self.parse_fn_block_decl()?;
let decl_hi = self.prev_token.span;
let mut body = match decl.output {
FnRetTy::Default(_) => {
let restrictions = self.restrictions - Restrictions::STMT_EXPR;
self.parse_expr_res(restrictions, None)?
}
_ => {
// If an explicit return type is given, require a block to appear (RFC 968).
let body_lo = self.token.span;
self.parse_block_expr(None, body_lo, BlockCheckMode::Default, AttrVec::new())?
}
};
if let Async::Yes { span, .. } = asyncness {
// Feature-gate `async ||` closures.
self.sess.gated_spans.gate(sym::async_closure, span);
}
if self.token.kind == TokenKind::Semi && self.token_cursor.frame.delim == DelimToken::Paren
{
// It is likely that the closure body is a block but where the
// braces have been removed. We will recover and eat the next
// statements later in the parsing process.
body = self.mk_expr_err(body.span);
}
let body_span = body.span;
let closure = self.mk_expr(
lo.to(body.span),
ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)),
attrs,
);
// Disable recovery for closure body
let spans =
ClosureSpans { whole_closure: closure.span, closing_pipe: decl_hi, body: body_span };
self.current_closure = Some(spans);
Ok(closure)
}
/// Parses an optional `move` prefix to a closure-like construct.
fn parse_capture_clause(&mut self) -> PResult<'a, CaptureBy> {
if self.eat_keyword(kw::Move) {
// Check for `move async` and recover
if self.check_keyword(kw::Async) {
let move_async_span = self.token.span.with_lo(self.prev_token.span.data().lo);
Err(self.incorrect_move_async_order_found(move_async_span))
} else {
Ok(CaptureBy::Value)
}
} else {
Ok(CaptureBy::Ref)
}
}
/// Parses the `|arg, arg|` header of a closure.
fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
let inputs = if self.eat(&token::OrOr) {
Vec::new()
} else {
self.expect(&token::BinOp(token::Or))?;
let args = self
.parse_seq_to_before_tokens(
&[&token::BinOp(token::Or), &token::OrOr],
SeqSep::trailing_allowed(token::Comma),
TokenExpectType::NoExpect,
|p| p.parse_fn_block_param(),
)?
.0;
self.expect_or()?;
args
};
let output =
self.parse_ret_ty(AllowPlus::Yes, RecoverQPath::Yes, RecoverReturnSign::Yes)?;
Ok(P(FnDecl { inputs, output }))
}
/// Parses a parameter in a closure header (e.g., `|arg, arg|`).
fn parse_fn_block_param(&mut self) -> PResult<'a, Param> {
let lo = self.token.span;
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let pat = this.parse_pat_no_top_alt(PARAM_EXPECTED)?;
let ty = if this.eat(&token::Colon) {
this.parse_ty()?
} else {
this.mk_ty(this.prev_token.span, TyKind::Infer)
};
Ok((
Param {
attrs: attrs.into(),
ty,
pat,
span: lo.to(this.token.span),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::MaybeComma,
))
})
}
/// Parses an `if` expression (`if` token already eaten).
fn parse_if_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let cond = self.parse_cond_expr()?;
let missing_then_block_binop_span = || {
match cond.kind {
ExprKind::Binary(Spanned { span: binop_span, .. }, _, ref right)
if let ExprKind::Block(..) = right.kind => Some(binop_span),
_ => None
}
};
        // Verify that the parsed `if` condition makes sense as a condition. If it is a block,
        // verify that its last statement is an implicit return (an expression without a `;`).
        // This won't catch blocks that end in an explicit `return` statement, but those would
        // be caught by the dead code lint.
let thn = if self.token.is_keyword(kw::Else) || !cond.returns() {
if let Some(binop_span) = missing_then_block_binop_span() {
self.error_missing_if_then_block(lo, None, Some(binop_span)).emit();
self.mk_block_err(cond.span)
} else {
self.error_missing_if_cond(lo, cond.span)
}
} else {
let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let not_block = self.token != token::OpenDelim(token::Brace);
let block = self.parse_block().map_err(|err| {
if not_block {
self.error_missing_if_then_block(lo, Some(err), missing_then_block_binop_span())
} else {
err
}
})?;
self.error_on_if_block_attrs(lo, false, block.span, &attrs);
block
};
let els = if self.eat_keyword(kw::Else) { Some(self.parse_else_expr()?) } else { None };
Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::If(cond, thn, els), attrs))
}
fn error_missing_if_then_block(
&self,
if_span: Span,
err: Option<DiagnosticBuilder<'a, ErrorGuaranteed>>,
binop_span: Option<Span>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
let msg = "this `if` expression has a condition, but no block";
let mut err = if let Some(mut err) = err {
err.span_label(if_span, msg);
err
} else {
self.struct_span_err(if_span, msg)
};
if let Some(binop_span) = binop_span {
err.span_help(binop_span, "maybe you forgot the right operand of the condition?");
}
err
}
fn error_missing_if_cond(&self, lo: Span, span: Span) -> P<ast::Block> {
let sp = self.sess.source_map().next_point(lo);
self.struct_span_err(sp, "missing condition for `if` expression")
.span_label(sp, "expected if condition here")
.emit();
self.mk_block_err(span)
}
/// Parses the condition of a `if` or `while` expression.
fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> {
let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
if let ExprKind::Let(..) = cond.kind {
// Remove the last feature gating of a `let` expression since it's stable.
self.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
}
Ok(cond)
}
/// Parses a `let $pat = $expr` pseudo-expression.
/// The `let` token has already been eaten.
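    /// For example, the `let Some(x) = opt` in `if cond && let Some(x) = opt { .. }`;
    /// the expression's span is gated under the `let_chains` feature below.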
fn parse_let_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let pat = self.parse_pat_allow_top_alt(
None,
RecoverComma::Yes,
RecoverColon::Yes,
CommaRecoveryMode::LikelyTuple,
)?;
self.expect(&token::Eq)?;
let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| {
this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into())
})?;
let span = lo.to(expr.span);
self.sess.gated_spans.gate(sym::let_chains, span);
Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span), attrs))
}
/// Parses an `else { ... }` expression (`else` token already eaten).
fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> {
let ctx_span = self.prev_token.span; // `else`
let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let expr = if self.eat_keyword(kw::If) {
self.parse_if_expr(AttrVec::new())?
} else {
let blk = self.parse_block()?;
self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new())
};
self.error_on_if_block_attrs(ctx_span, true, expr.span, &attrs);
Ok(expr)
}
fn error_on_if_block_attrs(
&self,
ctx_span: Span,
is_ctx_else: bool,
branch_span: Span,
attrs: &[ast::Attribute],
) {
let (span, last) = match attrs {
[] => return,
[x0 @ xn] | [x0, .., xn] => (x0.span.to(xn.span), xn.span),
};
let ctx = if is_ctx_else { "else" } else { "if" };
self.struct_span_err(last, "outer attributes are not allowed on `if` and `else` branches")
.span_label(branch_span, "the attributes are attached to this branch")
.span_label(ctx_span, format!("the branch belongs to this `{ctx}`"))
.span_suggestion(
span,
"remove the attributes",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
/// Parses `for <src_pat> in <src_expr> <src_loop_block>` (`for` token already eaten).
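    /// Also recovers the parenthesized form `for (pat in expr) { .. }`, suggesting
    /// `for pat in expr { .. }` instead.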
fn parse_for_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
// Record whether we are about to parse `for (`.
// This is used below for recovery in case of `for ( $stuff ) $block`
// in which case we will suggest `for $stuff $block`.
let begin_paren = match self.token.kind {
token::OpenDelim(token::Paren) => Some(self.token.span),
_ => None,
};
let pat = self.parse_pat_allow_top_alt(
None,
RecoverComma::Yes,
RecoverColon::Yes,
CommaRecoveryMode::LikelyTuple,
)?;
if !self.eat_keyword(kw::In) {
self.error_missing_in_for_loop();
}
self.check_for_for_in_in_typo(self.prev_token.span);
let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
let pat = self.recover_parens_around_for_head(pat, begin_paren);
let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
let kind = ExprKind::ForLoop(pat, expr, loop_block, opt_label);
Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
}
fn error_missing_in_for_loop(&mut self) {
let (span, msg, sugg) = if self.token.is_ident_named(sym::of) {
// Possibly using JS syntax (#75311).
let span = self.token.span;
self.bump();
(span, "try using `in` here instead", "in")
} else {
(self.prev_token.span.between(self.token.span), "try adding `in` here", " in ")
};
self.struct_span_err(span, "missing `in` in `for` loop")
.span_suggestion_short(
span,
msg,
sugg.into(),
// Has been misleading, at least in the past (closed Issue #48492).
Applicability::MaybeIncorrect,
)
.emit();
}
/// Parses a `while` or `while let` expression (`while` token already eaten).
fn parse_while_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
let cond = self.parse_cond_expr().map_err(|mut err| {
err.span_label(lo, "while parsing the condition of this `while` expression");
err
})?;
let (iattrs, body) = self.parse_inner_attrs_and_block().map_err(|mut err| {
err.span_label(lo, "while parsing the body of this `while` expression");
err.span_label(cond.span, "this `while` condition successfully parsed");
err
})?;
attrs.extend(iattrs);
Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::While(cond, body, opt_label), attrs))
}
/// Parses `loop { ... }` (`loop` token already eaten).
fn parse_loop_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::Loop(body, opt_label), attrs))
}
crate fn eat_label(&mut self) -> Option<Label> {
self.token.lifetime().map(|ident| {
self.bump();
Label { ident }
})
}
/// Parses a `match ... { ... }` expression (`match` token already eaten).
fn parse_match_expr(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
let match_span = self.prev_token.span;
let lo = self.prev_token.span;
let scrutinee = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) {
if self.token == token::Semi {
e.span_suggestion_short(
match_span,
"try removing this `match`",
String::new(),
Applicability::MaybeIncorrect, // speculative
);
}
if self.maybe_recover_unexpected_block_label() {
e.cancel();
self.bump();
} else {
return Err(e);
}
}
attrs.extend(self.parse_inner_attributes()?);
let mut arms: Vec<Arm> = Vec::new();
while self.token != token::CloseDelim(token::Brace) {
match self.parse_arm() {
Ok(arm) => arms.push(arm),
Err(mut e) => {
// Recover by skipping to the end of the block.
e.emit();
self.recover_stmt();
let span = lo.to(self.token.span);
if self.token == token::CloseDelim(token::Brace) {
self.bump();
}
return Ok(self.mk_expr(span, ExprKind::Match(scrutinee, arms), attrs));
}
}
}
let hi = self.token.span;
self.bump();
Ok(self.mk_expr(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs))
}
/// Attempt to recover from match arm body with statements and no surrounding braces.
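    /// For example, `Some(x) => foo(); bar(),` where the statements should have been
    /// written as a block, `Some(x) => { foo(); bar() }`.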
fn parse_arm_body_missing_braces(
&mut self,
first_expr: &P<Expr>,
arrow_span: Span,
) -> Option<P<Expr>> {
if self.token.kind != token::Semi {
return None;
}
let start_snapshot = self.create_snapshot_for_diagnostic();
let semi_sp = self.token.span;
self.bump(); // `;`
let mut stmts =
vec![self.mk_stmt(first_expr.span, ast::StmtKind::Expr(first_expr.clone()))];
let err = |this: &mut Parser<'_>, stmts: Vec<ast::Stmt>| {
let span = stmts[0].span.to(stmts[stmts.len() - 1].span);
let mut err = this.struct_span_err(span, "`match` arm body without braces");
let (these, s, are) =
if stmts.len() > 1 { ("these", "s", "are") } else { ("this", "", "is") };
err.span_label(
span,
&format!(
"{these} statement{s} {are} not surrounded by a body",
these = these,
s = s,
are = are
),
);
err.span_label(arrow_span, "while parsing the `match` arm starting here");
if stmts.len() > 1 {
err.multipart_suggestion(
&format!("surround the statement{s} with a body"),
vec![
(span.shrink_to_lo(), "{ ".to_string()),
(span.shrink_to_hi(), " }".to_string()),
],
Applicability::MachineApplicable,
);
} else {
err.span_suggestion(
semi_sp,
"use a comma to end a `match` arm expression",
",".to_string(),
Applicability::MachineApplicable,
);
}
err.emit();
this.mk_expr_err(span)
};
// We might have either a `,` -> `;` typo, or a block without braces. We need
// a more subtle parsing strategy.
loop {
if self.token.kind == token::CloseDelim(token::Brace) {
// We have reached the closing brace of the `match` expression.
return Some(err(self, stmts));
}
if self.token.kind == token::Comma {
self.restore_snapshot(start_snapshot);
return None;
}
let pre_pat_snapshot = self.create_snapshot_for_diagnostic();
match self.parse_pat_no_top_alt(None) {
Ok(_pat) => {
if self.token.kind == token::FatArrow {
// Reached arm end.
self.restore_snapshot(pre_pat_snapshot);
return Some(err(self, stmts));
}
}
Err(err) => {
err.cancel();
}
}
self.restore_snapshot(pre_pat_snapshot);
match self.parse_stmt_without_recovery(true, ForceCollect::No) {
// Consume statements for as long as possible.
Ok(Some(stmt)) => {
stmts.push(stmt);
}
Ok(None) => {
self.restore_snapshot(start_snapshot);
break;
}
                // We couldn't parse another statement that was missing its
                // enclosing block, nor the next arm's pattern or closing brace.
Err(stmt_err) => {
stmt_err.cancel();
self.restore_snapshot(start_snapshot);
break;
}
}
}
None
}
pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> {
fn check_let_expr(expr: &Expr) -> (bool, bool) {
match expr.kind {
ExprKind::Binary(_, ref lhs, ref rhs) => {
let lhs_rslt = check_let_expr(lhs);
let rhs_rslt = check_let_expr(rhs);
(lhs_rslt.0 || rhs_rslt.0, false)
}
ExprKind::Let(..) => (true, true),
_ => (false, true),
}
}
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let lo = this.token.span;
let pat = this.parse_pat_allow_top_alt(
None,
RecoverComma::Yes,
RecoverColon::Yes,
CommaRecoveryMode::EitherTupleOrPipe,
)?;
let guard = if this.eat_keyword(kw::If) {
let if_span = this.prev_token.span;
let cond = this.parse_expr()?;
let (has_let_expr, does_not_have_bin_op) = check_let_expr(&cond);
if has_let_expr {
if does_not_have_bin_op {
// Remove the last feature gating of a `let` expression since it's stable.
this.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
}
let span = if_span.to(cond.span);
this.sess.gated_spans.gate(sym::if_let_guard, span);
}
Some(cond)
} else {
None
};
let arrow_span = this.token.span;
if let Err(mut err) = this.expect(&token::FatArrow) {
// We might have a `=>` -> `=` or `->` typo (issue #89396).
if TokenKind::FatArrow
.similar_tokens()
.map_or(false, |similar_tokens| similar_tokens.contains(&this.token.kind))
{
err.span_suggestion(
this.token.span,
"try using a fat arrow here",
"=>".to_string(),
Applicability::MaybeIncorrect,
);
err.emit();
this.bump();
} else {
return Err(err);
}
}
let arm_start_span = this.token.span;
let expr = this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| {
err.span_label(arrow_span, "while parsing the `match` arm starting here");
err
})?;
let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
&& this.token != token::CloseDelim(token::Brace);
let hi = this.prev_token.span;
if require_comma {
let sm = this.sess.source_map();
if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) {
let span = body.span;
return Ok((
ast::Arm {
attrs: attrs.into(),
pat,
guard,
body,
span,
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::None,
));
}
this.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]).map_err(
|mut err| {
match (sm.span_to_lines(expr.span), sm.span_to_lines(arm_start_span)) {
(Ok(ref expr_lines), Ok(ref arm_start_lines))
if arm_start_lines.lines[0].end_col
== expr_lines.lines[0].end_col
&& expr_lines.lines.len() == 2
&& this.token == token::FatArrow =>
{
// We check whether there's any trailing code in the parse span,
// if there isn't, we very likely have the following:
//
// X | &Y => "y"
// | -- - missing comma
// | |
// | arrow_span
// X | &X => "x"
// | - ^^ self.token.span
// | |
// | parsed until here as `"y" & X`
err.span_suggestion_short(
arm_start_span.shrink_to_hi(),
"missing a comma here to end this `match` arm",
",".to_owned(),
Applicability::MachineApplicable,
);
}
_ => {
err.span_label(
arrow_span,
"while parsing the `match` arm starting here",
);
}
}
err
},
)?;
} else {
this.eat(&token::Comma);
}
Ok((
ast::Arm {
attrs: attrs.into(),
pat,
guard,
body: expr,
span: lo.to(hi),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::None,
))
})
}
/// Parses a `try {...}` expression (`try` token already eaten).
fn parse_try_block(&mut self, span_lo: Span, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
if self.eat_keyword(kw::Catch) {
let mut error = self.struct_span_err(
self.prev_token.span,
"keyword `catch` cannot follow a `try` block",
);
error.help("try using `match` on the result of the `try` block instead");
error.emit();
Err(error)
} else {
let span = span_lo.to(body.span);
self.sess.gated_spans.gate(sym::try_blocks, span);
Ok(self.mk_expr(span, ExprKind::TryBlock(body), attrs))
}
}
fn is_do_catch_block(&self) -> bool {
self.token.is_keyword(kw::Do)
&& self.is_keyword_ahead(1, &[kw::Catch])
&& self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
&& !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}
fn is_try_block(&self) -> bool {
self.token.is_keyword(kw::Try)
&& self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
&& self.token.uninterpolated_span().rust_2018()
}
/// Parses an `async move? {...}` expression.
fn parse_async_block(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect_keyword(kw::Async)?;
let capture_clause = self.parse_capture_clause()?;
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
let kind = ExprKind::Async(capture_clause, DUMMY_NODE_ID, body);
Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
}
fn is_async_block(&self) -> bool {
self.token.is_keyword(kw::Async)
&& ((
// `async move {`
self.is_keyword_ahead(1, &[kw::Move])
&& self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
) || (
// `async {`
self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
))
}
fn is_certainly_not_a_block(&self) -> bool {
self.look_ahead(1, |t| t.is_ident())
&& (
// `{ ident, ` cannot start a block.
self.look_ahead(2, |t| t == &token::Comma)
|| self.look_ahead(2, |t| t == &token::Colon)
&& (
// `{ ident: token, ` cannot start a block.
self.look_ahead(4, |t| t == &token::Comma) ||
// `{ ident: ` cannot start a block unless it's a type ascription `ident: Type`.
self.look_ahead(3, |t| !t.can_begin_type())
)
)
}
fn maybe_parse_struct_expr(
&mut self,
qself: Option<&ast::QSelf>,
path: &ast::Path,
attrs: &AttrVec,
) -> Option<PResult<'a, P<Expr>>> {
let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
if struct_allowed || self.is_certainly_not_a_block() {
if let Err(err) = self.expect(&token::OpenDelim(token::Brace)) {
return Some(Err(err));
}
let expr = self.parse_struct_expr(qself.cloned(), path.clone(), attrs.clone(), true);
if let (Ok(expr), false) = (&expr, struct_allowed) {
                // This is a struct literal, but we can't accept one here.
self.error_struct_lit_not_allowed_here(path.span, expr.span);
}
return Some(expr);
}
None
}
fn error_struct_lit_not_allowed_here(&self, lo: Span, sp: Span) {
self.struct_span_err(sp, "struct literals are not allowed here")
.multipart_suggestion(
"surround the struct literal with parentheses",
vec![(lo.shrink_to_lo(), "(".to_string()), (sp.shrink_to_hi(), ")".to_string())],
Applicability::MachineApplicable,
)
.emit();
}
pub(super) fn parse_struct_fields(
&mut self,
pth: ast::Path,
recover: bool,
close_delim: token::DelimToken,
) -> PResult<'a, (Vec<ExprField>, ast::StructRest, bool)> {
let mut fields = Vec::new();
let mut base = ast::StructRest::None;
let mut recover_async = false;
let mut async_block_err = |e: &mut Diagnostic, span: Span| {
recover_async = true;
e.span_label(span, "`async` blocks are only allowed in Rust 2018 or later");
e.help_use_latest_edition();
};
while self.token != token::CloseDelim(close_delim) {
if self.eat(&token::DotDot) {
let exp_span = self.prev_token.span;
// We permit `.. }` on the left-hand side of a destructuring assignment.
if self.check(&token::CloseDelim(close_delim)) {
base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi());
break;
}
match self.parse_expr() {
Ok(e) => base = ast::StructRest::Base(e),
Err(mut e) if recover => {
e.emit();
self.recover_stmt();
}
Err(e) => return Err(e),
}
self.recover_struct_comma_after_dotdot(exp_span);
break;
}
let recovery_field = self.find_struct_error_after_field_looking_code();
let parsed_field = match self.parse_expr_field() {
Ok(f) => Some(f),
Err(mut e) => {
if pth == kw::Async {
async_block_err(&mut e, pth.span);
} else {
e.span_label(pth.span, "while parsing this struct");
}
e.emit();
// If the next token is a comma, then try to parse
// what comes next as additional fields, rather than
// bailing out until next `}`.
if self.token != token::Comma {
self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
if self.token != token::Comma {
break;
}
}
None
}
};
match self.expect_one_of(&[token::Comma], &[token::CloseDelim(close_delim)]) {
Ok(_) => {
if let Some(f) = parsed_field.or(recovery_field) {
// Only include the field if there's no parse error for the field name.
fields.push(f);
}
}
Err(mut e) => {
if pth == kw::Async {
async_block_err(&mut e, pth.span);
} else {
e.span_label(pth.span, "while parsing this struct");
if let Some(f) = recovery_field {
fields.push(f);
e.span_suggestion(
self.prev_token.span.shrink_to_hi(),
"try adding a comma",
",".into(),
Applicability::MachineApplicable,
);
}
}
if !recover {
return Err(e);
}
e.emit();
self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
self.eat(&token::Comma);
}
}
}
Ok((fields, base, recover_async))
}
/// Precondition: already parsed the '{'.
pub(super) fn parse_struct_expr(
&mut self,
qself: Option<ast::QSelf>,
pth: ast::Path,
attrs: AttrVec,
recover: bool,
) -> PResult<'a, P<Expr>> {
let lo = pth.span;
let (fields, base, recover_async) =
self.parse_struct_fields(pth.clone(), recover, token::Brace)?;
let span = lo.to(self.token.span);
self.expect(&token::CloseDelim(token::Brace))?;
let expr = if recover_async {
ExprKind::Err
} else {
ExprKind::Struct(P(ast::StructExpr { qself, path: pth, fields, rest: base }))
};
Ok(self.mk_expr(span, expr, attrs))
}
/// Use in case of error after field-looking code: `S { foo: () with a }`.
fn find_struct_error_after_field_looking_code(&self) -> Option<ExprField> {
match self.token.ident() {
Some((ident, is_raw))
if (is_raw || !ident.is_reserved())
&& self.look_ahead(1, |t| *t == token::Colon) =>
{
Some(ast::ExprField {
ident,
span: self.token.span,
expr: self.mk_expr_err(self.token.span),
is_shorthand: false,
attrs: AttrVec::new(),
id: DUMMY_NODE_ID,
is_placeholder: false,
})
}
_ => None,
}
}
fn recover_struct_comma_after_dotdot(&mut self, span: Span) {
if self.token != token::Comma {
return;
}
self.struct_span_err(
span.to(self.prev_token.span),
"cannot use a comma after the base struct",
)
.span_suggestion_short(
self.token.span,
"remove this comma",
String::new(),
Applicability::MachineApplicable,
)
.note("the base struct must always be the last field")
.emit();
self.recover_stmt();
}
/// Parses `ident (COLON expr)?`.
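    /// Covers both the shorthand `x` (expanded to `x: x`) and the full `x: expr` form;
    /// an erroneous `x = expr` is recovered with a suggestion to use `:` instead.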
fn parse_expr_field(&mut self) -> PResult<'a, ExprField> {
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let lo = this.token.span;
            // If the next token is a `:` (or an erroneous `=`), we're parsing a full
            // `field: value` form; otherwise this is the field shorthand.
let is_shorthand = !this.look_ahead(1, |t| t == &token::Colon || t == &token::Eq);
let (ident, expr) = if is_shorthand {
// Mimic `x: x` for the `x` field shorthand.
let ident = this.parse_ident_common(false)?;
let path = ast::Path::from_ident(ident);
(ident, this.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new()))
} else {
let ident = this.parse_field_name()?;
this.error_on_eq_field_init(ident);
this.bump(); // `:`
(ident, this.parse_expr()?)
};
Ok((
ast::ExprField {
ident,
span: lo.to(expr.span),
expr,
is_shorthand,
attrs: attrs.into(),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::MaybeComma,
))
})
}
    /// Check for `=`. This means the source incorrectly attempts to
    /// initialize a field with an `=` rather than a `:`.
fn error_on_eq_field_init(&self, field_name: Ident) {
if self.token != token::Eq {
return;
}
self.struct_span_err(self.token.span, "expected `:`, found `=`")
.span_suggestion(
field_name.span.shrink_to_hi().to(self.token.span),
"replace equals symbol with a colon",
":".to_string(),
Applicability::MachineApplicable,
)
.emit();
}
fn err_dotdotdot_syntax(&self, span: Span) {
self.struct_span_err(span, "unexpected token: `...`")
.span_suggestion(
span,
"use `..` for an exclusive range",
"..".to_owned(),
Applicability::MaybeIncorrect,
)
.span_suggestion(
span,
"or `..=` for an inclusive range",
"..=".to_owned(),
Applicability::MaybeIncorrect,
)
.emit();
}
fn err_larrow_operator(&self, span: Span) {
self.struct_span_err(span, "unexpected token: `<-`")
.span_suggestion(
span,
"if you meant to write a comparison against a negative value, add a \
space in between `<` and `-`",
"< -".to_string(),
Applicability::MaybeIncorrect,
)
.emit();
}
fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
ExprKind::AssignOp(binop, lhs, rhs)
}
fn mk_range(
&mut self,
start: Option<P<Expr>>,
end: Option<P<Expr>>,
limits: RangeLimits,
) -> ExprKind {
if end.is_none() && limits == RangeLimits::Closed {
self.inclusive_range_with_incorrect_end(self.prev_token.span);
ExprKind::Err
} else {
ExprKind::Range(start, end, limits)
}
}
fn mk_unary(&self, unop: UnOp, expr: P<Expr>) -> ExprKind {
ExprKind::Unary(unop, expr)
}
fn mk_binary(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
ExprKind::Binary(binop, lhs, rhs)
}
fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind {
ExprKind::Index(expr, idx)
}
fn mk_call(&self, f: P<Expr>, args: Vec<P<Expr>>) -> ExprKind {
ExprKind::Call(f, args)
}
fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> {
let span = lo.to(self.prev_token.span);
let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), AttrVec::new());
self.recover_from_await_method_call();
await_expr
}
crate fn mk_expr(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None })
}
pub(super) fn mk_expr_err(&self, span: Span) -> P<Expr> {
self.mk_expr(span, ExprKind::Err, AttrVec::new())
}
/// Create expression span ensuring the span of the parent node
/// is larger than the span of lhs and rhs, including the attributes.
fn mk_expr_sp(&self, lhs: &P<Expr>, lhs_span: Span, rhs_span: Span) -> Span {
lhs.attrs
.iter()
.find(|a| a.style == AttrStyle::Outer)
.map_or(lhs_span, |a| a.span)
.to(rhs_span)
}
fn collect_tokens_for_expr(
&mut self,
attrs: AttrWrapper,
f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, P<Expr>>,
) -> PResult<'a, P<Expr>> {
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let res = f(this, attrs)?;
let trailing = if this.restrictions.contains(Restrictions::STMT_EXPR)
&& this.token.kind == token::Semi
{
TrailingToken::Semi
} else {
// FIXME - pass this through from the place where we know
// we need a comma, rather than assuming that `#[attr] expr,`
// always captures a trailing comma
TrailingToken::MaybeComma
};
Ok((res, trailing))
})
}
} | |
FormTest.js | import request from '@/utils/request';
import { stringify } from 'qs'; // query-string serializer used by `query` below
// GET request
export async function query(params) {
// The endpoint and parameters should be self-explanatory!
return request(`/server/api/test222?${stringify(params)}`);
}
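// Usage sketch (hypothetical caller, e.g. inside a dva effect):
//   const data = await query({ id: 1 }); // GET /server/api/test222?id=1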
// POST request
// export async function query(params) {
// return request('/server/api/getUserName', {
// method: 'POST',
// body: params,
// });
// }
solana_nodes_controller.go | package web
import (
"database/sql"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/smartcontractkit/chainlink-solana/pkg/solana/db"
"github.com/smartcontractkit/chainlink/core/services/chainlink"
"github.com/smartcontractkit/chainlink/core/web/presenters"
)
// ErrSolanaNotEnabled is returned when SOLANA_ENABLED is not true.
var ErrSolanaNotEnabled = errors.New("Solana is disabled. Set SOLANA_ENABLED=true to enable.")
func NewSolanaNodesController(app chainlink.Application) NodesController {
parse := func(s string) (string, error) { return s, nil }
return newNodesController[string, db.Node, presenters.SolanaNodeResource](
app.GetChains().Solana, ErrSolanaNotEnabled, parse, presenters.NewSolanaNodeResource, func(c *gin.Context) (db.Node, error) {
var request db.NewNode
if err := c.ShouldBindJSON(&request); err != nil {
return db.Node{}, err
}
if _, err := app.GetChains().Solana.Show(request.SolanaChainID); err != nil {
			if errors.Is(err, sql.ErrNoRows) {
				err = errors.Errorf("Solana chain %s must be added first", request.SolanaChainID)
}
return db.Node{}, err
}
return db.Node{
Name: request.Name,
SolanaChainID: request.SolanaChainID,
SolanaURL: request.SolanaURL,
}, nil
})
} | |
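// A minimal usage sketch (hypothetical wiring; route registration for this
// controller lives elsewhere in the web package):
//   controller := NewSolanaNodesController(app) // app is a chainlink.Application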
umychart.index.wechat.js | /*
copyright (c) 2018 jones
http://www.apache.org/licenses/LICENSE-2.0
Open-source project: https://github.com/jones2000/HQChart
[email protected]
Indicator base classes and custom indicators
*/
import {
JSCommonResource_Global_JSChartResource as g_JSChartResource,
} from './umychart.resource.wechat.js'
import { JSCommonComplier } from "./umychart.complier.wechat.js"; // Tongdaxin (通达信) formula compiler
// Logging
import { JSConsole } from "./umychart.console.wechat.js"
import {
JSCommon_ChartData as ChartData, JSCommon_HistoryData as HistoryData,
JSCommon_SingleData as SingleData, JSCommon_MinuteData as MinuteData,
JSCommon_JSCHART_EVENT_ID as JSCHART_EVENT_ID,
} from "./umychart.data.wechat.js";
// Chart drawing library
import {
JSCommonChartPaint_IChartPainting as IChartPainting,
JSCommonChartPaint_ChartSingleText as ChartSingleText,
JSCommonChartPaint_ChartDrawIcon as ChartDrawIcon,
JSCommonChartPaint_ChartDrawText as ChartDrawText,
JSCommonChartPaint_ChartDrawNumber as ChartDrawNumber,
JSCommonChartPaint_ChartKLine as ChartKLine,
JSCommonChartPaint_ChartColorKline as ChartColorKline,
JSCommonChartPaint_ChartLine as ChartLine,
JSCommonChartPaint_ChartSubLine as ChartSubLine,
JSCommonChartPaint_ChartPointDot as ChartPointDot,
JSCommonChartPaint_ChartStick as ChartStick,
JSCommonChartPaint_ChartLineStick as ChartLineStick,
JSCommonChartPaint_ChartStickLine as ChartStickLine,
JSCommonChartPaint_ChartOverlayKLine as ChartOverlayKLine,
JSCommonChartPaint_ChartMinuteInfo as ChartMinuteInfo,
JSCommonChartPaint_ChartRectangle as ChartRectangle,
JSCommonChartPaint_ChartMultiText as ChartMultiText,
JSCommonChartPaint_ChartMultiLine as ChartMultiLine,
JSCommonChartPaint_ChartMultiBar as ChartMultiBar,
JSCommonChartPaint_ChartPie as ChartPie,
JSCommonChartPaint_ChartCircle as ChartCircle,
JSCommonChartPaint_ChartChinaMap as ChartChinaMap,
JSCommonChartPaint_ChartRadar as ChartRadar,
JSCommonChartPaint_ChartCorssCursor as ChartCorssCursor,
JSCommonChartPaint_ChartBuySell as ChartBuySell,
JSCommonChartPaint_ChartMACD as ChartMACD,
JSCommonChartPaint_ChartSplashPaint as ChartSplashPaint,
JSCommonChartPaint_ChartBackground as ChartBackground,
JSCommonChartPaint_ChartMinuteVolumBar as ChartMinuteVolumBar,
JSCommonChartPaint_ChartMultiHtmlDom as ChartMultiHtmlDom,
JSCommonChartPaint_ChartLock as ChartLock,
JSCommonChartPaint_ChartVolStick as ChartVolStick,
JSCommonChartPaint_ChartBand as ChartBand,
JSCommonChartPaint_ChartLineMultiData as ChartLineMultiData,
JSCommonChartPaint_ChartStraightLine as ChartStraightLine,
} from "./umychart.chartpaint.wechat.js";
import
{
JSCommonSplit_CoordinateInfo as CoordinateInfo,
JSCommonSplit_IFrameSplitOperator as IFrameSplitOperator,
JSCommonSplit_FrameSplitKLinePriceY as FrameSplitKLinePriceY,
JSCommonSplit_FrameSplitY as FrameSplitY,
JSCommonSplit_FrameSplitKLineX as FrameSplitKLineX,
JSCommonSplit_FrameSplitMinutePriceY as FrameSplitMinutePriceY,
JSCommonSplit_FrameSplitMinuteX as FrameSplitMinuteX,
JSCommonSplit_FrameSplitXData as FrameSplitXData,
JSCommonSplit_SplitData as SplitData,
JSCommonSplit_PriceSplitData as PriceSplitData,
} from './umychart.framesplit.wechat.js'
import
{
JSCommonChartTitle_IChartTitlePainting as IChartTitlePainting,
JSCommonChartTitle_DynamicKLineTitlePainting as DynamicKLineTitlePainting,
JSCommonChartTitle_DynamicMinuteTitlePainting as DynamicMinuteTitlePainting,
JSCommonChartTitle_DynamicChartTitlePainting as DynamicChartTitlePainting,
JSCommonChartTitle_DynamicTitleData as DynamicTitleData,
JSCommonChartTitle_STRING_FORMAT_TYPE as STRING_FORMAT_TYPE,
} from './umychart.charttitle.wechat.js'
//////////////////////////////////////////////////////////
//
// Indicator information
//
function IndexInfo(name, param)
{
    this.Name = name; // Name
    this.Param = param; // Parameters
    this.LineColor; // Line segment color
    this.ReqeustData = null; // Data request
}
function BaseIndex(name)
{
    this.Index; // Indicator output definitions
    this.Name = name; // Indicator name
    this.UpdateUICallback; // Callback invoked when data arrives
    // By default, every output is created as a line
this.Create = function (hqChart, windowIndex)
{
for (var i in this.Index)
{
if (!this.Index[i].Name) continue;
var maLine = new ChartLine();
maLine.Canvas = hqChart.Canvas;
maLine.Name = this.Name + '-' + i.toString();
maLine.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
maLine.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
maLine.Color = this.Index[i].LineColor;
hqChart.ChartPaint.push(maLine);
}
}
    // Called when the indicator does not support the current period/adjustment/stock, etc.
this.NotSupport = function (hqChart, windowIndex, message)
{
var paint = hqChart.GetChartPaint(windowIndex);
for (var i in paint)
{
            paint[i].Data.Data = []; // Clear the data
if (i == 0) paint[i].NotSupportMessage = message;
}
}
    // Format the indicator name + parameters
    // Format: IndicatorName(param1,param2,param3,...)
this.FormatIndexTitle = function ()
{
var title = this.Name;
var param = null;
for (var i in this.Index)
{
var item = this.Index[i];
if (item.Param == null) continue;
if (param) param += ',' + item.Param.toString();
else param = item.Param.toString();
}
if (param) title += '(' + param + ')';
return title;
}
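    // For example, an indicator named "MA" whose outputs carry Param values 5, 10
    // and 20 is formatted as "MA(5,10,20)".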
this.InvokeUpdateUICallback = function (paint)
{
if (typeof (this.UpdateUICallback) != 'function') return;
let indexData = new Array();
for (let i in paint)
{
indexData.push({ Name: this.Index[i].Name, Data: paint[i].Data });
}
this.UpdateUICallback(indexData);
}
}
// Script-driven indicator
// name = indicator name, args = argument names and values
function ScriptIndex(name, script, args, option)
{
    this.newMethod = BaseIndex; // Inherit from BaseIndex
this.newMethod(name);
delete this.newMethod;
this.Script = script;
this.Arguments = [];
this.OutVar = [];
    this.ID; // Indicator ID
    this.FloatPrecision = 2; // Number of decimal places
    this.StringFormat;
    this.KLineType = null; // K-line display type
    this.InstructionType; // Colored K-line / trade-signal indicator
    this.YSpecificMaxMin = null; // Fixed max/min values
    this.YSplitScale = null; // Fixed Y-axis scale
    this.OutName=null; // Dynamic output indicator name
    // Indicator lock configuration
    this.IsLocked = false; // Whether the indicator is locked
    this.LockCallback = null;
    this.LockID = null;
    this.LockBG = null; // Lock background color
    this.LockTextColor = null;
    this.LockText = null;
    this.LockFont = null;
    this.LockCount = 10;
    this.TitleFont=g_JSChartResource.DynamicTitleFont; // Title font
if (option)
{
if (option.FloatPrecision >= 0) this.FloatPrecision = option.FloatPrecision;
if (option.StringFormat > 0) this.StringFormat = option.StringFormat;
if (option.ID) this.ID = option.ID;
if (option.KLineType) this.KLineType = option.KLineType;
if (option.InstructionType) this.InstructionType = option.InstructionType;
if (option.YSpecificMaxMin) this.YSpecificMaxMin = option.YSpecificMaxMin;
if (option.YSplitScale) this.YSplitScale = option.YSplitScale;
if (option.TitleFont) this.TitleFont=option.TitleFont;
if (option.OutName) this.OutName=option.OutName;
}
if (option && option.Lock)
{
        if (option.Lock.IsLocked == true) this.IsLocked = true; // Lock the indicator
        if (option.Lock.Callback) this.LockCallback = option.Lock.Callback; // Lock callback
        if (option.Lock.ID) this.LockID = option.Lock.ID; // Lock ID
if (option.Lock.BG) this.LockBG = option.Lock.BG;
if (option.Lock.TextColor) this.LockTextColor = option.Lock.TextColor;
if (option.Lock.Text) this.LockText = option.Lock.Text;
if (option.Lock.Font) this.LockFont = option.Lock.Font;
if (option.Lock.Count) this.LockCount = option.Lock.Count;
}
if (args) this.Arguments = args;
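    // A minimal construction sketch (the script text and argument values below are
    // hypothetical, but follow the option fields parsed above):
    //   var index = new ScriptIndex('MA', 'MA1:MA(CLOSE,P1);',
    //       [{ Name: 'P1', Value: 5 }], { FloatPrecision: 2, ID: 'MA-5' });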
this.SetLock = function (lockData) {
if (lockData.IsLocked == true) {
            this.IsLocked = true; // Lock the indicator
            if (lockData.Callback) this.LockCallback = lockData.Callback; // Lock callback
            if (lockData.ID) this.LockID = lockData.ID; // Lock ID
if (lockData.BG) this.LockBG = lockData.BG;
if (lockData.TextColor) this.LockTextColor = lockData.TextColor;
if (lockData.Text) this.LockText = lockData.Text;
if (lockData.Font) this.LockFont = lockData.Font;
if (lockData.Count) this.LockCount = lockData.Count;
}
        else { // Clear the lock configuration
            this.IsLocked = false; // Whether the indicator is locked
            this.LockCallback = null;
            this.LockID = null;
            this.LockBG = null; // Lock background color
this.LockTextColor = null;
this.LockText = null;
this.LockFont = null;
this.LockCount = 10;
}
}
this.ExecuteScript = function (hqChart, windowIndex, hisData)
{
this.OutVar = [];
let self = this;
let param =
{
HQChart: hqChart,
WindowIndex: windowIndex,
HistoryData: hisData,
Self: this
};
        let hqDataType = 0; // Default: K-line data
if (hqChart.ClassName === 'MinuteChartContainer' || hqChart.ClassName==='MinuteChartHScreenContainer')
{
            if (hqChart.DayCount>1) hqDataType=HQ_DATA_TYPE.MULTIDAY_MINUTE_ID; // Multi-day minute data
else hqDataType=HQ_DATA_TYPE.MINUTE_ID;
}
else if (hqChart.ClassName==='HistoryMinuteChartContainer')
{
            hqDataType=HQ_DATA_TYPE.HISTORY_MINUTE_ID; // Historical minute data
}
let option =
{
HQDataType: hqDataType,
Symbol: hqChart.Symbol,
Data: hisData,
            SourceData: hqChart.SourceData, // Raw source data
Callback: this.RecvResultData, CallbackParam: param,
Async: true,
MaxReqeustDataCount: hqChart.MaxReqeustDataCount,
MaxRequestMinuteDayCount: hqChart.MaxRequestMinuteDayCount,
Arguments: this.Arguments
};
if (hqDataType===HQ_DATA_TYPE.HISTORY_MINUTE_ID) option.TrateDate=hqChart.TradeDate;
if (hqDataType===HQ_DATA_TYPE.MULTIDAY_MINUTE_ID) option.DayCount=hqChart.DayCount;
if (hqChart.NetworkFilter) option.NetworkFilter = hqChart.NetworkFilter;
let code = this.Script;
let run = JSCommonComplier.JSComplier.Execute(code, option, hqChart.ScriptErrorCallback);
}
this.RecvResultData = function (outVar, param)
{
let hqChart = param.HQChart;
let windowIndex = param.WindowIndex;
let hisData = param.HistoryData;
param.Self.OutVar = outVar;
param.Self.BindData(hqChart, windowIndex, hisData);
        if (param.Self.IsLocked == false) // Not locked
{
param.HQChart.Frame.SubFrame[windowIndex].Frame.SetLock(null);
}
        else // Locked
{
let lockData =
{
IsLocked: true, Callback: param.Self.LockCallback, IndexName: param.Self.Name, ID: param.Self.LockID,
BG: param.Self.LockBG, Text: param.Self.LockText, TextColor: param.Self.LockTextColor, Font: param.Self.LockFont,
Count: param.Self.LockCount
};
param.HQChart.Frame.SubFrame[windowIndex].Frame.SetLock(lockData);
}
        param.HQChart.UpdataDataoffset(); // Update the data offset
        param.HQChart.UpdateFrameMaxMin(); // Adjust the coordinate max/min values
        param.HQChart.Draw();
        var event = hqChart.GetIndexEvent(); // Callback fired when the indicator calculation completes
if (event)
{
var self = param.Self;
var data = {
OutVar: self.OutVar, WindowIndex: windowIndex, Name: self.Name, Arguments: self.Arguments, HistoryData: hisData,
Stock: { Symbol: hqChart.Symbol, Name: hqChart.Name }
};
event.Callback(event, data, self);
}
}
this.CreateLine = function (hqChart, windowIndex, varItem, id)
{
let line = new ChartLine();
line.Canvas = hqChart.Canvas;
        line.DrawType = 1; // Skip invalid values when drawing
line.Name = varItem.Name;
line.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
line.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) line.Color = this.GetColor(varItem.Color);
else line.Color = this.GetDefaultColor(id);
if (varItem.IsShow==false) line.IsShow=false;
if (varItem.LineWidth)
{
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) line.LineWidth = width;
}
        if (varItem.IsDotLine) line.IsDotLine = true; // Dotted line
if (varItem.IsShow == false) line.IsShow = false;
let titleIndex = windowIndex + 1;
line.Data.Data = varItem.Data;
        if (varItem.IsShowTitle===false) // NOTEXT: do not draw the title
{
}
        else if (IFrameSplitOperator.IsString(varItem.Name) && varItem.Name.indexOf("NOTEXT")==0) // Names starting with NOTEXT: do not draw the title
{
}
else
{
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(line.Data, (varItem.NoneName==true? null: varItem.Name) , line.Color);
}
hqChart.ChartPaint.push(line);
}
this.CreateOverlayLine = function (hqChart, windowIndex, varItem, id)
{
let line = new ChartSubLine();
line.Canvas = hqChart.Canvas;
        line.DrawType = 1; // Skip invalid values when drawing
line.Name = varItem.Name;
line.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
line.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) line.Color = this.GetColor(varItem.Color);
else line.Color = this.GetDefaultColor(id);
if (varItem.LineWidth) {
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) line.LineWidth = width;
}
        if (varItem.IsDotLine) line.IsDotLine = true; // Dotted line
if (varItem.IsShow == false) line.IsShow = false;
let titleIndex = windowIndex + 1;
line.Data.Data = varItem.Data;
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(line.Data, varItem.Name, line.Color);
hqChart.ChartPaint.push(line);
}
    // Create bars (sticks)
this.CreateBar = function (hqChart, windowIndex, varItem, id)
{
let bar = new ChartStickLine();
bar.Canvas = hqChart.Canvas;
if (varItem.Draw.Width > 0) bar.LineWidth = varItem.Draw.Width;
else bar.LineWidth=1;
bar.Name = varItem.Name;
bar.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
bar.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) bar.Color = this.GetColor(varItem.Color);
else bar.Color = this.GetDefaultColor(id);
let titleIndex = windowIndex + 1;
bar.Data.Data = varItem.Draw.DrawData;
bar.BarType = varItem.Draw.Type;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(bar.Data,varItem.Name,bar.Color);
hqChart.ChartPaint.push(bar);
}
//DRAWTEXT
this.CreateDrawTextV2=function(hqChart, windowIndex, varItem, id)
{
var chartText = new ChartDrawText();
chartText.Canvas = hqChart.Canvas;
chartText.TextAlign='left';
chartText.Name = varItem.Name;
chartText.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chartText.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chartText.ReloadResource();
if (varItem.Color) chartText.Color = this.GetColor(varItem.Color);
else chartText.Color = this.GetDefaultColor(id);
if (varItem.Draw.DrawData) chartText.Data.Data = varItem.Draw.DrawData;
chartText.Text = varItem.Draw.Text;
if (varItem.Draw.YOffset > 0) chartText.YOffset = varItem.Draw.YOffset;
if (varItem.Draw.TextAlign) chartText.TextAlign = varItem.Draw.TextAlign;
        // Fixed output position
if (varItem.Draw.FixedPosition==="TOP") chartText.FixedPosition=1;
else if (varItem.Draw.FixedPosition==="BOTTOM") chartText.FixedPosition=2;
if (varItem.DrawVAlign>=0)
{
if (varItem.DrawVAlign==0) chartText.Direction=1;
else if (varItem.DrawVAlign==1) chartText.Direction=0;
else if (varItem.DrawVAlign==2) chartText.Direction=2;
}
if (varItem.DrawAlign>=0)
{
if (varItem.DrawAlign==0) chartText.TextAlign="left";
else if (varItem.DrawAlign==1) chartText.TextAlign="center";
else if (varItem.DrawAlign==2) chartText.TextAlign='right';
}
if (varItem.DrawFontSize>0) chartText.FixedFontSize=varItem.DrawFontSize;
if (varItem.Background) chartText.TextBG=varItem.Background;
if (varItem.VerticalLine) chartText.VerticalLine=varItem.VerticalLine;
//var titleIndex = windowIndex + 1;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(bar.Data,varItem.Name,bar.Color);
hqChart.ChartPaint.push(chartText);
}
//DRAWNUMBER
this.CreateDrawNumber=function(hqChart,windowIndex,varItem,id)
{
var chartText=new ChartDrawNumber();
chartText.Canvas=hqChart.Canvas;
chartText.Name=varItem.Name;
chartText.ChartBorder=hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chartText.ChartFrame=hqChart.Frame.SubFrame[windowIndex].Frame;
chartText.ReloadResource();
if (varItem.Color) chartText.Color=this.GetColor(varItem.Color);
else chartText.Color=this.GetDefaultColor(id);
if (varItem.IsDrawCenter===true) chartText.TextAlign='center';
if (varItem.IsDrawAbove===true) chartText.TextBaseline='bottom'
if (varItem.IsDrawBelow===true) chartText.TextBaseline='top';
chartText.Data.Data=varItem.Draw.DrawData.Value;
chartText.Text=varItem.Draw.DrawData.Text;
if (varItem.Draw.Direction>0) chartText.Direction=varItem.Draw.Direction;
if (varItem.Draw.YOffset>0) chartText.YOffset=varItem.Draw.YOffset;
if (varItem.Draw.TextAlign) chartText.TextAlign=varItem.Draw.TextAlign;
        // Fixed output position
if (varItem.Draw.FixedPosition==="TOP") chartText.FixedPosition=1;
else if (varItem.Draw.FixedPosition==="BOTTOM") chartText.FixedPosition=2;
if (varItem.DrawVAlign>=0)
{
if (varItem.DrawVAlign==0) chartText.TextBaseline='top';
else if (varItem.DrawVAlign==1) chartText.TextBaseline='middle';
else if (varItem.DrawVAlign==2) chartText.TextBaseline='bottom';
}
if (varItem.DrawAlign>=0)
{
if (varItem.DrawAlign==0) chartText.TextAlign="left";
else if (varItem.DrawAlign==1) chartText.TextAlign="center";
else if (varItem.DrawAlign==2) chartText.TextAlign='right';
}
if (varItem.DrawFontSize>0) chartText.FixedFontSize=varItem.DrawFontSize;
if (varItem.Background) chartText.TextBG=varItem.Background;
if (varItem.VerticalLine) chartText.VerticalLine=varItem.VerticalLine;
//let titleIndex=windowIndex+1;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(bar.Data,varItem.Name,bar.Color);
hqChart.ChartPaint.push(chartText);
}
    // Create text
this.CreateText = function (hqChart, windowIndex, varItem, id)
{
let chartText = new ChartSingleText();
chartText.Canvas = hqChart.Canvas;
chartText.TextAlign='left';
chartText.Name = varItem.Name;
chartText.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chartText.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chartText.ReloadResource();
if (varItem.Color) chartText.Color = this.GetColor(varItem.Color);
else chartText.Color = this.GetDefaultColor(id);
let titleIndex = windowIndex + 1;
        if (varItem.Draw.Position) chartText.Position=varItem.Draw.Position; // Assign coordinates
if (varItem.Draw.DrawData) chartText.Data.Data = varItem.Draw.DrawData;
chartText.Text = varItem.Draw.Text;
if (varItem.Draw.Direction > 0) chartText.Direction = varItem.Draw.Direction;
if (varItem.Draw.YOffset > 0) chartText.YOffset = varItem.Draw.YOffset;
if (varItem.Draw.TextAlign) chartText.TextAlign = varItem.Draw.TextAlign;
if (varItem.DrawVAlign>=0)
{
if (varItem.DrawVAlign==0) chartText.Direction=1;
else if (varItem.DrawVAlign==1) chartText.Direction=0;
else if (varItem.DrawVAlign==2) chartText.Direction=2;
}
if (varItem.DrawAlign>=0)
{
if (varItem.DrawAlign==0) chartText.TextAlign="left";
else if (varItem.DrawAlign==1) chartText.TextAlign="center";
else if (varItem.DrawAlign==2) chartText.TextAlign='right';
}
if (varItem.DrawFontSize>0) chartText.FixedFontSize=varItem.DrawFontSize;
if (varItem.Background) chartText.TextBG=varItem.Background;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(bar.Data,varItem.Name,bar.Color);
hqChart.ChartPaint.push(chartText);
}
//COLORSTICK
this.CreateMACD = function (hqChart, windowIndex, varItem, id)
{
let chartMACD = new ChartMACD();
chartMACD.Canvas = hqChart.Canvas;
chartMACD.Name = varItem.Name;
chartMACD.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chartMACD.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.LineWidth)
{
var width=parseInt(varItem.LineWidth.replace("LINETHICK",""));
if (!isNaN(width) && width>0) chartMACD.LineWidth=width;
}
let titleIndex = windowIndex + 1;
chartMACD.Data.Data = varItem.Data;
var clrTitle=this.GetDefaultColor(id);
if (varItem.Color) clrTitle= this.GetColor(varItem.Color);
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(chartMACD.Data, varItem.Name, clrTitle);
hqChart.ChartPaint.push(chartMACD);
}
this.CreatePointDot = function (hqChart, windowIndex, varItem, id) {
let pointDot = new ChartPointDot();
pointDot.Canvas = hqChart.Canvas;
pointDot.Name = varItem.Name;
pointDot.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
pointDot.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) pointDot.Color = this.GetColor(varItem.Color);
else pointDot.Color = this.GetDefaultColor(id);
if (varItem.Radius) pointDot.Radius = varItem.Radius;
if (varItem.LineWidth) {
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) pointDot.Radius = width;
}
let titleIndex = windowIndex + 1;
pointDot.Data.Data = varItem.Data;
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(pointDot.Data, varItem.Name, pointDot.Color);
hqChart.ChartPaint.push(pointDot);
}
this.CreateStick = function (hqChart, windowIndex, varItem, id) {
let chart = new ChartStick();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) chart.Color = this.GetColor(varItem.Color);
else chart.Color = this.GetDefaultColor(id);
if (varItem.LineWidth) {
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) chart.LineWidth = width;
}
let titleIndex = windowIndex + 1;
chart.Data.Data = varItem.Data;
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(chart.Data, varItem.Name, chart.Color);
hqChart.ChartPaint.push(chart);
}
this.CreateLineStick = function (hqChart, windowIndex, varItem, id) {
let chart = new ChartLineStick();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) chart.Color = this.GetColor(varItem.Color);
else chart.Color = this.GetDefaultColor(id);
if (varItem.LineWidth) {
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) chart.LineWidth = width;
}
let titleIndex = windowIndex + 1;
chart.Data.Data = varItem.Data;
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(chart.Data, varItem.Name, chart.Color);
hqChart.ChartPaint.push(chart);
}
this.CreateStraightLine = function (hqChart, windowIndex, varItem, id) {
let line = new ChartLine();
line.DrawType = 1;
line.Canvas = hqChart.Canvas;
line.Name = varItem.Name;
line.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
line.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) line.Color = this.GetColor(varItem.Color);
else line.Color = this.GetDefaultColor(id);
if (varItem.LineWidth) {
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) line.LineWidth = width;
}
let titleIndex = windowIndex + 1;
line.Data.Data = varItem.Draw.DrawData;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(line.Data,varItem.Name,line.Color);
hqChart.ChartPaint.push(line);
}
this.CreateVolStick = function (hqChart, windowIndex, varItem, id, hisData) {
let chart = new ChartVolStick();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
        chart.KLineDrawType = hqChart.KLineDrawType; // Set the K-line display type
if (varItem.Color) chart.Color = this.GetColor(varItem.Color);
else chart.Color = this.GetDefaultColor(id);
let titleIndex = windowIndex + 1;
chart.Data.Data = varItem.Data;
chart.HistoryData = hisData;
hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(chart.Data, varItem.Name, chart.Color);
hqChart.ChartPaint.push(chart);
}
this.CreateBand = function (hqChart, windowIndex, varItem, id) {
let chart = new ChartBand();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.FirstColor = varItem.Draw.Color[0];
chart.SecondColor = varItem.Draw.Color[1];
chart.Data.Data = varItem.Draw.DrawData;
hqChart.ChartPaint.push(chart);
}
this.CreatePolyLine = function (hqChart, windowIndex, varItem, id) {
let line = new ChartLine();
line.Canvas = hqChart.Canvas;
line.Name = varItem.Name;
line.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
line.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Color) line.Color = this.GetColor(varItem.Color);
else line.Color = this.GetDefaultColor(id);
if (varItem.LineWidth) {
let width = parseInt(varItem.LineWidth.replace("LINETHICK", ""));
if (!isNaN(width) && width > 0) line.LineWidth = width;
}
let titleIndex = windowIndex + 1;
line.Data.Data = varItem.Draw.DrawData;
        //hqChart.TitlePaint[titleIndex].Data[id] = new DynamicTitleData(line.Data, ' ', line.Color); // Give it an empty title
hqChart.ChartPaint.push(line);
}
    // Create the K-line chart
this.CreateKLine = function (hqChart, windowIndex, varItem, id)
{
let chart = new ChartKLine();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data.Data = varItem.Draw.DrawData;
chart.IsShowMaxMinPrice = false;
        if (varItem.Color) // If a color is set, use the externally supplied color
chart.UnchagneColor = chart.DownColor = chart.UpColor = this.GetColor(varItem.Color);
hqChart.ChartPaint.push(chart);
}
this.CreateDrawColorKLine=function(hqChart,windowIndex,varItem,i)
{
let chart=new ChartColorKline();
chart.Canvas=hqChart.Canvas;
chart.Name=varItem.Name;
chart.DrawName="DRAWCOLORKLINE";
chart.ChartBorder=hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame=hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data.Data=varItem.Draw.DrawData;
if (IFrameSplitOperator.IsBool(varItem.Draw.IsEmptyBar)) chart.IsEmptyBar=varItem.Draw.IsEmptyBar;
if (varItem.Draw.Color) chart.Color=varItem.Draw.Color;
hqChart.ChartPaint.push(chart);
}
this.CreateNumberText = function (hqChart, windowIndex, varItem, id) {
let chartText = new ChartSingleText();
chartText.Canvas = hqChart.Canvas;
chartText.Name = varItem.Name;
chartText.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chartText.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chartText.ReloadResource();
chartText.TextAlign="center";
if (varItem.Color) chartText.Color = this.GetColor(varItem.Color);
else chartText.Color = this.GetDefaultColor(id);
if (varItem.IsDrawAbove) chartText.Direction=1;
else chartText.Direction=2;
let titleIndex = windowIndex + 1;
chartText.Data.Data = varItem.Draw.DrawData.Value;
chartText.Text = varItem.Draw.DrawData.Text;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(bar.Data,varItem.Name,bar.Color);
hqChart.ChartPaint.push(chartText);
}
this.CreateDrawIcon=function(hqChart, windowIndex, varItem, id, drawCallback)
{
var chart = new ChartDrawIcon();
chart.Canvas = hqChart.Canvas;
chart.TextAlign = 'center';
chart.Identify=id;
chart.DrawCallback=drawCallback;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data.Data = varItem.Draw.DrawData;
chart.IconID=varItem.Draw.IconID;
if (varItem.Color) chart.Color = this.GetColor(varItem.Color);
else chart.Color = 'rgb(0,0,0)';
if (varItem.DrawVAlign>=0)
{
if (varItem.DrawVAlign==0) chart.TextBaseline="top";
else if (varItem.DrawVAlign==1) chart.TextBaseline="middle";
else if (varItem.DrawVAlign==2) chart.TextBaseline="bottom";
}
if (varItem.DrawAlign>=0)
{
if (varItem.DrawAlign==0) chart.TextAlign="left";
else if (varItem.DrawAlign==1) chart.TextAlign="center";
else if (varItem.DrawAlign==2) chart.TextAlign='right';
}
if (varItem.DrawFontSize>0) chart.FixedIconSize=varItem.DrawFontSize;
hqChart.ChartPaint.push(chart);
}
    // Create an icon
this.CreateIcon = function (hqChart, windowIndex, varItem, id)
{
var event=hqChart.GetEventCallback(JSCHART_EVENT_ID.ON_BIND_DRAWICON);
if (event && event.Callback)
{
var sendData={ FrameID:windowIndex, ID:id, Data:varItem, Callback:null };
event.Callback(event, sendData,this);
if (sendData.Callback)
{
this.CreateDrawIcon(hqChart, windowIndex, varItem, id, sendData.Callback);
return;
}
}
let chartText = new ChartSingleText();
chartText.Canvas = hqChart.Canvas;
chartText.TextAlign = 'center';
chartText.Name = varItem.Name;
chartText.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chartText.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
let titleIndex = windowIndex + 1;
chartText.Data.Data = varItem.Draw.DrawData;
chartText.Text = varItem.Draw.Icon.Symbol;
if (varItem.Color) chartText.Color = this.GetColor(varItem.Color);
else if (varItem.Draw.Icon.Color) chartText.Color = varItem.Draw.Icon.Color;
else chartText.Color = 'rgb(0,0,0)';
if (varItem.DrawVAlign>=0)
{
if (varItem.DrawVAlign==0) chartText.Direction=1;
else if (varItem.DrawVAlign==1) chartText.Direction=0;
else if (varItem.DrawVAlign==2) chartText.Direction=2;
}
if (varItem.DrawAlign>=0)
{
if (varItem.DrawAlign==0) chartText.TextAlign="left";
else if (varItem.DrawAlign==1) chartText.TextAlign="center";
else if (varItem.DrawAlign==2) chartText.TextAlign='right';
}
if (varItem.DrawFontSize>0) chartText.FixedFontSize=varItem.DrawFontSize;
//hqChart.TitlePaint[titleIndex].Data[id]=new DynamicTitleData(bar.Data,varItem.Name,bar.Color);
hqChart.ChartPaint.push(chartText);
}
this.CreateRectangle = function (hqChart, windowIndex, varItem, i)
{
let chart = new ChartRectangle();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Color = [varItem.Draw.DrawData.Color];
chart.Rect = varItem.Draw.DrawData.Rect;
if (varItem.Color) chart.BorderColor = this.GetColor(varItem.Color);
hqChart.ChartPaint.push(chart);
}
this.CreateBackgroud=function(hqChart,windowIndex,varItem,id)
{
let chart=new ChartBackground();
chart.Canvas=hqChart.Canvas;
chart.Name=varItem.Name;
chart.ChartBorder=hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame=hqChart.Frame.SubFrame[windowIndex].Frame;
if (varItem.Draw && varItem.Draw.DrawData)
{
var drawData=varItem.Draw.DrawData;
chart.Color=drawData.Color;
chart.ColorAngle=drawData.Angle;
if (drawData.Data) chart.Data.Data=drawData.Data;
}
hqChart.ChartPaint.push(chart);
}
this.CreateMultiText = function (hqChart, windowIndex, varItem, i)
{
let chart = new ChartMultiText();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
        chart.Data = hqChart.ChartPaint[0].Data; // Bind the K-line data
chart.Texts = varItem.Draw.DrawData;
hqChart.ChartPaint.push(chart);
}
this.CreateMulitHtmlDom=function(hqChart,windowIndex,varItem,i)
{
let chart=new ChartMultiHtmlDom();
chart.Canvas=hqChart.Canvas;
chart.Name=varItem.Name;
chart.ChartBorder=hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame=hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data=hqChart.ChartPaint[0].Data; //bind to K-line data
chart.Texts=varItem.Draw.DrawData;
chart.DrawCallback= varItem.Draw.Callback;
hqChart.ChartPaint.push(chart);
}
this.CreateMultiLine = function (hqChart, windowIndex, varItem, i)
{
let chart = new ChartMultiLine();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data = hqChart.ChartPaint[0].Data; //bind to K-line data
chart.Lines = varItem.Draw.DrawData;
if (varItem.Draw.LineDash) chart.LineDash=varItem.Draw.LineDash;
if (IFrameSplitOperator.IsNumber(varItem.Draw.LineWidth)) chart.LineWidth=varItem.Draw.LineWidth;
hqChart.ChartPaint.push(chart);
}
this.CreateMultiBar = function (hqChart, windowIndex, varItem, i)
{
let chart = new ChartMultiBar();
chart.Canvas = hqChart.Canvas;
chart.Name = varItem.Name;
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data = hqChart.ChartPaint[0].Data; //bind to K-line data
chart.Bars = varItem.Draw.DrawData;
hqChart.ChartPaint.push(chart);
}
//Create K-line background
this.CreateSelfKLine = function (hqChart, windowIndex, hisData)
{
let chart = new ChartKLine();
chart.Canvas = hqChart.Canvas;
chart.Name = "Self Kline"
chart.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
chart.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
chart.Data = hisData
chart.IsShowMaxMinPrice = false;
chart.IsShowKTooltip = false;
chart.DrawType = this.KLineType;
hqChart.ChartPaint.push(chart);
}
this.BindInstructionData = function (hqChart, windowIndex, hisData) //bind instruction index data
{
if (this.OutVar == null || this.OutVar.length <= 0) return;
if (this.InstructionType == 2)
{
let varItem = this.OutVar[this.OutVar.length - 1]; //use the last output variable as the instruction data
hqChart.SetInstructionData(this.InstructionType, { Data: varItem.Data }); //set instruction data
return true;
}
else if (this.InstructionType == 1) //trading system
{
var buyData, sellData;
for (var i in this.OutVar)
{
let item = this.OutVar[i];
if (item.Name == 'ENTERLONG') buyData = item.Data;
else if (item.Name == 'EXITLONG') sellData = item.Data;
}
hqChart.SetInstructionData(this.InstructionType, { Buy: buyData, Sell: sellData }); //set instruction data
return true;
}
}
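/* Hedged usage sketch (assumed data, not from the original source): with
InstructionType==1 the script output is expected to contain ENTERLONG/EXITLONG
variables, e.g. this.OutVar=[ {Name:'ENTERLONG', Data:[...]}, {Name:'EXITLONG', Data:[...]} ],
which BindInstructionData forwards to hqChart.SetInstructionData() as { Buy:..., Sell:... }. */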
this.BindData = function (hqChart, windowIndex, hisData)
{
if (windowIndex == 0 && this.InstructionType)
{
this.BindInstructionData(hqChart, windowIndex, hisData);
return;
}
//clear index graphics
hqChart.DeleteIndexPaint(windowIndex);
if (windowIndex == 0) hqChart.ShowKLine(true);
if (this.OutVar == null || this.OutVar.length <= 0) return;
//overlay a K-line background
if (this.KLineType != null)
{
if (this.KLineType === 0 || this.KLineType === 1 || this.KLineType === 2) this.CreateSelfKLine(hqChart, windowIndex, hisData);
else if (this.KLineType === -1 && windowIndex == 0) hqChart.ShowKLine(false);
}
if (windowIndex >= 1 && hqChart.Frame)
{
hqChart.Frame.SubFrame[windowIndex].Frame.YSplitOperator.FloatPrecision = this.FloatPrecision;
if (this.YSpecificMaxMin) hqChart.Frame.SubFrame[windowIndex].Frame.YSpecificMaxMin = this.YSpecificMaxMin; //max/min values
if (this.YSplitScale) hqChart.Frame.SubFrame[windowIndex].Frame.YSplitScale = this.YSplitScale; //fixed scale
}
for (var i=0 ;i<this.OutVar.length;++i )
{
let item = this.OutVar[i];
if (item.IsExData === true) continue; //extended data is not drawn
if (item.Type==1000 || item.Type==1001) continue; //data collection, string
if (item.Type == 0)
{
if (item.IsOverlayLine) this.CreateOverlayLine(hqChart, windowIndex, item, i);
else this.CreateLine(hqChart, windowIndex, item, i);
}
else if (item.Type == 1)
{
switch (item.Draw.DrawType)
{
case 'STICKLINE':
this.CreateBar(hqChart, windowIndex, item, i);
break;
case 'DRAWTEXT':
this.CreateDrawTextV2(hqChart, windowIndex, item, i);
break;
case 'SUPERDRAWTEXT':
case 'DRAWTEXT_FIX':
this.CreateText(hqChart, windowIndex, item, i);
break;
case 'DRAWLINE':
this.CreateStraightLine(hqChart, windowIndex, item, i);
break;
case 'DRAWBAND':
this.CreateBand(hqChart, windowIndex, item, i);
break;
case 'DRAWKLINE':
case "DRAWKLINE1":
this.CreateKLine(hqChart, windowIndex, item, i);
break;
case "DRAWCOLORKLINE":
this.CreateDrawColorKLine(hqChart,windowIndex,item,i);
break;
case 'DRAWKLINE_IF':
this.CreateKLine(hqChart, windowIndex, item, i);
break;
case 'POLYLINE':
this.CreatePolyLine(hqChart, windowIndex, item, i);
break;
case 'DRAWNUMBER':
this.CreateDrawNumber(hqChart, windowIndex, item, i);
break;
case 'DRAWICON':
this.CreateIcon(hqChart, windowIndex, item, i);
break;
case "ICON":
this.CreateIcon(hqChart,windowIndex,item,i);
break;
case 'DRAWRECTREL':
this.CreateRectangle(hqChart, windowIndex, item, i);
break;
case 'DRAWGBK':
case "DRAWGBK2":
this.CreateBackgroud(hqChart,windowIndex,item,i);
break;
//third-party custom indicators
case 'MULTI_TEXT':
this.CreateMultiText(hqChart, windowIndex, item, i);
break;
case "MULTI_HTMLDOM":
this.CreateMulitHtmlDom(hqChart,windowIndex,item,i);
break;
case 'MULTI_LINE':
this.CreateMultiLine(hqChart, windowIndex, item, i);
break;
case 'MULTI_BAR':
this.CreateMultiBar(hqChart, windowIndex, item, i);
break;
case "KLINE_BG":
this.CreateBackgroud(hqChart,windowIndex,item,i);
break;
}
}
else if (item.Type == 2)
{
this.CreateMACD(hqChart, windowIndex, item, i);
}
else if (item.Type == 3)
{
this.CreatePointDot(hqChart, windowIndex, item, i);
}
else if (item.Type == 4)
{
this.CreateLineStick(hqChart, windowIndex, item, i);
}
else if (item.Type == 5)
{
this.CreateStick(hqChart, windowIndex, item, i);
}
else if (item.Type == 6)
{
this.CreateVolStick(hqChart, windowIndex, item, i, hisData);
}
var titlePaint = hqChart.TitlePaint[windowIndex + 1];
if (titlePaint && titlePaint.Data && i < titlePaint.Data.length) //set title values: decimal places and format
{
if (this.StringFormat > 0) titlePaint.Data[i].StringFormat = this.StringFormat;
if (this.FloatPrecision >= 0) titlePaint.Data[i].FloatPrecision = this.FloatPrecision;
if (this.OutName && this.OutName.length>0 && this.Arguments && this.Arguments.length>0)
{
titlePaint.SetDynamicOutName(this.OutName,this.Arguments);
}
}
}
let titleIndex = windowIndex + 1;
hqChart.TitlePaint[titleIndex].Title = this.Name;
let indexParam = '';
for (let i in this.Arguments)
{
let item = this.Arguments[i];
if (indexParam.length > 0) indexParam += ',';
indexParam += item.Value.toString();
}
if (indexParam.length > 0) hqChart.TitlePaint[titleIndex].Title = this.Name + '(' + indexParam + ')';
if (this.TitleFont) hqChart.TitlePaint[titleIndex].Font=this.TitleFont;
if (hqChart.UpdateUICallback) hqChart.UpdateUICallback('ScriptIndex', this.OutVar,
{ WindowIndex: windowIndex, Name: this.Name, Arguments: this.Arguments, HistoryData: hisData }); //notify the upper-layer callback
return true;
}
this.GetDefaultColor = function (id) //returns a default color
{
let COLOR_ARRAY = g_JSChartResource.ColorArray;
let number = parseInt(id);
return COLOR_ARRAY[number % (COLOR_ARRAY.length - 1)];
}
this.GetColor = function (colorName) //get a color value
{
if (colorName.indexOf("RGB(")==0) return colorName.toLowerCase();
if (colorName.indexOf('rgb(')==0)return colorName;
if (colorName.indexOf("RGBA(")==0) return colorName.toLowerCase();
if (colorName.indexOf("rgba(")==0) return colorName;
var color=JSCommonComplier.JSComplier.ColorVarToRGB(colorName);
if (color) return color;
return 'rgb(30,144,255)';
/*
let COLOR_MAP = new Map([
['COLORBLACK', 'rgb(0,0,0)'],
['COLORBLUE', 'rgb(18,95,216)'],
['COLORGREEN', 'rgb(25,158,0)'],
['COLORCYAN', 'rgb(0,255,198)'],
['COLORRED', 'rgb(238,21,21)'],
['COLORMAGENTA', 'rgb(255,0,222)'],
['COLORBROWN', 'rgb(149,94,15)'],
['COLORLIGRAY', 'rgb(218,218,218)'], //light gray
['COLORGRAY', 'rgb(133,133,133)'], //dark gray
['COLORLIBLUE', 'rgb(94,204,255)'], //light blue
['COLORLIGREEN', 'rgb(183,255,190)'], //light green
['COLORLICYAN', 'rgb(154,255,242)'], //light cyan
['COLORLIRED', 'rgb(255,172,172)'], //light red
['COLORLIMAGENTA', 'rgb(255,145,241)'], //light magenta
['COLORWHITE', 'rgb(255,255,255)'], //white
['COLORYELLOW', 'rgb(255,198,0)']
]);
if (COLOR_MAP.has(colorName)) return COLOR_MAP.get(colorName);
//COLOR custom colors
//Format: COLOR+"RRGGBB": RR, GG, BB are the red, green and blue components; each ranges from 00 to FF in hex.
//Example: MA5:MA(CLOSE,5),COLOR00FFFF is a mix of pure red and pure green; COLOR808000 is a mix of light blue and light green.
if (colorName.indexOf('COLOR') == 0) return '#' + colorName.substr(5);
return 'rgb(30,144,255)';
*/
}
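/* Behavior sketch for GetColor above (illustrative inputs):
GetColor('RGB(255,0,0)') => 'rgb(255,0,0)' (upper-case form normalized to lowercase)
GetColor('rgba(10,10,10,0.5)') => returned unchanged
GetColor('COLORRED') => resolved through JSCommonComplier.JSComplier.ColorVarToRGB
anything unresolved falls back to the default 'rgb(30,144,255)'. */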
}
var HQ_DATA_TYPE =
{
KLINE_ID: 0, //K-line
MINUTE_ID: 2, //intraday minute chart
HISTORY_MINUTE_ID: 3,//historical minute chart
MULTIDAY_MINUTE_ID: 4,//multi-day minute chart
};
function APIScriptIndex(name, script, args, option) //index executed on the backend
{
this.newMethod = ScriptIndex; //inherit
this.newMethod(name, script, args, option);
delete this.newMethod;
this.ApiUrl; //API URL for index execution
this.HQDataType;
if (option.API)
{
if (option.API.Url) this.ApiUrl = option.API.Url;
if (option.API.Name) this.Name = this.ID = option.API.Name;
if (option.API.ID) this.ID = option.API.ID;
}
this.ExecuteScript = function (hqChart, windowIndex, hisData)
{
JSConsole.Chart.Log('[APIScriptIndex::ExecuteScript] name, Arguments ', this.Name, this.Arguments);
//data type
let hqDataType = HQ_DATA_TYPE.KLINE_ID; //default: K-line
var dateRange=null;
if (hqChart.ClassName === 'MinuteChartContainer' || hqChart.ClassName === 'MinuteChartHScreenContainer')
{
if (hqChart.DayCount > 1) hqDataType = HQ_DATA_TYPE.MULTIDAY_MINUTE_ID; //multi-day minute
else hqDataType = HQ_DATA_TYPE.MINUTE_ID; //minute data
dateRange=hisData.GetDateRange();
}
else if (hqChart.ClassName === 'HistoryMinuteChartContainer')
{
hqDataType = HQ_DATA_TYPE.HISTORY_MINUTE_ID; //historical minute
}
else
{
dateRange=hisData.GetDateRange();
}
var args = [];
if (this.Arguments)
{
for (var i in this.Arguments)
{
var item = this.Arguments[i];
args.push({ name: item.Name, value: item.Value });
}
}
var requestCount;
if (hqChart.GetRequestDataCount) requestCount= hqChart.GetRequestDataCount();
var self = this;
var postData =
{
indexname: this.ID, symbol: hqChart.Symbol, script: this.Script, args: args,
period: hqChart.Period, right: hqChart.Right, hqdatatype: hqDataType
};
if (dateRange) postData.DateRange=dateRange;
if (requestCount)
{
postData.maxdatacount=requestCount.MaxRequestDataCount;
postData.maxminutedaycount=requestCount.MaxRequestMinuteDayCount;
}
if (hqDataType == HQ_DATA_TYPE.MULTIDAY_MINUTE_ID || hqDataType == HQ_DATA_TYPE.MINUTE_ID) postData.daycount = hqChart.DayCount;
this.HQDataType = hqDataType;
if (hqChart.NetworkFilter)
{
var obj =
{
Name: 'APIScriptIndex::ExecuteScript', //ClassName::FunctionName
Explain: '指标计算',
Request: { Url: self.ApiUrl, Type: 'POST', Data: postData },
Self: this,
HQChart: hqChart,
PreventDefault: false
};
hqChart.NetworkFilter(obj, function (data)
{
self.RecvAPIData(data, hqChart, windowIndex, hisData);
});
if (obj.PreventDefault == true) return; //replaced by the upper layer; skip the default network request
}
wx.request({
url: self.ApiUrl,
data: postData,
method: 'POST',
dataType: "json",
async: true,
success: function (recvData)
{
self.RecvAPIData(recvData, hqChart, windowIndex, hisData);
},
error: function (request)
{
self.RecvError(request);
}
});
}
this.RecvAPIData = function (recvData, hqChart, windowIndex, hisData)
{
var data=recvData.data;
JSConsole.Chart.Log('[APIScriptIndex::RecvAPIData] recv data ', this.Name, data);
if (data.code != 0) return;
if (data.outdata && data.outdata.name) this.Name = data.outdata.name;
if (data.outdata && data.outdata.args) //display arguments modified externally
{
this.Arguments = [];
for (var i in data.outdata.args)
{
var item = data.outdata.args[i];
this.Arguments.push({ Name: item.name, Value: item.value });
}
}
if (this.HQDataType == HQ_DATA_TYPE.KLINE_ID)
{
this.OutVar = this.FittingData(data.outdata, hqChart);
JSConsole.Chart.Log('[APIScriptIndex::RecvAPIData] conver to OutVar ', this.OutVar);
}
else
{
this.OutVar = this.FittingMinuteData(data.outdata, hqChart); //minute chart data
}
this.BindData(hqChart, windowIndex, hisData);
if (this.IsLocked == false) //not locked
{
hqChart.Frame.SubFrame[windowIndex].Frame.SetLock(null);
}
else //locked
{
let lockData =
{
IsLocked: true, Callback: this.LockCallback, IndexName: this.Name, ID: this.LockID,
BG: this.LockBG, Text: this.LockText, TextColor: this.LockTextColor, Font: this.LockFont, Count: this.LockCount, MinWidth: this.LockMinWidth
};
hqChart.Frame.SubFrame[windowIndex].Frame.SetLock(lockData);
}
hqChart.UpdataDataoffset(); //update data offset
hqChart.UpdateFrameMaxMin(); //adjust axis max/min values
hqChart.Draw();
if (hqChart.GetIndexEvent)
{
var event = hqChart.GetIndexEvent(); //callback when index calculation completes
if (event)
{
var data =
{
OutVar: this.OutVar, WindowIndex: windowIndex, Name: this.Name, Arguments: this.Arguments, HistoryData: hisData,
Stock: { Symbol: hqChart.Symbol, Name: hqChart.Name }
};
event.Callback(event, data, this);
}
}
}
this.FittingData = function (jsonData, hqChart)
{
var outVar = jsonData.outvar;
var date = jsonData.date;
var time = jsonData.time;
var kdata = hqChart.ChartPaint[0].Data;
//fit the data onto kdata
var result = [];
for (var i in outVar)
{
var item = outVar[i];
var indexData = [];
var outVarItem = { Name: item.name, Type: item.type };
if (item.color) outVarItem.Color = item.color;
if (item.data)
{
outVarItem.Data = this.FittingArray(item.data, date, time, hqChart);
if (item.color) outVarItem.Color = item.color;
if (item.linewidth >= 1) outVarItem.LineWidth = item.linewidth;
if (item.isshow == false) outVarItem.IsShow = false;
if (item.isexdata == true) outVarItem.IsExData = true;
result.push(outVarItem);
}
else if (item.Draw)
{
var draw = item.Draw;
var drawItem = {};
if (draw.DrawType == 'DRAWICON') //icon
{
drawItem.Icon = draw.Icon;
//Mini Programs don't support SVG, so convert to text
if (IFrameSplitOperator.IsNumber(draw.IconType))
drawItem.Icon=JSCommonComplier.g_JSComplierResource.GetDrawTextIcon(draw.IconType);
drawItem.Name = draw.Name;
drawItem.DrawType = draw.DrawType;
drawItem.DrawData = this.FittingArray(draw.DrawData, date, time, hqChart);
outVarItem.Draw = drawItem;
result.push(outVarItem);
}
else if (draw.DrawType == 'DRAWTEXT') //text
{
drawItem.Text = draw.Text;
drawItem.Name = draw.Name;
drawItem.DrawType = draw.DrawType;
drawItem.DrawData = this.FittingArray(draw.DrawData, date, time, hqChart);
outVarItem.Draw = drawItem;
result.push(outVarItem);
}
else if (draw.DrawType == 'STICKLINE') //bars
{
drawItem.Name = draw.Name;
drawItem.Type = draw.Type;
drawItem.Width = draw.Width;
drawItem.DrawType = draw.DrawType;
drawItem.DrawData = this.FittingArray(draw.DrawData, date, time, hqChart, 1);
outVarItem.Draw = drawItem;
result.push(outVarItem);
}
else if (draw.DrawType == 'MULTI_LINE')
{
drawItem.Text = draw.Text;
drawItem.Name = draw.Name;
drawItem.DrawType = draw.DrawType;
drawItem.DrawData = this.FittingMultiLine(draw.DrawData, date, time, hqChart);
outVarItem.Draw = drawItem;
if (draw.LineDash) drawItem.LineDash=draw.LineDash;
//if (draw.Arrow) drawItem.Arrow=draw.Arrow;
if (IFrameSplitOperator.IsNumber(draw.LineWidth)) drawItem.LineWidth=draw.LineWidth;
result.push(outVarItem);
}
else if (draw.DrawType == 'MULTI_BAR')
{
drawItem.Text = draw.Text;
drawItem.Name = draw.Name;
drawItem.DrawType = draw.DrawType;
drawItem.DrawData = this.FittingMultiLine(draw.DrawData, date, time, hqChart);
outVarItem.Draw = drawItem;
result.push(outVarItem);
}
else if (draw.DrawType == 'MULTI_TEXT')
{
drawItem.Text = draw.Text;
drawItem.Name = draw.Name;
drawItem.DrawType = draw.DrawType;
drawItem.DrawData = this.FittingMultiText(draw.DrawData, date, time, hqChart);
this.GetKLineData(drawItem.DrawData, hqChart);
outVarItem.Draw = drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=="MULTI_HTMLDOM") //外部自己创建dom
{
drawItem.Text=draw.Text;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.Callback=draw.Callback;
drawItem.DrawData=this.FittingMultiText(draw.DrawData,date,time,hqChart);
this.GetKLineData(drawItem.DrawData, hqChart);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=="KLINE_BG")
{
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData={ Color:draw.Color, Angle:draw.Angle };
drawItem.DrawData.Data=this.FittingKLineBG(draw.DrawData, hqChart);
outVarItem.Draw=drawItem;
outVarItem.Name=draw.DrawType;
result.push(outVarItem);
}
}
}
return result;
}
// Replace string values "H"/"HIGH" and "L"/"LOW" with the matching K-line high/low prices.
this.GetKLineData=function(data,hqChart)
{
if (!data) return;
if (!Array.isArray(data)) return;
var kData=hqChart.ChartPaint[0].Data; //K-line
for(var i in data)
{
var item=data[i];
if (!IFrameSplitOperator.IsString(item.Value)) continue;
if(!IFrameSplitOperator.IsNumber(item.Index)) continue;
if (item.Index<0 || item.Index>=kData.Data.length) continue;
var valueName=item.Value.toUpperCase();
var kItem=kData.Data[item.Index];
switch(valueName)
{
case "HIGH":
case "H":
item.Value=kItem.High;
break;
case "L":
case "LOW":
item.Value=kItem.Low;
break;
}
}
}
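/* Hedged example (assumed data): if kData.Data[5] has High=10.2 and Low=9.8,
then GetKLineData([{Index:5, Value:'H'}], hqChart) rewrites Value to 10.2,
and Value:'LOW' would become 9.8; non-string values or out-of-range indexes are skipped. */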
this.FittingKLineBG=function(data, hqChart)
{
var kData=hqChart.ChartPaint[0].Data; //K-line
var result=[];
if (ChartData.IsDayPeriod(hqChart.Period,true)) //daily period
{
var bFill=false;
for(var i=0,j=0;i<kData.Data.length;)
{
result[i]=0;
var kItem=kData.Data[i];
if (j>=data.length)
{
++i;
continue;
}
var dataItem=data[j];
if (dataItem.Date<kItem.Date)
{
++j;
}
else if (dataItem.Date>kItem.Date)
{
++i;
}
else
{
bFill=true;
result[i]=1;
++j;
++i;
}
}
if (bFill) return result;
}
else if (ChartData.IsMinutePeriod(hqChart.Period,true)) //minute period
{
var bFill=false;
for(var i=0,j=0;i<kData.Data.length;)
{
result[i]=0;
var kItem=kData.Data[i];
if (j>=data.length)
{
++i;
continue;
}
var dataItem=data[j];
if (dataItem.Date<kItem.Date || (dataItem.Date==kItem.Date && dataItem.Time<kItem.Time))
{
++j;
}
else if (dataItem.Date>kItem.Date || (dataItem.Date==kItem.Date && dataItem.Time>kItem.Time))
{
++i;
}
else
{
bFill=true;
result[i]=1;
++j;
++i;
}
}
if (bFill) return result;
}
return null;
}
this.FittingArray = function (sourceData, date, time, hqChart, arrayType) //arrayType 0=single-value array 1=struct
{
var kdata = hqChart.ChartPaint[0].Data; //K-line
var arySingleData = [];
for (var i in sourceData)
{
var value = sourceData[i];
var indexItem = new SingleData(); //single-column index data
indexItem.Date = date[i];
if (time && i < time.length) indexItem.Time = time[i];
indexItem.Value = value;
arySingleData.push(indexItem);
}
var aryFittingData;
if (ChartData.IsDayPeriod(hqChart.Period,true))
aryFittingData = kdata.GetFittingData(arySingleData); //fit data to the main chart K-line
else
aryFittingData = kdata.GetMinuteFittingData(arySingleData); //fit data to the main chart K-line
var bindData = new ChartData();
bindData.Data = aryFittingData;
var result;
if (arrayType == 1) result = bindData.GetObject();
else result = bindData.GetValue();
return result;
}
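/* Usage sketch (hypothetical values): FittingArray aligns backend output with the
main chart K-line by date/time, e.g.
this.FittingArray([1.2,1.3], [20190310,20190311], null, hqChart) // no arrayType => plain value array
returns one entry per K-line bar, leaving unmatched bars empty. */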
this.FittingMultiLine = function (sourceData, date, time, hqChart)
{
var kdata = hqChart.ChartPaint[0].Data; //K-line
if (ChartData.IsDayPeriod(hqChart.Period, true)) //daily period
{
var aryPoint = [];
for (var i in sourceData)
{
var item = sourceData[i];
for (var j in item.Point)
{
var point = item.Point[j];
aryPoint.push(point);
}
}
aryPoint.sort(function (a, b) { return a.Date - b.Date; });
kdata.GetDateIndex(aryPoint);
return sourceData;
}
else if (ChartData.IsMinutePeriod(hqChart.Period, true)) //minute period
{
var aryPoint = [];
for (var i in sourceData)
{
var item = sourceData[i];
for (var j in item.Point)
{
var point = item.Point[j];
aryPoint.push(point);
}
}
aryPoint.sort(function (a, b) {
if (a.Date == b.Date) return a.Time - b.Time;
return a.Date - b.Date;
});
kdata.GetDateTimeIndex(aryPoint);
return sourceData;
}
return null;
}
this.FittingMultiText = function (sourceData, date, time, hqChart)
{
var kdata = hqChart.ChartPaint[0].Data; //K-line
if (ChartData.IsDayPeriod(hqChart.Period, true)) //daily period
{
sourceData.sort(function (a, b) { return a.Date - b.Date; });
kdata.GetDateIndex(sourceData);
return sourceData;
}
else if (ChartData.IsMinutePeriod(hqChart.Period, true)) //minute period
{
sourceData.sort(function (a, b) {
if (a.Date == b.Date) return a.Time - b.Time;
return a.Date - b.Date;
}
);
kdata.GetDateTimeIndex(sourceData);
return sourceData;
}
return null;
}
this.FittingMinuteData=function(jsonData, hqChart)
{
var outVar=jsonData.outvar;
var date=jsonData.date;
var time=jsonData.time;
var result=[];
for(var i in outVar)
{
var item=outVar[i];
var outVarItem={Name:item.name,Type:item.type}
if (item.data)
{
outVarItem.Data=this.FittingMinuteArray(item.data,date,time,hqChart);
if (item.color) outVarItem.Color=item.color;
if (item.linewidth>=1) outVarItem.LineWidth=item.linewidth;
if (item.isshow==false) outVarItem.IsShow = false;
if (item.isexdata==true) outVarItem.IsExData = true;
result.push(outVarItem);
}
else if (item.Draw)
{
var draw=item.Draw;
var drawItem={};
if (draw.DrawType=='DRAWICON') //icon
{
drawItem.Icon=draw.Icon;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData=this.FittingMinuteArray(draw.DrawData,date,time,hqChart);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=='DRAWTEXT') //text
{
drawItem.Text=draw.Text;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData=this.FittingMinuteArray(draw.DrawData,date,time,hqChart);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=='STICKLINE') //bars
{
drawItem.Name=draw.Name;
drawItem.Type=draw.Type;
drawItem.Width=draw.Width;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData=this.FittingMinuteArray(draw.DrawData,date,time,hqChart,1);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=='MULTI_LINE')
{
drawItem.Text=draw.Text;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData=this.FittingMultiLine(draw.DrawData,date,time,hqChart);
for(var k in drawItem.DrawData)
{
this.GetKLineData(drawItem.DrawData[k].Point, hqChart);
}
outVarItem.Draw=drawItem;
if (draw.LineDash) drawItem.LineDash=draw.LineDash;
if (draw.Arrow) drawItem.Arrow=draw.Arrow;
result.push(outVarItem);
}
else if (draw.DrawType=='MULTI_TEXT')
{
drawItem.Text=draw.Text;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData=this.FittingMultiText(draw.DrawData,date,time,hqChart);
this.GetKLineData(drawItem.DrawData, hqChart);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=='MULTI_SVGICON')
{
drawItem.Text=draw.Text;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.DrawData={ Icon:this.FittingMultiText(draw.DrawData.Icon,date,time,hqChart), Family:draw.DrawData.Family };
this.GetKLineData(drawItem.DrawData.Icon, hqChart);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
else if (draw.DrawType=="MULTI_HTMLDOM") //外部自己创建dom
{
drawItem.Text=draw.Text;
drawItem.Name=draw.Name;
drawItem.DrawType=draw.DrawType;
drawItem.Callback=draw.Callback;
drawItem.DrawData=this.FittingMultiText(draw.DrawData,date,time,hqChart);
this.GetKLineData(drawItem.DrawData, hqChart);
outVarItem.Draw=drawItem;
result.push(outVarItem);
}
}
}
return result;
}
this.FittingMinuteArray=function(sourceData,date,time,hqChart)
{
var minutedata=hqChart.SourceData; //minute-line data
var arySingleData=[];
for(var i in sourceData)
{
var value=sourceData[i];
var indexItem=new SingleData(); //single-column index data
indexItem.Date=date[i];
if (time && i<time.length) indexItem.Time=time[i];
indexItem.Value=value;
arySingleData.push(indexItem);
}
var aryFittingData=minutedata.GetMinuteFittingData(arySingleData); //fit data to the main chart K-line
var bindData=new ChartData();
bindData.Data=aryFittingData;
var result=bindData.GetValue();
return result;
}
}
//Market long/short
function MarketLongShortIndex()
{
this.newMethod = BaseIndex; //inherit
this.newMethod('市场多空');
delete this.newMethod;
this.Index = new Array(
new IndexInfo("多空指标", null),
new IndexInfo("多头区域", null),
new IndexInfo("空头区域", null)
);
this.Index[0].LineColor = g_JSChartResource.Index.LineColor[0];
this.Index[1].LineColor = g_JSChartResource.UpBarColor;
this.Index[2].LineColor = g_JSChartResource.DownBarColor;
this.LongShortData; //long/short data
this.Create = function (hqChart, windowIndex) {
for (var i in this.Index) {
var paint = null;
if (i == 0)
paint = new ChartLine();
else
paint = new ChartStraightLine();
paint.Color = this.Index[i].LineColor;
paint.Canvas = hqChart.Canvas;
paint.Name = this.Name + "-" + i.toString();
paint.ChartBorder = hqChart.Frame.SubFrame[windowIndex].Frame.ChartBorder;
paint.ChartFrame = hqChart.Frame.SubFrame[windowIndex].Frame;
hqChart.ChartPaint.push(paint);
}
}
//request data
this.RequestData = function (hqChart, windowIndex, hisData) {
var self = this;
var param =
{
HQChart: hqChart,
WindowIndex: windowIndex,
HistoryData: hisData
};
this.LongShortData = [];
if (param.HQChart.Period > 0) //period data
{
this.NotSupport(param.HQChart, param.WindowIndex, "不支持周期切换");
param.HQChart.Draw();
return false;
}
//request data
wx.request({
url: g_JSChartResource.Index.MarketLongShortApiUrl,
data:
{
},
method: 'POST',
dataType: "json",
async: true,
success: function (recvData) {
self.RecvData(recvData, param);
}
});
return true;
}
this.RecvData = function (recvData, param) {
if (recvData.data.data.length <= 0) return;
var aryData = new Array();
for (var i in recvData.data.data) {
var item = recvData.data.data[i];
var indexData = new SingleData();
indexData.Date = item[0];
indexData.Value = item[1];
aryData.push(indexData);
}
var aryFittingData = param.HistoryData.GetFittingData(aryData);
var bindData = new ChartData();
bindData.Data = aryFittingData;
bindData.Period = param.HQChart.Period; //period
bindData.Right = param.HQChart.Right; //price adjustment (rights)
this.LongShortData = bindData.GetValue();
this.BindData(param.HQChart, param.WindowIndex, param.HistoryData);
param.HQChart.UpdataDataoffset(); //update data offset
param.HQChart.UpdateFrameMaxMin(); //adjust axis max/min values
param.HQChart.Draw();
}
this.BindData = function (hqChart, windowIndex, hisData) {
var paint = hqChart.GetChartPaint(windowIndex);
if (paint.length != this.Index.length) return false;
//paint[0].Data.Data=SWLData;
paint[0].Data.Data = this.LongShortData;
paint[0].NotSupportMessage = null;
paint[1].Data.Data[0] = 8;
paint[2].Data.Data[0] = 1;
//pin the Y axis range to [0,9]
hqChart.Frame.SubFrame[windowIndex].Frame.YSpecificMaxMin = { Max: 9, Min: 0, Count: 3 };
var titleIndex = windowIndex + 1;
for (var i in paint) {
hqChart.TitlePaint[titleIndex].Data[i] = new DynamicTitleData(paint[i].Data, this.Index[i].Name, this.Index[i].LineColor);
if (i > 0) hqChart.TitlePaint[titleIndex].Data[i].DataType = "StraightLine";
}
hqChart.TitlePaint[titleIndex].Title = this.FormatIndexTitle();
if (hqChart.UpdateUICallback) hqChart.UpdateUICallback('MarketLongShortIndex', paint, { WindowIndex: windowIndex, HistoryData: hisData }); //notify the upper-layer callback
return true;
}
}
module.exports =
{
JSCommonIndex:
{
IndexInfo: IndexInfo,
BaseIndex: BaseIndex,
ScriptIndex:ScriptIndex,
APIScriptIndex:APIScriptIndex,
},
//individual class exports
JSCommonIndex_IndexInfo: IndexInfo,
JSCommonIndex_BaseIndex: BaseIndex,
JSCommonIndex_ScriptIndex:ScriptIndex,
JSCommonIndex_APIScriptIndex:APIScriptIndex,
};
| //指 |
index.test.ts | import isPalindrome from '.';
describe('isPalindrome', () => {
it('should check if string is palindrome', () => {
expect.assertions(8);
expect(isPalindrome('a')).toBe(true);
expect(isPalindrome('aba')).toBe(true);
expect(isPalindrome('Abba')).toBe(true);
expect(isPalindrome('hello')).toBe(false);
expect(isPalindrome('Bob')).toBe(true); | }); | expect(isPalindrome('Madam')).toBe(true);
expect(isPalindrome('AbBa')).toBe(true);
expect(isPalindrome('')).toBe(true);
}); |
retrieve_file.go | // Code generated by go-swagger; DO NOT EDIT.
package files
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// RetrieveFileHandlerFunc turns a function with the right signature into a retrieve file handler
type RetrieveFileHandlerFunc func(RetrieveFileParams) middleware.Responder
// Handle executing the request and returning a response
func (fn RetrieveFileHandlerFunc) Handle(params RetrieveFileParams) middleware.Responder {
return fn(params)
}
// RetrieveFileHandler interface for that can handle valid retrieve file params
type RetrieveFileHandler interface {
Handle(RetrieveFileParams) middleware.Responder
}
// NewRetrieveFile creates a new http.Handler for the retrieve file operation
func NewRetrieveFile(ctx *middleware.Context, handler RetrieveFileHandler) *RetrieveFile {
return &RetrieveFile{Context: ctx, Handler: handler}
}
/*RetrieveFile swagger:route GET /apps/{app_guid}/instances/{instance_index}/files/{file_path} files retrieveFile
Retrieve File
curl --insecure -i %s/v2/apps/{app_guid}/instances/{instance_index}/files/{file_path} -X GET -H 'Authorization: %s'
*/
type RetrieveFile struct {
Context *middleware.Context
Handler RetrieveFileHandler
}
func (o *RetrieveFile) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
route, rCtx, _ := o.Context.RouteInfo(r)
if rCtx != nil {
r = rCtx
}
var Params = NewRetrieveFileParams()
if err := o.Context.BindValidRequest(r, route, &Params); err != nil |
res := o.Handler.Handle(Params) // actually handle the request
o.Context.Respond(rw, r, route.Produces, route, res)
}
| { // bind params
o.Context.Respond(rw, r, route.Produces, route, err)
return
} |
index.tsx | import React from 'react'
import {Image, View} from 'react-native'
import {styles} from './styles';
import DiscordSvg from '../../assets/discord.svg' | const {CDN_IMAGE} = process.env;
type Props = {
guildId: string;
iconId: string | null;
}
export function GuildIcon({guildId, iconId }:Props) {
const uri = `${CDN_IMAGE}/icons/${guildId}/${iconId}.png`
return (
<View style={styles.container}>
{
iconId ?
<Image
source={{uri}}
style={styles.image}
resizeMode="cover"
accessibilityRole="image"
/>
:
<DiscordSvg width={40} height={40}/>
}
</View>
)
} | |
cbt.go | /*
Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// Command docs are in cbtdoc.go.
import (
"bytes"
"flag"
"fmt"
"go/format"
"io"
"log"
"os"
"regexp"
"sort"
"strconv"
"strings"
"text/tabwriter"
"text/template"
"time"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/bigtable/internal/cbtconfig"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
var (
oFlag = flag.String("o", "", "if set, redirect stdout to this file")
config *cbtconfig.Config
client *bigtable.Client
adminClient *bigtable.AdminClient
instanceAdminClient *bigtable.InstanceAdminClient
)
func getClient() *bigtable.Client {
if client == nil {
var opts []option.ClientOption
if ep := config.DataEndpoint; ep != "" {
opts = append(opts, option.WithEndpoint(ep))
}
if ts := config.TokenSource; ts != nil {
opts = append(opts, option.WithTokenSource(ts))
}
var err error
client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...)
if err != nil {
log.Fatalf("Making bigtable.Client: %v", err)
}
}
return client
}
func getAdminClient() *bigtable.AdminClient {
if adminClient == nil {
var opts []option.ClientOption
if ep := config.AdminEndpoint; ep != "" {
opts = append(opts, option.WithEndpoint(ep))
}
if ts := config.TokenSource; ts != nil {
opts = append(opts, option.WithTokenSource(ts))
}
var err error
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...)
if err != nil {
log.Fatalf("Making bigtable.AdminClient: %v", err)
}
}
return adminClient
}
func getInstanceAdminClient() *bigtable.InstanceAdminClient {
if instanceAdminClient == nil {
var opts []option.ClientOption
if ep := config.AdminEndpoint; ep != "" {
opts = append(opts, option.WithEndpoint(ep))
}
if ts := config.TokenSource; ts != nil {
opts = append(opts, option.WithTokenSource(ts))
}
var err error
instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...)
if err != nil {
log.Fatalf("Making bigtable.InstanceAdminClient: %v", err)
}
}
return instanceAdminClient
}
func main() {
var err error
config, err = cbtconfig.Load()
if err != nil {
log.Fatal(err)
}
config.RegisterFlags()
flag.Usage = func() { usage(os.Stderr) }
flag.Parse()
if flag.NArg() == 0 {
usage(os.Stderr)
os.Exit(1)
}
if *oFlag != "" {
f, err := os.Create(*oFlag)
if err != nil {
log.Fatal(err)
}
defer func() {
if err := f.Close(); err != nil {
log.Fatal(err)
}
}()
os.Stdout = f
}
ctx := context.Background()
for _, cmd := range commands {
if cmd.Name == flag.Arg(0) {
if err := config.CheckFlags(cmd.Required); err != nil {
log.Fatal(err)
}
cmd.do(ctx, flag.Args()[1:]...)
return
}
}
log.Fatalf("Unknown command %q", flag.Arg(0))
}
func usage(w io.Writer) {
fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0])
flag.CommandLine.SetOutput(w)
flag.CommandLine.PrintDefaults()
fmt.Fprintf(w, "\n%s", cmdSummary)
}
var cmdSummary string // generated in init, below
func init() {
var buf bytes.Buffer
tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
for _, cmd := range commands {
fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
}
tw.Flush()
buf.WriteString(configHelp)
cmdSummary = buf.String()
}
var configHelp = `
For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
` + cbtconfig.Filename() + ` in this format:
project = my-project-123
instance = my-instance
creds = path-to-account-key.json
admin-endpoint = hostname:port
data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.
`
var commands = []struct {
Name, Desc string
do func(context.Context, ...string)
Usage string
Required cbtconfig.RequiredFlags
}{
{
Name: "count",
Desc: "Count rows in a table",
do: doCount,
Usage: "cbt count <table>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "createfamily",
Desc: "Create a column family",
do: doCreateFamily,
Usage: "cbt createfamily <table> <family>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "createtable",
Desc: "Create a table",
do: doCreateTable,
Usage: "cbt createtable <table>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deletefamily",
Desc: "Delete a column family",
do: doDeleteFamily,
Usage: "cbt deletefamily <table> <family>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deleterow",
Desc: "Delete a row",
do: doDeleteRow,
Usage: "cbt deleterow <table> <row>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deletetable",
Desc: "Delete a table",
do: doDeleteTable,
Usage: "cbt deletetable <table>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "doc",
Desc: "Print godoc-suitable documentation for cbt",
do: doDoc,
Usage: "cbt doc",
Required: cbtconfig.NoneRequired,
},
{
Name: "help",
Desc: "Print help text",
do: doHelp,
Usage: "cbt help [command]",
Required: cbtconfig.NoneRequired,
},
{
Name: "listinstances",
Desc: "List instances in a project",
do: doListInstances,
Usage: "cbt listinstances",
Required: cbtconfig.ProjectRequired,
},
{
Name: "lookup",
Desc: "Read from a single row",
do: doLookup,
Usage: "cbt lookup <table> <row>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "ls",
Desc: "List tables and column families",
do: doLS,
Usage: "cbt ls List tables\n" +
"cbt ls <table> List column families in <table>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "mddoc",
Desc: "Print documentation for cbt in Markdown format",
do: doMDDoc,
Usage: "cbt mddoc",
Required: cbtconfig.NoneRequired,
},
{
Name: "read",
Desc: "Read rows",
do: doRead,
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" +
" start=<row> Start reading at this row\n" +
" end=<row> Stop reading before this row\n" +
" prefix=<prefix> Read rows with this prefix\n" +
" count=<n> Read only this many rows\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "set",
Desc: "Set value of a cell",
do: doSet,
Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
" family:column=val[@ts] may be repeated to set multiple cells.\n" +
"\n" +
" ts is an optional integer timestamp.\n" +
" If it cannot be parsed, the `@ts` part will be\n" +
" interpreted as part of the value.",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "setgcpolicy",
Desc: "Set the GC policy for a column family",
do: doSetGCPolicy,
Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
"\n" +
` maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
" maxversions=<n> Maximum number of versions to preserve",
Required: cbtconfig.ProjectAndInstanceRequired,
},
}
func doCount(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatal("usage: cbt count <table>")
}
tbl := getClient().Open(args[0])
n := 0
err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
n++
return true
}, bigtable.RowFilter(bigtable.StripValueFilter()))
if err != nil {
log.Fatalf("Reading rows: %v", err)
}
fmt.Println(n)
}
func doCreateFamily(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt createfamily <table> <family>")
}
err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])
if err != nil {
log.Fatalf("Creating column family: %v", err)
}
}
func doCreateTable(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatal("usage: cbt createtable <table>")
}
err := getAdminClient().CreateTable(ctx, args[0])
if err != nil {
log.Fatalf("Creating table: %v", err)
}
}
func doDeleteFamily(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt deletefamily <table> <family>")
}
err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])
if err != nil |
}
func doDeleteRow(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt deleterow <table> <row>")
}
tbl := getClient().Open(args[0])
mut := bigtable.NewMutation()
mut.DeleteRow()
if err := tbl.Apply(ctx, args[1], mut); err != nil {
log.Fatalf("Deleting row: %v", err)
}
}
func doDeleteTable(ctx context.Context, args ...string) {
if len(args) != 1 {
log.Fatalf("Can't do `cbt deletetable %s`", args)
}
err := getAdminClient().DeleteTable(ctx, args[0])
if err != nil {
log.Fatalf("Deleting table: %v", err)
}
}
// to break circular dependencies
var (
doDocFn func(ctx context.Context, args ...string)
doHelpFn func(ctx context.Context, args ...string)
doMDDocFn func(ctx context.Context, args ...string)
)
func init() {
doDocFn = doDocReal
doHelpFn = doHelpReal
doMDDocFn = doMDDocReal
}
func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) }
func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) }
func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) }
func docFlags() []*flag.Flag {
// Only include specific flags, in a specific order.
var flags []*flag.Flag
for _, name := range []string{"project", "instance", "creds"} {
f := flag.Lookup(name)
if f == nil {
log.Fatalf("Flag not linked: -%s", name)
}
flags = append(flags, f)
}
return flags
}
func doDocReal(ctx context.Context, args ...string) {
data := map[string]interface{}{
"Commands": commands,
"Flags": docFlags(),
}
var buf bytes.Buffer
if err := docTemplate.Execute(&buf, data); err != nil {
log.Fatalf("Bad doc template: %v", err)
}
out, err := format.Source(buf.Bytes())
if err != nil {
log.Fatalf("Bad doc output: %v", err)
}
os.Stdout.Write(out)
}
func indentLines(s, ind string) string {
ss := strings.Split(s, "\n")
for i, p := range ss {
ss[i] = ind + p
}
return strings.Join(ss, "\n")
}
var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
"indent": indentLines,
}).
Parse(`
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go -o cbtdoc.go doc
/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Usage:
cbt [options] command [arguments]
The commands are:
{{range .Commands}}
{{printf "%-25s %s" .Name .Desc}}{{end}}
Use "cbt help <command>" for more information about a command.
The options are:
{{range .Flags}}
-{{.Name}} string
{{.Usage}}{{end}}
{{range .Commands}}
{{.Desc}}
Usage:
{{indent .Usage "\t"}}
{{end}}
*/
package main
`))
func doHelpReal(ctx context.Context, args ...string) {
if len(args) == 0 {
usage(os.Stdout)
return
}
for _, cmd := range commands {
if cmd.Name == args[0] {
fmt.Println(cmd.Usage)
return
}
}
log.Fatalf("Don't know command %q", args[0])
}
func doListInstances(ctx context.Context, args ...string) {
if len(args) != 0 {
log.Fatalf("usage: cbt listinstances")
}
is, err := getInstanceAdminClient().Instances(ctx)
if err != nil {
log.Fatalf("Getting list of instances: %v", err)
}
tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
fmt.Fprintf(tw, "Instance Name\tInfo\n")
fmt.Fprintf(tw, "-------------\t----\n")
for _, i := range is {
fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName)
}
tw.Flush()
}
func doLookup(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatalf("usage: cbt lookup <table> <row>")
}
table, row := args[0], args[1]
tbl := getClient().Open(table)
r, err := tbl.ReadRow(ctx, row)
if err != nil {
log.Fatalf("Reading row: %v", err)
}
printRow(r)
}
func printRow(r bigtable.Row) {
fmt.Println(strings.Repeat("-", 40))
fmt.Println(r.Key())
var fams []string
for fam := range r {
fams = append(fams, fam)
}
sort.Strings(fams)
for _, fam := range fams {
ris := r[fam]
sort.Sort(byColumn(ris))
for _, ri := range ris {
ts := time.Unix(0, int64(ri.Timestamp)*1e3)
fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000"))
fmt.Printf(" %q\n", ri.Value)
}
}
}
type byColumn []bigtable.ReadItem
func (b byColumn) Len() int { return len(b) }
func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
func doLS(ctx context.Context, args ...string) {
switch len(args) {
default:
log.Fatalf("Can't do `cbt ls %s`", args)
case 0:
tables, err := getAdminClient().Tables(ctx)
if err != nil {
log.Fatalf("Getting list of tables: %v", err)
}
sort.Strings(tables)
for _, table := range tables {
fmt.Println(table)
}
case 1:
table := args[0]
ti, err := getAdminClient().TableInfo(ctx, table)
if err != nil {
log.Fatalf("Getting table info: %v", err)
}
sort.Strings(ti.Families)
for _, fam := range ti.Families {
fmt.Println(fam)
}
}
}
func doMDDocReal(ctx context.Context, args ...string) {
data := map[string]interface{}{
"Commands": commands,
"Flags": docFlags(),
}
var buf bytes.Buffer
if err := mddocTemplate.Execute(&buf, data); err != nil {
log.Fatalf("Bad mddoc template: %v", err)
}
io.Copy(os.Stdout, &buf)
}
var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{
"indent": indentLines,
}).
Parse(`
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Usage:
cbt [options] command [arguments]
The commands are:
{{range .Commands}}
{{printf "%-25s %s" .Name .Desc}}{{end}}
Use "cbt help <command>" for more information about a command.
The options are:
{{range .Flags}}
-{{.Name}} string
{{.Usage}}{{end}}
{{range .Commands}}
## {{.Desc}}
{{indent .Usage "\t"}}
{{end}}
`))
func doRead(ctx context.Context, args ...string) {
if len(args) < 1 {
log.Fatalf("usage: cbt read <table> [args ...]")
}
tbl := getClient().Open(args[0])
parsed := make(map[string]string)
for _, arg := range args[1:] {
i := strings.Index(arg, "=")
if i < 0 {
log.Fatalf("Bad arg %q", arg)
}
key, val := arg[:i], arg[i+1:]
switch key {
default:
log.Fatalf("Unknown arg key %q", key)
case "limit":
// Be nicer; we used to support this, but renamed it to "end".
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
case "start", "end", "prefix", "count":
parsed[key] = val
}
}
if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
}
var rr bigtable.RowRange
if start, end := parsed["start"], parsed["end"]; end != "" {
rr = bigtable.NewRange(start, end)
} else if start != "" {
rr = bigtable.InfiniteRange(start)
}
if prefix := parsed["prefix"]; prefix != "" {
rr = bigtable.PrefixRange(prefix)
}
var opts []bigtable.ReadOption
if count := parsed["count"]; count != "" {
n, err := strconv.ParseInt(count, 0, 64)
if err != nil {
log.Fatalf("Bad count %q: %v", count, err)
}
opts = append(opts, bigtable.LimitRows(n))
}
// TODO(dsymonds): Support filters.
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
printRow(r)
return true
}, opts...)
if err != nil {
log.Fatalf("Reading rows: %v", err)
}
}
var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
func doSet(ctx context.Context, args ...string) {
if len(args) < 3 {
log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
}
tbl := getClient().Open(args[0])
row := args[1]
mut := bigtable.NewMutation()
for _, arg := range args[2:] {
m := setArg.FindStringSubmatch(arg)
if m == nil {
log.Fatalf("Bad set arg %q", arg)
}
val := m[3]
ts := bigtable.Now()
if i := strings.LastIndex(val, "@"); i >= 0 {
// Try parsing a timestamp.
n, err := strconv.ParseInt(val[i+1:], 0, 64)
if err == nil {
val = val[:i]
ts = bigtable.Timestamp(n)
}
}
mut.Set(m[1], m[2], ts, []byte(val))
}
if err := tbl.Apply(ctx, row, mut); err != nil {
log.Fatalf("Applying mutation: %v", err)
}
}
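// Hedged examples of the argument grammar parsed above (table/values are hypothetical):
//   cbt set mytable row1 cf:col=hello             -> cell written with bigtable.Now()
//   cbt set mytable row1 cf:col=hello@1474930156  -> explicit integer timestamp
//   cbt set mytable row1 cf:col=user@example      -> "@example" is not an integer,
//                                                    so it remains part of the value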
func doSetGCPolicy(ctx context.Context, args ...string) {
if len(args) < 3 {
log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
}
table := args[0]
fam := args[1]
var pol bigtable.GCPolicy
switch p := args[2]; {
case strings.HasPrefix(p, "maxage="):
d, err := parseDuration(p[7:])
if err != nil {
log.Fatal(err)
}
pol = bigtable.MaxAgePolicy(d)
case strings.HasPrefix(p, "maxversions="):
n, err := strconv.ParseUint(p[12:], 10, 16)
if err != nil {
log.Fatal(err)
}
pol = bigtable.MaxVersionsPolicy(int(n))
default:
log.Fatalf("Bad GC policy %q", p)
}
if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
log.Fatalf("Setting GC policy: %v", err)
}
}
// parseDuration parses a duration string.
// It is similar to Go's time.ParseDuration, except with a different set of supported units,
// and only simple formats supported.
func parseDuration(s string) (time.Duration, error) {
// [0-9]+[a-z]+
// Split [0-9]+ from [a-z]+.
i := 0
for ; i < len(s); i++ {
c := s[i]
if c < '0' || c > '9' {
break
}
}
ds, u := s[:i], s[i:]
if ds == "" || u == "" {
return 0, fmt.Errorf("invalid duration %q", s)
}
// Parse them.
d, err := strconv.ParseUint(ds, 10, 32)
if err != nil {
return 0, fmt.Errorf("invalid duration %q: %v", s, err)
}
unit, ok := unitMap[u]
if !ok {
return 0, fmt.Errorf("unknown unit %q in duration %q", u, s)
}
if d > uint64((1<<63-1)/unit) {
// overflow
return 0, fmt.Errorf("invalid duration %q overflows", s)
}
return time.Duration(d) * unit, nil
}
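// Illustrative sketch (not part of the original tool; the function name is
// hypothetical): parseDuration accepts the simple <number><unit> forms listed
// in unitMap below, e.g. "4d" -> 96 hours.
func exampleParseDuration() {
	d, err := parseDuration("4d") // 4 days
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // prints "96h0m0s"
}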
var unitMap = map[string]time.Duration{
"ms": time.Millisecond,
"s": time.Second,
"m": time.Minute,
"h": time.Hour,
"d": 24 * time.Hour,
}
| {
log.Fatalf("Deleting column family: %v", err)
} |
prices.go | package domain
import (
"context"
"fmt"
"strings"
"time"
"github.com/telecoda/teletrada/proto"
)
type Price struct {
Base SymbolType
As SymbolType
Price float64
At time.Time
Exchange string
}
type DaySummary struct {
Base SymbolType
As SymbolType
OpenPrice float64
ClosePrice float64
WeightedAvgPrice float64
HighestPrice float64
LowestPrice float64
ChangePrice float64
ChangePercent float64
At time.Time
Exchange string
}
func (p Price) Validate() error {
if p.Base == "" {
return fmt.Errorf("Price invalid: Base symbol cannot be blank")
}
if p.As == "" {
return fmt.Errorf("Price invalid: As symbol cannot be blank")
}
if p.Price == 0 {
return fmt.Errorf("Price invalid: Price cannot be zero")
}
if p.Price < 0 {
return fmt.Errorf("Price invalid: Price cannot be negative")
}
if p.At.IsZero() {
return fmt.Errorf("Price invalid: At cannot be zero")
}
if p.Base == p.As && p.Price != 1.0 {
return fmt.Errorf("Price invalid: %s as %s MUST equal 1.0", p.Base, p.As)
}
return nil
}
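// Hedged sketch (not in the original source; examplePriceValidate is a
// hypothetical helper, and it assumes SymbolType is a string-like type as the
// struct fields suggest): a symbol priced as itself must be exactly 1.0.
func examplePriceValidate() error {
	p := Price{Base: "BTC", As: "BTC", Price: 1.0, At: time.Now(), Exchange: "test"}
	return p.Validate() // nil: the self-referential price equals 1.0
}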
// GetPrices returns current prices
func (s *server) GetPrices(ctx context.Context, req *proto.GetPricesRequest) (*proto.GetPricesResponse, error) {
resp := &proto.GetPricesResponse{}
req.Base = strings.ToUpper(req.Base)
req.As = strings.ToUpper(req.As)
var symbolTypes []SymbolType
if req.Base == "" || req.Base == "*ALL" | else {
// only one symbol
symbolTypes = make([]SymbolType, 1)
symbolTypes[0] = SymbolType(req.Base)
}
resp.Prices = make([]*proto.Price, len(symbolTypes))
for i, symbolType := range symbolTypes {
price, err := DefaultArchive.GetLatestPriceAs(symbolType, SymbolType(req.As))
if err != nil {
return nil, fmt.Errorf("Failed to fetch symbol %s price as %s - %s", req.Base, req.As, err)
}
pp, err := price.toProto()
if err != nil {
return nil, err
}
daySummary, err := DefaultArchive.GetDaySummaryAs(symbolType, SymbolType(req.As))
if err == nil {
// day summary found so fill in corresponding fields
pp.ChangePct24H = float32(daySummary.ChangePercent)
pp.Change24H = float32(daySummary.ChangePrice)
pp.Opening = float32(daySummary.OpenPrice)
pp.Closing = float32(daySummary.ClosePrice)
pp.Highest = float32(daySummary.HighestPrice)
pp.Lowest = float32(daySummary.LowestPrice)
pp.ChangeToday = pp.Current - pp.Closing
if pp.ChangeToday != 0 {
pp.ChangePctToday = (pp.ChangeToday / pp.Closing) * 100.00
}
} else {
fmt.Printf("Failed to get day summary %s as %s - %s\n", symbolType, req.As, err)
}
resp.Prices[i] = pp
}
return resp, nil
}
| {
// all prices
symbolMap := DefaultArchive.GetSymbolTypes()
symbolTypes = make([]SymbolType, len(symbolMap))
i := 0
for symbolType := range symbolMap {
symbolTypes[i] = symbolType
i++
}
} |
_fragment.py | """typy fragments"""
__all__ = ("Fragment",)
class Fragment(object):
| def __init__(self):
raise FragmentCannotBeInstantiated() |
|
record.go | package schema
import (
"encoding/json"
"fmt"
"strings"
"github.com/actgardner/gogen-avro/v8/generator"
)
type RecordDefinition struct {
name QualifiedName
aliases []QualifiedName
fields []*Field
doc string
metadata map[string]interface{}
}
func NewRecordDefinition(name QualifiedName, aliases []QualifiedName, fields []*Field, doc string, metadata map[string]interface{}) *RecordDefinition {
return &RecordDefinition{
name: name,
aliases: aliases,
fields: fields,
doc: doc,
metadata: metadata,
}
}
func (r *RecordDefinition) AvroName() QualifiedName {
return r.name
}
func (r *RecordDefinition) Name() string {
return generator.ToPublicName(r.name.String())
}
func (r *RecordDefinition) GoType() string {
return r.Name()
}
func (r *RecordDefinition) Aliases() []QualifiedName {
return r.aliases
}
func (r *RecordDefinition) SerializerMethod() string {
return fmt.Sprintf("write%v", r.Name())
}
func (r *RecordDefinition) NewWriterMethod() string {
return fmt.Sprintf("New%vWriter", r.Name())
}
func (s *RecordDefinition) Attribute(name string) interface{} {
return s.metadata[name]
}
func (r *RecordDefinition) Definition(scope map[QualifiedName]interface{}) (interface{}, error) {
if _, ok := scope[r.name]; ok {
return r.name.String(), nil
}
metadata := copyDefinition(r.metadata)
scope[r.name] = 1
fields := make([]map[string]interface{}, 0)
for _, f := range r.fields {
def, err := f.Definition(scope)
if err != nil {
return nil, err
}
fields = append(fields, def)
}
metadata["fields"] = fields
return metadata, nil
}
func (r *RecordDefinition) ConstructorMethod() string {
return fmt.Sprintf("New%v()", r.Name())
}
func (r *RecordDefinition) DefaultForField(f *Field) (string, error) {
return f.Type().DefaultValue(fmt.Sprintf("r.%v", f.GoName()), f.Default())
}
func (r *RecordDefinition) ConstructableForField(f *Field) string {
if constructor, ok := getConstructableForType(f.Type()); ok {
return fmt.Sprintf("r.%v = %v\n", f.GoName(), constructor.ConstructorMethod())
}
return ""
}
func (r *RecordDefinition) RecordReaderTypeName() string {
return r.Name() + "Reader"
}
// FieldByName finds a field in the reader schema whose name or aliases match a name in the writer schema.
func (r *RecordDefinition) FieldByName(field string) *Field {
for _, f := range r.fields {
if f.NameMatchesAliases(field) {
return f
}
}
return nil
} | items := rvalue.(map[string]interface{})
fieldSetters := ""
for k, v := range items {
field := r.FieldByName(k)
fieldSetter, err := field.Type().DefaultValue(fmt.Sprintf("%v.%v", lvalue, field.GoName()), v)
if err != nil {
return "", err
}
fieldSetters += fieldSetter + "\n"
}
return fieldSetters, nil
}
func (r *RecordDefinition) Fields() []*Field {
return r.fields
}
func (s *RecordDefinition) IsReadableBy(d Definition) bool {
_, ok := d.(*RecordDefinition)
return ok && hasMatchingName(s.AvroName(), d)
}
func (s *RecordDefinition) WrapperType() string {
return "types.Record"
}
func (s *RecordDefinition) Doc() string {
return strings.ReplaceAll(s.doc, "\n", " ")
}
func (s *RecordDefinition) Schema() (string, error) {
def0, err := s.Definition(make(map[QualifiedName]interface{}))
if err != nil {
return "", err
}
def := def0.(map[string]interface{})
delete(def, "namespace")
def["name"] = s.name.String()
jsonBytes, err := json.Marshal(def)
return string(jsonBytes), err
}
func (s *RecordDefinition) Children() []AvroType {
children := make([]AvroType, len(s.fields))
for i, field := range s.fields {
children[i] = field.Type()
}
return children
}
func (s *RecordDefinition) GetReference() bool {
return true
} |
func (r *RecordDefinition) DefaultValue(lvalue string, rvalue interface{}) (string, error) { |
zdump_v.py | """
ZdumpV - command ``/usr/sbin/zdump -v /etc/localtime -c 2019,2039``
===================================================================
The ``/usr/sbin/zdump -v /etc/localtime -c 2019,2039`` command provides information about
'Daylight Saving Time' in file /etc/localtime from 2019 to 2039.
Sample content from command ``zdump -v /etc/localtime -c 2019,2039`` is::
/etc/localtime Sun Mar 10 06:59:59 2019 UTC = Sun Mar 10 01:59:59 2019 EST isdst=0 gmtoff=-18000
/etc/localtime Sun Mar 10 07:00:00 2019 UTC = Sun Mar 10 03:00:00 2019 EDT isdst=1 gmtoff=-14400
/etc/localtime Sun Nov 7 05:59:59 2038 UTC = Sun Nov 7 01:59:59 2038 EDT isdst=1 gmtoff=-14400
/etc/localtime Sun Nov 7 06:00:00 2038 UTC = Sun Nov 7 01:00:00 2038 EST isdst=0 gmtoff=-18000
Examples:
>>> dst = zdump[0]
>>> dst.get('utc_time') | 'Sun Mar 10 06:59:59 2019 UTC'
>>> dst.get('local_time')
datetime.datetime(2019, 3, 10, 1, 59, 59)
>>> dst.get('local_time_raw')
'Sun Mar 10 01:59:59 2019 EST'
>>> dst.get('isdst')
0
>>> dst.get('gmtoff')
-18000
"""
from datetime import datetime
from insights.specs import Specs
from insights.parsers import SkipException
from insights import parser, CommandParser
def str2datetime(timestamp, tz=False):
"""
This function translates the time stamp into a datetime object.
Args:
timestamp (str): the time stamp from command `zdump -v`
tz (bool): True if it's UTC TimeZone.
Returns:
time (datetime): the datetime object about the time stamp
time_string (str): the formatted time stamp
"""
time, time_string = None, timestamp.strip()
# Fix the problem where the program running this Python code doesn't
# have the corresponding TimeZone, in which case strptime raises a ValueError.
# So, we skip the `TimeZone`
time_s = time_string.rsplit(None, 1)[0]
time_f = "%a %b %d %H:%M:%S %Y"
if tz:
# In some versions, `zdump` prints 'UT' instead of 'UTC'
# 'UT' is an invalid TimeZone for function `strptime`
time_s = time_s + " UTC"
time_f = "%a %b %d %H:%M:%S %Y %Z"
try:
time = datetime.strptime(time_s, time_f)
except ValueError:
pass
return time, time_string
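# Illustrative usage of str2datetime (sample input assumed, not taken from the
# original test data):
#
#   str2datetime('Sun Mar 10 06:59:59 2019 UT', tz=True)
#   -> (datetime.datetime(2019, 3, 10, 6, 59, 59), 'Sun Mar 10 06:59:59 2019 UT')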
@parser(Specs.zdump_v)
class ZdumpV(CommandParser, list):
"""
Parse the output from the ``/usr/sbin/zdump -v /etc/localtime -c 2019,2039`` command
and store the 'Daylight Saving Time' information into a list.
Raises:
SkipException: When nothing is parsed.
.. warning:: The value in key `local_time` doesn't include the TimeZone information
"""
def parse_content(self, content):
if not content:
raise SkipException("No Data from command: /usr/sbin/zdump -v /etc/localtime -c 2019,2039")
for line in content:
dst = {}
if 'isdst' not in line:
# skip the line that does not include a time stamp
continue
utc_time, remains = line.strip('/etc/localtime').split(' = ')
dst['utc_time'], dst['utc_time_raw'] = str2datetime(utc_time, True)
if dst['utc_time'] is None:
continue
local_time, _ = remains.split("isdst")
dst['local_time'], dst['local_time_raw'] = str2datetime(local_time)
if dst['local_time'] is None:
continue
isdst = [s.split('=')[1] for s in remains.split() if 'isdst' in s and '=' in s]
if isdst:
dst['isdst'] = int(isdst[0])
gmtoff = [s.split('=')[1] for s in remains.split() if 'gmtoff' in s and '=' in s]
if gmtoff:
dst['gmtoff'] = int(gmtoff[0])
self.append(dst)
tmrouth.rs | #[doc = "Reader of register TMROUTH"]
pub type R = crate::R<u16, super::TMROUTH>;
#[doc = "Writer for register TMROUTH"]
pub type W = crate::W<u16, super::TMROUTH>;
#[doc = "Register TMROUTH `reset()`'s with value 0"]
impl crate::ResetValue for super::TMROUTH {
type Type = u16;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `TIMEROUTHIGH`"]
pub type TIMEROUTHIGH_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `TIMEROUTHIGH`"]
pub struct TIMEROUTHIGH_W<'a> {
w: &'a mut W,
}
impl<'a> TIMEROUTHIGH_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u16) & 0xffff);
self.w
}
}
impl R {
#[doc = "Bits 0:15 - Shows the value of the upper 16 bits of the watchdog timer."]
#[inline(always)]
pub fn timerouthigh(&self) -> TIMEROUTHIGH_R {
TIMEROUTHIGH_R::new((self.bits & 0xffff) as u16)
}
}
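// Editor's sketch (peripheral binding hypothetical): with the generated svd2rust
// API above, reading the upper watchdog bits might look like
//     let hi: u16 = wdt.tmrouth.read().timerouthigh().bits();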
impl W {
#[doc = "Bits 0:15 - Shows the value of the upper 16 bits of the watchdog timer."]
#[inline(always)]
pub fn timerouthigh(&mut self) -> TIMEROUTHIGH_W {
TIMEROUTHIGH_W { w: self }
}
}
repo.py | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
import flask_restful
import flask
from api import helpers
import auth
import model
import util
from main import api_v1
@api_v1.resource('/repo/', endpoint='api.repo.list')
class RepoListAPI(flask_restful.Resource):
def get(self):
repo_dbs, repo_cursor = model.Repo.get_dbs()
return helpers.make_response(repo_dbs, model.Repo.FIELDS, repo_cursor)
@api_v1.resource('/repo/<string:repo_key>/', endpoint='api.repo')
class RepoAPI(flask_restful.Resource):
def get(self, repo_key):
repo_db = ndb.Key(urlsafe=repo_key).get()
if not repo_db:
helpers.make_not_found_exception('Repo %s not found' % repo_key)
return helpers.make_response(repo_db, model.Repo.FIELDS)
###############################################################################
# Admin
###############################################################################
@api_v1.resource('/admin/repo/', endpoint='api.admin.repo.list')
class AdminRepoListAPI(flask_restful.Resource):
@auth.admin_required
def get(self):
repo_keys = util.param('repo_keys', list)
if repo_keys:
repo_db_keys = [ndb.Key(urlsafe=k) for k in repo_keys]
repo_dbs = ndb.get_multi(repo_db_keys)
return helpers.make_response(repo_dbs, model.Repo.FIELDS)
repo_dbs, repo_cursor = model.Repo.get_dbs()
return helpers.make_response(repo_dbs, model.Repo.FIELDS, repo_cursor)
@api_v1.resource('/admin/repo/<string:repo_key>/', endpoint='api.admin.repo')
class AdminRepoAPI(flask_restful.Resource):
@auth.admin_required
def get(self, repo_key):
repo_db = ndb.Key(urlsafe=repo_key).get()
if not repo_db:
helpers.make_not_found_exception('Repo %s not found' % repo_key)
return helpers.make_response(repo_db, model.Repo.FIELDS)
validate.go | package main
import (
"github.com/nlopes/slack"
)
// ******************************************************************************
// Name : isSlackDescriptionValid
// Description: Function to validate Slack Channel description before performing
// updates
// ******************************************************************************
func isSlackDescriptionValid(description []string) bool {
if len(description) < 3 {
return false
}
return true
}
// ******************************************************************************
// Name : isArgumentFormatValid
// Description: Function to validate double quote symbols in slack command
// arguments. To check that all arguments are starting and ending
// with double quotes
// ******************************************************************************
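// Editor's example (hypothetical input): `“deploy” "now"` has its two curly
// quotes normalized to '"', giving four quotes in total -- an even count, so
// the argument format is considered valid.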
func isArgumentFormatValid(runeText []rune, s slack.SlashCommand) bool {
quotes := 0
for i, j := range runeText {
if int(j) == 8220 || int(j) == 8221 || j == '"' {
runeText[i] = '"'
quotes++
}
}
if quotes%2 != 0 {
return false
}
return true
}
05b_price_history_unadj.rs | use gurufocus_api as gfapi;
use std::env;
type PriceHistory = Vec<(String, f64)>;
#[tokio::main]
async fn main() {
let token = env::var("GURUFOCUS_TOKEN").unwrap();
let gf_connect = gfapi::GuruFocusConnector::new(token);
let stock = "NYSE:DIS";
let prices = gf_connect.get_unadj_price_hist(stock).await.unwrap();
let prices: PriceHistory = serde_json::from_value(prices).unwrap();
println!("Unadjusted Price history for Walt Disney\n{:#?}", prices);
}
client.go | /*
DNS-over-HTTPS
Copyright (C) 2017-2018 Star Brilliant <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
package main
import (
"context"
"crypto/tls"
"fmt"
"log"
"math/rand"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/m13253/dns-over-https/doh-client/config"
"github.com/m13253/dns-over-https/doh-client/selector"
jsondns "github.com/m13253/dns-over-https/json-dns"
"github.com/miekg/dns"
"golang.org/x/net/http2"
"golang.org/x/net/idna"
)
type Client struct {
conf *config.Config
bootstrap []string
passthrough []string
udpClient *dns.Client
tcpClient *dns.Client
udpServers []*dns.Server
tcpServers []*dns.Server
bootstrapResolver *net.Resolver
cookieJar http.CookieJar
httpClientMux *sync.RWMutex
httpTransport *http.Transport
httpClient *http.Client
httpClientLastCreate time.Time
selector selector.Selector
}
type DNSRequest struct {
response *http.Response
reply *dns.Msg
udpSize uint16
ednsClientAddress net.IP
ednsClientNetmask uint8
currentUpstream string
err error
}
func NewClient(conf *config.Config) (c *Client, err error) {
c = &Client{
conf: conf,
}
udpHandler := dns.HandlerFunc(c.udpHandlerFunc)
tcpHandler := dns.HandlerFunc(c.tcpHandlerFunc)
c.udpClient = &dns.Client{
Net: "udp",
UDPSize: dns.DefaultMsgSize,
Timeout: time.Duration(conf.Other.Timeout) * time.Second,
}
c.tcpClient = &dns.Client{
Net: "tcp",
Timeout: time.Duration(conf.Other.Timeout) * time.Second,
}
for _, addr := range conf.Listen {
c.udpServers = append(c.udpServers, &dns.Server{
Addr: addr,
Net: "udp",
Handler: udpHandler,
UDPSize: dns.DefaultMsgSize,
})
c.tcpServers = append(c.tcpServers, &dns.Server{
Addr: addr,
Net: "tcp",
Handler: tcpHandler,
})
}
c.bootstrapResolver = net.DefaultResolver
if len(conf.Other.Bootstrap) != 0 {
c.bootstrap = make([]string, len(conf.Other.Bootstrap))
for i, bootstrap := range conf.Other.Bootstrap {
bootstrapAddr, err := net.ResolveUDPAddr("udp", bootstrap)
if err != nil {
bootstrapAddr, err = net.ResolveUDPAddr("udp", "["+bootstrap+"]:53")
}
if err != nil {
return nil, err
}
c.bootstrap[i] = bootstrapAddr.String()
}
c.bootstrapResolver = &net.Resolver{
PreferGo: true,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
var d net.Dialer
numServers := len(c.bootstrap)
bootstrap := c.bootstrap[rand.Intn(numServers)]
conn, err := d.DialContext(ctx, network, bootstrap)
return conn, err
},
}
if len(conf.Other.Passthrough) != 0 {
c.passthrough = make([]string, len(conf.Other.Passthrough))
for i, passthrough := range conf.Other.Passthrough {
// only use the punycode form if the conversion actually succeeded
if punycode, err := idna.ToASCII(passthrough); err == nil {
passthrough = punycode
}
c.passthrough[i] = "." + strings.ToLower(strings.Trim(passthrough, ".")) + "."
}
}
}
// Most CDNs require Cookie support to mitigate DDoS attacks.
// Disabling Cookies does not effectively prevent tracking anyway,
// so I will leave them on to keep anti-DDoS services happy.
if !c.conf.Other.NoCookies {
c.cookieJar, err = cookiejar.New(nil)
if err != nil {
return nil, err
}
} else {
c.cookieJar = nil
}
c.httpClientMux = new(sync.RWMutex)
err = c.newHTTPClient()
if err != nil {
return nil, err
}
switch c.conf.Upstream.UpstreamSelector {
case config.NginxWRR:
if c.conf.Other.Verbose {
log.Println(config.NginxWRR, "mode start")
}
s := selector.NewNginxWRRSelector(time.Duration(c.conf.Other.Timeout) * time.Second)
for _, u := range c.conf.Upstream.UpstreamGoogle {
if err := s.Add(u.URL, selector.Google, u.Weight); err != nil {
return nil, err
}
}
for _, u := range c.conf.Upstream.UpstreamIETF {
if err := s.Add(u.URL, selector.IETF, u.Weight); err != nil {
return nil, err
}
}
c.selector = s
case config.LVSWRR:
if c.conf.Other.Verbose {
log.Println(config.LVSWRR, "mode start")
}
s := selector.NewLVSWRRSelector(time.Duration(c.conf.Other.Timeout) * time.Second)
for _, u := range c.conf.Upstream.UpstreamGoogle {
if err := s.Add(u.URL, selector.Google, u.Weight); err != nil {
return nil, err
}
}
for _, u := range c.conf.Upstream.UpstreamIETF {
if err := s.Add(u.URL, selector.IETF, u.Weight); err != nil {
return nil, err
}
}
c.selector = s
default:
if c.conf.Other.Verbose {
log.Println(config.Random, "mode start")
}
// if the selector is invalid or random, fall back to the random selector - or should we stop the program and let the user know the config is wrong?
s := selector.NewRandomSelector()
for _, u := range c.conf.Upstream.UpstreamGoogle {
if err := s.Add(u.URL, selector.Google); err != nil {
return nil, err
}
}
for _, u := range c.conf.Upstream.UpstreamIETF {
if err := s.Add(u.URL, selector.IETF); err != nil {
return nil, err
}
}
c.selector = s
}
if c.conf.Other.Verbose {
if reporter, ok := c.selector.(selector.DebugReporter); ok {
reporter.ReportWeights()
}
}
return c, nil
}
func (c *Client) newHTTPClient() error {
c.httpClientMux.Lock()
defer c.httpClientMux.Unlock()
if !c.httpClientLastCreate.IsZero() && time.Since(c.httpClientLastCreate) < time.Duration(c.conf.Other.Timeout)*time.Second {
return nil
}
if c.httpTransport != nil {
c.httpTransport.CloseIdleConnections()
}
dialer := &net.Dialer{
Timeout: time.Duration(c.conf.Other.Timeout) * time.Second,
KeepAlive: 30 * time.Second,
// DualStack: true,
Resolver: c.bootstrapResolver,
}
c.httpTransport = &http.Transport{
DialContext: dialer.DialContext,
ExpectContinueTimeout: 1 * time.Second,
IdleConnTimeout: 90 * time.Second,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: time.Duration(c.conf.Other.Timeout) * time.Second,
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.conf.Other.TLSInsecureSkipVerify},
}
if c.conf.Other.NoIPv6 {
c.httpTransport.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) {
if strings.HasPrefix(network, "tcp") {
network = "tcp4"
}
return dialer.DialContext(ctx, network, address)
}
}
err := http2.ConfigureTransport(c.httpTransport)
if err != nil {
return err
}
c.httpClient = &http.Client{
Transport: c.httpTransport,
Jar: c.cookieJar,
}
c.httpClientLastCreate = time.Now()
return nil
}
func (c *Client) Start() error {
results := make(chan error, len(c.udpServers)+len(c.tcpServers))
for _, srv := range append(c.udpServers, c.tcpServers...) {
go func(srv *dns.Server) {
err := srv.ListenAndServe()
if err != nil {
log.Println(err)
}
results <- err
}(srv)
}
// start evaluation loop
c.selector.StartEvaluate()
for i := 0; i < cap(results); i++ {
err := <-results
if err != nil {
return err
}
}
close(results)
return nil
}
func (c *Client) handlerFunc(w dns.ResponseWriter, r *dns.Msg, isTCP bool) {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.conf.Other.Timeout)*time.Second)
defer cancel()
if r.Response {
log.Println("Received a response packet")
return
}
if len(r.Question) != 1 {
log.Println("Number of questions is not 1")
reply := jsondns.PrepareReply(r)
reply.Rcode = dns.RcodeFormatError
w.WriteMsg(reply)
return
}
question := &r.Question[0]
questionName := question.Name
questionClass := ""
if qclass, ok := dns.ClassToString[question.Qclass]; ok {
questionClass = qclass
} else {
questionClass = strconv.FormatUint(uint64(question.Qclass), 10)
}
questionType := ""
if qtype, ok := dns.TypeToString[question.Qtype]; ok {
questionType = qtype
} else {
questionType = strconv.FormatUint(uint64(question.Qtype), 10)
}
if c.conf.Other.Verbose {
fmt.Printf("%s - - [%s] \"%s %s %s\"\n", w.RemoteAddr(), time.Now().Format("02/Jan/2006:15:04:05 -0700"), questionName, questionClass, questionType)
}
shouldPassthrough := false
passthroughQuestionName := questionName
if punycode, err := idna.ToASCII(passthroughQuestionName); err == nil {
passthroughQuestionName = punycode
}
passthroughQuestionName = "." + strings.ToLower(strings.Trim(passthroughQuestionName, ".")) + "."
for _, passthrough := range c.passthrough {
if strings.HasSuffix(passthroughQuestionName, passthrough) {
shouldPassthrough = true
break
}
}
if shouldPassthrough {
numServers := len(c.bootstrap)
upstream := c.bootstrap[rand.Intn(numServers)]
log.Printf("Request \"%s %s %s\" is passed through %s.\n", questionName, questionClass, questionType, upstream)
var reply *dns.Msg
var err error
if !isTCP {
reply, _, err = c.udpClient.Exchange(r, upstream)
} else {
reply, _, err = c.tcpClient.Exchange(r, upstream)
}
if err == nil {
w.WriteMsg(reply)
return
}
log.Println(err)
reply = jsondns.PrepareReply(r)
reply.Rcode = dns.RcodeServerFailure
w.WriteMsg(reply)
return
}
upstream := c.selector.Get()
requestType := upstream.RequestType
if c.conf.Other.Verbose {
log.Println("choose upstream:", upstream)
}
var req *DNSRequest
switch requestType {
case "application/dns-json":
req = c.generateRequestGoogle(ctx, w, r, isTCP, upstream)
case "application/dns-message":
req = c.generateRequestIETF(ctx, w, r, isTCP, upstream)
default:
panic("Unknown request Content-Type")
}
if req.err != nil {
if urlErr, ok := req.err.(*url.Error); ok {
// should we only check timeout?
if urlErr.Timeout() {
c.selector.ReportUpstreamStatus(upstream, selector.Timeout)
}
}
return
}
// if req.err == nil, req.response != nil
defer req.response.Body.Close()
for _, header := range c.conf.Other.DebugHTTPHeaders {
if value := req.response.Header.Get(header); value != "" {
log.Printf("%s: %s\n", header, value)
}
}
candidateType := strings.SplitN(req.response.Header.Get("Content-Type"), ";", 2)[0]
switch candidateType {
case "application/json":
c.parseResponseGoogle(ctx, w, r, isTCP, req)
case "application/dns-message", "application/dns-udpwireformat":
c.parseResponseIETF(ctx, w, r, isTCP, req)
default:
switch requestType {
case "application/dns-json":
c.parseResponseGoogle(ctx, w, r, isTCP, req)
case "application/dns-message":
c.parseResponseIETF(ctx, w, r, isTCP, req)
default:
panic("Unknown response Content-Type")
}
}
// https://developers.cloudflare.com/1.1.1.1/dns-over-https/request-structure/ says
// the return code will be 200 / 400 / 413 / 415 / 504; some servers also return 503,
// so any 5xx status code suggests the upstream has problems
/*if req.response.StatusCode/100 == 5 {
c.selector.ReportUpstreamStatus(upstream, selector.Medium)
}*/
switch req.response.StatusCode / 100 {
case 5:
c.selector.ReportUpstreamStatus(upstream, selector.Error)
case 2:
c.selector.ReportUpstreamStatus(upstream, selector.OK)
}
}
func (c *Client) udpHandlerFunc(w dns.ResponseWriter, r *dns.Msg) {
c.handlerFunc(w, r, false)
}
func (c *Client) tcpHandlerFunc(w dns.ResponseWriter, r *dns.Msg) {
c.handlerFunc(w, r, true)
}
var (
ipv4Mask24 = net.IPMask{255, 255, 255, 0}
ipv6Mask56 = net.CIDRMask(56, 128)
)
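// Editor's note: these masks truncate the client address before it is sent as an
// EDNS Client Subnet option in findClientIP below, e.g. 203.0.113.57 becomes
// 203.0.113.0/24 and an IPv6 source is reduced to its /56 prefix, so the upstream
// resolver never sees the full client IP.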
func (c *Client) findClientIP(w dns.ResponseWriter, r *dns.Msg) (ednsClientAddress net.IP, ednsClientNetmask uint8) {
ednsClientNetmask = 255
if c.conf.Other.NoECS {
return net.IPv4(0, 0, 0, 0), 0
}
if opt := r.IsEdns0(); opt != nil {
for _, option := range opt.Option {
if option.Option() == dns.EDNS0SUBNET {
edns0Subnet := option.(*dns.EDNS0_SUBNET)
ednsClientAddress = edns0Subnet.Address
ednsClientNetmask = edns0Subnet.SourceNetmask
return
}
}
}
remoteAddr, err := net.ResolveUDPAddr("udp", w.RemoteAddr().String())
if err != nil {
return
}
if ip := remoteAddr.IP; jsondns.IsGlobalIP(ip) {
if ipv4 := ip.To4(); ipv4 != nil {
ednsClientAddress = ipv4.Mask(ipv4Mask24)
ednsClientNetmask = 24
} else {
ednsClientAddress = ip.Mask(ipv6Mask56)
ednsClientNetmask = 56
}
}
return
}
hashmap.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
// loadFactor %overflow bytes/entry hitprobe missprobe
// 4.00 2.13 20.77 3.00 4.00
// 4.50 4.05 17.30 3.25 4.50
// 5.00 6.85 14.77 3.50 5.00
// 5.50 10.55 12.94 3.75 5.50
// 6.00 15.27 11.67 4.00 6.00
// 6.50 20.90 10.79 4.25 6.50
// 7.00 27.14 10.15 4.50 7.00
// 7.50 34.03 9.73 4.75 7.50
// 8.00 41.10 9.40 5.00 8.00
//
// %overflow = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe = # of entries to check when looking up a present key
// missprobe = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
import (
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
const (
// Maximum number of key/value pairs a bucket can hold.
bucketCntBits = 3
bucketCnt = 1 << bucketCntBits
// Maximum average load of a bucket that triggers growth.
loadFactor = 6.5
// Maximum key or value size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Fast versions cannot handle big values - the cutoff size for
// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
maxKeySize = 128
maxValueSize = 128
// data offset should be the size of the bmap struct, but needs to be
// aligned correctly. For amd64p32 this means 64-bit alignment
// even though pointers are 32 bit.
dataOffset = unsafe.Offsetof(struct {
b bmap
v int64
}{}.v)
// Possible tophash values. We reserve a few possibilities for special marks.
// Each bucket (including its overflow buckets, if any) will have either all or none of its
// entries in the evacuated* states (except during the evacuate() method, which only happens
// during map writes and thus no one else can observe the map during that time).
empty = 0 // cell is empty
evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
evacuatedY = 3 // same as above, but evacuated to second half of larger table.
minTopHash = 4 // minimum tophash for a normal filled cell.
// flags
iterator = 1 // there may be an iterator using buckets
oldIterator = 2 // there may be an iterator using oldbuckets
hashWriting = 4 // a goroutine is writing to the map
// sentinel bucket ID for iterator checks
noCheck = 1<<(8*sys.PtrSize) - 1
)
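// Editor's note: e.g. on a 64-bit system an entry's tophash is uint8(hash >> 56);
// any value that lands below minTopHash is bumped up by minTopHash, so normal
// entries can never collide with the marker values above.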
// A header for a Go map.
type hmap struct {
// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
// ../reflect/type.go. Don't change this structure without also changing that code!
count int // # live cells == size of map. Must be first (used by len() builtin)
flags uint8
B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
hash0 uint32 // hash seed
buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
// If both key and value do not contain pointers and are inline, then we mark bucket
// type as containing no pointers. This avoids scanning such maps.
// However, bmap.overflow is a pointer. In order to keep overflow buckets
// alive, we store pointers to all overflow buckets in hmap.overflow.
// Overflow is used only if key and value do not contain pointers.
// overflow[0] contains overflow buckets for hmap.buckets.
// overflow[1] contains overflow buckets for hmap.oldbuckets.
// The first indirection allows us to reduce static size of hmap.
// The second indirection allows to store a pointer to the slice in hiter.
overflow *[2]*[]*bmap
}
// A bucket for a Go map.
type bmap struct {
tophash [bucketCnt]uint8
// Followed by bucketCnt keys and then bucketCnt values.
// NOTE: packing all the keys together and then all the values together makes the
// code a bit more complicated than alternating key/value/key/value/... but it allows
// us to eliminate padding which would be needed for, e.g., map[int64]int8.
// Followed by an overflow pointer.
}
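// Editor's note: for map[int64]int8 the layout above stores 8 int64 keys followed
// by 8 int8 values; an interleaved key/value layout would instead need 7 bytes of
// padding after every value to keep the next key 8-byte aligned.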
// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
t *maptype
h *hmap
buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
bptr *bmap // current bucket
overflow [2]*[]*bmap // keeps overflow buckets alive
startBucket uintptr // bucket iteration started at
offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
wrapped bool // already wrapped around from end of bucket array to beginning
B uint8
i uint8
bucket uintptr
checkBucket uintptr
}
func evacuated(b *bmap) bool {
h := b.tophash[0]
return h > empty && h < minTopHash
}
func (b *bmap) overflow(t *maptype) *bmap {
return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}
func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
if t.bucket.kind&kindNoPointers != 0 {
h.createOverflow()
*h.overflow[0] = append(*h.overflow[0], ovf)
}
*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}
func (h *hmap) createOverflow() {
if h.overflow == nil {
h.overflow = new([2]*[]*bmap)
}
if h.overflow[0] == nil {
h.overflow[0] = new([]*bmap)
}
}
// makemap implements a Go map creation make(map[k]v, hint)
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
throw("bad hmap size")
}
if hint < 0 || int64(int32(hint)) != hint {
panic(plainError("makemap: size out of range"))
// TODO: make hint an int, then none of this nonsense
}
if !ismapkey(t.key) {
throw("runtime.makemap: unsupported map key type")
}
// check compiler's and reflect's math
if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
throw("value size wrong")
}
// invariants we depend on. We should probably check these at compile time
// somewhere, but for now we'll do it here.
if t.key.align > bucketCnt {
throw("key align too big")
}
if t.elem.align > bucketCnt {
throw("value align too big")
}
if t.key.size%uintptr(t.key.align) != 0 {
throw("key size not a multiple of key align")
}
if t.elem.size%uintptr(t.elem.align) != 0 {
throw("value size not a multiple of value align")
}
if bucketCnt < 8 {
throw("bucketsize too small for proper alignment")
}
if dataOffset%uintptr(t.key.align) != 0 {
throw("need padding in bucket (key)")
}
if dataOffset%uintptr(t.elem.align) != 0 {
throw("need padding in bucket (value)")
}
// find size parameter which will hold the requested # of elements
B := uint8(0)
for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
}
// allocate initial hash table
// if B == 0, the buckets field is allocated lazily later (in mapassign)
// If hint is large zeroing this memory could take a while.
buckets := bucket
if B != 0 {
buckets = newarray(t.bucket, 1<<B)
}
// initialize Hmap
if h == nil {
h = (*hmap)(newobject(t.hmap))
}
h.count = 0
h.B = B
h.flags = 0
h.hash0 = fastrand1()
h.buckets = buckets
h.oldbuckets = nil
h.nevacuate = 0
return h
}
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
pc := funcPC(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
return v
}
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0])
}
}
}
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
pc := funcPC(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
return v, true
}
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0]), false
}
}
}
// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
if h == nil || h.count == 0 {
return nil, nil
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
return k, v
}
}
b = b.overflow(t)
if b == nil {
return nil, nil
}
}
}
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
v := mapaccess1(t, h, key)
if v == unsafe.Pointer(&zeroVal[0]) {
return zero
}
return v
}
func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
v := mapaccess1(t, h, key)
if v == unsafe.Pointer(&zeroVal[0]) {
return zero, false
}
return v, true
}
func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
pc := funcPC(mapassign1)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
raceReadObjectPC(t.elem, val, callerpc, pc)
}
if msanenabled {
msanread(key, t.key.size)
msanread(val, t.elem.size)
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
h.flags |= hashWriting
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
}
again:
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
var inserti *uint8
var insertk unsafe.Pointer
var insertv unsafe.Pointer
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == empty && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2) {
continue
}
// already have a mapping for key. Update it.
if t.needkeyupdate {
typedmemmove(t.key, k2, key)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
v2 := v
if t.indirectvalue {
v2 = *((*unsafe.Pointer)(v2))
}
typedmemmove(t.elem, v2, val)
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// did not find mapping for key. Allocate new cell & add entry.
if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := (*bmap)(newobject(t.bucket))
h.setoverflow(t, b, newb)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
insertv = add(insertk, bucketCnt*uintptr(t.keysize))
}
// store new key/value at insert position
if t.indirectkey {
kmem := newobject(t.key)
*(*unsafe.Pointer)(insertk) = kmem
insertk = kmem
}
if t.indirectvalue {
vmem := newobject(t.elem)
*(*unsafe.Pointer)(insertv) = vmem
insertv = vmem
}
typedmemmove(t.key, insertk, key)
typedmemmove(t.elem, insertv, val)
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
pc := funcPC(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
h.flags |= hashWriting
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2) {
continue
}
memclr(k, uintptr(t.keysize))
v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
memclr(v, uintptr(t.valuesize))
b.tophash[i] = empty
h.count--
goto done
}
b = b.overflow(t)
if b == nil {
goto done
}
}
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
func mapiterinit(t *maptype, h *hmap, it *hiter) {
// Clear pointer fields so garbage collector does not complain.
it.key = nil
it.value = nil
it.t = nil
it.h = nil
it.buckets = nil
it.bptr = nil
it.overflow[0] = nil
it.overflow[1] = nil
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
}
if h == nil || h.count == 0 {
it.key = nil
it.value = nil
return
}
if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
}
it.t = t
it.h = h
// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
if t.bucket.kind&kindNoPointers != 0 {
// Allocate the current slice and remember pointers to both current and old.
// This preserves all relevant overflow buckets alive even if
// the table grows and/or overflow buckets are added to the table
// while we are iterating.
h.createOverflow()
it.overflow = *h.overflow
}
// decide where to start
r := uintptr(fastrand1())
if h.B > 31-bucketCntBits {
r += uintptr(fastrand1()) << 31
}
it.startBucket = r & (uintptr(1)<<h.B - 1)
it.offset = uint8(r >> h.B & (bucketCnt - 1))
// iterator state
it.bucket = it.startBucket
it.wrapped = false
it.bptr = nil
// Remember we have an iterator.
// Can run concurrently with another hash_iter_init().
if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
atomic.Or8(&h.flags, iterator|oldIterator)
}
mapiternext(it)
}
func mapiternext(it *hiter) {
h := it.h
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&it))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
}
if h.flags&hashWriting != 0 {
throw("concurrent map iteration and map write")
}
t := it.t
bucket := it.bucket
b := it.bptr
i := it.i
checkBucket := it.checkBucket
alg := t.key.alg
next:
if b == nil {
if bucket == it.startBucket && it.wrapped {
// end of iteration
it.key = nil
it.value = nil
return
}
if h.oldbuckets != nil && it.B == h.B {
// Iterator was started in the middle of a grow, and the grow isn't done yet.
// If the bucket we're looking at hasn't been filled in yet (i.e. the old
// bucket hasn't been evacuated) then we need to iterate through the old
// bucket and only return the ones that will be migrated to this bucket.
oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
if !evacuated(b) {
checkBucket = bucket
} else {
b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
checkBucket = noCheck
}
} else {
b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
checkBucket = noCheck
}
bucket++
if bucket == uintptr(1)<<it.B {
bucket = 0
it.wrapped = true
}
i = 0
}
for ; i < bucketCnt; i++ {
offi := (i + it.offset) & (bucketCnt - 1)
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
if checkBucket != noCheck {
// Special case: iterator was started during a grow and the
// grow is not done yet. We're working on a bucket whose
// oldbucket has not been evacuated yet. Or at least, it wasn't
// evacuated when we started the bucket. So we're iterating
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
k2 := k
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if t.reflexivekey || alg.equal(k2, k2) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := alg.hash(k2, uintptr(h.hash0))
if hash&(uintptr(1)<<it.B-1) != checkBucket {
continue
}
} else {
// Hash isn't repeatable if k != k (NaNs). We need a
// repeatable and randomish choice of which direction
// to send NaNs during evacuation. We'll use the low
// bit of tophash to decide which way NaNs go.
// NOTE: this case is why we need two evacuate tophash
// values, evacuatedX and evacuatedY, that differ in
// their low bit.
if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
continue
}
}
}
if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
// this is the golden data, we can return it.
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
it.key = k
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
it.value = v
} else {
// The hash table has grown since the iterator was started.
// The golden data for this key is now somewhere else.
k2 := k
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
if t.reflexivekey || alg.equal(k2, k2) {
// Check the current hash table for the data.
// This code handles the case where the key
// has been deleted, updated, or deleted and reinserted.
// NOTE: we need to regrab the key as it has potentially been
// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
rk, rv := mapaccessK(t, h, k2)
if rk == nil {
continue // key has been deleted
}
it.key = rk
it.value = rv
} else {
// if key!=key then the entry can't be deleted or
// updated, so we can just return it. That's lucky for
// us because when key!=key we can't look it up
// successfully in the current table.
it.key = k2
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
it.value = v
}
}
it.bucket = bucket
if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
it.bptr = b
}
it.i = i + 1
it.checkBucket = checkBucket
return
}
}
b = b.overflow(t)
i = 0
goto next
}
func hashGrow(t *maptype, h *hmap) {
if h.oldbuckets != nil {
throw("evacuation not done in time")
}
oldbuckets := h.buckets
newbuckets := newarray(t.bucket, 1<<(h.B+1))
flags := h.flags &^ (iterator | oldIterator)
if h.flags&iterator != 0 {
flags |= oldIterator
}
// commit the grow (atomic wrt gc)
h.B++
h.flags = flags
h.oldbuckets = oldbuckets
h.buckets = newbuckets
h.nevacuate = 0
if h.overflow != nil {
// Promote current overflow buckets to the old generation.
if h.overflow[1] != nil {
throw("overflow is not nil")
}
h.overflow[1] = h.overflow[0]
h.overflow[0] = nil
}
// the actual copying of the hash table data is done incrementally
// by growWork() and evacuate().
}
func growWork(t *maptype, h *hmap, bucket uintptr) {
noldbuckets := uintptr(1) << (h.B - 1)
// make sure we evacuate the oldbucket corresponding
// to the bucket we're about to use
evacuate(t, h, bucket&(noldbuckets-1))
// evacuate one more oldbucket to make progress on growing
if h.oldbuckets != nil {
evacuate(t, h, h.nevacuate)
}
}
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
newbit := uintptr(1) << (h.B - 1)
alg := t.key.alg
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If !oldIterator.)
x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
xi := 0
yi := 0
xk := add(unsafe.Pointer(x), dataOffset)
yk := add(unsafe.Pointer(y), dataOffset)
xv := add(xk, bucketCnt*uintptr(t.keysize))
yv := add(yk, bucketCnt*uintptr(t.keysize))
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
v := add(k, bucketCnt*uintptr(t.keysize))
for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
top := b.tophash[i]
if top == empty {
b.tophash[i] = evacuatedEmpty
continue
}
if top < minTopHash {
throw("bad map state")
}
k2 := k
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
hash := alg.hash(k2, uintptr(h.hash0))
if h.flags&iterator != 0 {
if !t.reflexivekey && !alg.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
// presence of iterators, as our evacuation decision must
// match whatever decision the iterator made.
// Fortunately, we have the freedom to send these keys either
// way. Also, tophash is meaningless for these kinds of keys.
// We let the low bit of tophash drive the evacuation decision.
// We recompute a new random tophash for the next level so
// these keys will get evenly distributed across all buckets
// after multiple grows.
if (top & 1) != 0 {
hash |= newbit
} else {
hash &^= newbit
}
top = uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
}
}
if (hash & newbit) == 0 {
b.tophash[i] = evacuatedX
if xi == bucketCnt {
newx := (*bmap)(newobject(t.bucket))
h.setoverflow(t, x, newx)
x = newx
xi = 0
xk = add(unsafe.Pointer(x), dataOffset)
xv = add(xk, bucketCnt*uintptr(t.keysize))
}
x.tophash[xi] = top
if t.indirectkey {
*(*unsafe.Pointer)(xk) = k2 // copy pointer
} else {
typedmemmove(t.key, xk, k) // copy value
}
if t.indirectvalue {
*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
} else {
typedmemmove(t.elem, xv, v)
}
xi++
xk = add(xk, uintptr(t.keysize))
xv = add(xv, uintptr(t.valuesize))
} else {
b.tophash[i] = evacuatedY
if yi == bucketCnt {
newy := (*bmap)(newobject(t.bucket))
h.setoverflow(t, y, newy)
y = newy
yi = 0
yk = add(unsafe.Pointer(y), dataOffset)
yv = add(yk, bucketCnt*uintptr(t.keysize))
}
y.tophash[yi] = top
if t.indirectkey {
*(*unsafe.Pointer)(yk) = k2
} else {
typedmemmove(t.key, yk, k)
}
if t.indirectvalue {
*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
} else {
typedmemmove(t.elem, yv, v)
}
yi++
yk = add(yk, uintptr(t.keysize))
yv = add(yv, uintptr(t.valuesize))
}
}
}
// Unlink the overflow buckets & clear key/value to help GC.
if h.flags&oldIterator == 0 {
b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
}
}
// Advance evacuation mark
if oldbucket == h.nevacuate {
h.nevacuate = oldbucket + 1
if oldbucket+1 == newbit { // newbit == # of oldbuckets
// Growing is all done. Free old main bucket array.
h.oldbuckets = nil
// Can discard old overflow buckets as well.
// If they are still referenced by an iterator,
// then the iterator holds a pointers to the slice.
if h.overflow != nil {
h.overflow[1] = nil
}
}
}
}
func ismapkey(t *_type) bool {
return t.alg.hash != nil
}
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
return makemap(t, 0, nil, nil)
}
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
val, ok := mapaccess2(t, h, key)
if !ok {
// reflect wants nil for a missing element
val = nil
}
return val
}
//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
mapassign1(t, h, key, val)
}
//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
mapdelete(t, h, key)
}
//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
it := new(hiter)
mapiterinit(t, h, it)
return it
}
//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
mapiternext(it)
}
//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
return it.key
}
//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
if h == nil {
return 0
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&h))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
}
return h.count
}
//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
return ismapkey(t)
}
const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
setup.py | from __future__ import print_function
import codecs
import io
import os
from thecut.forms import __version__
from setuptools import setup, find_packages
import sys
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
filename = os.path.join(here, filename)
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.rst', 'HISTORY.rst')
setup(
# General information
name='thecut-forms',
version=__version__,
# Packaging
packages=find_packages(exclude=['docs']),
namespace_packages=['thecut'],
include_package_data=True,
# Dependencies
install_requires=[],
# Author information
author='The Cut Creative',
author_email='[email protected]',
# Additional information
url='https://github.com/thecut/thecut-forms',
license='Apache Software License 2.0',
description='A reusable application.',
long_description=long_description,
platforms='any',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.10',
'Framework :: Django :: 1.11',
],
) | |
analyse_play.py | ################################################################################
# STD LIBS
import sys
# 3RD PARTY LIBS
import numpy
import pyaudio
import analyse
# USER LIBS
import notes
import timing
from constants import *
################################################################################
# These values will probably need to be tweaked for each guitar
# note must be at least this to be counted
# The higher the less chance "noise" will be detected as notes but means notes
# must be played hard
MINIMUM_VOLUME = -13
# The range is 0dB for the maximally loud sounds down to -40dB for silence.
# Typical very loud sounds are -1dB and typical silence is -36dB.
# note must be X decibels louder than previous to count as new note
ATTACK_THRESHOLD = 1.5
# X midi notes, semitones
OCTAVE_CORRECTION = 12
# Analyse X samples at a time
SAMPLE_SIZE = 1024
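# Editor's worked example (values hypothetical): with the defaults above, a pitch
# at -12dB right after one at -14dB rises by 2dB > ATTACK_THRESHOLD, so it counts
# as a freshly attacked note; anything quieter than MINIMUM_VOLUME (-13dB) is
# ignored entirely.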
################################################################################
def main():
pyaud = pyaudio.PyAudio()
stream = pyaud.open (
format = pyaudio.paInt16,
channels = 2,
rate = 44100,
input_device_index = 1,
input = True
)
last_note = last_vol = last_time = 0
while True:
t = timing.get_time()
rawsamps = stream.read(SAMPLE_SIZE)
samps = numpy.fromstring(rawsamps, dtype=numpy.int16)
event = ''
midi_note = analyse.musical_detect_pitch(samps, min_note=28.0)
if midi_note:
midi_note += OCTAVE_CORRECTION
latest_note = notes.midi_to_note(midi_note)
latest_vol = analyse.loudness(samps)
attacked = latest_vol - last_vol > ATTACK_THRESHOLD
if latest_note != last_note or attacked:
if latest_vol > MINIMUM_VOLUME:
event = {'note':latest_note, 'time': t}
last_time = t
last_note = latest_note
last_vol = latest_vol
elif last_note:
last_note = None
print event
sys.stdout.flush()
if __name__ == '__main__':
    main()
|
wrap_pystan.py | # pylint: disable=arguments-differ
"""Base class for PyStan wrappers."""
from ..data import from_pystan
from .base import SamplingWrapper
class PyStanSamplingWrapper(SamplingWrapper):
"""PyStan sampling wrapper base class.
See the documentation on :class:`~arviz.SamplingWrapper` for a more detailed
description. An example of ``PyStanSamplingWrapper`` usage can be found
in the :ref:`pystan_refitting` notebook.
Warnings
--------
Sampling wrappers are an experimental feature in a very early stage. Please use them
with caution.
"""
def sel_observations(self, idx):
"""Select a subset of the observations in idata_orig.
**Not implemented**: This method must be implemented on a model basis.
It is documented here to show its format and call signature.
Parameters
----------
idx
Indexes to separate from the rest of the observed data.
Returns
-------
modified_observed_data : dict
Dictionary containing both excluded and included data but properly divided
in the different keys. Passed to ``data`` argument of ``model.sampling``.
excluded_observed_data : str
Variable name containing the pointwise log likelihood data of the excluded
data. As PyStan cannot call C++ functions and log_likelihood__i is already
calculated *during* the simulation, instead of the value on which to evaluate
the likelihood, ``log_likelihood__i`` expects a string so it can extract the
corresponding data from the InferenceData object.
"""
raise NotImplementedError("sel_observations must be implemented on a model basis")
def sample(self, modified_observed_data):
"""Resample the PyStan model stored in self.model on modified_observed_data."""
fit = self.model.sampling(data=modified_observed_data, **self.sample_kwargs)
return fit
def get_inference_data(self, fit):
"""Convert the fit object returned by ``self.sample`` to InferenceData."""
idata = from_pystan(posterior=fit, **self.idata_kwargs)
return idata
def log_likelihood__i(self, excluded_obs_log_like, idata__i):
"""Retrieve the log likelihood of the excluded observations from ``idata__i``."""
return idata__i.log_likelihood[excluded_obs_log_like]
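# Editor's sketch, not part of the original file: a minimal concrete wrapper for a
# hypothetical Stan model whose data block declares N/y for the kept observations,
# N_ex/y_ex for the excluded ones, and a generated quantity `log_lik_ex`.
# Assumes `import numpy as np` and that ``self.idata_orig`` holds the original
# InferenceData, as in the :ref:`pystan_refitting` notebook.
class LinRegWrapper(PyStanSamplingWrapper):
def sel_observations(self, idx):
y = self.idata_orig.observed_data["y"].values
mask = np.full(len(y), True, dtype=bool)
mask[idx] = False
data = {"N": int(mask.sum()), "y": y[mask], "N_ex": int((~mask).sum()), "y_ex": y[~mask]}
return data, "log_lik_ex"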
fees.dto.ts | import { Field, ObjectType, ID } from "@nestjs/graphql";
import { ObjectId } from 'mongoose';
@ObjectType()
export class FeesDto{
@Field(() => ID)
studentId: ObjectId
@Field()
monthlyFee: number
@Field()
Concession: number
@Field()
isPaid: boolean
@Field()
amountPaid: number
@Field()
date: Date
}
|
loadavg_windows_test.go | //go:build windows
// +build windows
package loadavg
import (
"testing"
)
func TestGetLoadavg(t *testing.T) {
loadavg, err := Get()
if err == nil {
t.Errorf("error should occur for Windows")
}
if loadavg != nil {
t.Errorf("loadavg should be nil")
}
} |
|
redisvalue.rs | use crate::RedisString;
#[derive(Debug, PartialEq)]
pub enum RedisValue {
SimpleStringStatic(&'static str),
SimpleString(String),
BulkString(String),
BulkRedisString(RedisString),
StringBuffer(Vec<u8>),
Integer(i64),
Float(f64),
Array(Vec<RedisValue>),
Null,
NoReply, // No reply at all (as opposed to a Null reply)
}
impl From<()> for RedisValue {
fn from(_: ()) -> Self {
RedisValue::Null
}
}
impl From<i64> for RedisValue {
fn from(i: i64) -> Self {
RedisValue::Integer(i)
}
}
impl From<usize> for RedisValue {
fn from(i: usize) -> Self {
(i as i64).into()
}
}
impl From<f64> for RedisValue {
fn from(f: f64) -> Self {
RedisValue::Float(f)
}
}
impl From<String> for RedisValue {
fn from(s: String) -> Self {
RedisValue::BulkString(s)
}
}
impl From<RedisString> for RedisValue {
fn from(s: RedisString) -> Self {
RedisValue::BulkRedisString(s)
}
}
impl From<Vec<u8>> for RedisValue {
fn from(s: Vec<u8>) -> Self {
RedisValue::StringBuffer(s)
}
}
impl From<&RedisString> for RedisValue {
fn from(s: &RedisString) -> Self {
s.to_owned().into()
}
}
impl From<&str> for RedisValue {
fn from(s: &str) -> Self {
s.to_owned().into()
}
}
impl From<&String> for RedisValue {
fn from(s: &String) -> Self {
s.to_owned().into()
}
}
impl<T: Into<RedisValue>> From<Option<T>> for RedisValue {
fn from(s: Option<T>) -> Self {
match s {
Some(v) => v.into(),
None => RedisValue::Null,
}
}
}
impl<T: Into<RedisValue>> From<Vec<T>> for RedisValue {
fn from(items: Vec<T>) -> Self {
RedisValue::Array(items.into_iter().map(Into::into).collect())
}
}
//////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::RedisValue;
#[test]
fn from_vec_string() {
assert_eq!(
RedisValue::from(vec!["foo".to_string()]),
RedisValue::Array(vec![RedisValue::BulkString("foo".to_owned())])
);
}
#[test]
fn from_vec_str() {
assert_eq!(
RedisValue::from(vec!["foo"]),
RedisValue::Array(vec![RedisValue::BulkString("foo".to_owned())])
);
}
#[test]
fn from_vec_string_ref() {
assert_eq!(
RedisValue::from(vec![&"foo".to_string()]),
RedisValue::Array(vec![RedisValue::BulkString("foo".to_owned())])
);
}
#[test]
fn from_option_str() {
assert_eq!(
RedisValue::from(Some("foo")),
RedisValue::BulkString("foo".to_owned())
);
}
#[test]
fn from_vec() {
let v : Vec<u8> = vec![0,3,5,21,255];
assert_eq!(
RedisValue::from(v),
RedisValue::StringBuffer(vec![0,3,5,21,255])
);
}
#[test]
fn from_option_none() {
assert_eq!(RedisValue::from(None::<()>), RedisValue::Null,);
}
}
app.server.module.ts | import { NgModule } from '@angular/core';
import { ServerModule } from '@angular/platform-server';
import { AppModule } from './app.module';
import { AppComponent } from './app.component';
import { ServerTransferStateModule } from '../modules/transfer-state/server-transfer-state.module';
import { TransferState } from '../modules/transfer-state/transfer-state';
@NgModule({
imports: [
// The AppServerModule should import your AppModule followed
// by the ServerModule from @angular/platform-server.
AppModule,
ServerModule,
ServerTransferStateModule
],
// Since the bootstrapped component is not inherited from your
// imported AppModule, it needs to be repeated here.
bootstrap: [AppComponent],
})
export class AppServerModule {
constructor(private transferState: TransferState) { }
// Gotcha (needs to be an arrow function)
ngOnBootstrap = () => {
this.transferState.inject();
}
}
mma.py | import logging
import cvxpy as cvx
import numpy as np
from numpy.linalg import norm
from tqdm import tqdm
class MaxMarginAbbeel(object):
"""
implementation of (Abbeel & Ng 2004)
two versions: available
1. max-margin (stable, computationally more heavy)
2. projection (simpler)
"""
def __init__(self,
pi_init,
p,
mu_expert,
irl_precision,
mdp_solver,
mu_estimator,
evaluators,
method="max_margin",
slack_scale=0.01,
use_slack=False,
stochastic=True,
delta=0.2
                 ):
        """Initialize the max-margin IRL solver.
Parameters
----------
p : int
dimension of phi
mu_expert : target for feature expectation IRL
mu_estimator : function
estimate E[mu(s_0) | pi, D]
        evaluators : list of evaluators
            evaluate the policy in terms of performance score and action matching
irl_precision : convergence threshold
use_slack : whether to use slack for convex optimization
slack_scale : scaling term
method: max_margin or projection
"""
self._pi_init = pi_init
self._p = p
self._mu_expert = mu_expert
self._mu_estimator = mu_estimator
self._irl_precision = irl_precision
self._method = method
self._evaluators = evaluators
self._mdp_solver = mdp_solver
self._use_slack = use_slack
self._slack_scale = slack_scale
self._stochastic = stochastic
self._delta = delta
    def run(self, n_iteration):
        """Run the max-margin IRL loop.
        Parameters
        ----------
        n_iteration : max iteration count
        Returns
        -------
        (results, eval_metrics) : training artifacts and per-iteration metrics
"""
mu_estimator = self._mu_estimator
stochastic = self._stochastic
pi_list = []
pi_best_list = []
mu_list = []
mu_bar_list = []
weight_list = []
weight_best_list = []
margin_v_list = []
margin_mu_list = []
pi_list.append(self._pi_init)
mu_estimator.fit(self._pi_init, stochastic)
mu_irl = mu_estimator.estimate()
mu_list.append(mu_irl)
mu_bar_list.append(mu_irl)
weight_list.append(-1.0)
margin_v_list.append(-1.0)
margin_mu_list.append(-1.0)
eval_metrics = {}
        # Evaluate the initial policy
for e in self._evaluators:
the_metrics = e.evaluate(self._pi_init)
for k, v in the_metrics.items():
if k not in eval_metrics:
eval_metrics[k] = []
eval_metrics['best_' + k] = []
eval_metrics[k].append(v)
for epi_i in tqdm(range(n_iteration)):
if self._method == "max_margin":
W, (margin_v, margin_mu, converged) = self._optimize(mu_list)
elif self._method == "projection":
W, (margin_v, margin_mu, converged, mu_bar_im1) = \
self._optimize_projection(mu_list, mu_bar_list)
mu_bar_list.append(mu_bar_im1)
else:
raise Exception("Unknown IRL solver")
weight_list.append(W)
margin_v_list.append(margin_v)
margin_mu_list.append(margin_mu)
logging.info("margin_v: {}".format(margin_v))
logging.info("margin_mu: {}".format(margin_mu))
margin_hyperplane = 2 / norm(W, 2)
logging.info("margin_hyperplane: {}".format(margin_hyperplane))
if converged:
logging.info("margin_mu converged after {} iterations".format(epi_i + 1))
break
pi_irl = self._mdp_solver.solve(reward_fn=lambda obs_next: obs_next.dot(W))
pi_list.append(pi_irl)
mu_estimator.fit(pi_irl, stochastic)
mu_irl = mu_estimator.estimate()
mu_list.append(mu_irl)
logging.info("mu_irl: {}".format(mu_irl))
mu_list_ = np.array([mu.flatten() for mu in mu_list])
mixture_weight_list = self._choose_mixture_weight(mu_list_, self._mu_expert)
logging.info("mixture_weight_list: {}".format(mixture_weight_list))
# pi_best = MixturePolicy(mixture_weight_list, pi_list)
pi_best = 0
for w, p in zip(mixture_weight_list, pi_list):
pi_best += w * p
pi_best_list.append(pi_best)
best_mu = mixture_weight_list.T.dot(mu_list_)
w_best = self._mu_expert - best_mu
w_best /= norm(w_best, 2)
weight_best_list.append(w_best)
# Do the evaluations
for e in self._evaluators:
the_metrics = e.evaluate(pi_best)
for k, v in the_metrics.items():
eval_metrics['best_' + k].append(v)
the_metrics = e.evaluate(pi_irl)
for k, v in the_metrics.items():
eval_metrics[k].append(v)
logging.info("eval_metrics: {}".format(eval_metrics))
results = {
"margin_v": margin_v_list,
"margin_mu": margin_mu_list,
"mu": mu_list,
"weight": weight_list,
"policy": pi_list,
"policy_best": pi_best_list,
"weight_best": weight_best_list,
}
return results, eval_metrics
def _choose_mixture_weight(self, mu_list, mu_exp):
"""
        implement the choice of mixture policy in
        Section 3 of Abbeel & Ng (2004): find convex weights whose mixed
        feature expectation is closest to the expert's
        Parameters
        ----------
        mu_list : array of feature expectations, one row per policy
        mu_exp : expert feature expectation
        Returns
        -------
        weight_list : mixture weights over the policies found so far
"""
lamda = cvx.Variable(len(mu_list))
obj = cvx.Minimize(cvx.norm(mu_exp - mu_list.T @ lamda, p=2))
constraints = [lamda >= 0, sum(lamda) == 1]
prob = cvx.Problem(obj, constraints)
prob.solve()
if prob.status in ["unbounded", "infeasible"]:
logging.warning("the optimization failed: {}".format(prob.status))
weight_list = np.array(lamda.value).flatten()
tol = 1e-6
weight_list[np.abs(weight_list) < tol] = 0.0
weight_list /= np.sum(weight_list)
return weight_list
    def _optimize(self, mu_list):
        """linearly parametrize the reward function.
        implements Eq. 11 from Abbeel & Ng (2004): maximize the margin t,
        subject to ||W||_2 <= 1, separating the expert's feature
        expectation from those of all policies found so far
        Parameters
        ----------
        mu_list : feature expectations of the policies found so far
        Returns
        -------
        W : reward weight vector
        (margin_v, margin_mu, converged) : margins and convergence flag
        """
logging.info("solving for W given mu_list")
# define variables
W = cvx.Variable(self._p)
t = cvx.Variable(1)
if self._use_slack:
xi = cvx.Variable(1)
mu_exp = cvx.Parameter(self._p)
mu_exp.value = self._mu_expert.flatten()
if self._use_slack:
C = cvx.Parameter(1)
C.value = self._slack_scale
obj = cvx.Maximize(t - C * xi)
else:
obj = cvx.Maximize(t)
constraints = []
for mu in mu_list:
mu = mu.flatten()
if self._use_slack:
constraints += [W.T @ mu_exp + xi >= W.T @ mu + t]
else:
constraints += [W.T @ mu_exp >= W.T @ mu + t]
constraints += [cvx.norm(W, 2) <= 1]
prob = cvx.Problem(obj, constraints)
prob.solve()
if prob.status in ["unbounded", "infeasible"]:
logging.warning("the optimization failed: {}".format(prob.status))
W = np.array(W.value)
margin_v = t.value
mu_list = np.array([mu.flatten() for mu in mu_list])
margin_mu_list = norm(np.array(mu_exp.value).T - mu_list, 2, axis=1)
margin_mu = np.min(margin_mu_list)
converged = margin_mu <= self._irl_precision
return W, (margin_v, margin_mu, converged)
def _optimize_projection(self, mu_list, mu_bar_list):
        """project the expert's feature expectation (Sec. 3.1, Abbeel & Ng 2004).
        Parameters
        ----------
        mu_list : feature expectations of the policies found so far
        mu_bar_list : projected feature expectations from earlier iterations
        Returns
        -------
        w_i, (margin_v, margin_mu, converged, mu_bar_im1)
        """
        mu_e = self._mu_expert
        mu_im1 = mu_list[-1]
        mu_bar_im2 = mu_bar_list[-1]
        if len(mu_bar_list) == 1:
            mu_bar_im1 = mu_list[-1]
            w_i = mu_e - mu_im1
        else:
            a = mu_im1 - mu_bar_im2
            b = mu_e - mu_bar_im2
            # orthogonal projection: mu_bar_{i-1} = mu_bar_{i-2} + (a.b / ||a||^2) a
            mu_bar_im1 = mu_bar_im2 + (a.T.dot(b) / norm(a) ** 2) * a
            w_i = mu_e - mu_bar_im1
        # the margin is the distance to the expert *before* normalizing w_i
        t_i = np.linalg.norm(w_i, 2)
        w_i /= t_i
        margin_v = w_i.T.dot(mu_e - mu_bar_im1)
        margin_mu = t_i
        converged = margin_mu <= self._irl_precision
        return w_i, (margin_v, margin_mu, converged, mu_bar_im1)
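# Editor's sketch (hedged, not in the original file): a standalone NumPy check
# of the projection update used in `_optimize_projection`. The 2-D feature
# expectations below are made up purely for illustration.
def _projection_step_demo():
    mu_e = np.array([1.0, 1.0])    # expert feature expectation
    mu_i = np.array([0.0, 2.0])    # latest policy's feature expectation
    mu_bar = np.array([0.0, 0.0])  # previous projected point
    a = mu_i - mu_bar
    b = mu_e - mu_bar
    # orthogonal projection of mu_e onto the line through mu_bar and mu_i
    return mu_bar + (a.dot(b) / a.dot(a)) * a  # -> array([0., 1.])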
#
# def train_mma(pi_0, phi_sa_dim, task_desc, params, D, evaluator, ob_space=None, ac_space=None):
# gym.logger.setLevel(logging.WARN)
#
# gamma = task_desc["gamma"]
# horizon = task_desc["horizon"]
# eps = params["eps"]
# p = q = phi_sa_dim # adding action dim
# phi = D["phi_fn"]
# phi_s = D["phi_fn_s"]
# stochastic = True
# mu_estimator_type = params["mu_estimator"]
# n_action = task_desc["n_action"]
# assert isinstance(n_action, int)
# action_list = range(n_action)
# precision = params["precision"]
#
# mu_exp_estimator = EmpiricalMuEstimator(phi, gamma)
# mu_exp_estimator.fit(D, stochastic, return_s_init=True)
# mu_exp, s_init_list = mu_exp_estimator.estimate()
#
#
# logging.info("fitting {}".format(mu_estimator_type))
# if task_desc["type"] == "gym":
# env = gym.make(task_desc["env_id"])
# ac_space = env.action_space
# ob_space = env.observation_space
# mu_dim = p # only for discrete action
# elif task_desc["type"] == "sepsis":
# if ac_space is None:
# ac_space = (5, )
# if ob_space is None:
# ob_space = (46, )
# mu_dim = p
#
# stochastic = True
#
# s = D["s"]
# a = D["a"]
# if len(a.shape) == 1:
# a = np.expand_dims(a, axis=1)
# s_next = D["s_next"]
# done = D["done"]
# if len(done.shape) == 1:
# done = np.expand_dims(done, axis=1)
# phi_sa = D["phi_sa"]
#
# n_transition = D["s"].shape[0]
#     idx = int(n_transition * 0.7)
#
# D_train = {"s" : s[:idx, :],
# "a" : a[:idx, :],
# "phi_sa" : phi_sa[:idx, :],
# "s_next": s_next[:idx, :],
# "done": done[:idx, :]}
#
# D_val = {"s" : s[idx:, :],
# "a" : a[idx:, :],
# "phi_sa" : phi_sa[idx:, :],
# "s_next": s_next[idx:, :],
# "done": done[idx:, :]}
#
#
# if mu_estimator_type == "lstd":
# mu_estimator = LSTDMuEstimator(phi, gamma, D, p, q, eps, s_init_list)
# elif mu_estimator_type == "dsfn":
# mu_estimator = DeepMuEstimator(phi, gamma, D_train, D_val, s_init_list, ob_space,
# ac_space, mu_dim, horizon)
# else:
# raise NotImplementedError
#
# mdp_solver = DQNSepsis(D=D_train)
#
# mma = MaxMarginAbbeel(pi_init=pi_0,
# p=p,
# phi=phi,
# mu_exp=mu_exp,
# mdp_solver=mdp_solver,
# evaluator=evaluator,
# irl_precision=params["precision"],
# method=params["method"],
# mu_estimator=mu_estimator,
# stochastic=stochastic,
# D_val=D_val)
#
# results = mma.run(n_iteration=params["n_iteration"])
# return results
mod.rs | mod lifeform;
pub use self::lifeform::LifeformComponent;
pub use self::lifeform::LifeformType;
pub use self::lifeform::Orientation;
pub use self::lifeform::get_rand_orientation;
mod monster;
pub use self::monster::Monster;
mod player_action;
pub use self::player_action::Action;
mod outfits;
pub use self::outfits::Skins;
pub use self::outfits::Outfit;
pub use self::outfits::get_outfit;
pub use self::outfits::outfit_from_str;
mod walk_animation;
pub use self::walk_animation::WalkAnimation;
mod melee_animation;
pub use self::melee_animation::MeleeAnimation;
mod movement;
pub use self::movement::Move;
event.rs | use crate::types::{Block, BlockHash, BlockHeader};
use std::fmt::{Debug, Display};
#[derive(Debug)]
pub enum Event<I> {
    Start(I),
    GetBlockHashResult(BlockHash, BlockByHashResult<I>),
GetBlockHeightResult(u64, BlockByHeightResult<I>),
GetDeploysResult(DeploysResult<I>),
StartDownloadingDeploys,
NewPeerConnected(I),
BlockHandled(Box<BlockHeader>),
}
#[derive(Debug)]
pub enum DeploysResult<I> {
Found(Box<BlockHeader>),
NotFound(Box<BlockHeader>, I),
}
#[derive(Debug)]
pub enum BlockByHashResult<I> {
Absent(I),
FromStorage(Box<Block>),
FromPeer(Box<Block>, I),
}
#[derive(Debug)]
pub enum BlockByHeightResult<I> {
Absent(I),
FromStorage(Box<Block>),
FromPeer(Box<Block>, I),
}
impl<I> Display for Event<I>
where
I: Debug + Display,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Event::Start(init_peer) => write!(f, "Start syncing from peer {}.", init_peer),
Event::GetBlockHashResult(block_hash, r) => {
write!(f, "Get block result for {}: {:?}", block_hash, r)
}
Event::GetDeploysResult(result) => {
write!(f, "Get deploys for block result {:?}", result)
}
Event::StartDownloadingDeploys => write!(f, "Start downloading deploys event."),
Event::NewPeerConnected(peer_id) => write!(f, "A new peer connected: {}", peer_id),
Event::BlockHandled(block) => {
let hash = block.hash();
let height = block.height();
write!(
f,
"Block has been handled by consensus. Hash {}, height {}",
hash, height
)
}
Event::GetBlockHeightResult(height, res) => {
write!(f, "Get block result for height {}: {:?}", height, res)
}
}
}
}
ca_test.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
k8ssecret "istio.io/istio/security/pkg/k8s/secret"
caerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
)
var (
cert1Pem = `
-----BEGIN CERTIFICATE-----
MIIC3jCCAcagAwIBAgIJAMwyWk0iqlOoMA0GCSqGSIb3DQEBCwUAMBwxGjAYBgNV
BAoMEWs4cy5jbHVzdGVyLmxvY2FsMB4XDTE4MDkyMTAyMjAzNFoXDTI4MDkxODAy
MjAzNFowHDEaMBgGA1UECgwRazhzLmNsdXN0ZXIubG9jYWwwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQC8TDtfy23OKCRnkSYrKZwuHG5lOmTZgLwoFR1h
3NDTkjR9406CjnAy6Gl73CRG3zRYVgY/2dGNqTzAKRCeKZlOzBlK6Kilb0NIJ6it
s6ooMAxwXlr7jOKiSn6xbaexVMrP0VPUbCgJxQtGs3++hQ14D6WnyfdzPBZJLKbI
tVdDnAcl/FJXKVV9gIg+MM0gETWOYj5Yd8Ye0FTvoFcgs8NKkxhEZe/LeYa7XYsk
S0PymwbHwNZcfC4znp2bzu28LUmUe6kL97YU8ubvhR0muRy6h5MnQNMQrRG5Q5j4
A2+tkO0vto8gOb6/lacEUVYuQdSkMZJiqWEjWgWKeAYdkTJDAgMBAAGjIzAhMA4G
A1UdDwEB/wQEAwICBDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IB
AQAxWP3MT0IelJcb+e7fNTfMS0r3UhpiNkRU368Z7gJ4tDNOGRPzntW6CLnaE+3g
IjOMAE8jlXeEmNuXtDQqQoZwWc1D5ma3jyc83E5H9LJzjfmn5rAHafr29YH85Ms2
VlKdpP+teYg8Cag9u4ar/AUR4zMUEpGK5U+T9IH44lVqVH23T+DxAT+btsyuGiB0
DsM76XVDj4g3OKCUalu7a8FHvgTkBpUJBl7vwh9kqo9HwCaj4iC2CwveOm0WtSgy
K9PpVDxTGNSxqsxKn7DJQ15NTOP+gr29ABqFKwRr+S8ggw6evzHbABQTUMebaRSr
iH7cSgrzZBiUvJmZRi7/BrYU
-----END CERTIFICATE-----`
key1Pem = `
-----BEGIN PRIVATE KEY-----
MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQC8TDtfy23OKCRn
kSYrKZwuHG5lOmTZgLwoFR1h3NDTkjR9406CjnAy6Gl73CRG3zRYVgY/2dGNqTzA
KRCeKZlOzBlK6Kilb0NIJ6its6ooMAxwXlr7jOKiSn6xbaexVMrP0VPUbCgJxQtG
s3++hQ14D6WnyfdzPBZJLKbItVdDnAcl/FJXKVV9gIg+MM0gETWOYj5Yd8Ye0FTv
oFcgs8NKkxhEZe/LeYa7XYskS0PymwbHwNZcfC4znp2bzu28LUmUe6kL97YU8ubv
hR0muRy6h5MnQNMQrRG5Q5j4A2+tkO0vto8gOb6/lacEUVYuQdSkMZJiqWEjWgWK
eAYdkTJDAgMBAAECggEBAJTemFqmVQwWxKF1Kn4ZibcTF1zFDBLCKwBtoStMD3YW
M5YL7nhd8OruwOcCJ1Q5CAOHD63PolOjp7otPUwui1y3FJAa3areCo2zfTLHxxG6
2zrD/p6+xjeVOhFBJsGWzjn7v5FEaWs/9ChTpf2U6A8yH8BGd3MN4Hi96qboaDO0
fFz3zOu7sgjkDNZiapZpUuqs7a6MCCr2T3FPwdWUiILZF2t5yWd/l8KabP+3QvvR
tDU6sNv4j8e+dsF2l9ZT81JLkN+f6HvWcLVAADvcBqMcd8lmMSPgxSbytzKanx7o
wtzIiGkNZBCVKGO7IK2ByCluiyHDpGul60Th7HUluDECgYEA9/Q1gT8LTHz1n6vM
2n2umQN9R+xOaEYN304D5DQqptN3S0BCJ4dihD0uqEB5osstRTf4QpP/qb2hMDP4
qWbWyrc7Z5Lyt6HI1ly6VpVnYKb3HDeJ9M+5Se1ttdwyRCzuT4ZBhT5bbqBatsOU
V7+dyrJKbk8r9K4qy29UFozz/38CgYEAwmhzPVak99rVmqTpe0gPERW//n+PdW3P
Ta6ongU8zkkw9LAFwgjGtNpd4nlk0iQigiM4jdJDFl6edrRXv2cisEfJ9+s53AOb
hXui4HAn2rusPK+Dq2InkHYTGjEGDpx94zC/bjYR1GBIsthIh0w2G9ql8yvLatxG
x6oXEsb7Lz0CgYEA7Oj+/mDYUNrMbSVfdBvF6Rl2aHQWbncQ5h3Khg55+i/uuY3K
J66pqKQ0ojoIfk0XEh3qLOLv0qUHD+F4Y5OJAuOT9OBo3J/OH1M2D2hs/+JIFUPT
on+fEE21F6AuvwkXIhCrJb5w6gB47Etuv3CsOXGkwEURQJXw+bODapB+yc0CgYEA
t7zoTay6NdcJ0yLR2MZ+FvOrhekhuSaTqyPMEa15jq32KwzCJGUPCJbp7MY217V3
N+/533A+H8JFmoNP+4KKcnknFb2n7Z0rO7licyUNRdniK2jm1O/r3Mj7vOFgjCaz
hCnqg0tvBn4Jt55aziTlbuXzuiRGGTUfYE4NiJ2vgTECgYEA8di9yqGhETYQkoT3
E70JpEmkCWiHl/h2ClLcDkj0gXKFxmhzmvs8G5On4S8toNiJ6efmz0KlHN1F7Ldi
2iVd9LZnFVP1YwG0mvTJxxc5P5Uy5q/EhCLBAetqoTkWYlPcpkcathmCbCpJG4/x
iOmuuOfQWnMfcVk8I0YDL5+G9Pg=
-----END PRIVATE KEY-----`
)
// TODO (myidpt): Test Istio CA can load plugin key/certs from secret.
func TestCreateSelfSignedIstioCAWithoutSecret(t *testing.T) {
	caCertTTL := time.Hour
	defaultCertTTL := 30 * time.Minute
	maxCertTTL := time.Hour
org := "test.ca.Org"
const caNamespace = "default"
client := fake.NewSimpleClientset()
rootCertFile := ""
rootCertCheckInverval := time.Hour
rsaKeySize := 2048
caopts, err := NewSelfSignedIstioCAOptions(context.Background(),
0, caCertTTL, rootCertCheckInverval, defaultCertTTL,
maxCertTTL, org, false, caNamespace, -1, client.CoreV1(),
rootCertFile, false, rsaKeySize)
if err != nil {
t.Fatalf("Failed to create a self-signed CA Options: %v", err)
}
ca, err := NewIstioCA(caopts)
if err != nil {
t.Errorf("Got error while creating self-signed CA: %v", err)
}
if ca == nil {
t.Fatalf("Failed to create a self-signed CA.")
}
signingCert, _, certChainBytes, rootCertBytes := ca.GetCAKeyCertBundle().GetAll()
rootCert, err := util.ParsePemEncodedCertificate(rootCertBytes)
if err != nil {
t.Error(err)
}
	// Root cert and signing cert are the same for self-signed CA.
if !rootCert.Equal(signingCert) {
t.Error("CA root cert does not match signing cert")
}
if ttl := rootCert.NotAfter.Sub(rootCert.NotBefore); ttl != caCertTTL {
t.Errorf("Unexpected CA certificate TTL (expecting %v, actual %v)", caCertTTL, ttl)
}
if certOrg := rootCert.Issuer.Organization[0]; certOrg != org {
t.Errorf("Unexpected CA certificate organization (expecting %v, actual %v)", org, certOrg)
}
if len(certChainBytes) != 0 {
t.Errorf("Cert chain should be empty")
}
// Check the signing cert stored in K8s secret.
caSecret, err := client.CoreV1().Secrets("default").Get(context.TODO(), CASecret, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get secret (error: %s)", err)
}
signingCertFromSecret, err := util.ParsePemEncodedCertificate(caSecret.Data[caCertID])
if err != nil {
t.Errorf("Failed to parse cert (error: %s)", err)
}
if !signingCertFromSecret.Equal(signingCert) {
t.Error("CA signing cert does not match the K8s secret")
}
}
func TestCreateSelfSignedIstioCAWithSecret(t *testing.T) {
rootCertPem := cert1Pem
// Use the same signing cert and root cert for self-signed CA.
signingCertPem := []byte(cert1Pem)
signingKeyPem := []byte(key1Pem)
client := fake.NewSimpleClientset()
initSecret := k8ssecret.BuildSecret("", CASecret, "default",
nil, nil, nil, signingCertPem, signingKeyPem, istioCASecretType)
_, err := client.CoreV1().Secrets("default").Create(context.TODO(), initSecret, metav1.CreateOptions{})
if err != nil {
t.Errorf("Failed to create secret (error: %s)", err)
}
caCertTTL := time.Hour
defaultCertTTL := 30 * time.Minute
maxCertTTL := time.Hour
org := "test.ca.Org"
caNamespace := "default"
const rootCertFile = ""
rootCertCheckInverval := time.Hour
rsaKeySize := 2048
caopts, err := NewSelfSignedIstioCAOptions(context.Background(),
0, caCertTTL, rootCertCheckInverval, defaultCertTTL, maxCertTTL,
org, false, caNamespace, -1, client.CoreV1(),
rootCertFile, false, rsaKeySize)
if err != nil {
t.Fatalf("Failed to create a self-signed CA Options: %v", err)
}
ca, err := NewIstioCA(caopts)
if err != nil {
t.Errorf("Got error while creating self-signed CA: %v", err)
}
if ca == nil {
t.Fatalf("Failed to create a self-signed CA.")
}
signingCert, err := util.ParsePemEncodedCertificate(signingCertPem)
if err != nil {
t.Errorf("Failed to parse cert (error: %s)", err)
}
signingCertFromCA, _, certChainBytesFromCA, rootCertBytesFromCA := ca.GetCAKeyCertBundle().GetAll()
if !signingCert.Equal(signingCertFromCA) {
t.Error("Signing cert does not match")
}
if !bytes.Equal(rootCertBytesFromCA, []byte(rootCertPem)) {
t.Error("Root cert does not match")
}
if len(certChainBytesFromCA) != 0 {
t.Errorf("Cert chain should be empty")
}
}
func TestCreateSelfSignedIstioCAReadSigningCertOnly(t *testing.T) {
rootCertPem := cert1Pem
// Use the same signing cert and root cert for self-signed CA.
signingCertPem := []byte(cert1Pem)
signingKeyPem := []byte(key1Pem)
caCertTTL := time.Hour
defaultCertTTL := 30 * time.Minute
maxCertTTL := time.Hour
org := "test.ca.Org"
caNamespace := "default"
const rootCertFile = ""
rootCertCheckInverval := time.Hour
rsaKeySize := 2048
client := fake.NewSimpleClientset()
// Should abort with timeout.
expectedErr := "secret waiting thread is terminated"
ctx0, cancel0 := context.WithTimeout(context.Background(), time.Millisecond*50)
defer cancel0()
_, err := NewSelfSignedIstioCAOptions(ctx0, 0,
caCertTTL, defaultCertTTL, rootCertCheckInverval, maxCertTTL, org, false,
caNamespace, time.Millisecond*10, client.CoreV1(), rootCertFile, false, rsaKeySize)
if err == nil {
t.Errorf("Expected error, but succeeded.")
} else if err.Error() != expectedErr {
t.Errorf("Unexpected error message: %s VS (expected) %s", err.Error(), expectedErr)
return
}
// Should succeed once secret is ready.
secret := k8ssecret.BuildSecret("", CASecret, "default", nil, nil, nil, signingCertPem, signingKeyPem, istioCASecretType)
_, err = client.CoreV1().Secrets("default").Create(context.TODO(), secret, metav1.CreateOptions{})
if err != nil {
t.Errorf("Failed to create secret (error: %s)", err)
}
ctx1, cancel1 := context.WithCancel(context.Background())
defer cancel1()
caopts, err := NewSelfSignedIstioCAOptions(ctx1, 0,
caCertTTL, defaultCertTTL, rootCertCheckInverval, maxCertTTL, org, false,
caNamespace, time.Millisecond*10, client.CoreV1(), rootCertFile, false, rsaKeySize)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
ca, err := NewIstioCA(caopts)
if err != nil {
t.Errorf("Got error while creating self-signed CA: %v", err)
}
if ca == nil {
t.Fatalf("Failed to create a self-signed CA.")
}
signingCert, err := util.ParsePemEncodedCertificate(signingCertPem)
if err != nil {
t.Errorf("Failed to parse cert (error: %s)", err)
}
signingCertFromCA, _, certChainBytesFromCA, rootCertBytesFromCA := ca.GetCAKeyCertBundle().GetAll()
if !signingCert.Equal(signingCertFromCA) {
t.Error("Signing cert does not match")
}
if !bytes.Equal(rootCertBytesFromCA, []byte(rootCertPem)) {
t.Error("Root cert does not match")
}
if len(certChainBytesFromCA) != 0 {
t.Errorf("Cert chain should be empty")
}
}
func TestCreatePluggedCertCA(t *testing.T) {
rootCertFile := "../testdata/multilevelpki/root-cert.pem"
certChainFile := "../testdata/multilevelpki/int2-cert-chain.pem"
signingCertFile := "../testdata/multilevelpki/int2-cert.pem"
signingKeyFile := "../testdata/multilevelpki/int2-key.pem"
rsaKeySize := 2048
defaultWorkloadCertTTL := 99999 * time.Hour
maxWorkloadCertTTL := time.Hour
caopts, err := NewPluggedCertIstioCAOptions(certChainFile, signingCertFile, signingKeyFile, rootCertFile,
defaultWorkloadCertTTL, maxWorkloadCertTTL, rsaKeySize)
if err != nil {
t.Fatalf("Failed to create a plugged-cert CA Options: %v", err)
}
t0 := time.Now()
ca, err := NewIstioCA(caopts)
if err != nil {
t.Errorf("Got error while creating plugged-cert CA: %v", err)
}
if ca == nil {
t.Fatalf("Failed to create a plugged-cert CA.")
}
signingCertBytes, signingKeyBytes, certChainBytes, rootCertBytes := ca.GetCAKeyCertBundle().GetAllPem()
if !comparePem(signingCertBytes, signingCertFile) {
t.Errorf("Failed to verify loading of signing cert pem.")
}
if !comparePem(signingKeyBytes, signingKeyFile) {
t.Errorf("Failed to verify loading of signing key pem.")
}
if !comparePem(certChainBytes, certChainFile) {
t.Errorf("Failed to verify loading of cert chain pem.")
}
if !comparePem(rootCertBytes, rootCertFile) {
t.Errorf("Failed to verify loading of root cert pem.")
}
certChain, err := util.ParsePemEncodedCertificate(certChainBytes)
if err != nil {
t.Fatalf("Failed to parse cert chain pem.")
}
	// If the CA cert becomes invalid before the workload cert, the workload cert becomes invalid too;
	// however, Citadel won't rotate it if that happens.
delta := certChain.NotAfter.Sub(t0.Add(ca.defaultCertTTL))
if delta >= time.Second*2 {
t.Errorf("Invalid default cert TTL, should be the same as cert chain: %v VS (expected) %v",
t0.Add(ca.defaultCertTTL),
certChain.NotAfter)
}
}
// TODO: merge tests for SignCSR.
func TestSignCSRForWorkload(t *testing.T) {
subjectID := "spiffe://example.com/ns/foo/sa/bar"
cases := map[string]struct {
certOpts util.CertOptions
}{
"Workload uses RSA": {
certOpts: util.CertOptions{
// This value is not used, instead, subjectID should be used in certificate.
Host: "spiffe://different.com/test",
RSAKeySize: 2048,
IsCA: false,
},
},
"Workload uses EC": {
certOpts: util.CertOptions{
// This value is not used, instead, subjectID should be used in certificate.
Host: "spiffe://different.com/test",
ECSigAlg: util.EcdsaSigAlg,
IsCA: false,
},
},
}
for id, tc := range cases {
csrPEM, keyPEM, err := util.GenCSR(tc.certOpts)
if err != nil {
t.Errorf("%s: GenCSR error: %v", id, err)
}
ca, err := createCA(time.Hour, tc.certOpts.ECSigAlg)
if err != nil {
t.Errorf("%s: createCA error: %v", id, err)
}
requestedTTL := 30 * time.Minute
certPEM, signErr := ca.Sign(csrPEM, []string{subjectID}, requestedTTL, false)
if signErr != nil {
			t.Errorf("%s: Sign error: %v", id, signErr)
}
fields := &util.VerifyFields{
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
IsCA: false,
Host: subjectID,
}
_, _, certChainBytes, rootCertBytes := ca.GetCAKeyCertBundle().GetAll()
if err = util.VerifyCertificate(
keyPEM, append(certPEM, certChainBytes...), rootCertBytes, fields); err != nil {
t.Errorf("%s: VerifyCertificate error: %v", id, err)
}
cert, err := util.ParsePemEncodedCertificate(certPEM)
if err != nil {
t.Errorf("%s: ParsePemEncodedCertificate error: %v", id, err)
}
if ttl := cert.NotAfter.Sub(cert.NotBefore); ttl != requestedTTL {
t.Errorf("%s: Unexpected certificate TTL (expecting %v, actual %v)", id, requestedTTL, ttl)
}
san := util.ExtractSANExtension(cert.Extensions)
if san == nil {
t.Errorf("%s: No SAN extension is found in the certificate", id)
}
expected, err := util.BuildSubjectAltNameExtension(subjectID)
if err != nil {
t.Errorf("%s: BuildSubjectAltNameExtension error: %v", id, err)
}
if !reflect.DeepEqual(expected, san) {
t.Errorf("%s: Unexpected extensions: wanted %v but got %v", id, expected, san)
}
}
}
func TestSignCSRForCA(t *testing.T) {
subjectID := "spiffe://example.com/ns/foo/sa/baz"
cases := map[string]struct {
RSAKeySize int
IsCA bool
ECSigAlg util.SupportedECSignatureAlgorithms
}{
"CA uses RSA": {
RSAKeySize: 2048,
IsCA: true,
},
"CA uses EC": {
ECSigAlg: util.EcdsaSigAlg,
IsCA: true,
},
}
for id, tc := range cases {
certOpts := util.CertOptions{
RSAKeySize: tc.RSAKeySize,
IsCA: tc.IsCA,
ECSigAlg: tc.ECSigAlg,
}
csrPEM, keyPEM, err := util.GenCSR(certOpts)
if err != nil {
t.Errorf("%s: GenCSR error: %v", id, err)
}
ca, err := createCA(365*24*time.Hour, tc.ECSigAlg)
if err != nil {
t.Errorf("%s: createCA error: %v", id, err)
}
requestedTTL := 30 * 24 * time.Hour
certPEM, signErr := ca.Sign(csrPEM, []string{subjectID}, requestedTTL, true)
if signErr != nil {
			t.Errorf("%s: Sign error: %v", id, signErr)
}
fields := &util.VerifyFields{
KeyUsage: x509.KeyUsageCertSign,
IsCA: true,
Host: subjectID,
}
_, _, certChainBytes, rootCertBytes := ca.GetCAKeyCertBundle().GetAll()
if err = util.VerifyCertificate(
keyPEM, append(certPEM, certChainBytes...), rootCertBytes, fields); err != nil {
t.Errorf("%s: VerifyCertificate error: %v", id, err)
}
cert, err := util.ParsePemEncodedCertificate(certPEM)
if err != nil {
t.Errorf("%s: ParsePemEncodedCertificate error: %v", id, err)
}
if ttl := cert.NotAfter.Sub(cert.NotBefore); ttl != requestedTTL {
t.Errorf("Unexpected certificate TTL (expecting %v, actual %v)", requestedTTL, ttl)
}
san := util.ExtractSANExtension(cert.Extensions)
if san == nil {
t.Errorf("%s: No SAN extension is found in the certificate", id)
}
expected, err := util.BuildSubjectAltNameExtension(subjectID)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(expected, san) {
t.Errorf("%s: Unexpected extensions: wanted %v but got %v", id, expected, san)
}
}
}
func TestSignCSRTTLError(t *testing.T) {
subjectID := "spiffe://example.com/ns/foo/sa/bar"
cases := map[string]struct {
Org string
RSAKeySize int
ECSigAlg util.SupportedECSignatureAlgorithms
}{
"CSR uses RSA": {
Org: "istio.io",
RSAKeySize: 2048,
},
"CSR uses EC": {
Org: "istio.io",
ECSigAlg: util.EcdsaSigAlg,
},
}
for id, tc := range cases {
certOpts := util.CertOptions{
Org: tc.Org,
RSAKeySize: tc.RSAKeySize,
ECSigAlg: tc.ECSigAlg,
}
csrPEM, _, err := util.GenCSR(certOpts)
if err != nil {
t.Errorf("%s: GenCSR error: %v", id, err)
}
ca, err := createCA(2*time.Hour, tc.ECSigAlg)
if err != nil {
t.Errorf("%s: createCA error: %v", id, err)
}
ttl := 3 * time.Hour
cert, signErr := ca.Sign(csrPEM, []string{subjectID}, ttl, false)
if cert != nil {
			t.Errorf("%s: Expected null cert but obtained a non-null cert.", id)
}
expectedErr := "requested TTL 3h0m0s is greater than the max allowed TTL 2h0m0s"
if signErr.(*caerror.Error).Error() != expectedErr {
			t.Errorf("%s: Expected error: %s but got error: %s.", id, expectedErr, signErr.(*caerror.Error).Error())
}
}
}
func TestAppendRootCerts(t *testing.T) {
root1 := "root-cert-1"
expRootCerts := `root-cert-1
root-cert-2
root-cert-3`
rootCerts, err := util.AppendRootCerts([]byte(root1), "./root-certs-for-testing.pem")
if err != nil {
t.Errorf("AppendRootCerts() returns an error: %v", err)
} else if expRootCerts != string(rootCerts) {
t.Errorf("the root certificates do not match. Expect:%v. Actual:%v.",
expRootCerts, string(rootCerts))
}
}
func TestAppendRootCertsToNullCert(t *testing.T) {
// nil certificate
var root1 []byte
expRootCerts := `root-cert-2
root-cert-3`
rootCerts, err := util.AppendRootCerts(root1, "./root-certs-for-testing.pem")
if err != nil {
t.Errorf("AppendRootCerts() returns an error: %v", err)
} else if expRootCerts != string(rootCerts) {
t.Errorf("the root certificates do not match. Expect:%v. Actual:%v.",
expRootCerts, string(rootCerts))
}
}
func TestSignWithCertChain(t *testing.T) {
rootCertFile := "../testdata/multilevelpki/root-cert.pem"
certChainFile := "../testdata/multilevelpki/int-cert-chain.pem"
signingCertFile := "../testdata/multilevelpki/int-cert.pem"
signingKeyFile := "../testdata/multilevelpki/int-key.pem"
rsaKeySize := 2048
defaultWorkloadCertTTL := 30 * time.Minute
maxWorkloadCertTTL := time.Hour
caopts, err := NewPluggedCertIstioCAOptions(certChainFile, signingCertFile, signingKeyFile, rootCertFile,
defaultWorkloadCertTTL, maxWorkloadCertTTL, rsaKeySize)
if err != nil {
t.Fatalf("Failed to create a plugged-cert CA Options: %v", err)
}
ca, err := NewIstioCA(caopts)
if err != nil {
t.Errorf("Got error while creating plugged-cert CA: %v", err)
}
if ca == nil {
t.Fatalf("Failed to create a plugged-cert CA.")
}
opts := util.CertOptions{
// This value is not used, instead, subjectID should be used in certificate.
Host: "spiffe://different.com/test",
RSAKeySize: 2048,
IsCA: false,
}
csrPEM, privPEM, err := util.GenCSR(opts)
if err != nil {
t.Error(err)
}
certPEM, signErr := ca.SignWithCertChain(csrPEM, []string{"localhost"}, time.Hour, false)
if signErr != nil {
		t.Error(signErr)
}
cert, err := tls.X509KeyPair(certPEM, privPEM)
if err != nil {
t.Error(err)
}
if len(cert.Certificate) != 3 {
		t.Errorf("Unexpected number of certificates returned: %d (expected 3)", len(cert.Certificate))
}
}
func TestGenKeyCert(t *testing.T) {
cases := map[string]struct {
rootCertFile string
certChainFile string
signingCertFile string
signingKeyFile string
certLifetime time.Duration
checkCertLifetime bool
expectedError string
}{
"RSA cryptography": {
rootCertFile: "../testdata/multilevelpki/root-cert.pem",
certChainFile: "../testdata/multilevelpki/int-cert-chain.pem",
signingCertFile: "../testdata/multilevelpki/int-cert.pem",
signingKeyFile: "../testdata/multilevelpki/int-key.pem",
certLifetime: 3650 * 24 * time.Hour,
checkCertLifetime: false,
expectedError: "",
},
"EC cryptography": {
rootCertFile: "../testdata/multilevelpki/ecc-root-cert.pem",
certChainFile: "../testdata/multilevelpki/ecc-int-cert-chain.pem",
signingCertFile: "../testdata/multilevelpki/ecc-int-cert.pem",
signingKeyFile: "../testdata/multilevelpki/ecc-int-key.pem",
certLifetime: 3650 * 24 * time.Hour,
checkCertLifetime: false,
expectedError: "",
},
"Pass lifetime check": {
rootCertFile: "../testdata/multilevelpki/ecc-root-cert.pem",
certChainFile: "../testdata/multilevelpki/ecc-int-cert-chain.pem",
signingCertFile: "../testdata/multilevelpki/ecc-int-cert.pem",
signingKeyFile: "../testdata/multilevelpki/ecc-int-key.pem",
certLifetime: 24 * time.Hour,
checkCertLifetime: true,
expectedError: "",
},
"Error lifetime check": {
rootCertFile: "../testdata/multilevelpki/ecc-root-cert.pem",
certChainFile: "../testdata/multilevelpki/ecc-int-cert-chain.pem",
signingCertFile: "../testdata/multilevelpki/ecc-int-cert.pem",
signingKeyFile: "../testdata/multilevelpki/ecc-int-key.pem",
certLifetime: 25 * time.Hour,
checkCertLifetime: true,
expectedError: "requested TTL 25h0m0s is greater than the max allowed TTL 24h0m0s",
},
}
defaultWorkloadCertTTL := 30 * time.Minute
maxWorkloadCertTTL := 24 * time.Hour
rsaKeySize := 2048
for id, tc := range cases {
caopts, err := NewPluggedCertIstioCAOptions(tc.certChainFile, tc.signingCertFile, tc.signingKeyFile, tc.rootCertFile,
defaultWorkloadCertTTL, maxWorkloadCertTTL, rsaKeySize)
if err != nil {
t.Fatalf("%s: failed to create a plugged-cert CA Options: %v", id, err)
}
ca, err := NewIstioCA(caopts)
if err != nil {
t.Fatalf("%s: got error while creating plugged-cert CA: %v", id, err)
}
if ca == nil {
t.Fatalf("failed to create a plugged-cert CA.")
}
certPEM, privPEM, err := ca.GenKeyCert([]string{"host1", "host2"}, tc.certLifetime, tc.checkCertLifetime)
if err != nil {
if tc.expectedError == "" {
t.Fatalf("[%s] Unexpected error: %v", id, err)
}
if err.Error() != tc.expectedError {
t.Fatalf("[%s] Error returned does not match expectation: %v VS (expected) %v", id, err, tc.expectedError)
}
continue
} else if tc.expectedError != "" {
t.Fatalf("[%s] GenKeyCert succeeded but expected error: %v", id, tc.expectedError)
}
cert, err := tls.X509KeyPair(certPEM, privPEM)
if err != nil {
t.Fatalf("[%s] X509KeyPair error: %v", id, err)
}
if len(cert.Certificate) != 3 {
t.Fatalf("[%s] unexpected number of certificates returned: %d (expected 3)", id, len(cert.Certificate))
}
}
}
func createCA(maxTTL time.Duration, ecSigAlg util.SupportedECSignatureAlgorithms) (*IstioCA, error) {
// Generate root CA key and cert.
rootCAOpts := util.CertOptions{
IsCA: true,
IsSelfSigned: true,
TTL: time.Hour,
Org: "Root CA",
RSAKeySize: 2048,
ECSigAlg: ecSigAlg,
}
rootCertBytes, rootKeyBytes, err := util.GenCertKeyFromOptions(rootCAOpts)
if err != nil {
return nil, err
}
rootCert, err := util.ParsePemEncodedCertificate(rootCertBytes)
if err != nil {
return nil, err
}
rootKey, err := util.ParsePemEncodedKey(rootKeyBytes)
if err != nil {
return nil, err
}
intermediateCAOpts := util.CertOptions{
IsCA: true,
IsSelfSigned: false,
TTL: time.Hour,
Org: "Intermediate CA",
RSAKeySize: 2048,
SignerCert: rootCert,
SignerPriv: rootKey,
ECSigAlg: ecSigAlg,
}
intermediateCert, intermediateKey, err := util.GenCertKeyFromOptions(intermediateCAOpts)
if err != nil {
return nil, err
}
bundle, err := util.NewVerifiedKeyCertBundleFromPem(
intermediateCert, intermediateKey, intermediateCert, rootCertBytes)
if err != nil {
return nil, err
}
// Disable root cert rotator by setting root cert check interval to 0ns.
rootCertCheckInverval := time.Duration(0)
caOpts := &IstioCAOptions{
DefaultCertTTL: time.Hour,
MaxCertTTL: maxTTL,
KeyCertBundle: bundle,
RotatorConfig: &SelfSignedCARootCertRotatorConfig{
CheckInterval: rootCertCheckInverval,
},
}
return NewIstioCA(caOpts)
}
func comparePem(expectedBytes []byte, file string) bool {
fileBytes, err := ioutil.ReadFile(file)
if err != nil {
return false
}
if !bytes.Equal(fileBytes, expectedBytes) {
return false
}
return true
}
wxorx.rs | use super::super::{Error, Register, RISCV_MAX_MEMORY, RISCV_PAGES, RISCV_PAGESIZE};
use super::{
check_permission, round_page_down, round_page_up, Memory, FLAG_EXECUTABLE, FLAG_FREEZED,
FLAG_WRITABLE,
};
use bytes::Bytes;
use std::marker::PhantomData;
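/// Editor's note (hedged): this wrapper enforces a W^X (write XOR execute)
/// policy on top of an inner `Memory`. Each page carries permission flags:
/// stores require FLAG_WRITABLE, instruction fetches require FLAG_EXECUTABLE,
/// and pages marked FLAG_FREEZED reject re-initialization.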
pub struct WXorXMemory<R: Register, M: Memory<R>> {
inner: M,
flags: Vec<u8>,
_inner: PhantomData<R>,
}
impl<R: Register, M: Memory<R> + Default> Default for WXorXMemory<R, M> {
fn default() -> Self {
Self {
inner: M::default(),
flags: vec![0; RISCV_PAGES],
_inner: PhantomData,
}
}
}
impl<R: Register, M: Memory<R>> WXorXMemory<R, M> {
pub fn inner_mut(&mut self) -> &mut dyn Memory<R> {
&mut self.inner
}
}
impl<R: Register, M: Memory<R>> Memory<R> for WXorXMemory<R, M> {
fn init_pages(
&mut self,
addr: u64,
size: u64,
flags: u8,
source: Option<Bytes>,
offset_from_addr: u64,
) -> Result<(), Error> {
if round_page_down(addr) != addr || round_page_up(size) != size {
return Err(Error::Unaligned);
}
if addr > RISCV_MAX_MEMORY as u64
|| size > RISCV_MAX_MEMORY as u64
|| addr + size > RISCV_MAX_MEMORY as u64
|| offset_from_addr > size
{
return Err(Error::OutOfBound);
}
for page_addr in (addr..addr + size).step_by(RISCV_PAGESIZE) {
let page = page_addr as usize / RISCV_PAGESIZE;
if self.flags[page] & FLAG_FREEZED != 0 {
return Err(Error::InvalidPermission);
}
self.flags[page] = flags;
}
self.inner
.init_pages(addr, size, flags, source, offset_from_addr)
}
    fn fetch_flag(&mut self, page: u64) -> Result<u8, Error> {
        if page < RISCV_PAGES as u64 {
            Ok(self.flags[page as usize])
        } else {
            Err(Error::OutOfBound)
        }
    }
fn execute_load16(&mut self, addr: u64) -> Result<u16, Error> {
check_permission(self, addr, 2, FLAG_EXECUTABLE)?;
self.inner.execute_load16(addr)
}
fn load8(&mut self, addr: &R) -> Result<R, Error> {
self.inner.load8(addr)
}
fn load16(&mut self, addr: &R) -> Result<R, Error> {
self.inner.load16(addr)
}
fn load32(&mut self, addr: &R) -> Result<R, Error> {
self.inner.load32(addr)
}
fn load64(&mut self, addr: &R) -> Result<R, Error> {
self.inner.load64(addr)
}
fn store8(&mut self, addr: &R, value: &R) -> Result<(), Error> {
check_permission(self, addr.to_u64(), 1, FLAG_WRITABLE)?;
self.inner.store8(addr, value)
}
fn store16(&mut self, addr: &R, value: &R) -> Result<(), Error> {
check_permission(self, addr.to_u64(), 2, FLAG_WRITABLE)?;
self.inner.store16(addr, value)
}
fn store32(&mut self, addr: &R, value: &R) -> Result<(), Error> {
check_permission(self, addr.to_u64(), 4, FLAG_WRITABLE)?;
self.inner.store32(addr, value)
}
fn store64(&mut self, addr: &R, value: &R) -> Result<(), Error> {
check_permission(self, addr.to_u64(), 8, FLAG_WRITABLE)?;
self.inner.store64(addr, value)
}
fn store_bytes(&mut self, addr: u64, value: &[u8]) -> Result<(), Error> {
check_permission(self, addr, value.len() as u64, FLAG_WRITABLE)?;
self.inner.store_bytes(addr, value)
}
fn store_byte(&mut self, addr: u64, size: u64, value: u8) -> Result<(), Error> {
check_permission(self, addr, size, FLAG_WRITABLE)?;
self.inner.store_byte(addr, size, value)
}
}
chunk.rs | use super::abstract_mut::FastAbstractMut;
#[allow(missing_docs)]
pub struct FastChunk<Storage> {
pub(super) storage: Storage,
pub(super) current: usize,
pub(super) end: usize,
pub(super) step: usize,
}
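// Editor's note (hedged): the iterator yields `step`-sized slices of the
// storage range [current, end); the final chunk may be shorter when the
// length is not a multiple of `step`, and `len`/`size_hint` use ceiling
// division to account for it.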
impl<Storage: FastAbstractMut> Iterator for FastChunk<Storage> {
type Item = Storage::Slice;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.current + self.step < self.end {
self.current += self.step;
Some(unsafe {
FastAbstractMut::get_data_slice(
&self.storage,
self.current - self.step..self.current,
)
})
} else if self.current < self.end {
let result = Some(unsafe {
FastAbstractMut::get_data_slice(&self.storage, self.current..self.end)
});
self.current = self.end;
            result
        } else {
            None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = (self.end - self.current + self.step - 1) / self.step;
(exact, Some(exact))
}
#[inline]
fn fold<B, F>(mut self, mut init: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
while self.current + self.step < self.end {
self.current += self.step;
init = f(init, unsafe {
FastAbstractMut::get_data_slice(
&self.storage,
self.current - self.step..self.current,
)
});
}
if self.current < self.end {
init = f(init, unsafe {
FastAbstractMut::get_data_slice(&self.storage, self.current..self.end)
});
self.current = self.end;
}
init
}
}
impl<Storage: FastAbstractMut> ExactSizeIterator for FastChunk<Storage> {
#[inline]
fn len(&self) -> usize {
(self.end - self.current + self.step - 1) / self.step
}
}
impl<Storage: FastAbstractMut> DoubleEndedIterator for FastChunk<Storage> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
if self.current + self.step < self.end {
self.end -= self.step;
Some(unsafe {
FastAbstractMut::get_data_slice(&self.storage, self.end..self.end + self.step)
})
} else {
None
}
}
#[inline]
fn rfold<B, F>(mut self, mut init: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
while self.current + self.step < self.end {
self.end -= self.step;
init = f(init, unsafe {
FastAbstractMut::get_data_slice(&self.storage, self.end..self.end + self.step)
});
}
init
}
}
NEWGUI.py | from tkinter import *
from tkinter import ttk
import tkinter.filedialog as fd
import pandas as pd
from LocalModelCommunication import LocalModelCommunication
from APP import APP
class GUI(object):
def __init__(self):
# overall
self.tabControl = None
self.tab_step1 = None
self.tab_step2 = None
self.tab_step3 = None
self.tab_step4 = None
self.dataframe = None
self.img_wait = PhotoImage(file='test.GIF')
# 1 step
self.fname = None
self.data = None
self.features = None
self.import_lable = None
self.import_label_text = StringVar()
self.import_label_text.set(' ')
# 2 step
self.required = ['RR', 'QTm_old', 'sbjBeatConsidered', 'numRRaveraged', 'QR', 'QTn', 'QRS', 'IPG',
'PQ', 'PCpos', 'PCneg', 'patsex', 'AFclass', 'Age']
self.required_ordered = []
i = 0
for item in self.required:
self.required_ordered.append(str(i) + ': ' + item)
i = i + 1
self.leftbox = StringVar()
self.rightbox = StringVar()
self.rrightbox = StringVar()
self.list_left = None
self.list_right = None
self.list_rright = None
# 3 step
self.model_label = None
self.model_label_text = StringVar()
self.model_label_text.set('Waiting for model training...')
self.img_gif = PhotoImage(file='img.GIF')
# 4 step
self.connect_label = None
self.connect_label_text = StringVar()
self.connect_label_text.set('Waiting for central server response...')
# 5 step
# help functions
def add_tab(self, tabControl, tab_name):
tab = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab, text=tab_name)
return tab
# Callback functions
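    # Editor's note (hedged): the GUI is a linear wizard; each go_next_stepN
    # callback builds the next tab, selects it, and forgets the current one,
    # and each go_back_stepN rebuilds the previous tab the same way.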
## step 1
def get_csv(self): # open file system
self.fname = fd.askopenfilename(filetypes=[(".csv file", ".csv")])
self.data = pd.read_csv(self.fname, delimiter=',')
self.features = self.data.columns
self.import_label_text.set('Import data from: ' + self.fname + '\n' + str(self.features))
self.import_lable.pack(side=TOP)
def go_next_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step1)
## step 2
def move_to_right(self):
self.list_right.insert(END,
str(self.list_right.size()) + ': ' + self.list_left.get(self.list_left.curselection()))
self.list_left.delete(self.list_left.curselection())
def move_to_left(self):
content = self.list_right.get(self.list_right.curselection())
contents = content.split(': ')
self.list_left.insert(END, contents[1])
self.list_right.delete(self.list_right.curselection())
def add_nan(self):
self.list_right.insert(END, str(self.list_right.size()) + ': ' + 'NAN')
def go_next_step3(self):
# prepare dataframe for localmodel
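        # Editor's note (hedged): rightbox.get() returns the Tk variable's
        # string form, assumed here to look like "('0: RR', '1: NAN')"; the
        # replace calls below strip the parens/quotes so each "index: name"
        # pair can be split apart.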
columns = []
contents = self.rightbox.get()
contents = contents.replace('(', '')
contents = contents.replace(')', '')
contents = contents.replace("'", '')
item_list = contents.split(', ')
for item in item_list:
content = item.split(': ')[1]
if content != 'NAN':
columns.append(content)
self.dataframe = self.data[columns]
print(self.dataframe.head(2))
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
self.tab_model(self.tab_step3)
self.tabControl.select(self.tab_step3)
self.tabControl.forget(self.tab_step2)
def go_back_step1(self):
self.tab_step1 = self.add_tab(self.tabControl, "Step 1: Import Data")
# render tab1
self.tab_import(self.tab_step1, self.tabControl)
self.tabControl.select(self.tab_step1)
self.tabControl.forget(self.tab_step2)
## step 3
def go_next_step4(self):
self.tab_step4 = self.add_tab(self.tabControl, "Step 4: Connect to Central Server")
# render tab4
self.tab_connect(self.tab_step4)
self.tabControl.select(self.tab_step4)
self.tabControl.forget(self.tab_step3)
def go_back_step2(self):
self.tab_step2 = self.add_tab(self.tabControl, "Step 2: Match Features")
# render tab2
self.tab_match(self.tab_step2)
self.tabControl.select(self.tab_step2)
self.tabControl.forget(self.tab_step3)
## step 4
def go_next_step5(self):
self.tab_step5 = self.add_tab(self.tabControl, "Step 5: Wait for Prediction Call")
# render tab5
self.tab_wait(self.tab_step5)
self.tabControl.select(self.tab_step5)
self.tabControl.forget(self.tab_step4)
def go_back_step3(self):
self.tab_step3 = self.add_tab(self.tabControl, "Step 3: Train Model")
# render tab3
        self.tab_model(self.tab_step3)
        self.tabControl.select(self.tab_step3)
        self.tabControl.forget(self.tab_step4)
    ## step 5
    # frames
def tab_import(self, root, tabControl):
"""
Load local data (csv file)
"""
self.tabControl = tabControl
self.tab_step1 = root
frame = Frame(root)
frame.pack(side=TOP)
Button(frame, text='Import Data', command=self.get_csv, width=16).pack(side=TOP)
label_frame = ttk.LabelFrame(frame, text='Press Button to Import Data')
label_frame.pack(side=TOP)
self.import_lable = ttk.Label(label_frame, textvariable=self.import_label_text)
self.import_lable.pack(side=TOP)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step2, width=16).pack(side=TOP)
def tab_match(self, root):
"""
Feature matching
"""
self.leftbox.set(sorted(self.features))
self.rightbox.set('')
self.rrightbox.set(self.required_ordered)
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step3, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step1, width=16).pack(side=LEFT)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Local Features')
column_head.pack(side=TOP)
self.list_left = Listbox(frame, listvariable=self.leftbox, width=25, height=20)
self.list_left.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_left.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=LEFT)
Button(frame, text='->', command=self.move_to_right, width=7).pack(side=TOP)
Button(frame, text='<-', command=self.move_to_left, width=7).pack(side=TOP)
Button(frame, text='NAN', command=self.add_nan, width=7).pack(side=TOP)
frame = Frame(root)
frame.pack(side=LEFT)
column_head = ttk.Label(frame, text='Matched Features')
column_head.pack(side=TOP)
self.list_right = Listbox(frame, listvariable=self.rightbox,height=20, width=25)
self.list_right.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_right.yview)
scrollbar.pack(side="right", fill="y")
frame = Frame(root)
frame.pack(side=RIGHT)
column_head = ttk.Label(frame, text='Required Features')
column_head.pack(side=TOP)
self.list_rright = Listbox(frame, listvariable=self.rrightbox,height=20, width=25)
self.list_rright.pack(side=LEFT)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=self.list_rright.yview)
scrollbar.pack(side="right", fill="y")
def tab_model(self, root):
"""
Call localmodel.init() and localmodel.train()
Display model accuracy
"""
frame = Frame(root)
frame.pack(side=TOP)
self.label_frame = ttk.LabelFrame(frame)
self.label_frame.pack(side=TOP)
self.model_label = ttk.Label(self.label_frame, textvariable=self.model_label_text)
self.model_label.pack(side=TOP)
self.label_img = ttk.Label(self.label_frame, image=self.img_wait)
self.label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)
print ("MODEL TRAINED -> ")
self.loca = LocalModelCommunication(data= self.dataframe)
training_result = self.loca.chooseModel_with_crossValidation_and_train()
print (training_result)
self.trainingdone()
def trainingdone(self):
self.label_img.config(image=self.img_gif)
self.label_img.pack()
def tab_connect(self, root):
"""
Connect to center server
"""
frame = Frame(root)
frame.pack(side=TOP)
label_frame = ttk.LabelFrame(frame)
label_frame.pack(side=TOP)
self.connect_label = ttk.Label(label_frame, textvariable=self.connect_label_text)
self.connect_label.pack(side=TOP)
label_img = ttk.Label(label_frame, image=self.img_wait)
label_img.pack()
frame = Frame(root)
frame.pack(side=BOTTOM)
Button(frame, text='Next>>', command=self.go_next_step5, width=16).pack(side=RIGHT)
Button(frame, text='<<Back', command=self.go_back_step3, width=16).pack(side=LEFT)
        ## cannot get a fast response! -> returns False even if we are connected :]
if self.loca.connectToCentral() == False :
print ("not connected")
else :
print ("connected")
'''
self.root = Tk()
self.root.geometry("700x500")
self.root.title("Doctor Application")
self.root.resizable(width=False, height=False)
self.app = APP(root)
self.root.mainloop()
'''
def tab_wait(self, root):
"""
Call localmodel.predict()
:return:
"""
frame = Frame(root)
frame.pack(side=TOP)
label_frame = ttk.LabelFrame(frame)
label_frame.pack(side=TOP)
label = ttk.Label(label_frame, text='TODO')
label.pack(side=TOP)
if __name__ == '__main__':
root = Tk()
root.geometry("700x500")
root.title("Modeling Tool GUI")
root.resizable(width=False, height=False)
tabControl = ttk.Notebook(root)
tab_step1 = ttk.Frame(tabControl)
tabControl.add(tab_step1, text="Step 1: Import Data")
tabControl.pack(expand=1, fill="both") # Pack to make visible
gui = GUI()
gui.tab_import(tab_step1, tabControl)
    root.mainloop()
|
contoller_test.go | /*
Copyright 2019 The hostpath provisioner operator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostpathprovisioner
import (
"context"
"fmt"
"strings"
secv1 "github.com/openshift/api/security/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"kubevirt.io/hostpath-provisioner-operator/pkg/apis/hostpathprovisioner/v1alpha1"
"kubevirt.io/hostpath-provisioner-operator/version"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
conditions "github.com/openshift/custom-resource-status/conditions/v1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
versionString = "1.0.1"
)
var _ = Describe("Controller reconcile loop", func() {
var (
cr *v1alpha1.HostPathProvisioner
cl client.Client
r *ReconcileHostPathProvisioner
)
BeforeEach(func() {
watchNamespaceFunc = func() (string, error) {
return "test-namespace", nil
}
version.VersionStringFunc = func() *string {
value := versionString
return &value
}
cr = &v1alpha1.HostPathProvisioner{
ObjectMeta: metav1.ObjectMeta{
Name: "test-name",
Namespace: "test-namespace",
},
Spec: v1alpha1.HostPathProvisionerSpec{
ImagePullPolicy: corev1.PullAlways,
PathConfig: v1alpha1.PathConfig{
Path: "/tmp/test",
UseNamingPrefix: "false",
},
},
}
})
It("Should create new if none exist", func() {
createDeployedCr(cr)
})
It("Should fix a changed daemonSet", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
cr, r, cl = createDeployedCr(cr)
// Now modify the daemonSet to something not desired.
ds := &appsv1.DaemonSet{}
err := cl.Get(context.TODO(), req.NamespacedName, ds)
Expect(err).NotTo(HaveOccurred())
ds.Spec.Template.Spec.Volumes[0].Name = "invalid"
err = cl.Update(context.TODO(), ds)
Expect(err).NotTo(HaveOccurred())
ds = &appsv1.DaemonSet{}
err = cl.Get(context.TODO(), req.NamespacedName, ds)
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.Template.Spec.Volumes[0].Name).To(Equal("invalid"))
// Run the reconcile loop
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
// Check the daemonSet value, make sure it changed back.
ds = &appsv1.DaemonSet{}
err = cl.Get(context.TODO(), req.NamespacedName, ds)
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.Template.Spec.Volumes[0].Name).To(Equal("pv-volume"))
})
It("Should fix a changed service account", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
saNN := types.NamespacedName{
Name: "test-name-admin",
Namespace: "test-namespace",
}
cr, r, cl = createDeployedCr(cr)
// Now modify the service account to something not desired.
sa := &corev1.ServiceAccount{}
err := cl.Get(context.TODO(), saNN, sa)
Expect(err).NotTo(HaveOccurred())
sa.ObjectMeta.Labels["k8s-app"] = "invalid"
err = cl.Update(context.TODO(), sa)
Expect(err).NotTo(HaveOccurred())
sa = &corev1.ServiceAccount{}
err = cl.Get(context.TODO(), saNN, sa)
Expect(err).NotTo(HaveOccurred())
Expect(sa.ObjectMeta.Labels["k8s-app"]).To(Equal("invalid"))
// Run the reconcile loop
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
// Verify the label has been changed back.
sa = &corev1.ServiceAccount{}
err = cl.Get(context.TODO(), saNN, sa)
Expect(err).NotTo(HaveOccurred())
Expect(sa.ObjectMeta.Labels["k8s-app"]).To(Equal("test-name"))
})
It("Should fix a changed ClusterRole", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
croleNN := types.NamespacedName{
Name: "test-name",
}
cr, r, cl = createDeployedCr(cr)
// Now modify the ClusterRole to something not desired.
crole := &rbacv1.ClusterRole{}
err := cl.Get(context.TODO(), croleNN, crole)
Expect(err).NotTo(HaveOccurred())
Expect(len(crole.Rules[1].Verbs)).To(Equal(4))
// Add delete to persistentvolumeclaims rule
crole.Rules[1].Verbs = append(crole.Rules[1].Verbs, "delete")
err = cl.Update(context.TODO(), crole)
Expect(err).NotTo(HaveOccurred())
crole = &rbacv1.ClusterRole{}
err = cl.Get(context.TODO(), croleNN, crole)
Expect(err).NotTo(HaveOccurred())
// Verify the extra ability is there.
Expect(len(crole.Rules[1].Verbs)).To(Equal(5))
Expect(crole.Rules[1].Verbs[4]).To(Equal("delete"))
// Run the reconcile loop
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
// Verify its gone now
err = cl.Get(context.TODO(), croleNN, crole)
Expect(err).NotTo(HaveOccurred())
Expect(len(crole.Rules[1].Verbs)).To(Equal(4))
})
It("Should fix a changed ClusterRoleBinding", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
crbNN := types.NamespacedName{
Name: "test-name",
}
cr, r, cl = createDeployedCr(cr)
// Now modify the CRB to something not desired.
crb := &rbacv1.ClusterRoleBinding{}
err := cl.Get(context.TODO(), crbNN, crb)
Expect(err).NotTo(HaveOccurred())
crb.Subjects[0].Name = "invalid"
err = cl.Update(context.TODO(), crb)
Expect(err).NotTo(HaveOccurred())
// Verify the name is wrong
crb = &rbacv1.ClusterRoleBinding{}
err = cl.Get(context.TODO(), crbNN, crb)
Expect(err).NotTo(HaveOccurred())
Expect(crb.Subjects[0].Name).To(Equal("invalid"))
// Run the reconcile loop
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
// Verify the name is correct again.
crb = &rbacv1.ClusterRoleBinding{}
err = cl.Get(context.TODO(), crbNN, crb)
Expect(err).NotTo(HaveOccurred())
Expect(crb.Subjects[0].Name).To(Equal("test-name-admin"))
})
It("Should fix a changed SecurityContextConstraints", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
sccNN := types.NamespacedName{
Name: "test-name",
}
cr, r, cl = createDeployedCr(cr)
// Now modify the SCC to something not desired.
scc := &secv1.SecurityContextConstraints{}
err := cl.Get(context.TODO(), sccNN, scc)
Expect(err).NotTo(HaveOccurred())
scc.AllowPrivilegedContainer = true
err = cl.Update(context.TODO(), scc)
Expect(err).NotTo(HaveOccurred())
// Verify allowPrivileged is true
scc = &secv1.SecurityContextConstraints{}
err = cl.Get(context.TODO(), sccNN, scc)
Expect(err).NotTo(HaveOccurred())
Expect(scc.AllowPrivilegedContainer).To(BeTrue())
// Run the reconcile loop
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
// Verify allowPrivileged is false
scc = &secv1.SecurityContextConstraints{}
err = cl.Get(context.TODO(), sccNN, scc)
Expect(err).NotTo(HaveOccurred())
Expect(scc.AllowPrivilegedContainer).To(BeFalse())
})
It("Should requeue if watch namespaces returns error", func() {
watchNamespaceFunc = func() (string, error) {
return "", fmt.Errorf("Something is not right, no watch namespace")
}
objs := []runtime.Object{cr}
// Register operator types with the runtime scheme.
s := scheme.Scheme
s.AddKnownTypes(v1alpha1.SchemeGroupVersion, cr)
secv1.AddToScheme(s)
// Create a fake client to mock API calls.
cl := fake.NewFakeClient(objs...)
		// Create a ReconcileHostPathProvisioner object with the scheme and fake client.
r := &ReconcileHostPathProvisioner{
client: cl,
scheme: s,
}
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
res, err := r.Reconcile(req)
Expect(err).To(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
})
It("Should requeue if cr cannot be located", func() {
objs := []runtime.Object{cr}
// Register operator types with the runtime scheme.
s := scheme.Scheme
s.AddKnownTypes(v1alpha1.SchemeGroupVersion, cr)
secv1.AddToScheme(s)
// Create a fake client to mock API calls.
cl := fake.NewFakeClient(objs...)
// Create a ReconcileHostPathProvisioner object with the scheme and fake client.
r := &ReconcileHostPathProvisioner{
client: cl,
scheme: s,
}
// Mock request to simulate Reconcile() being called on an event for a
// watched resource.
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name2",
Namespace: "test-namespace",
},
}
res, err := r.Reconcile(req)
Expect(err).ToNot(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
})
It("Should fail if trying to downgrade", func() {
cr, r, cl = createDeployedCr(cr)
version.VersionStringFunc = func() *string {
value := "1.0.0"
return &value
}
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
res, err := r.Reconcile(req)
Expect(err).To(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
Expect(strings.Contains(err.Error(), "downgraded")).To(BeTrue())
})
It("Should update CR status when upgrading", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
cr, r, cl = createDeployedCr(cr)
version.VersionStringFunc = func() *string {
value := "1.0.2"
return &value
}
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
updatedCr := &v1alpha1.HostPathProvisioner{}
err = r.client.Get(context.TODO(), req.NamespacedName, updatedCr)
Expect(err).NotTo(HaveOccurred())
Expect(updatedCr.Status.OperatorVersion).To(Equal("1.0.2"))
Expect(updatedCr.Status.ObservedVersion).To(Equal("1.0.2"))
Expect(updatedCr.Status.TargetVersion).To(Equal("1.0.2"))
// Didn't make daemonset unavailable, so should be fully healthy
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionProgressing)).To(BeFalse())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionDegraded)).To(BeFalse())
// Upgrade again, but make daemon set unavailable
version.VersionStringFunc = func() *string {
value := "1.0.3"
return &value
}
ds := &appsv1.DaemonSet{}
err = cl.Get(context.TODO(), req.NamespacedName, ds)
Expect(err).NotTo(HaveOccurred())
ds.Status.NumberReady = 1
ds.Status.DesiredNumberScheduled = 2
err = cl.Update(context.TODO(), ds)
Expect(err).NotTo(HaveOccurred())
res, err = r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
updatedCr = &v1alpha1.HostPathProvisioner{}
err = r.client.Get(context.TODO(), req.NamespacedName, updatedCr)
Expect(err).NotTo(HaveOccurred())
Expect(updatedCr.Status.OperatorVersion).To(Equal("1.0.3"))
Expect(updatedCr.Status.ObservedVersion).To(Equal("1.0.2"))
Expect(updatedCr.Status.TargetVersion).To(Equal("1.0.3"))
// Made the daemonset unavailable, so no longer fully healthy
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionProgressing)).To(BeFalse())
// It should be degraded
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionDegraded)).To(BeTrue())
ds = &appsv1.DaemonSet{}
err = cl.Get(context.TODO(), req.NamespacedName, ds)
Expect(err).NotTo(HaveOccurred())
ds.Status.NumberReady = 2
ds.Status.DesiredNumberScheduled = 2
err = cl.Update(context.TODO(), ds)
Expect(err).NotTo(HaveOccurred())
res, err = r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
updatedCr = &v1alpha1.HostPathProvisioner{}
err = r.client.Get(context.TODO(), req.NamespacedName, updatedCr)
Expect(err).NotTo(HaveOccurred())
Expect(updatedCr.Status.OperatorVersion).To(Equal("1.0.3"))
Expect(updatedCr.Status.ObservedVersion).To(Equal("1.0.3"))
Expect(updatedCr.Status.TargetVersion).To(Equal("1.0.3"))
// Made the daemonset available again, so should be fully healthy
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionProgressing)).To(BeFalse())
// It should NOT be degraded
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionDegraded)).To(BeFalse())
})
It("Should not requeue when CR is deleted", func() {
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
cr, r, cl = createDeployedCr(cr)
err := cl.Delete(context.TODO(), cr)
Expect(err).NotTo(HaveOccurred())
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
})
})
// After this has run, the returned cr state should be available, not progressing and not degraded.
func createDeployedCr(cr *v1alpha1.HostPathProvisioner) (*v1alpha1.HostPathProvisioner, *ReconcileHostPathProvisioner, client.Client) {
objs := []runtime.Object{cr}
// Register operator types with the runtime scheme.
s := scheme.Scheme
s.AddKnownTypes(v1alpha1.SchemeGroupVersion, cr)
secv1.AddToScheme(s)
// Create a fake client to mock API calls.
cl := fake.NewFakeClientWithScheme(s, objs...)
// Create a ReconcileHostPathProvisioner object with the scheme and fake client.
r := &ReconcileHostPathProvisioner{
client: cl,
scheme: s,
}
// Mock request to simulate Reconcile() being called on an event for a
// watched resource.
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: "test-name",
Namespace: "test-namespace",
},
}
res, err := r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
updatedCr := &v1alpha1.HostPathProvisioner{}
err = r.client.Get(context.TODO(), req.NamespacedName, updatedCr)
Expect(err).NotTo(HaveOccurred())
Expect(updatedCr.Status.OperatorVersion).To(Equal(versionString))
Expect(updatedCr.Status.TargetVersion).To(Equal(versionString))
Expect(updatedCr.Status.ObservedVersion).To(Equal(""))
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionAvailable)).To(BeFalse())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionProgressing)).To(BeTrue())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionDegraded)).To(BeFalse())
// Verify all the different objects are created.
verifyCreateDaemonSet(r.client, req.NamespacedName)
verifyCreateServiceAccount(r.client)
verifyCreateClusterRole(r.client)
verifyCreateClusterRoleBinding(r.client)
verifyCreateSCC(r.client)
// Now make the daemonSet available, and reconcile again.
ds := &appsv1.DaemonSet{}
err = cl.Get(context.TODO(), req.NamespacedName, ds)
Expect(err).NotTo(HaveOccurred())
ds.Status.NumberReady = 2
ds.Status.DesiredNumberScheduled = 2
err = cl.Update(context.TODO(), ds)
Expect(err).NotTo(HaveOccurred())
// daemonSet is ready, now reconcile again. We should have condition changes and observed version should be set.
res, err = r.Reconcile(req)
Expect(err).NotTo(HaveOccurred())
Expect(res.Requeue).To(BeFalse())
updatedCr = &v1alpha1.HostPathProvisioner{}
err = r.client.Get(context.TODO(), req.NamespacedName, updatedCr)
Expect(err).NotTo(HaveOccurred())
Expect(updatedCr.Status.OperatorVersion).To(Equal(versionString))
Expect(updatedCr.Status.TargetVersion).To(Equal(versionString))
Expect(updatedCr.Status.ObservedVersion).To(Equal(versionString))
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionAvailable)).To(BeTrue())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionProgressing)).To(BeFalse())
Expect(conditions.IsStatusConditionTrue(updatedCr.Status.Conditions, conditions.ConditionDegraded)).To(BeFalse())
return cr, r, cl
}
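// Note the two-phase pattern above: the first Reconcile creates all the
// objects and leaves the CR progressing with an empty observed version; only
// after the DaemonSet reports all pods ready does a second Reconcile mark the
// CR available and set ObservedVersion.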
// Verify all the proper values are set when creating the daemonset
func verifyCreateDaemonSet(cl client.Client, nn types.NamespacedName) {
ds := &appsv1.DaemonSet{}
err := cl.Get(context.TODO(), nn, ds)
Expect(err).NotTo(HaveOccurred())
// Check Service Account
Expect(ds.Spec.Template.Spec.ServiceAccountName).To(Equal("test-name-admin"))
// Check container image
Expect(ds.Spec.Template.Spec.Containers[0].Image).To(Equal("hostpath-provisioner"))
// Check use naming prefix
Expect(ds.Spec.Template.Spec.Containers[0].Env[0].Value).To(Equal("false"))
// Check directory
Expect(ds.Spec.Template.Spec.Containers[0].Env[2].Value).To(Equal("/tmp/test"))
}
func verifyCreateServiceAccount(cl client.Client) {
sa := &corev1.ServiceAccount{}
nn := types.NamespacedName{
Name: "test-name-admin",
Namespace: "test-namespace",
}
err := cl.Get(context.TODO(), nn, sa)
Expect(err).NotTo(HaveOccurred())
Expect(sa.ObjectMeta.Name).To(Equal("test-name-admin"))
}
func verifyCreateClusterRole(cl client.Client) {
crole := &rbacv1.ClusterRole{}
nn := types.NamespacedName{
Name: "test-name",
}
err := cl.Get(context.TODO(), nn, crole)
Expect(err).NotTo(HaveOccurred())
expectedRules := []rbacv1.PolicyRule{
{
APIGroups: []string{
"",
},
Resources: []string{
"persistentvolumes",
},
Verbs: []string{
"get",
"list",
"watch",
"create",
"delete",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"persistentvolumeclaims",
},
Verbs: []string{
"get",
"list",
"watch",
"update",
},
},
{
APIGroups: []string{
"storage.k8s.io",
},
Resources: []string{
"storageclasses",
},
Verbs: []string{
"get",
"list",
"watch",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"events",
},
Verbs: []string{
"list",
"watch",
"create",
"patch",
"update",
},
},
{
APIGroups: []string{
"",
},
Resources: []string{
"nodes",
},
Verbs: []string{
"get",
},
},
}
Expect(crole.Rules).To(Equal(expectedRules))
}
func verifyCreateClusterRoleBinding(cl client.Client) {
crb := &rbacv1.ClusterRoleBinding{}
nn := types.NamespacedName{
Name: "test-name",
}
err := cl.Get(context.TODO(), nn, crb)
Expect(err).NotTo(HaveOccurred())
Expect(crb.Subjects[0].Name).To(Equal("test-name-admin"))
Expect(crb.Subjects[0].Namespace).To(Equal("test-namespace"))
}
func | (cl client.Client) {
scc := &secv1.SecurityContextConstraints{}
nn := types.NamespacedName{
Name: "test-name",
}
err := cl.Get(context.TODO(), nn, scc)
Expect(err).NotTo(HaveOccurred())
expected := &secv1.SecurityContextConstraints{
Groups: []string{},
TypeMeta: metav1.TypeMeta{
APIVersion: "security.openshift.io/v1",
Kind: "SecurityContextConstraints",
},
// Metadata is dynamic; copy it so we can compare.
ObjectMeta: *scc.ObjectMeta.DeepCopy(),
AllowPrivilegedContainer: false,
RequiredDropCapabilities: []corev1.Capability{
"KILL",
"MKNOD",
"SETUID",
"SETGID",
},
RunAsUser: secv1.RunAsUserStrategyOptions{
Type: secv1.RunAsUserStrategyRunAsAny,
},
SELinuxContext: secv1.SELinuxContextStrategyOptions{
Type: secv1.SELinuxStrategyRunAsAny,
},
FSGroup: secv1.FSGroupStrategyOptions{
Type: secv1.FSGroupStrategyRunAsAny,
},
SupplementalGroups: secv1.SupplementalGroupsStrategyOptions{
Type: secv1.SupplementalGroupsStrategyRunAsAny,
},
AllowHostDirVolumePlugin: true,
Users: []string{
"system:serviceaccount:test-namespace:test-name-admin",
},
Volumes: []secv1.FSType{
secv1.FSTypeHostPath,
secv1.FSTypeSecret,
},
}
Expect(scc).To(Equal(expected))
}
| verifyCreateSCC |
eeMad_run.py | '''
Created on 08.04.2019
@author: mort
ipywidget interface to Google Earth Engine (GEE) for IR-MAD change detection
'''
import ee, time, warnings, math
import ipywidgets as widgets
from IPython.display import display
from ipyleaflet import (Map,DrawControl,TileLayer,
basemaps,basemap_to_tiles,
LayersControl,
MeasureControl,
FullScreenControl)
from auxil.eeMad import imad,radcal
from geopy.geocoders import photon
ee.Initialize()
geolocator = photon.Photon(timeout=10)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
poly = ee.Geometry.MultiPolygon([])
# poly = ee.Geometry.Polygon([[6.30154, 50.948329], [6.293307, 50.877329],
# [6.427091, 50.875595], [6.417486, 50.947464],
# [6.30154, 50.948329]])
def chi2cdf(chi2,df):
''' Chi square cumulative distribution function '''
return ee.Image(chi2.divide(2)).gammainc(ee.Number(df).divide(2))
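# How this is used below: under the no-change hypothesis the iMAD chi-square
# statistic for an N-band image has N degrees of freedom, so the no-change
# p-value of a pixel is
#   pval = 1 - chi2cdf(chi2, N)
# computed server-side as chi2cdf(chi2, N).subtract(1).multiply(-1).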
def makefeature(data):
''' for exporting as CSV to Drive '''
return ee.Feature(None, {'data': data})
def handle_draw(self, action, geo_json):
global poly
coords = geo_json['geometry']['coordinates']
if action == 'created':
poly = ee.Geometry.MultiPolygon(poly.coordinates().add(coords))
w_preview.disabled = True
w_export.disabled = True
w_collect.disabled = False
elif action == 'deleted':
poly1 = ee.Geometry.MultiPolygon(coords)
poly = poly.difference(poly1)
if len(poly.coordinates().getInfo()) == 0:
w_collect.disabled = True
dc = DrawControl(polyline={},circle={})
dc.on_draw(handle_draw)
# def GetTileLayerUrl(ee_image_object):
# map_id = ee.Image(ee_image_object).getMapId()
# tile_url_template = "https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}"
# return tile_url_template.format(**map_id)
def GetTileLayerUrl(ee_image_object):
map_id = ee.Image(ee_image_object).getMapId()
return map_id["tile_fetcher"].url_format
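# The returned URL template is what ipyleaflet expects, e.g.
#   m.add_layer(TileLayer(url=GetTileLayerUrl(img.visualize(min=mn, max=mx))))
# as done in the collect and preview handlers below.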
w_text = widgets.Textarea(
layout = widgets.Layout(width='75%'),
value = 'Algorithm output',
rows = 4,
disabled = False
)
w_platform = widgets.RadioButtons(
options=['SENTINEL/S2(VNIR/SWIR)','SENTINEL/S2(NIR/SWIR)','LANDSAT LC08','LANDSAT LE07','LANDSAT LT05'],
value='SENTINEL/S2(VNIR/SWIR)',
description='Platform:',
disabled=False
)
w_startdate1 = widgets.Text(
value='2020-05-01',
placeholder=' ',
description='Start T1:',
disabled=False
)
w_enddate1 = widgets.Text(
value='2020-07-01',
placeholder=' ',
description='End T1:',
disabled=False
)
w_startdate2 = widgets.Text(
value='2020-08-01',
placeholder=' ',
description='Start T2:',
disabled=False
)
w_enddate2 = widgets.Text(
value='2020-10-01',
placeholder=' ',
description='End T2:',
disabled=False
)
w_iterations = widgets.IntText(
value=30,
placeholder=' ',
description='Max Iter:',
disabled=False
)
w_scale = widgets.IntText(
value=30,
placeholder=' ',
description='Scale:',
disabled=False
)
w_exportname = widgets.Text(
value='users/<username>/<path>',
placeholder=' ',
disabled=False
)
w_location = widgets.Text(
value='Jülich',
placeholder=' ',
description='',
disabled=False
)
w_goto = widgets.Button(description="GoTo",disabled=False)
w_collect = widgets.Button(description="Collect",disabled=True)
w_preview = widgets.Button(description="Preview",disabled=True)
w_export = widgets.Button(description='Export to assets',disabled=True)
w_dates1 = widgets.VBox([w_startdate1,w_enddate1,w_iterations])
w_dates2 = widgets.VBox([w_startdate2,w_enddate2,w_scale])
w_dates = widgets.HBox([w_platform,w_dates1,w_dates2])
w_exp = widgets.HBox([w_export,w_exportname])
w_go = widgets.HBox([w_collect,w_preview,w_exp])
w_txt = widgets.HBox([w_text,w_goto,w_location])
box = widgets.VBox([w_txt,w_dates,w_go])
def on_widget_change(b):
w_preview.disabled = True
w_export.disabled = True
w_platform.observe(on_widget_change,names='value')
w_startdate1.observe(on_widget_change,names='value')
w_enddate1.observe(on_widget_change,names='value')
w_startdate2.observe(on_widget_change,names='value')
w_enddate2.observe(on_widget_change,names='value')
def on_goto_button_clicked(b):
try:
location = geolocator.geocode(w_location.value)
m.center = (location.latitude,location.longitude)
m.zoom = 11
except Exception as e:
print('Error: %s'%e)
w_goto.on_click(on_goto_button_clicked)
def on_collect_button_clicked(b):
global result,m,collection,count, \
w_startdate1,w_enddate1,w_startdate2, \
w_platform,w_enddate2,w_changemap, \
scale,nbands, \
image1,image2, \
madnames,coords,timestamp1,timestamp2
try:
coords = ee.List(poly.bounds().coordinates().get(0))
w_text.value = 'collecting, please wait ...'
cloudcover = 'CLOUD_COVER'
scale = 30.0
rgb = ['B4','B5','B7']
if w_platform.value=='SENTINEL/S2(VNIR/SWIR)':
collectionid = 'COPERNICUS/S2'
scale = 10.0
bands = ['B2','B3','B4','B8']
rgb = ['B8','B4','B3']
cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
elif w_platform.value=='SENTINEL/S2(NIR/SWIR)':
collectionid = 'COPERNICUS/S2'
scale = 20.0
bands = ['B5','B6','B7','B8A','B11','B12']
rgb = ['B5','B7','B11']
cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
elif w_platform.value=='LANDSAT LC08':
collectionid = 'LANDSAT/LC08/C01/T1_RT_TOA'
bands = ['B2','B3','B4','B5','B6','B7']
rgb = ['B5','B6','B7']
elif w_platform.value=='LANDSAT LE07':
collectionid = 'LANDSAT/LE07/C01/T1_RT_TOA'
bands = ['B1','B2','B3','B4','B5','B7']
else:
collectionid = 'LANDSAT/LT05/C01/T1_TOA'
bands = ['B1','B2','B3','B4','B5','B7']
collection1 = ee.ImageCollection(collectionid) \
.filterBounds(ee.Geometry.Point(coords.get(0))) \
.filterBounds(ee.Geometry.Point(coords.get(1))) \
.filterBounds(ee.Geometry.Point(coords.get(2))) \
.filterBounds(ee.Geometry.Point(coords.get(3))) \
.filterDate(ee.Date(w_startdate1.value), ee.Date(w_enddate1.value)) \
.sort(cloudcover, True)
count = collection1.size().getInfo()
if count==0:
raise ValueError('No images found for first time interval: '+collectionid)
collection2 = ee.ImageCollection(collectionid) \
.filterBounds(ee.Geometry.Point(coords.get(0))) \
.filterBounds(ee.Geometry.Point(coords.get(1))) \
.filterBounds(ee.Geometry.Point(coords.get(2))) \
.filterBounds(ee.Geometry.Point(coords.get(3))) \
.filterDate(ee.Date(w_startdate2.value), ee.Date(w_enddate2.value)) \
.sort(cloudcover, True)
count = collection2.size().getInfo()
if count==0:
raise ValueError('No images found for second time interval: '+collectionid)
image1 = ee.Image(collection1.first()).select(bands)
timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
timestamp1 = time.strftime('%c', timestamp1)
systemid1 = image1.get('system:id').getInfo()
cloudcover1 = image1.get(cloudcover).getInfo()
image2 = ee.Image(collection2.first()).select(bands)
timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
timestamp2 = time.strftime('%c', timestamp2)
systemid2 = image2.get('system:id').getInfo()
cloudcover2 = image2.get(cloudcover).getInfo()
txt = 'Image1: %s \n'%systemid1
txt += 'Acquisition date: %s, Cloud cover: %f \n'%(timestamp1,cloudcover1)
txt += 'Image2: %s \n'%systemid2
txt += 'Acquisition date: %s, Cloud cover: %f \n'%(timestamp2,cloudcover2)
w_text.value = txt
nbands = image1.bandNames().length()
madnames = ['MAD'+str(i+1) for i in range(nbands.getInfo())]
# co-register
image2 = image2.register(image1,60)
w_preview.disabled = False
w_export.disabled = False
# display first image
if len(m.layers)>3:
m.remove_layer(m.layers[3])
img = image1.clip(poly).select(rgb).rename('r','g','b')
ps = img.reduceRegion(ee.Reducer.percentile([2,98]),maxPixels=1e10).getInfo()
mn = [ps['r_p2'],ps['g_p2'],ps['b_p2']]
mx = [ps['r_p98'],ps['g_p98'],ps['b_p98']]
m.add_layer(TileLayer(url=GetTileLayerUrl(img.visualize(min=mn,max=mx))))
except Exception as e:
w_text.value = 'Error: %s'%e
w_collect.on_click(on_collect_button_clicked)
def on_preview_button_clicked(b):
global nbands
try:
w_text.value = 'iteration started, please wait ...\n'
# iMAD
inputlist = ee.List.sequence(1,w_iterations.value)
first = ee.Dictionary({'done':ee.Number(0),
'scale':ee.Number(w_scale.value),
'niter':ee.Number(0),
'image':image1.addBands(image2).clip(poly),
'allrhos': [ee.List.sequence(1,nbands)],
'chi2':ee.Image.constant(0),
'MAD':ee.Image.constant(0)})
result = ee.Dictionary(inputlist.iterate(imad,first))
MAD = ee.Image(result.get('MAD')).rename(madnames)
niter = ee.Number(result.get('niter')).getInfo()
# threshold
nbands = MAD.bandNames().length()
chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
tst = pval.gt(ee.Image.constant(0.0001))
MAD = MAD.where(tst,ee.Image.constant(0))
allrhos = ee.Array(result.get('allrhos')).toList()
txt = 'Canonical correlations: %s \nIterations: %i\n'%(str(allrhos.get(-1).getInfo()),niter)
w_text.value += txt
if len(m.layers)>3:
m.remove_layer(m.layers[3])
MAD2 = MAD.select(1).rename('b')
ps = MAD2.reduceRegion(ee.Reducer.percentile([1,99])).getInfo()
mn = ps['b_p1']
mx = ps['b_p99']
m.add_layer(TileLayer(url=GetTileLayerUrl( MAD2.visualize(min=mn,max=mx))))
except Exception as e:
w_text.value = 'Error: %s\n Retry collect/preview or export to assets'%e
w_preview.on_click(on_preview_button_clicked)
def on_export_button_clicked(b):
global w_exportname, nbands
try:
# iMAD
inputlist = ee.List.sequence(1,w_iterations.value)
first = ee.Dictionary({'done':ee.Number(0),
'scale':ee.Number(w_scale.value),
'niter':ee.Number(0),
'image':image1.addBands(image2).clip(poly),
'allrhos': [ee.List.sequence(1,nbands)],
'chi2':ee.Image.constant(0),
'MAD':ee.Image.constant(0)})
result = ee.Dictionary(inputlist.iterate(imad,first))
MAD = ee.Image(result.get('MAD')).rename(madnames)
# threshold
chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
tst = pval.gt(ee.Image.constant(0.0001))
MAD = MAD.where(tst,ee.Image.constant(0))
allrhos = ee.Array(result.get('allrhos')).toList().slice(1,-1)
# radcal
ncmask = chi2cdf(chi2,nbands).lt(ee.Image.constant(0.05)).rename(['invarpix'])
inputlist1 = ee.List.sequence(0,nbands.subtract(1))
first = ee.Dictionary({'image':image1.addBands(image2),
'ncmask':ncmask,
'nbands':nbands,
'scale':ee.Number(w_scale.value),
'rect':poly,
'coeffs': ee.List([]),
'normalized':ee.Image()})
result1 = ee.Dictionary(inputlist1.iterate(radcal,first))
coeffs = ee.List(result1.get('coeffs'))
sel = ee.List.sequence(1,nbands)
normalized = ee.Image(result1.get('normalized')).select(sel)
MADs = ee.Image.cat(MAD,chi2,ncmask,image1.clip(poly),image2.clip(poly),normalized)
assexport = ee.batch.Export.image.toAsset(MADs,
description='assetExportTask',
assetId=w_exportname.value,scale=scale,maxPixels=1e9)
assexport.start()
assexportid = str(assexport.id)
w_text.value= 'Exporting change map, chisqr, original images and normalized image to %s\n task id: %s'%(w_exportname.value,assexportid)
except Exception as e:
w_text.value = 'Error: %s'%e
# export metadata to drive
ninvar = ee.String(ncmask.reduceRegion(ee.Reducer.sum().unweighted(),
scale=scale,maxPixels= 1e9).toArray().project([0]))
metadata = ee.List(['IR-MAD: '+time.asctime(),
'Platform: '+w_platform.value,
'Asset export name: '+w_exportname.value,
'Timestamps: %s %s'%(timestamp1,timestamp2)]) \
.cat(['Canonical Correlations:']) \
.cat(allrhos) \
.cat(['Radiometric Normalization, Invariant Pixels:']) \
.cat([ninvar]) \
.cat(['Slope, Intercept, R:']) \
.cat(coeffs)
fileNamePrefix=w_exportname.value.replace('/','-')
gdexport = ee.batch.Export.table.toDrive(ee.FeatureCollection(metadata.map(makefeature)).merge(ee.Feature(poly)),
description='driveExportTask_meta',
folder = 'gee',
fileNamePrefix=fileNamePrefix )
gdexport.start()
w_text.value += '\n Exporting metadata to Drive/EarthEngineImages/%s\n task id: %s'%(fileNamePrefix,str(gdexport.id))
w_export.on_click(on_export_button_clicked)
def r | ):
global m,center
center = [51.0,6.4]
osm = basemap_to_tiles(basemaps.OpenStreetMap.Mapnik)
ews = basemap_to_tiles(basemaps.Esri.WorldStreetMap)
ewi = basemap_to_tiles(basemaps.Esri.WorldImagery)
dc = DrawControl(polyline={},circlemarker={})
dc.rectangle = {"shapeOptions": {"fillColor": "#0000ff","color": "#0000ff","fillOpacity": 0.05}}
dc.polygon = {"shapeOptions": {"fillColor": "#0000ff","color": "#0000ff","fillOpacity": 0.05}}
dc.on_draw(handle_draw)
lc = LayersControl(position='topright')
fs = FullScreenControl(position='topleft')
mc = MeasureControl(position='topright',primary_length_unit = 'kilometers')
m = Map(center=center, zoom=11, layout={'height':'500px'},layers=(ewi,ews,osm),controls=(mc,dc,lc,fs))
# m = Map(center=center, zoom=11, layout={'height':'500px'},controls=(lc,dc,fs,mc,sm_control))
display(m)
return box
| un( |
utils.py | from django.contrib.auth.models import User
def get_anonymous_user():
"""
Get the user called "anonymous" if it exists, or create it if it doesn't.
This is the default concordia user if someone is working on the site
without logging in first.
"""
try:
return User.objects.get(username="anonymous")
except User.DoesNotExist:
return User.objects.create_user(username="anonymous")
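# Illustrative usage (hypothetical view code): fall back to the anonymous
# user when the request is unauthenticated.
#
#   user = request.user if request.user.is_authenticated else get_anonymous_user()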
def | (request):
accept_header = request.META.get("HTTP_ACCEPT", "*/*")
return "application/json" in accept_header
| request_accepts_json |
test_enr.py | import base64
import pytest
import rlp
from eth_utils import (
decode_hex,
to_bytes,
ValidationError,
)
from eth_utils.toolz import (
assoc,
assoc_in,
)
from p2p.discv5.enr import (
ENR,
ENRSedes,
UnsignedENR,
)
from p2p.discv5.identity_schemes import (
IdentityScheme,
V4IdentityScheme,
IdentitySchemeRegistry,
)
from p2p.forkid import ForkID
# Source: https://github.com/fjl/EIPs/blob/0acb5939555cbd0efcdd04da0d3acb0cc81d049a/EIPS/eip-778.md
OFFICIAL_TEST_DATA = {
"repr": (
"enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkT"
"fj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHY"
"pMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8"
),
"private_key": decode_hex("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"),
"public_key": decode_hex("03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138"),
"node_id": decode_hex("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7"),
"identity_scheme": V4IdentityScheme,
"sequence_number": 1,
"kv_pairs": {
b"id": b"v4",
b"ip": decode_hex("7f000001"),
b"secp256k1": decode_hex(
"03ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138",
),
b"udp": 0x765f,
}
}
# This is an ENR sent by geth and it includes a fork ID (https://eips.ethereum.org/EIPS/eip-2124)
# kv pair as well.
REAL_LIFE_TEST_DATA = {
"repr": (
"enr:-Jq4QO5zEyIBU5lSa9iaen0A2xUB5_IVrCi1DbyASTTnLV5RJan6aGPr8kU0p0MYKU5YezZgdSUE"
"-GOBEio6Ultyf1Aog2V0aMrJhGN2AZCDGfCggmlkgnY0gmlwhF4_wLuJc2VjcDI1NmsxoQOt7cA_B_Kg"
"nQ5RmwyA6ji8M1Y0jfINItRGbOOwy7XgbIN0Y3CCdl-DdWRwgnZf"
),
"public_key": decode_hex("03adedc03f07f2a09d0e519b0c80ea38bc3356348df20d22d4466ce3b0cbb5e06c"),
"node_id": decode_hex("dc8542768b457753669bebfe215d5f9ef4adb7d7df84beabddbe98350869165f"),
"identity_scheme": V4IdentityScheme,
"sequence_number": 40,
"kv_pairs": {
b"eth": (ForkID(hash=to_bytes(hexstr='0x63760190'), next=1700000), ),
b"id": b"v4",
b"ip": decode_hex("5e3fc0bb"),
b"secp256k1": decode_hex(
"03adedc03f07f2a09d0e519b0c80ea38bc3356348df20d22d4466ce3b0cbb5e06c",
),
b"tcp": 30303,
b"udp": 30303,
}
}
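# As test_repr below verifies, the "repr" strings above are the RLP encoding
# of the record, base64url-encoded with the padding stripped and prefixed
# with "enr:"; roughly:
#
#   "enr:" + base64.urlsafe_b64encode(rlp.encode(enr)).rstrip(b"=").decode()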
class MockIdentityScheme(IdentityScheme):
id = b"mock"
private_key_size = 32
@classmethod
def create_enr_signature(cls, enr, private_key: bytes) -> bytes:
if len(private_key) != cls.private_key_size:
raise ValidationError("Invalid private key")
return private_key + enr.get_signing_message()
@classmethod
def validate_enr_structure(cls, enr) -> None:
pass
@classmethod
def validate_enr_signature(cls, enr) -> None:
if not enr.signature == enr.node_id + enr.get_signing_message():
raise ValidationError("Invalid signature")
@classmethod
def extract_public_key(cls, enr) -> bytes:
return b""
@classmethod
def extract_node_id(cls, enr) -> bytes:
return enr.signature[:cls.private_key_size]
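# With this mock scheme the "signature" is just private_key + signing message,
# so extract_node_id recovers the private key itself; that is why
# test_node_id below can assert enr.node_id == private_key.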
@pytest.fixture
def | ():
return MockIdentityScheme
@pytest.fixture
def identity_scheme_registry(mock_identity_scheme):
registry = IdentitySchemeRegistry()
registry.register(V4IdentityScheme)
registry.register(mock_identity_scheme)
return registry
def test_mapping_interface(identity_scheme_registry):
kv_pairs = {
b"id": b"mock",
b"key1": b"value1",
b"key2": b"value2",
}
enr = ENR(
signature=b"",
sequence_number=0,
kv_pairs=kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
for key, value in kv_pairs.items():
assert key in enr
assert enr[key] == value
assert enr.get(key) == value
not_a_key = b"key3"
assert not_a_key not in kv_pairs
assert not_a_key not in enr
assert enr.get(not_a_key) is None
assert enr.get(not_a_key, b"default") == b"default"
assert tuple(enr.keys()) == tuple(kv_pairs.keys())
assert tuple(enr.values()) == tuple(kv_pairs.values())
assert tuple(enr.items()) == tuple(kv_pairs.items())
assert len(enr) == len(kv_pairs)
assert tuple(iter(enr)) == tuple(iter(kv_pairs))
def test_initialization(identity_scheme_registry):
valid_sequence_number = 0
valid_kv_pairs = {b"id": b"mock"}
valid_signature = b"" # signature is not validated during initialization
assert UnsignedENR(
sequence_number=valid_sequence_number,
kv_pairs=valid_kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
assert ENR(
sequence_number=valid_sequence_number,
kv_pairs=valid_kv_pairs,
signature=valid_signature,
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
UnsignedENR(
sequence_number=valid_sequence_number,
kv_pairs={b"no-id": b""},
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
ENR(
sequence_number=valid_sequence_number,
kv_pairs={b"no-id": b""},
signature=valid_signature,
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
UnsignedENR(
sequence_number=-1,
kv_pairs=valid_kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
with pytest.raises(ValidationError):
ENR(
sequence_number=-1,
kv_pairs=valid_kv_pairs,
signature=valid_signature,
identity_scheme_registry=identity_scheme_registry,
)
def test_signing(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(
sequence_number=0,
kv_pairs={b"id": b"mock"},
identity_scheme_registry=identity_scheme_registry
)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
assert enr.signature == mock_identity_scheme.create_enr_signature(enr, private_key)
def test_signature_validation(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
enr.validate_signature()
invalid_signature = b"\xff" * 64
invalid_enr = ENR(
enr.sequence_number,
dict(enr),
invalid_signature,
identity_scheme_registry=identity_scheme_registry
)
with pytest.raises(ValidationError):
invalid_enr.validate_signature()
with pytest.raises(ValidationError):
ENR(
0,
{b"id": b"unknown"},
b"",
identity_scheme_registry=identity_scheme_registry,
)
def test_public_key(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
assert enr.public_key == mock_identity_scheme.extract_public_key(enr)
def test_node_id(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
private_key = b"\x00" * 32
enr = unsigned_enr.to_signed_enr(private_key)
assert enr.node_id == private_key
def test_signature_scheme_selection(mock_identity_scheme, identity_scheme_registry):
mock_enr = ENR(0, {b"id": b"mock"}, b"", identity_scheme_registry)
assert mock_enr.identity_scheme is mock_identity_scheme
v4_enr = ENR(0, {b"id": b"v4", b"secp256k1": b"\x02" * 33}, b"", identity_scheme_registry)
assert v4_enr.identity_scheme is V4IdentityScheme
with pytest.raises(ValidationError):
ENR(0, {b"id": b"other"}, b"", identity_scheme_registry)
def test_repr(mock_identity_scheme, identity_scheme_registry):
unsigned_enr = UnsignedENR(0, {b"id": b"mock"}, identity_scheme_registry)
enr = unsigned_enr.to_signed_enr(b"\x00" * 32)
base64_encoded_enr = base64.urlsafe_b64encode(rlp.encode(enr))
represented_enr = repr(enr)
assert represented_enr.startswith("enr:")
assert base64_encoded_enr.rstrip(b"=").decode() == represented_enr[4:]
assert ENR.from_repr(represented_enr, identity_scheme_registry) == enr
def test_deserialization_key_order_validation(identity_scheme_registry):
serialized_enr = rlp.encode([
b"signature",
0,
b"key1",
b"value1",
b"id",
b"",
b"key2",
b"value2",
])
with pytest.raises(rlp.DeserializationError):
rlp.decode(
serialized_enr,
ENRSedes,
identity_scheme_registry=identity_scheme_registry,
)
def test_deserialization_key_uniqueness_validation(identity_scheme_registry):
serialized_enr = rlp.encode([
b"signature",
0,
b"key1",
b"value1",
b"id",
b"",
b"key1",
b"value2",
])
with pytest.raises(rlp.DeserializationError):
rlp.decode(
serialized_enr,
ENRSedes,
identity_scheme_registry=identity_scheme_registry,
)
@pytest.mark.parametrize("incomplete_enr", (
(),
(b"signature",),
(b"signature", 0, b"key1"),
(b"signature", 0, b"key1", b"value1", b"id"),
))
def test_deserialization_completeness_validation(incomplete_enr, identity_scheme_registry):
incomplete_enr_rlp = rlp.encode(incomplete_enr)
with pytest.raises(rlp.DeserializationError):
rlp.decode(
incomplete_enr_rlp,
ENRSedes,
identity_scheme_registry=identity_scheme_registry,
)
def test_equality(identity_scheme_registry):
base_kwargs = {
"sequence_number": 0,
"kv_pairs": {
b"id": b"mock",
b"key1": b"value1",
b"key2": b"value2",
},
"signature": b"signature",
"identity_scheme_registry": identity_scheme_registry,
}
base_enr = ENR(**base_kwargs)
equal_enr = ENR(**base_kwargs)
enr_different_sequence_number = ENR(
**assoc(base_kwargs, "sequence_number", 1)
)
enr_different_kv_pairs = ENR(
**assoc_in(base_kwargs, ("kv_pairs", b"key1"), b"value2"),
)
enr_different_signature = ENR(
**assoc(base_kwargs, "signature", b"different-signature")
)
assert base_enr == base_enr
assert equal_enr == base_enr
assert enr_different_sequence_number != base_enr
assert enr_different_kv_pairs != base_enr
assert enr_different_signature != base_enr
def test_serialization_roundtrip(identity_scheme_registry):
original_enr = ENR(
sequence_number=0,
kv_pairs={
b"id": b"mock",
b"key2": b"value2", # wrong order so that serialization is forced to fix this
b"key1": b"value1",
},
signature=b"",
identity_scheme_registry=identity_scheme_registry,
)
encoded = rlp.encode(original_enr)
recovered_enr = rlp.decode(
encoded,
ENR,
identity_scheme_registry=identity_scheme_registry,
)
assert recovered_enr == original_enr
@pytest.mark.parametrize("invalid_kv_pairs", (
{b"id": b"v4"}, # missing public key
{b"id": b"v4", b"secp256k1": b"\x00"}, # invalid public key
))
def test_v4_structure_validation(invalid_kv_pairs, identity_scheme_registry):
with pytest.raises(ValidationError):
UnsignedENR(
sequence_number=0,
kv_pairs=invalid_kv_pairs,
identity_scheme_registry=identity_scheme_registry,
)
def test_official_test_vector():
enr = ENR.from_repr(OFFICIAL_TEST_DATA["repr"]) # use default identity scheme registry
assert enr.sequence_number == OFFICIAL_TEST_DATA["sequence_number"]
assert dict(enr) == OFFICIAL_TEST_DATA["kv_pairs"]
assert enr.public_key == OFFICIAL_TEST_DATA["public_key"]
assert enr.node_id == OFFICIAL_TEST_DATA["node_id"]
assert enr.identity_scheme is OFFICIAL_TEST_DATA["identity_scheme"]
assert repr(enr) == OFFICIAL_TEST_DATA["repr"]
unsigned_enr = UnsignedENR(enr.sequence_number, dict(enr))
reconstructed_enr = unsigned_enr.to_signed_enr(OFFICIAL_TEST_DATA["private_key"])
assert reconstructed_enr == enr
def test_real_life_test_vector():
enr = ENR.from_repr(REAL_LIFE_TEST_DATA["repr"])
assert enr.sequence_number == REAL_LIFE_TEST_DATA["sequence_number"]
assert enr.public_key == REAL_LIFE_TEST_DATA["public_key"]
assert enr.node_id == REAL_LIFE_TEST_DATA["node_id"]
assert enr.identity_scheme is REAL_LIFE_TEST_DATA["identity_scheme"]
assert dict(enr) == REAL_LIFE_TEST_DATA["kv_pairs"]
assert repr(enr) == REAL_LIFE_TEST_DATA["repr"]
| mock_identity_scheme |
index.ts | import config from './config'; |
export * from './config';
export default config; | |
test-scheduler_registry.py | #!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
try:
from sprokit.pipeline import config
import sprokit.pipeline.scheduler_factory
except:
test_error("Failed to import the scheduler_factory module")
def test_create():
from sprokit.pipeline import config
from sprokit.pipeline import scheduler_factory
scheduler_factory.SchedulerType()
## scheduler_factory.SchedulerTypes()
scheduler_factory.SchedulerDescription()
scheduler_factory.SchedulerModule()
def test_api_calls():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
sched_type = 'thread_per_process'
c = config.empty_config()
p = pipeline.Pipeline()
scheduler_factory.create_scheduler(sched_type, p)
scheduler_factory.create_scheduler(sched_type, p, c)
scheduler_factory.types()
scheduler_factory.description(sched_type)
scheduler_factory.default_type
def example_scheduler(check_init):
from sprokit.pipeline import scheduler
class PythonExample(scheduler.PythonScheduler):
def __init__(self, pipe, conf):
scheduler.PythonScheduler.__init__(self, pipe, conf)
self.ran_start = check_init
self.ran_wait = check_init
self.ran_stop = check_init
self.ran_pause = check_init
self.ran_resume = check_init
def _start(self):
self.ran_start = True
def _wait(self):
self.ran_wait = True
def _stop(self):
self.ran_stop = True
def _pause(self):
self.ran_pause = True
def _resume(self):
|
def __del__(self):
if not self.ran_start:
test_error("start override was not called")
if not self.ran_wait:
test_error("wait override was not called")
if not self.ran_stop:
test_error("stop override was not called")
if not self.ran_pause:
test_error("pause override was not called")
if not self.ran_resume:
test_error("resume override was not called")
return PythonExample
def test_register():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
sched_type = 'python_example'
sched_desc = 'simple description'
scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(True))
if not sched_desc == scheduler_factory.description(sched_type):
test_error("Description was not preserved when registering")
p = pipeline.Pipeline()
try:
s = scheduler_factory.create_scheduler(sched_type, p)
if s is None:
raise Exception()
except:
test_error("Could not create newly registered scheduler type")
def test_wrapper_api():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import process_factory
from sprokit.pipeline import scheduler_factory
sched_type = 'python_example'
sched_desc = 'simple description'
modules.load_known_modules()
scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(False))
p = pipeline.Pipeline()
proc_type = 'orphan'
proc_name = 'orphan'
proc = process_factory.create_process(proc_type, proc_name)
p.add_process(proc)
def check_scheduler(s):
if s is None:
test_error("Got a 'None' scheduler")
return
s.start()
s.pause()
s.resume()
s.stop()
s.start()
s.wait()
del s
p.reset()
p.setup_pipeline()
s = scheduler_factory.create_scheduler(sched_type, p)
check_scheduler(s)
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
| self.ran_resume = True |
qr.go | package main
import (
"fmt"
"os"
"path"
"github.com/odeke-em/rsc/qr"
)
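// Usage sketch: `go run qr.go <url> ...` writes one <basename>.png QR code
// per argument; with no arguments it falls back to the two defaults below.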
func main() | {
argv := []string{"github.com/odeke-em", "github.com/indragiek"}
if len(os.Args[1:]) >= 1 {
argv = os.Args[1:]
}
for _, url := range argv {
code, err := qr.Encode(url, qr.Q)
if err != nil {
fmt.Fprintf(os.Stderr, "%s %v\n", url, err)
continue
}
pngImage := code.PNG()
base := path.Base(url)
rawPath := fmt.Sprintf("%s.png", base)
f, err := os.Create(rawPath)
if err != nil {
fmt.Fprintf(os.Stderr, "open %s %v\n", rawPath, err)
continue
}
// Write the raw PNG bytes; a printf-style write with a trailing newline would corrupt the image.
f.Write(pngImage)
f.Close()
}
} |
|
mock.py | # mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2011 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 0.8.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
__all__ = (
'Mock',
'MagicMock',
'mocksignature',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
)
__version__ = '0.8.0alpha1'
__unittest = True
import sys
try:
import inspect
except ImportError:
# for alternative platforms that
# may not have inspect
inspect = None
try:
BaseException
except NameError:
# Python 2.4 compatibility
BaseException = Exception
try:
from functools import wraps
except ImportError:
# Python 2.4 compatibility
def wraps(original):
def inner(f):
f.__name__ = original.__name__
f.__doc__ = original.__doc__
f.__module__ = original.__module__
return f
return inner
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
try:
long
except NameError:
# Python 3
long = int
try:
_isidentifier = str.isidentifier
except AttributeError:
# Python 2.X
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
return regex.match(string)
inPy3k = sys.version_info[0] == 3
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
self = '__self__'
builtin = 'builtins'
# hack for Python 3 :-)
_super = super
FILTER_DIR = True
# getsignature and mocksignature heavily "inspired" by
# the decorator module: http://pypi.python.org/pypi/decorator/
# by Michele Simionato
def _getsignature(func, skipfirst):
if inspect is None:
raise ImportError('inspect module not available')
if inspect.isclass(func):
func = func.__init__
# will have a self arg
skipfirst = True
elif not (inspect.ismethod(func) or inspect.isfunction(func)):
func = func.__call__
regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
# instance methods need to lose the self argument
if getattr(func, self, None) is not None:
regargs = regargs[1:]
_msg = "_mock_ is a reserved argument name, can't mock signatures using _mock_"
assert '_mock_' not in regargs, _msg
if varargs is not None:
assert '_mock_' not in varargs, _msg
if varkwargs is not None:
assert '_mock_' not in varkwargs, _msg
if skipfirst:
regargs = regargs[1:]
signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
formatvalue=lambda value: "")
return signature[1:-1], func
def _getsignature2(func, skipfirst):
if inspect is None:
raise ImportError('inspect module not available')
if isinstance(func, ClassTypes):
try:
func = func.__init__
except AttributeError:
return
skipfirst = True
elif not isinstance(func, FunctionTypes):
func = func.__call__
try:
regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
# instance methods and classmethods need to lose the self argument
if getattr(func, self, None) is not None:
regargs = regargs[1:]
if skipfirst:
# this condition and the above one are never both True - why?
regargs = regargs[1:]
signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
formatvalue=lambda value: "")
return signature[1:-1], func
def _check_signature(func, mock, skipfirst):
if not _callable(func):
return
result = _getsignature2(func, skipfirst)
if result is None:
return
signature, func = result
src = "lambda self, %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
#funcopy.__dict__.update(func.__dict__)
funcopy.__module__ = func.__module__
if not inPy3k:
funcopy.func_defaults = func.func_defaults
else:
funcopy.__defaults__ = func.__defaults__
funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
if isinstance(obj, ClassTypes):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _set_signature(mock, original, skipfirst):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original. This is effectively mocksignature2.
if not _callable(original):
return
skipfirst = isinstance(original, ClassTypes)
result = _getsignature2(original, skipfirst)
if result is None:
# was a C function (e.g. object().__init__ ) that can't be mocked
return
signature, func = result
src = "lambda %s: None" % signature
context = {'_mock_': mock}
checksig = eval(src, context)
_copy_func_details(func, checksig)
name = original.__name__
if not _isidentifier(name):
name = 'funcopy'
context = {'checksig': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
checksig(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec (src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def mocksignature(func, mock=None, skipfirst=False):
"""
mocksignature(func, mock=None, skipfirst=False)
Create a new function with the same signature as `func` that delegates
to `mock`. If `skipfirst` is True the first argument is skipped, useful
for methods where `self` needs to be omitted from the new function.
If you don't pass in a `mock` then one will be created for you.
The mock is set as the `mock` attribute of the returned function for easy
access.
`mocksignature` can also be used with classes. It copies the signature of
the `__init__` method.
When used with callable objects (instances) it copies the signature of the
`__call__` method.
"""
if mock is None:
mock = Mock()
signature, func = _getsignature(func, skipfirst)
src = "lambda %(signature)s: _mock_(%(signature)s)" % {
'signature': signature
}
funcopy = eval(src, dict(_mock_=mock))
_copy_func_details(func, funcopy)
_setup_func(funcopy, mock)
return funcopy
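# Illustrative usage (hypothetical function):
#
#   def add(a, b):
#       return a + b
#
#   mock_add = mocksignature(add)
#   mock_add(1, 2)                          # delegates to mock_add.mock
#   mock_add.mock.assert_called_with(1, 2)  # passes
#   mock_add(1)                             # TypeError: signature is enforced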
def _setup_func(funcopy, mock):
funcopy.mock = mock
if not isinstance(mock, Mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def reset_mock():
funcopy.method_calls = []
mock.reset_mock()
ret = funcopy.return_value
if isinstance(ret, Mock) and not ret is mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = []
funcopy.method_calls = []
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.reset_mock = reset_mock
mock._mock_signature = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return '<SentinelObject "%s">' % self.name
class Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, SentinelObject(name))
sentinel = Sentinel()
DEFAULT = sentinel.DEFAULT
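# Illustrative usage: each attribute access on `sentinel` returns the same
# unique named object, handy as a marker value in tests:
#
#   assert sentinel.some_object is sentinel.some_object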
class OldStyleClass:
pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
ClassTypes = (type,)
if not inPy3k:
ClassTypes = (type, ClassType)
_allowed_names = set(['return_value'])
def _mock_signature_property(name):
_allowed_names.add(name)
def _get(self):
sig = self._mock_signature
if sig is None:
return getattr(self, '_mock_' + name)
return getattr(sig, name)
def _set(self, value):
sig = self._mock_signature
if sig is None:
setattr(self, '_mock_' + name, value)
else:
setattr(sig, name, value)
return property(_get, _set)
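# The properties created above keep a mock and its mocksignature wrapper in
# sync: once _setup_func attaches a `_mock_signature` function, attributes
# such as `called` and `call_count` are read from and written to the wrapper
# rather than stored on the mock itself.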
class Mock(object):
"""
Create a new ``Mock`` object. ``Mock`` takes several optional arguments
that specify the behaviour of the Mock object:
* ``spec``: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an ``AttributeError``.
If ``spec`` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* ``spec_set``: A stricter variant of ``spec``. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
``spec_set`` will raise an ``AttributeError``.
* ``side_effect``: A function to be called whenever the Mock is called. See
the :attr:`Mock.side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns :data:`DEFAULT`, the return
value of this function is used as the return value.
Alternatively ``side_effect`` can be an exception class or instance. In
this case the exception will be raised when the mock is called.
* ``return_value``: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
:attr:`Mock.return_value` attribute.
* ``wraps``: Item for the mock object to wrap. If ``wraps`` is not None
then calling the Mock will pass the call through to the wrapped object
(returning the real result and ignoring ``return_value``). Attribute
access on the mock will return a Mock object that wraps the corresponding
attribute of the wrapped object (so attempting to access an attribute that
doesn't exist will raise an ``AttributeError``).
If the mock has an explicit ``return_value`` set then calls are not passed
to the wrapped object and the ``return_value`` is returned instead.
* ``name``: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
return object.__new__(new)
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_old_name=None, _spec_state=None, **kwargs):
self._mock_parent = parent
self._mock_name = name
self._mock_old_name = _old_name
self._spec_state = _spec_state
_spec_class = None
if spec_set is not None:
spec = spec_set
spec_set = True
if spec is not None and type(spec) is not list:
if isinstance(spec, ClassTypes):
_spec_class = spec
else:
_spec_class = _get_class(spec)
spec = dir(spec)
self._spec_class = _spec_class
self._spec_set = spec_set
self._mock_methods = spec
self._mock_children = {}
self._mock_return_value = return_value
self._mock_side_effect = side_effect
self._mock_wraps = wraps
self.mock_calls = []
self._mock_signature = None
self._mock_called = False
self._mock_call_args = None
self._mock_call_count = 0
self._mock_call_args_list = []
self.reset_mock()
self.configure_mock(**kwargs)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _mock_signature_property('called')
call_count = _mock_signature_property('call_count')
call_args = _mock_signature_property('call_args')
call_args_list = _mock_signature_property('call_args_list')
side_effect = _mock_signature_property('side_effect')
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.call_args_list = []
self.method_calls = []
for child in self._mock_children.values():
child.reset_mock()
ret = self._mock_return_value
if isinstance(ret, Mock) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""XXX needs docstring"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_signature is not None:
ret = self._mock_signature.return_value
if ret is DEFAULT:
ret = self._get_child_mock()
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_signature is not None:
self._mock_signature.return_value = value
else:
self._mock_return_value = value
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(self, *args, **kwargs):
self._mock_check_sig(*args, **kwargs)
return self._mock_call(*args, **kwargs)
def _mock_call(self, *args, **kwargs):
self.called = True
self.call_count += 1
self.call_args = callargs((args, kwargs))
self.call_args_list.append(callargs((args, kwargs)))
parent = self._mock_parent
name = self._mock_name
while parent is not None:
parent.method_calls.append(callargs((name, args, kwargs)))
if parent._mock_parent is None:
break
name = parent._mock_name + '.' + name
parent = parent._mock_parent
ret_val = DEFAULT
if self.side_effect is not None:
if (isinstance(self.side_effect, BaseException) or
isinstance(self.side_effect, ClassTypes) and
issubclass(self.side_effect, BaseException)):
raise self.side_effect
ret_val = self.side_effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if self._mock_wraps is not None and self._mock_return_value is DEFAULT:
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(parent=self, name=name, wraps=wraps)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.inherit, None,
result.parent, result.name, result.instance
)
self._mock_children[name] = result
return result
def __repr__(self):
if self._mock_name is None and self._spec_class is None:
return object.__repr__(self)
name_string = ''
spec_string = ''
if self._mock_name is not None:
def get_name(name):
if name is None:
return 'mock'
return name
parent = self._mock_parent
name = self._mock_name
while parent is not None:
name = get_name(parent._mock_name) + '.' + name
parent = parent._mock_parent
name_string = ' name=%r' % name
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (type(self).__name__,
name_string,
spec_string,
id(self))
def __dir__(self):
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
if FILTER_DIR:
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
        if 'method_calls' not in self.__dict__:
# allow all attribute setting until initialisation is complete
return object.__setattr__(self, name, value)
if (self._spec_set and self._mock_methods is not None and name not in
self._mock_methods and name not in self.__dict__ and
name not in _allowed_names):
raise AttributeError("Mock object has no attribute '%s'" % name)
if name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if isinstance(value, MagicProxy):
setattr(type(self), name, value)
return
if not isinstance(value, Mock):
setattr(type(self), name, _get_method(name, value))
original = value
real = lambda *args, **kw: original(self, *args, **kw)
value = mocksignature(value, real, skipfirst=True)
else:
setattr(type(self), name, value)
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__: |
def assert_called_with(self, *args, **kwargs):
"""
assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock.
"""
if self.call_args is None:
raise AssertionError('Expected: %s\nNot called' % ((args, kwargs),))
if not self.call_args == (args, kwargs):
raise AssertionError(
'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args)
)
def assert_called_once_with(self, *args, **kwargs):
"""
assert that the mock was called exactly once and with the specified
arguments.
"""
if not self.call_count == 1:
msg = ("Expected to be called once. Called %s times." %
self.call_count)
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
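    # Hedged usage sketch: the assert helpers compare against the most recent
    # call recorded in call_args.
    #     m = Mock(return_value=None)
    #     m(1, key='value')
    #     m.assert_called_with(1, key='value')       # passes
    #     m.assert_called_once_with(1, key='value')  # passes, call_count == 1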
def _get_child_mock(self, **kw):
klass = type(self).__mro__[1]
return klass(**kw)
class callargs(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a callargs tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
callargs('name', (), {}) == ('name',)
callargs('name', (1,), {}) == ('name', (1,))
callargs((), {'a': 'b'}) == ({'a': 'b'},)
"""
def __eq__(self, other):
if len(self) == 3:
if other[0] != self[0]:
return False
args_kwargs = self[1:]
other_args_kwargs = other[1:]
else:
args_kwargs = tuple(self)
other_args_kwargs = other
if len(other_args_kwargs) == 0:
other_args, other_kwargs = (), {}
elif len(other_args_kwargs) == 1:
if isinstance(other_args_kwargs[0], tuple):
other_args = other_args_kwargs[0]
other_kwargs = {}
else:
other_args = ()
other_kwargs = other_args_kwargs[0]
else:
other_args, other_kwargs = other_args_kwargs
return tuple(args_kwargs) == (other_args, other_kwargs)
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
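# Example of what _importer resolves (a sketch): it imports modules as deeply
# as needed, then walks the remaining components as attributes.
#     _importer('os.path')       # the os.path module
#     _importer('os.path.join')  # the join function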
class _patch(object):
def __init__(self, target, attribute, new, spec, create,
mocksignature, spec_set, autospec, kwargs):
self.target = target
self.attribute = attribute
self.new = new
self.spec = spec
self.create = create
self.has_local = False
self.mocksignature = mocksignature
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
def copy(self):
return _patch(self.target, self.attribute, self.new, self.spec,
self.create, self.mocksignature, self.spec_set,
self.autospec, self.kwargs)
def __call__(self, func):
if isinstance(func, ClassTypes):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if attr.startswith("test") and hasattr(attr_value, "__call__"):
setattr(klass, attr, self.copy()(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
            # don't use a with here (backwards compatibility with 2.5)
extra_args = []
for patching in patched.patchings:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
try:
return func(*args, **keywargs)
finally:
for patching in reversed(getattr(patched, 'patchings', [])):
patching.__exit__()
patched.patchings = [self]
if hasattr(func, 'func_code'):
# not in Python 3
patched.compat_co_firstlineno = getattr(
func, "compat_co_firstlineno",
func.func_code.co_firstlineno
)
return patched
def get_original(self):
target = self.target
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec = self.new, self.spec
spec_set, autospec = self.spec_set, self.autospec
kwargs = self.kwargs
original, local = self.get_original()
if new is DEFAULT and autospec is False:
# XXXX what if original is DEFAULT - shouldn't use it as a spec
inherit = False
            if spec_set is True:
spec_set = original
if isinstance(spec_set, ClassTypes):
inherit = True
            elif spec is True:
# set spec to the object we are replacing
spec = original
if isinstance(spec, ClassTypes):
inherit = True
new = MagicMock(spec=spec, spec_set=spec_set, **kwargs)
if inherit:
new.return_value = Mock(spec=spec, spec_set=spec_set)
elif autospec is not False:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool? mocksignature should also not be used. Should we
# check this?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
spec_set = bool(spec_set)
_kwargs = {'_name': getattr(original, '__name__', None)}
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set, inherit=True,
configure=kwargs, **_kwargs)
elif self.kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
if self.mocksignature:
new_attr = mocksignature(original, new)
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
return new
def __exit__(self, *_):
"""Undo the patch."""
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and not hasattr(self.target, self.attribute):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
start = __enter__
stop = __exit__
def _patch_object(target, attribute, new=DEFAULT, spec=None,
create=False, mocksignature=False, spec_set=None,
autospec=False, **kwargs):
"""
patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
    mocksignature=False, spec_set=None, autospec=False)
patch the named member (`attribute`) on an object (`target`) with a mock
object.
    Arguments new, spec, create, mocksignature, spec_set and autospec have
    the same meaning as for patch.
"""
return _patch(target, attribute, new, spec, create, mocksignature,
spec_set, autospec, kwargs)
def patch(target, new=DEFAULT, spec=None, create=False, mocksignature=False,
spec_set=None, autospec=False, **kwargs):
"""
``patch`` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the ``target``
(specified in the form `'PackageName.ModuleName.ClassName'`) is patched
with a ``new`` object. When the function/with statement exits the patch is
undone.
The ``target`` is imported and the specified attribute patched with the new
object, so it must be importable from the environment you are calling the
decorator from.
If ``new`` is omitted, then a new ``Mock`` is created and passed in as an
extra argument to the decorated function.
The ``spec`` and ``spec_set`` keyword arguments are passed to the ``Mock``
if patch is creating one for you.
In addition you can pass ``spec=True`` or ``spec_set=True``, which causes
patch to pass in the object being mocked as the spec/spec_set object.
If ``mocksignature`` is True then the patch will be done with a function
created by mocking the one being replaced. If the object being replaced is
a class then the signature of `__init__` will be copied. If the object
being replaced is a callable object then the signature of `__call__` will
be copied.
By default ``patch`` will fail to replace attributes that don't exist. If
you pass in 'create=True' and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
    attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a TestCase class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
    code when your test methods share a common set of patches.
Patch can be used with the with statement, if this is available in your
version of Python. Here the patching applies to the indented block after
the with statement. If you use "as" then the patched object will be bound
to the name after the "as"; very useful if `patch` is creating a mock
object for you.
`patch.dict(...)` and `patch.object(...)` are available for alternate
use-cases.
"""
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
target = _importer(target)
return _patch(target, attribute, new, spec, create, mocksignature,
spec_set, autospec, kwargs)
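# Minimal usage sketch for patch; the module and class names below
# (mymodule, Foo) are hypothetical.
#     @patch('mymodule.Foo')
#     def test_foo(MockFoo):
#         mymodule.Foo()  # the call goes to the MagicMock
#         MockFoo.assert_called_once_with()
#     with patch('mymodule.Foo') as MockFoo:
#         mymodule.Foo()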
class _patch_dict(object):
"""
Patch a dictionary and restore the dictionary to its original state after
the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of ``(key, value)`` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, basestring):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, ClassTypes):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if attr.startswith("test") and hasattr(attr_value, "__call__"):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
"""Unpatch the dict."""
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
patch.object = _patch_object
patch.dict = _patch_dict
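# Usage sketch for patch.dict (assumes os is imported where this runs):
#     with patch.dict('os.environ', {'NEW_KEY': 'value'}):
#         assert os.environ['NEW_KEY'] == 'value'
#     # on exit the original contents of os.environ are restored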
class _isolate(object):
def __init__(self, target, excludes=None):
self.target = target
self.excludes = []
if excludes is not None:
self.excludes = excludes
self.names_under_test = set(self.get_names_under_test())
def get_names_under_test(self):
module = sys.modules[self.target.__module__]
for name, value in module.__dict__.items():
if value is self.target or name in self.excludes:
yield name
def __enter__(self):
module_name = self.target.__module__
self.module = sys.modules[module_name]
old_module_dict = self.module.__dict__.copy()
module_keys = set(self.module.__dict__.keys())
dunders = set([k for k in module_keys
if k.startswith('__') and k.endswith('__')])
replaced_keys = (module_keys - dunders - self.names_under_test)
for key in replaced_keys:
self.module.__dict__[key] = Mock()
self.module.__dict__['__mock_isolated_dict__'] = old_module_dict
def __exit__(self, *_):
old_module_dict = self.module.__dict__['__mock_isolated_dict__']
self.module.__dict__.clear()
self.module.__dict__.update(old_module_dict)
def __call__(self, thing, *args, **kwargs):
if isinstance(thing, ClassTypes):
return self.decorate_class(thing)
else:
return self.decorate_callable(thing)
def decorate_callable(self, func):
@wraps(func)
def patched(*args, **keywargs):
            # don't use a with here (backwards compatibility with 2.5)
self.__enter__()
try:
return func(*args, **keywargs)
finally:
self.__exit__()
if hasattr(func, 'func_code'):
# not in Python 3
patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno",
func.func_code.co_firstlineno)
return patched
def decorate_class(self, klass, *args):
# wrapping setUp allows further shared customization of mocks
setup = getattr(klass, 'setUp', None)
teardown = getattr(klass, 'tearDown', None)
if not setup:
setattr(klass, 'setUp', self.start)
else:
def wrap_setup(*args):
self.start()
setup(*args)
setattr(klass, setup.__name__, wrap_setup)
if not teardown:
setattr(klass, 'tearDown', self.stop)
else:
def wrap_teardown(*args):
self.stop()
teardown(*args)
setattr(klass, teardown.__name__, wrap_teardown)
return klass
start = __enter__
stop = __exit__
def isolate(target, excludes=None):
"""
``isolate`` acts as a function decorator, class decorator or
    context manager. Within the function, TestCase methods or context, all
    objects within the target's module will be patched with a new ``Mock``
object. On exiting the function or context the patch is undone. For a
``TestCase`` setUp and tearDown are wrapped.
``isolate`` is useful to quickly mock out everything in a module except
the ``target``.
``excludes`` is either a string of form `'package.module.objectname'` or
a list of such strings. The named objects will not be patched.
If applied to a TestCase ``isolate`` will wrap the setUp and tearDown
methods. This allows configuration of the mocked module attributes
during setUp.
``isolate`` borrows heavily from DingusTestCase.
"""
target = _importer(target)
return _isolate(target, excludes)
def _isolate_object(*args, **kwargs):
return _isolate(*args, **kwargs)
isolate.object = _isolate_object
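# Usage sketch for isolate; the dotted target below is hypothetical.
#     @isolate('mypackage.mymodule.my_function')
#     def test_my_function():
#         ...  # every other top-level name in mymodule is now a Mock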
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
"divmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
)
numerics = "add sub mul div truediv floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
extra = ''
if inPy3k:
extra = 'bool next '
else:
extra = 'unicode long nonzero oct hex '
# __div__ and __rdiv__ are not available in Python 3
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
'format', 'get', 'set', 'delete', 'reversed',
'missing', 'reduce', 'reduce_ex', 'getinitargs',
'getnewargs', 'getstate', 'setstate', 'getformat',
'setformat', 'repr', 'dir'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = set(
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
'__instancecheck__', '__subclasscheck__',
'__del__'
])
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
'__unicode__': lambda self: unicode(object.__str__(self)),
}
_return_values = {
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__iter__': iter([]),
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__nonzero__': True,
'__oct__': '1',
'__hex__': '0x1',
'__long__': long(1),
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is other
return __eq__
def _get_ne(self):
def __ne__(other):
if self.__ne__._mock_return_value is not DEFAULT:
return DEFAULT
return self is not other
return __ne__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
}
def _set_return_value(mock, method, name):
return_value = DEFAULT
if name in _return_values:
return_value = _return_values[name]
elif name in _calculate_return_value:
try:
return_value = _calculate_return_value[name](mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
elif name in _side_effect_methods:
side_effect = _side_effect_methods[name](mock)
method.side_effect = side_effect
if return_value is not DEFAULT:
method.return_value = return_value
class MagicMock(Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the ``spec`` or ``spec_set`` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def __init__(self, *args, **kw):
_super(MagicMock, self).__init__(*args, **kw)
these_magics = _magics
if self._mock_methods is not None:
these_magics = _magics.intersection(self._mock_methods)
for entry in these_magics:
setattr(self, entry, _create_proxy(entry, self))
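# Sketch of the defaults MagicMock preconfigures (values from _return_values):
#     m = MagicMock()
#     len(m)   # 0
#     int(m)   # 1
#     m[1]     # a child MagicMock, because __getitem__ is supported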
def _create_proxy(entry, self):
# could specify parent?
def create_mock():
m = MagicMock(name=entry)
setattr(self, entry, m)
_set_return_value(self, m, entry)
return m
return MagicProxy(create_mock)
class MagicProxy(object):
def __init__(self, create_mock):
self.create_mock = create_mock
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __repr__(self):
return '<ANY>'
ANY = _ANY()
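# ANY usage sketch: useful when only some arguments matter in an assertion.
#     m = Mock(return_value=None)
#     m('sensible', object())
#     m.assert_called_with('sensible', ANY)  # passes, ANY compares equal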
class _Call(object):
"Call helper object"
def __init__(self, name=None):
self.name = name
def __call__(self, *args, **kwargs):
if self.name is None:
return (args, kwargs)
return (self.name, args, kwargs)
def __getattr__(self, attr):
if self.name is None:
return _Call(attr)
name = '%s.%s' % (self.name, attr)
return _Call(name)
def __repr__(self):
if self.name is None:
return '<call>'
return '<call %s>' % self.name
call = _Call()
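# call builds tuples comparable to call_args and method_calls entries:
#     call(1, 2) == ((1, 2), {})                    # True
#     call.method(x=1) == ('method', (), {'x': 1})  # True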
def create_autospec(spec, spec_set=False, inherit=False, configure=None,
_parent=None, _name=None, _instance=False):
"""XXXX needs docstring!"""
if configure is None:
configure = {}
if type(spec) == list:
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = list
is_type = isinstance(spec, ClassTypes)
kwargs = {'spec': spec}
if spec_set:
kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
kwargs = {}
kwargs.update(configure)
mock = MagicMock(parent=_parent, name=_name, **kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec, False)
else:
_check_signature(spec, mock, is_type)
if _parent is not None:
_parent._mock_children[_name] = mock
if is_type and inherit and not _instance:
# XXXX could give a name to the return_value mock?
mock.return_value = create_autospec(spec, spec_set, inherit,
_instance=True)
for entry in dir(spec):
if _is_magic(entry):
continue
if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
# allow a mock to actually be a function from mocksignature
continue
# XXXX do we need a better way of getting attributes
# without triggering code execution (?) Probably not - we need the
# actual object to mock it so we would rather trigger a property than
# mock the property descriptor. Likewise we want to mock out
# dynamically provided attributes.
original = getattr(spec, entry)
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
            new = _SpecState(original, spec_set, inherit, mock, entry,
                             instance=_instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
new = MagicMock(parent=parent, name=entry, **kwargs)
mock._mock_children[entry] = new
skipfirst = _must_skip(spec, entry, is_type)
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with mocksignature become instance methods,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
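# Usage sketch for create_autospec: the returned mock enforces the call
# signature of the specced object.
#     def add(a, b):
#         return a + b
#     mock_add = create_autospec(add)
#     mock_add(1, 2)  # fine
#     mock_add(1)     # raises TypeError because the signature is checked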
def _must_skip(spec, entry, skipfirst):
if not isinstance(spec, ClassTypes):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
# can't use type because of old style classes
spec = spec.__class__
if not hasattr(spec, '__mro__'):
# old style class: can't have descriptors anyway
return skipfirst
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
return skipfirst
# shouldn't get here unless attribute dynamically provided
return skipfirst
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, inherit=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.inherit = inherit
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
# unbound method
type(_ANY.__eq__),
)
FunctionAttributes = set([
'func_closure',
'func_code',
'func_defaults',
'func_dict',
'func_globals',
'func_name',
]) | delattr(type(self), name)
return object.__delattr__(self, name)
|
arbitrum_block_translator.go | package offchainreporting
import (
"context"
"fmt"
"math/big"
"sort"
"sync"
"time"
"github.com/pkg/errors"
"github.com/smartcontractkit/chainlink/core/logger"
"github.com/smartcontractkit/chainlink/core/services/eth"
"github.com/smartcontractkit/chainlink/core/store/models"
"github.com/smartcontractkit/chainlink/core/utils"
)
// ArbitrumBlockTranslator uses Arbitrum's special L1BlockNumber to optimise log lookups.
// Performance matters here, hence the aggressive use of the cache:
// we want to minimise fetches because calling eth_getBlockByNumber is
// relatively expensive.
type ArbitrumBlockTranslator struct {
ethClient eth.Client
// l2->l1 cache
cache map[int64]int64
cacheMu sync.RWMutex
l2Locks utils.KeyedMutex
}
// NewArbitrumBlockTranslator returns a concrete ArbitrumBlockTranslator
func NewArbitrumBlockTranslator(ethClient eth.Client) *ArbitrumBlockTranslator {
return &ArbitrumBlockTranslator{
ethClient,
make(map[int64]int64),
sync.RWMutex{},
utils.KeyedMutex{},
}
}
// NumberToQueryRange implements BlockTranslator interface
func (a *ArbitrumBlockTranslator) NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int) {
var err error
fromBlock, toBlock, err = a.BinarySearch(ctx, int64(changedInL1Block))
if err != nil {
logger.Warnw("ArbitrumBlockTranslator: failed to binary search L2->L1, falling back to slow scan over entire chain", "err", err)
return big.NewInt(0), nil
}
return
}
// BinarySearch uses both cache and RPC calls to find the smallest possible range of L2 block numbers that encompasses the given L1 block number
//
// Imagine as a virtual array of L1 block numbers indexed by L2 block numbers
// L1 values are likely duplicated so it looks something like
// [42, 42, 42, 42, 42, 155, 155, 155, 430, 430, 430, 430, 430, ...]
// The difference between consecutive L1 values is typically about 5; the "worst case" is 6545 but it can be arbitrarily high if the sequencer is broken
// The returned range of L2s, from leftmost through rightmost, represents all possible L2s that correspond to the L1 value we are looking for
// nil can be returned as a rightmost value if the range has no upper bound
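// Illustrative sketch (block numbers are made up): if L2 blocks 100..104 all
// reported L1 block 42, BinarySearch(ctx, 42) returns (100, 104); the upper
// bound is nil when 42 is still the latest L1 block, since more matching L2
// blocks may yet be produced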
func (a *ArbitrumBlockTranslator) BinarySearch(ctx context.Context, targetL1 int64) (l2lowerBound *big.Int, l2upperBound *big.Int, err error) {
mark := time.Now()
var n int
defer func() {
duration := time.Since(mark)
logger.Debugw(fmt.Sprintf("ArbitrumBlockTranslator#binarySearch completed in %s with %d total lookups", duration, n), "finishedIn", duration, "err", err, "nLookups", n)
}()
var h *models.Head
// l2lower..l2upper is the inclusive range of L2 block numbers in which
// transactions that called block.number will return the given L1 block
// number
var l2lower int64
var l2upper int64
var skipUpperBound bool
{
var maybeL2Upper *int64
l2lower, maybeL2Upper = a.reverseLookup(targetL1)
if maybeL2Upper != nil {
l2upper = *maybeL2Upper
} else {
// Initial query to get highest L1 and L2 numbers
h, err = a.ethClient.HeadByNumber(ctx, nil)
n++
if err != nil {
return nil, nil, err
}
if h == nil {
return nil, nil, errors.New("got nil head")
}
if !h.L1BlockNumber.Valid {
return nil, nil, errors.New("head was missing L1 block number")
}
currentL1 := h.L1BlockNumber.Int64
currentL2 := h.Number
a.cachePut(currentL2, currentL1)
// NOTE: This case shouldn't ever happen but we ought to handle it in the least broken way possible
if targetL1 > currentL1 {
// real upper must always be nil, we can skip the upper limit part of the binary search
logger.Debugf("ArbitrumBlockTranslator#BinarySearch target of %d is above current L1 block number of %d, using nil for upper bound", targetL1, currentL1)
return big.NewInt(currentL2), nil, nil
} else if targetL1 == currentL1 {
// NOTE: If the latest seen L2 block corresponds to the target L1
// block, we have to leave the top end of the range open because future
// L2 blocks can be produced that would also match
skipUpperBound = true
}
l2upper = currentL2
}
}
logger.Tracef("ArbitrumBlockTranslator#BinarySearch starting search for L2 range wrapping L1 block number %d between bounds [%d, %d]", targetL1, l2lower, l2upper)
var exactMatch bool
// LEFT EDGE
// First, use binary search to find the smallest L2 block number for which L1 >= changedInBlock
// This L2 block number represents the lower bound on a range of L2s corresponding to this L1
{
l2lower, err = search(l2lower, l2upper+1, func(l2 int64) (bool, error) {
l1, miss, err2 := a.arbL2ToL1(ctx, l2)
if miss {
n++
}
if err2 != nil {
return false, err2
}
if targetL1 == l1 |
return l1 >= targetL1, nil
})
if err != nil {
return nil, nil, err
}
}
// RIGHT EDGE
// Second, use binary search again to find the smallest L2 block number for which L1 > changedInBlock
// Now we can subtract one to get the largest L2 that corresponds to this L1
// This can be skipped if we know we are already at the top of the range, and the upper limit will be returned as nil
if !skipUpperBound {
var r int64
r, err = search(l2lower, l2upper+1, func(l2 int64) (bool, error) {
l1, miss, err2 := a.arbL2ToL1(ctx, l2)
if miss {
n++
}
if err2 != nil {
return false, err2
}
if targetL1 == l1 {
exactMatch = true
}
return l1 > targetL1, nil
})
if err != nil {
return nil, nil, err
}
l2upper = r - 1
l2upperBound = big.NewInt(l2upper)
}
// NOTE: We expect either left or right search to make an exact match, if they don't something has gone badly wrong
if !exactMatch {
return nil, nil, errors.Errorf("target L1 block number %d is not represented by any L2 block", targetL1)
}
return big.NewInt(l2lower), l2upperBound, nil
}
// reverseLookup takes an l1 and returns lower and upper bounds for an L2 based on cache data
func (a *ArbitrumBlockTranslator) reverseLookup(targetL1 int64) (from int64, to *int64) {
type val struct {
l1 int64
l2 int64
}
vals := make([]val, 0)
a.cacheMu.RLock()
defer a.cacheMu.RUnlock()
for l2, l1 := range a.cache {
vals = append(vals, val{l1, l2})
}
sort.Slice(vals, func(i, j int) bool { return vals[i].l1 < vals[j].l1 })
for _, val := range vals {
if val.l1 < targetL1 {
from = val.l2
} else if val.l1 > targetL1 && to == nil {
			// workaround for the Go loop-variable footgun: val is reused on each iteration, so we can't take a pointer to it
l2 := val.l2
to = &l2
}
}
return
}
func (a *ArbitrumBlockTranslator) arbL2ToL1(ctx context.Context, l2 int64) (l1 int64, cacheMiss bool, err error) {
// This locking block synchronises access specifically around one l2 number so we never fetch the same data concurrently
// One thread will wait while the other fetches
unlock := a.l2Locks.LockInt64(l2)
defer unlock()
var exists bool
if l1, exists = a.cacheGet(l2); exists {
return l1, false, err
}
h, err := a.ethClient.HeadByNumber(ctx, big.NewInt(l2))
if err != nil {
return 0, true, err
}
if h == nil {
return 0, true, errors.New("got nil head")
}
if !h.L1BlockNumber.Valid {
return 0, true, errors.New("head was missing L1 block number")
}
l1 = h.L1BlockNumber.Int64
a.cachePut(l2, l1)
return l1, true, nil
}
func (a *ArbitrumBlockTranslator) cacheGet(l2 int64) (l1 int64, exists bool) {
a.cacheMu.RLock()
defer a.cacheMu.RUnlock()
l1, exists = a.cache[l2]
return
}
func (a *ArbitrumBlockTranslator) cachePut(l2, l1 int64) {
a.cacheMu.Lock()
defer a.cacheMu.Unlock()
a.cache[l2] = l1
}
// stolen from golang standard library and modified for 64-bit ints,
// customisable range and erroring function
// see: https://golang.org/src/sort/search.go
func search(i, j int64, f func(int64) (bool, error)) (int64, error) {
// Define f(-1) == false and f(n) == true.
// Invariant: f(i-1) == false, f(j) == true.
for i < j {
h := int64(uint64(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
is, err := f(h)
if err != nil {
return 0, err
}
if !is {
i = h + 1 // preserves f(i-1) == false
} else {
j = h // preserves f(j) == true
}
}
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
return i, nil
}
| {
exactMatch = true
} |
permissions_test.go | /*
Copyright 2021 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"testing"
"time"
"github.com/gravitational/trace"
"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/require"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/tlsca"
)
func TestContextLockTargets(t *testing.T) {
t.Parallel()
authContext := &Context{
Identity: BuiltinRole{
Role: types.RoleNode,
ClusterName: "cluster",
Identity: tlsca.Identity{
Username: "node.cluster",
Groups: []string{"role1", "role2"},
},
},
UnmappedIdentity: WrapIdentity(tlsca.Identity{
Username: "node.cluster",
Groups: []string{"mapped-role"},
}),
}
expected := []types.LockTarget{
{Node: "node"},
{Node: "node.cluster"},
{User: "node.cluster"},
{Role: "role1"},
{Role: "role2"},
{Role: "mapped-role"},
}
require.ElementsMatch(t, authContext.LockTargets(), expected)
}
func TestAuthorizeWithLocksForLocalUser(t *testing.T) {
t.Parallel()
ctx := context.Background()
srv, err := NewTestAuthServer(TestAuthServerConfig{
Dir: t.TempDir(),
Clock: clockwork.NewFakeClock(),
})
require.NoError(t, err)
user, _, err := CreateUserAndRole(srv.AuthServer, "test-user", []string{})
require.NoError(t, err)
localUser := LocalUser{
Username: user.GetName(),
Identity: tlsca.Identity{
Username: user.GetName(),
Groups: []string{"test-role-1"},
MFAVerified: "mfa-device-id",
},
}
// Apply an MFA lock.
mfaLock, err := types.NewLock("mfa-lock", types.LockSpecV2{
Target: types.LockTarget{MFADevice: localUser.Identity.MFAVerified},
})
require.NoError(t, err)
require.NoError(t, srv.AuthServer.UpsertLock(ctx, mfaLock))
upsertLockWithPutEvent(ctx, t, srv, mfaLock)
_, err = srv.Authorizer.Authorize(context.WithValue(ctx, ContextUser, localUser))
require.Error(t, err)
require.True(t, trace.IsAccessDenied(err))
// Remove the MFA record from the user value being authorized.
localUser.Identity.MFAVerified = ""
_, err = srv.Authorizer.Authorize(context.WithValue(ctx, ContextUser, localUser))
require.NoError(t, err)
// Create a lock targeting the role written in the user's identity.
roleLock, err := types.NewLock("role-lock", types.LockSpecV2{
Target: types.LockTarget{Role: localUser.Identity.Groups[0]},
})
require.NoError(t, err)
require.NoError(t, srv.AuthServer.UpsertLock(ctx, roleLock))
upsertLockWithPutEvent(ctx, t, srv, roleLock)
_, err = srv.Authorizer.Authorize(context.WithValue(ctx, ContextUser, localUser))
require.Error(t, err)
require.True(t, trace.IsAccessDenied(err))
}
func TestAuthorizeWithLocksForBuiltinRole(t *testing.T) |
func upsertLockWithPutEvent(ctx context.Context, t *testing.T, srv *TestAuthServer, lock types.Lock) {
lockWatch, err := srv.LockWatcher.Subscribe(ctx)
require.NoError(t, err)
defer lockWatch.Close()
require.NoError(t, srv.AuthServer.UpsertLock(ctx, lock))
select {
case event := <-lockWatch.Events():
require.Equal(t, types.OpPut, event.Type)
require.Empty(t, resourceDiff(lock, event.Resource))
case <-lockWatch.Done():
t.Fatalf("Watcher exited with error: %v.", lockWatch.Error())
case <-time.After(2 * time.Second):
t.Fatal("Timeout waiting for lock put.")
}
}
| {
t.Parallel()
ctx := context.Background()
srv, err := NewTestAuthServer(TestAuthServerConfig{
Dir: t.TempDir(),
Clock: clockwork.NewFakeClock(),
})
require.NoError(t, err)
builtinRole := BuiltinRole{
Username: "node",
Role: types.RoleNode,
Identity: tlsca.Identity{
Username: "node",
},
}
// Apply a node lock.
nodeLock, err := types.NewLock("node-lock", types.LockSpecV2{
Target: types.LockTarget{Node: builtinRole.Identity.Username},
})
require.NoError(t, err)
upsertLockWithPutEvent(ctx, t, srv, nodeLock)
_, err = srv.Authorizer.Authorize(context.WithValue(ctx, ContextUser, builtinRole))
require.Error(t, err)
require.True(t, trace.IsAccessDenied(err))
builtinRole.Identity.Username = ""
_, err = srv.Authorizer.Authorize(context.WithValue(ctx, ContextUser, builtinRole))
require.NoError(t, err)
} |
error.handle.js | const errorTypes = require("../constants/errorTypes");
const errorHandler = (error, ctx) => {
let status, message;
  // Match error.message against the errorType constants
switch (error.message) {
case errorTypes.USERNAME_OR_PASSWORD_IS_REQUIRED:
      status = 400; // 400: the request parameters are invalid
      message = "Bad request, please check the request parameters";
break;
case errorTypes.USERNAME_IS_EXISTS:
      status = 409; // 409 Conflict: the username already exists
      message = "The username already exists";
break;
case errorTypes.USERNAME_DOSE_NOT_EXISTS:
      status = 400; // 400: the username does not exist
      message = "The username does not exist";
break;
    case errorTypes.PASSWORD_IS_INCORRECT: | status = 400; // 400: the password is incorrect
      message = "The password is incorrect";
break;
case errorTypes.UNAUTHORIZATION:
      status = 401; // 401: unauthorized
      message = "Invalid token, please log in first";
break;
case errorTypes.UNPERMISSION:
      status = 401; // 401: not authorized
      message = "You do not have permission to perform this operation";
break;
default:
status = 404;
message = "NOT FOUND";
break;
}
ctx.status = status;
ctx.body = message;
};
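// Hedged usage sketch (assumes a Koa-style app whose "error" event carries
// (error, ctx); the wiring below is illustrative):
//   const errorHandler = require("./error.handle");
//   app.on("error", errorHandler);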
module.exports = errorHandler; | |
mod.rs | use itertools::Itertools;
use lsp_types::{
DidChangeTextDocumentParams, DidCloseTextDocumentParams, DidOpenTextDocumentParams,
DocumentFilter, DocumentHighlight, DocumentHighlightParams, FoldingRange, FoldingRangeParams,
GotoDefinitionParams, GotoDefinitionResponse, InitializeParams, InitializeResult,
InitializedParams, Location, MessageType, ReferenceParams, Registration, SemanticTokens,
SemanticTokensFullOptions, SemanticTokensOptions, SemanticTokensParams,
SemanticTokensRegistrationOptions, SemanticTokensResult, SemanticTokensServerCapabilities,
ServerCapabilities, ServerInfo, StaticRegistrationOptions,
TextDocumentChangeRegistrationOptions, TextDocumentContentChangeEvent,
TextDocumentRegistrationOptions, Url, WorkDoneProgressOptions,
};
use lspower::{jsonrpc, Client, LanguageServer};
use tokio::sync::Mutex;
use crate::{
compiler::{ast_to_hir::AstToHir, hir::CollectErrors},
database::PROJECT_DIRECTORY,
input::{Input, InputDb},
language_server::hints::HintsDb,
Database,
};
use self::{
definition::find_definition,
folding_range::FoldingRangeDb,
hints::HintsNotification,
references::{find_document_highlights, find_references},
semantic_tokens::SemanticTokenDb,
utils::{line_start_utf8_byte_offsets_raw, offset_from_lsp_raw},
};
pub mod definition;
pub mod folding_range;
pub mod hints;
pub mod references;
pub mod semantic_tokens;
pub mod utils;
pub struct CandyLanguageServer {
pub client: Client,
pub db: Mutex<Database>,
}
impl CandyLanguageServer {
pub fn from_client(client: Client) -> Self {
Self {
client,
db: Mutex::new(Database::default()),
}
}
}
#[lspower::async_trait]
impl LanguageServer for CandyLanguageServer {
async fn initialize(&self, params: InitializeParams) -> jsonrpc::Result<InitializeResult> {
log::info!("LSP: initialize");
self.client
.log_message(MessageType::INFO, "Initializing!")
.await;
let first_workspace_folder = params
.workspace_folders
.unwrap()
.first()
.unwrap()
.uri
.clone();
*PROJECT_DIRECTORY.lock().unwrap() = match first_workspace_folder.scheme() {
"file" => Some(first_workspace_folder.to_file_path().unwrap()),
_ => panic!("Workspace folder must be a file URI."),
};
Ok(InitializeResult {
// We only support dynamic registration for now.
capabilities: ServerCapabilities::default(),
server_info: Some(ServerInfo {
name: "🍭 Candy Language Server".to_owned(),
version: None,
}),
})
}
async fn initialized(&self, _: InitializedParams) {
log::info!("LSP: initialized");
let candy_files = vec![
DocumentFilter {
language: Some("candy".to_owned()),
scheme: Some("file".to_owned()),
pattern: None,
},
DocumentFilter {
language: Some("candy".to_owned()),
scheme: Some("untitled".to_owned()),
pattern: None,
},
];
let text_document_registration_options = TextDocumentRegistrationOptions {
document_selector: Some(candy_files.clone()),
};
self.client
.register_capability(vec![
Registration {
id: "0".to_owned(),
method: "textDocument/didOpen".to_owned(),
register_options: Some(
serde_json::to_value(text_document_registration_options.clone()).unwrap(),
),
},
Registration {
id: "1".to_owned(),
method: "textDocument/didOpen".to_owned(),
register_options: Some(
serde_json::to_value(text_document_registration_options.clone()).unwrap(),
),
},
Registration {
id: "2".to_owned(),
method: "textDocument/didChange".to_owned(),
register_options: Some(
serde_json::to_value(TextDocumentChangeRegistrationOptions {
document_selector: Some(candy_files),
sync_kind: 2, // incremental
})
.unwrap(),
),
},
Registration {
id: "3".to_owned(),
method: "textDocument/definition".to_owned(),
register_options: Some(
serde_json::to_value(text_document_registration_options.clone()).unwrap(),
),
},
Registration {
id: "4".to_owned(),
method: "textDocument/references".to_owned(),
register_options: Some(
serde_json::to_value(text_document_registration_options.clone()).unwrap(),
),
},
Registration {
id: "5".to_owned(),
method: "textDocument/documentHighlight".to_owned(),
register_options: Some(
serde_json::to_value(text_document_registration_options.clone()).unwrap(),
),
},
Registration {
id: "6".to_owned(),
method: "textDocument/foldingRange".to_owned(),
register_options: Some(
serde_json::to_value(text_document_registration_options.clone()).unwrap(),
),
},
Registration {
id: "7".to_owned(),
method: "textDocument/semanticTokens".to_owned(),
register_options: Some(
serde_json::to_value(
SemanticTokensServerCapabilities::SemanticTokensRegistrationOptions(
SemanticTokensRegistrationOptions {
text_document_registration_options,
semantic_tokens_options: SemanticTokensOptions {
work_done_progress_options: WorkDoneProgressOptions {
work_done_progress: None,
},
legend: semantic_tokens::LEGEND.clone(),
range: Some(false),
full: Some(SemanticTokensFullOptions::Bool(true)),
},
static_registration_options: StaticRegistrationOptions {
id: None,
},
},
),
)
.unwrap(),
),
},
])
.await
.expect("Dynamic capability registration failed.");
self.client
.log_message(MessageType::INFO, "server initialized!")
.await;
}
async fn shutdown(&self) -> jsonrpc::Result<()> {
Ok(())
}
async fn did_open(&self, params: DidOpenTextDocumentParams) {
let input = params.text_document.uri.into();
{
let mut db = self.db.lock().await;
db.did_open_input(&input, params.text_document.text);
}
self.analyze_files(vec![input]).await;
}
async fn did_change(&self, params: DidChangeTextDocumentParams) {
let input: Input = params.text_document.uri.into();
let mut open_inputs = Vec::<Input>::new();
{
let mut db = self.db.lock().await;
let text = apply_text_changes(&db, input.clone(), params.content_changes);
db.did_change_input(&input, text);
open_inputs.extend(db.open_inputs.keys().cloned());
}
self.analyze_files(open_inputs).await;
}
async fn did_close(&self, params: DidCloseTextDocumentParams) {
let input = params.text_document.uri.into();
let mut db = self.db.lock().await;
db.did_close_input(&input);
}
async fn goto_definition(
&self,
params: GotoDefinitionParams,
) -> jsonrpc::Result<Option<GotoDefinitionResponse>> {
let db = self.db.lock().await;
Ok(find_definition(&db, params))
}
async fn references(&self, params: ReferenceParams) -> jsonrpc::Result<Option<Vec<Location>>> {
let db = self.db.lock().await;
Ok(find_references(&db, params))
}
async fn document_highlight(
&self,
params: DocumentHighlightParams,
) -> jsonrpc::Result<Option<Vec<DocumentHighlight>>> {
let db = self.db.lock().await;
Ok(find_document_highlights(&db, params))
}
async fn folding_range(
&self,
params: FoldingRangeParams,
) -> jsonrpc::Result<Option<Vec<FoldingRange>>> {
let db = self.db.lock().await;
let ranges = db.folding_ranges(params.text_document.uri.into());
Ok(Some(ranges))
}
async fn semantic_tokens_full(
&self,
params: SemanticTokensParams,
) -> jsonrpc::Result<Option<SemanticTokensResult>> {
| impl CandyLanguageServer {
async fn analyze_files(&self, inputs: Vec<Input>) {
log::debug!("Analyzing file(s) {}", inputs.iter().join(", "));
let db = self.db.lock().await;
log::debug!("Locked.");
for input in inputs {
let (hir, _mapping) = db.hir(input.clone()).unwrap();
let diagnostics = {
let mut errors = vec![];
hir.collect_errors(&mut errors);
errors
.into_iter()
.map(|it| it.to_diagnostic(&db, input.clone()))
.collect()
};
self.client
.publish_diagnostics(input.clone().into(), diagnostics, None)
.await;
let hints = db.hints(input.clone());
self.client
.send_custom_notification::<HintsNotification>(HintsNotification {
uri: Url::from(input).to_string(),
hints,
})
.await;
}
}
}
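// Sketch of what `apply_text_changes` below does for an incremental edit:
// given the text "hello world" and a change whose range covers "world" with
// new text "candy", the result is "hello candy"; a change without a range
// replaces the whole document.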
fn apply_text_changes(
db: &Database,
input: Input,
changes: Vec<TextDocumentContentChangeEvent>,
) -> String {
let mut text = db.get_input(input.clone()).unwrap().as_ref().to_owned();
for change in changes {
match change.range {
Some(range) => {
let line_start_offsets = line_start_utf8_byte_offsets_raw(&text);
let start = offset_from_lsp_raw(&text, &line_start_offsets[..], range.start);
let end = offset_from_lsp_raw(&text, &line_start_offsets[..], range.end);
text = format!("{}{}{}", &text[..start], &change.text, &text[end..]);
}
None => text = change.text,
}
}
text
}
| let db = self.db.lock().await;
let tokens = db.semantic_tokens(params.text_document.uri.into());
Ok(Some(SemanticTokensResult::Tokens(SemanticTokens {
result_id: None,
data: tokens,
})))
}
}
|
utils.py | class QueryAsyncIterator:
def __init__(self, query, callback=None):
self.query = query
self.sequence = None
self._sequence_iterator = None
self._callback = callback
def __aiter__(self):
|
async def fetch_sequence(self) -> None:
self.sequence = await self.query
self._sequence_iterator = self.sequence.__iter__()
if self._callback:
await self._callback(self)
async def __anext__(self):
if self.sequence is None:
await self.fetch_sequence()
try:
return next(self._sequence_iterator)
except StopIteration:
raise StopAsyncIteration
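# Usage sketch (assumes `query` is awaitable and resolves to an iterable):
#     async for row in QueryAsyncIterator(query):
#         ...  # the query is awaited lazily, on the first __anext__ call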
def get_schema_sql(client) -> str:
generator = client.schema_generator(client)
creation_string = generator.get_create_schema_sql()
return creation_string
async def generate_schema(client) -> None:
generator = client.schema_generator(client)
await generator.generate_from_string(get_schema_sql(client))
| return self |
factory.go | package algo
import (
"log"
"sync"
http_errors "github.com/abdybaevae/url-shortener/pkg/errors/http"
repo "github.com/abdybaevae/url-shortener/pkg/repos/algo"
num_srv "github.com/abdybaevae/url-shortener/pkg/services/number"
)
type factory struct {
mu *sync.Mutex
repo repo.AlgoRepo
store map[string]AlgoService
numService num_srv.NumberService
}
func NewFactory(algoRepo repo.AlgoRepo, numService num_srv.NumberService) AlgoFactory {
if algoRepo == nil || numService == nil {
log.Fatalln("Cannot init algo factory")
}
return &factory{
repo: algoRepo,
store: make(map[string]AlgoService),
numService: numService,
mu: &sync.Mutex{},
}
}
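// Illustrative note (the strategy name below is hypothetical): repeated calls
// for the same strategy are served from the in-memory store instead of the
// repository.
//	svc, err := f.Get("base62")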
func (f *factory) Get(strategy string) (AlgoService, error) {
f.mu.Lock()
defer f.mu.Unlock()
if val, ok := f.store[strategy]; ok {
return val, nil
}
entity, err := f.repo.Get(strategy)
if err != nil {
return nil, err
}
algoService, err := newService(f.repo, f.numService, entity)
if err != nil |
f.store[strategy] = algoService
return algoService, nil
}
| {
return nil, http_errors.ServerInternal
} |
make_pkg.py | """
Prepare transcriptiondata from the transcription sources.
"""
from uritemplate import URITemplate
from clldutils.clilib import ParserError
from csvw.dsv import UnicodeWriter
from pyclts.commands.make_dataset import process_transcription_data
try:
from lingpy.sequence.sound_classes import token2class
from lingpy.data import Model
LINGPY = True
except ImportError:
LINGPY = False
token2class = None
Model = None
from pyclts.soundclasses import SOUNDCLASS_SYSTEMS
def run(args):
| if not LINGPY:
raise ParserError('lingpy must be installed to run this command!')
def writer(*comps):
return UnicodeWriter(args.repos.path('pkg', *comps), delimiter='\t')
columns = ['LATEX', 'FEATURES', 'SOUND', 'IMAGE', 'COUNT', 'NOTE']
bipa = args.repos.bipa
for src, rows in args.repos.iter_sources(type='td'):
args.log.info('TranscriptionData {0} ...'.format(src['NAME']))
uritemplate = URITemplate(src['URITEMPLATE']) if src['URITEMPLATE'] else None
out = process_transcription_data(
rows, columns, src, uritemplate, bipa, args)
found = len([o for o in out if o[0] != '<NA>'])
args.log.info('... {0} of {1} graphemes found ({2:.0f}%)'.format(
found, len(out), found / len(out) * 100))
with writer('transcriptiondata', '{0}.tsv'.format(src['NAME'])) as w:
w.writerows(out)
count = 0
with writer('soundclasses', 'lingpy.tsv') as w:
w.writerow(['CLTS_NAME', 'BIPA_GRAPHEME'] + SOUNDCLASS_SYSTEMS)
for grapheme, sound in sorted(bipa.sounds.items()):
if not sound.alias:
w.writerow(
[sound.name, grapheme]
+ [token2class(grapheme, Model(cls)) for cls in SOUNDCLASS_SYSTEMS])
count += 1
args.log.info('SoundClasses: {0} written to file.'.format(count)) |