# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
<import_stmt>grpc<import_stmt>fibcapi_pb2<as>fibcapi__pb2<import_stmt>fibcapis_pb2<as>fibcapis__pb2<class_stmt>FIBCApApiStub(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>__init__ self channel<block_start>"""Constructor.
Args:
channel: A grpc.Channel.
"""<line_sep>self.Monitor=channel.unary_stream('/fibcapi.FIBCApApi/Monitor' request_serializer=fibcapis__pb2.ApMonitorRequest.SerializeToString response_deserializer=fibcapis__pb2.ApMonitorReply.FromString )<line_sep>self.GetPortStats=channel.unary_stream('/fibcapi.FIBCApApi/GetPortStats' request_serializer=fibcapis__pb2.ApGetPortStatsRequest.SerializeToString response_deserializer=fibcapi__pb2.FFPortStats.FromString )<line_sep>self.ModPortStats=channel.unary_unary('/fibcapi.FIBCApApi/ModPortStats' request_serializer=fibcapis__pb2.ApModPortStatsRequest.SerializeToString response_deserializer=fibcapis__pb2.ApModPortStatsReply.FromString )<line_sep>self.GetPortEntries=channel.unary_stream('/fibcapi.FIBCApApi/GetPortEntries' request_serializer=fibcapis__pb2.ApGetPortEntriesRequest.SerializeToString response_deserializer=fibcapis__pb2.DbPortEntry.FromString )<line_sep>self.GetIDEntries=channel.unary_stream('/fibcapi.FIBCApApi/GetIDEntries' request_serializer=fibcapis__pb2.ApGetIdEntriesRequest.SerializeToString response_deserializer=fibcapis__pb2.DbIdEntry.FromString )<line_sep>self.GetDpEntries=channel.unary_stream('/fibcapi.FIBCApApi/GetDpEntries' request_serializer=fibcapis__pb2.ApGetDpEntriesRequest.SerializeToString response_deserializer=fibcapis__pb2.DbDpEntry.FromString )<line_sep>self.AddPortEntry=channel.unary_unary('/fibcapi.FIBCApApi/AddPortEntry' request_serializer=fibcapis__pb2.DbPortEntry.SerializeToString response_deserializer=fibcapis__pb2.ApAddPortEntryReply.FromString )<line_sep>self.AddIDEntry=channel.unary_unary('/fibcapi.FIBCApApi/AddIDEntry' request_serializer=fibcapis__pb2.DbIdEntry.SerializeToString response_deserializer=fibcapis__pb2.ApAddIdEntryReply.FromString )<line_sep>self.DelPortEntry=channel.unary_unary('/fibcapi.FIBCApApi/DelPortEntry' request_serializer=fibcapis__pb2.DbPortKey.SerializeToString response_deserializer=fibcapis__pb2.ApDelPortEntryReply.FromString )<line_sep>self.DelIDEntry=channel.unary_unary('/fibcapi.FIBCApApi/DelIDEntry' request_serializer=fibcapis__pb2.DbIdEntry.SerializeToString response_deserializer=fibcapis__pb2.ApDelIdEntryReply.FromString )<line_sep>self.GetStats=channel.unary_stream('/fibcapi.FIBCApApi/GetStats' request_serializer=fibcapis__pb2.ApGetStatsRequest.SerializeToString response_deserializer=fibcapis__pb2.StatsEntry.FromString )<line_sep>self.RunOAM=channel.unary_unary('/fibcapi.FIBCApApi/RunOAM' request_serializer=fibcapi__pb2.OAM.Request.SerializeToString response_deserializer=fibcapis__pb2.OAMReplyAck.FromString )<block_end><block_end><class_stmt>FIBCApApiServicer(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>Monitor self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>GetPortStats self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>ModPortStats self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>GetPortEntries self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>GetIDEntries self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>GetDpEntries self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>AddPortEntry self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>AddIDEntry self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>DelPortEntry self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>DelIDEntry self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>GetStats self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>RunOAM self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><block_end><def_stmt>add_FIBCApApiServicer_to_server servicer server<block_start>rpc_method_handlers={'Monitor':grpc.unary_stream_rpc_method_handler(servicer.Monitor request_deserializer=fibcapis__pb2.ApMonitorRequest.FromString response_serializer=fibcapis__pb2.ApMonitorReply.SerializeToString ) 'GetPortStats':grpc.unary_stream_rpc_method_handler(servicer.GetPortStats request_deserializer=fibcapis__pb2.ApGetPortStatsRequest.FromString response_serializer=fibcapi__pb2.FFPortStats.SerializeToString ) 'ModPortStats':grpc.unary_unary_rpc_method_handler(servicer.ModPortStats request_deserializer=fibcapis__pb2.ApModPortStatsRequest.FromString response_serializer=fibcapis__pb2.ApModPortStatsReply.SerializeToString ) 'GetPortEntries':grpc.unary_stream_rpc_method_handler(servicer.GetPortEntries request_deserializer=fibcapis__pb2.ApGetPortEntriesRequest.FromString response_serializer=fibcapis__pb2.DbPortEntry.SerializeToString ) 'GetIDEntries':grpc.unary_stream_rpc_method_handler(servicer.GetIDEntries request_deserializer=fibcapis__pb2.ApGetIdEntriesRequest.FromString response_serializer=fibcapis__pb2.DbIdEntry.SerializeToString ) 'GetDpEntries':grpc.unary_stream_rpc_method_handler(servicer.GetDpEntries request_deserializer=fibcapis__pb2.ApGetDpEntriesRequest.FromString response_serializer=fibcapis__pb2.DbDpEntry.SerializeToString ) 'AddPortEntry':grpc.unary_unary_rpc_method_handler(servicer.AddPortEntry request_deserializer=fibcapis__pb2.DbPortEntry.FromString response_serializer=fibcapis__pb2.ApAddPortEntryReply.SerializeToString ) 'AddIDEntry':grpc.unary_unary_rpc_method_handler(servicer.AddIDEntry request_deserializer=fibcapis__pb2.DbIdEntry.FromString response_serializer=fibcapis__pb2.ApAddIdEntryReply.SerializeToString ) 'DelPortEntry':grpc.unary_unary_rpc_method_handler(servicer.DelPortEntry request_deserializer=fibcapis__pb2.DbPortKey.FromString response_serializer=fibcapis__pb2.ApDelPortEntryReply.SerializeToString ) 'DelIDEntry':grpc.unary_unary_rpc_method_handler(servicer.DelIDEntry request_deserializer=fibcapis__pb2.DbIdEntry.FromString response_serializer=fibcapis__pb2.ApDelIdEntryReply.SerializeToString ) 'GetStats':grpc.unary_stream_rpc_method_handler(servicer.GetStats request_deserializer=fibcapis__pb2.ApGetStatsRequest.FromString response_serializer=fibcapis__pb2.StatsEntry.SerializeToString ) 'RunOAM':grpc.unary_unary_rpc_method_handler(servicer.RunOAM request_deserializer=fibcapi__pb2.OAM.Request.FromString response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString ) }<line_sep>generic_handler=grpc.method_handlers_generic_handler('fibcapi.FIBCApApi' rpc_method_handlers)<line_sep>server.add_generic_rpc_handlers((generic_handler ))<block_end><class_stmt>FIBCVmApiStub(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>__init__ self channel<block_start>"""Constructor.
Args:
channel: A grpc.Channel.
"""<line_sep>self.SendHello=channel.unary_unary('/fibcapi.FIBCVmApi/SendHello' request_serializer=fibcapi__pb2.Hello.SerializeToString response_deserializer=fibcapis__pb2.HelloReply.FromString )<line_sep>self.SendPortConfig=channel.unary_unary('/fibcapi.FIBCVmApi/SendPortConfig' request_serializer=fibcapi__pb2.PortConfig.SerializeToString response_deserializer=fibcapis__pb2.PortConfigReply.FromString )<line_sep>self.SendFlowMod=channel.unary_unary('/fibcapi.FIBCVmApi/SendFlowMod' request_serializer=fibcapi__pb2.FlowMod.SerializeToString response_deserializer=fibcapis__pb2.FlowModReply.FromString )<line_sep>self.SendGroupMod=channel.unary_unary('/fibcapi.FIBCVmApi/SendGroupMod' request_serializer=fibcapi__pb2.GroupMod.SerializeToString response_deserializer=fibcapis__pb2.GroupModReply.FromString )<line_sep>self.SendOAMReply=channel.unary_unary('/fibcapi.FIBCVmApi/SendOAMReply' request_serializer=fibcapis__pb2.OAMReply.SerializeToString response_deserializer=fibcapis__pb2.OAMReplyAck.FromString )<line_sep>self.Monitor=channel.unary_stream('/fibcapi.FIBCVmApi/Monitor' request_serializer=fibcapis__pb2.VmMonitorRequest.SerializeToString response_deserializer=fibcapis__pb2.VmMonitorReply.FromString )<block_end><block_end><class_stmt>FIBCVmApiServicer(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>SendHello self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendPortConfig self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendFlowMod self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendGroupMod self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendOAMReply self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>Monitor self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><block_end><def_stmt>add_FIBCVmApiServicer_to_server servicer server<block_start>rpc_method_handlers={'SendHello':grpc.unary_unary_rpc_method_handler(servicer.SendHello request_deserializer=fibcapi__pb2.Hello.FromString response_serializer=fibcapis__pb2.HelloReply.SerializeToString ) 'SendPortConfig':grpc.unary_unary_rpc_method_handler(servicer.SendPortConfig request_deserializer=fibcapi__pb2.PortConfig.FromString response_serializer=fibcapis__pb2.PortConfigReply.SerializeToString ) 'SendFlowMod':grpc.unary_unary_rpc_method_handler(servicer.SendFlowMod request_deserializer=fibcapi__pb2.FlowMod.FromString response_serializer=fibcapis__pb2.FlowModReply.SerializeToString ) 'SendGroupMod':grpc.unary_unary_rpc_method_handler(servicer.SendGroupMod request_deserializer=fibcapi__pb2.GroupMod.FromString response_serializer=fibcapis__pb2.GroupModReply.SerializeToString ) 'SendOAMReply':grpc.unary_unary_rpc_method_handler(servicer.SendOAMReply request_deserializer=fibcapis__pb2.OAMReply.FromString response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString ) 'Monitor':grpc.unary_stream_rpc_method_handler(servicer.Monitor request_deserializer=fibcapis__pb2.VmMonitorRequest.FromString response_serializer=fibcapis__pb2.VmMonitorReply.SerializeToString ) }<line_sep>generic_handler=grpc.method_handlers_generic_handler('fibcapi.FIBCVmApi' rpc_method_handlers)<line_sep>server.add_generic_rpc_handlers((generic_handler ))<block_end><class_stmt>FIBCVsApiStub(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>__init__ self channel<block_start>"""Constructor.
Args:
channel: A grpc.Channel.
"""<line_sep>self.SendHello=channel.unary_unary('/fibcapi.FIBCVsApi/SendHello' request_serializer=fibcapi__pb2.FFHello.SerializeToString response_deserializer=fibcapis__pb2.FFHelloReply.FromString )<line_sep>self.SendFFPacket=channel.unary_unary('/fibcapi.FIBCVsApi/SendFFPacket' request_serializer=fibcapi__pb2.FFPacket.SerializeToString response_deserializer=fibcapis__pb2.FFPacketReply.FromString )<line_sep>self.SendPacketIn=channel.unary_unary('/fibcapi.FIBCVsApi/SendPacketIn' request_serializer=fibcapi__pb2.FFPacketIn.SerializeToString response_deserializer=fibcapis__pb2.FFPacketInReply.FromString )<line_sep>self.SendOAMReply=channel.unary_unary('/fibcapi.FIBCVsApi/SendOAMReply' request_serializer=fibcapis__pb2.OAMReply.SerializeToString response_deserializer=fibcapis__pb2.OAMReplyAck.FromString )<line_sep>self.Monitor=channel.unary_stream('/fibcapi.FIBCVsApi/Monitor' request_serializer=fibcapis__pb2.VsMonitorRequest.SerializeToString response_deserializer=fibcapis__pb2.VsMonitorReply.FromString )<block_end><block_end><class_stmt>FIBCVsApiServicer(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>SendHello self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendFFPacket self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendPacketIn self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendOAMReply self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>Monitor self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><block_end><def_stmt>add_FIBCVsApiServicer_to_server servicer server<block_start>rpc_method_handlers={'SendHello':grpc.unary_unary_rpc_method_handler(servicer.SendHello request_deserializer=fibcapi__pb2.FFHello.FromString response_serializer=fibcapis__pb2.FFHelloReply.SerializeToString ) 'SendFFPacket':grpc.unary_unary_rpc_method_handler(servicer.SendFFPacket request_deserializer=fibcapi__pb2.FFPacket.FromString response_serializer=fibcapis__pb2.FFPacketReply.SerializeToString ) 'SendPacketIn':grpc.unary_unary_rpc_method_handler(servicer.SendPacketIn request_deserializer=fibcapi__pb2.FFPacketIn.FromString response_serializer=fibcapis__pb2.FFPacketInReply.SerializeToString ) 'SendOAMReply':grpc.unary_unary_rpc_method_handler(servicer.SendOAMReply request_deserializer=fibcapis__pb2.OAMReply.FromString response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString ) 'Monitor':grpc.unary_stream_rpc_method_handler(servicer.Monitor request_deserializer=fibcapis__pb2.VsMonitorRequest.FromString response_serializer=fibcapis__pb2.VsMonitorReply.SerializeToString ) }<line_sep>generic_handler=grpc.method_handlers_generic_handler('fibcapi.FIBCVsApi' rpc_method_handlers)<line_sep>server.add_generic_rpc_handlers((generic_handler ))<block_end><class_stmt>FIBCDpApiStub(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>__init__ self channel<block_start>"""Constructor.
Args:
channel: A grpc.Channel.
"""<line_sep>self.SendHello=channel.unary_unary('/fibcapi.FIBCDpApi/SendHello' request_serializer=fibcapi__pb2.FFHello.SerializeToString response_deserializer=fibcapis__pb2.FFHelloReply.FromString )<line_sep>self.SendPacketIn=channel.unary_unary('/fibcapi.FIBCDpApi/SendPacketIn' request_serializer=fibcapi__pb2.FFPacketIn.SerializeToString response_deserializer=fibcapis__pb2.FFPacketInReply.FromString )<line_sep>self.SendPortStatus=channel.unary_unary('/fibcapi.FIBCDpApi/SendPortStatus' request_serializer=fibcapi__pb2.FFPortStatus.SerializeToString response_deserializer=fibcapis__pb2.FFPortStatusReply.FromString )<line_sep>self.SendL2AddrStatus=channel.unary_unary('/fibcapi.FIBCDpApi/SendL2AddrStatus' request_serializer=fibcapi__pb2.FFL2AddrStatus.SerializeToString response_deserializer=fibcapis__pb2.L2AddrStatusReply.FromString )<line_sep>self.SendMultipartReply=channel.unary_unary('/fibcapi.FIBCDpApi/SendMultipartReply' request_serializer=fibcapis__pb2.DpMultipartReply.SerializeToString response_deserializer=fibcapis__pb2.DpMultipartReplyAck.FromString )<line_sep>self.SendOAMReply=channel.unary_unary('/fibcapi.FIBCDpApi/SendOAMReply' request_serializer=fibcapis__pb2.OAMReply.SerializeToString response_deserializer=fibcapis__pb2.OAMReplyAck.FromString )<line_sep>self.Monitor=channel.unary_stream('/fibcapi.FIBCDpApi/Monitor' request_serializer=fibcapis__pb2.DpMonitorRequest.SerializeToString response_deserializer=fibcapis__pb2.DpMonitorReply.FromString )<block_end><block_end><class_stmt>FIBCDpApiServicer(object)# missing associated documentation comment in .proto file
<block_start><pass><def_stmt>SendHello self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendPacketIn self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendPortStatus self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendL2AddrStatus self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendMultipartReply self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>SendOAMReply self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>Monitor self request context# missing associated documentation comment in .proto file
<block_start><pass><line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><block_end><def_stmt>add_FIBCDpApiServicer_to_server servicer server<block_start>rpc_method_handlers={'SendHello':grpc.unary_unary_rpc_method_handler(servicer.SendHello request_deserializer=fibcapi__pb2.FFHello.FromString response_serializer=fibcapis__pb2.FFHelloReply.SerializeToString ) 'SendPacketIn':grpc.unary_unary_rpc_method_handler(servicer.SendPacketIn request_deserializer=fibcapi__pb2.FFPacketIn.FromString response_serializer=fibcapis__pb2.FFPacketInReply.SerializeToString ) 'SendPortStatus':grpc.unary_unary_rpc_method_handler(servicer.SendPortStatus request_deserializer=fibcapi__pb2.FFPortStatus.FromString response_serializer=fibcapis__pb2.FFPortStatusReply.SerializeToString ) 'SendL2AddrStatus':grpc.unary_unary_rpc_method_handler(servicer.SendL2AddrStatus request_deserializer=fibcapi__pb2.FFL2AddrStatus.FromString response_serializer=fibcapis__pb2.L2AddrStatusReply.SerializeToString ) 'SendMultipartReply':grpc.unary_unary_rpc_method_handler(servicer.SendMultipartReply request_deserializer=fibcapis__pb2.DpMultipartReply.FromString response_serializer=fibcapis__pb2.DpMultipartReplyAck.SerializeToString ) 'SendOAMReply':grpc.unary_unary_rpc_method_handler(servicer.SendOAMReply request_deserializer=fibcapis__pb2.OAMReply.FromString response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString ) 'Monitor':grpc.unary_stream_rpc_method_handler(servicer.Monitor request_deserializer=fibcapis__pb2.DpMonitorRequest.FromString response_serializer=fibcapis__pb2.DpMonitorReply.SerializeToString ) }<line_sep>generic_handler=grpc.method_handlers_generic_handler('fibcapi.FIBCDpApi' rpc_method_handlers)<line_sep>server.add_generic_rpc_handlers((generic_handler ))<block_end> |
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""<import_stmt>os<import_from_stmt>programy.config.file.json_file JSONConfigurationFile<import_from_stmt>programy.clients.events.console.config ConsoleConfiguration<import_from_stmt>programy.utils.substitutions.substitues Substitutions<import_from_stmt>programytest.config.file.base_file_tests ConfigurationBaseFileTests<class_stmt>JSONConfigurationFileTests(ConfigurationBaseFileTests)<block_start><def_stmt>test_get_methods self<block_start>config_data=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config_data)<line_sep>configuration=config_data.load_from_text("""
{
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
}
}
}
""" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>section=config_data.get_section("brainx")<line_sep>self.assertIsNone(section)<line_sep>section=config_data.get_section("brain")<line_sep>self.assertIsNotNone(section)<line_sep>child_section=config_data.get_section("overrides" section)<line_sep>self.assertIsNotNone(child_section)<line_sep>keys=list(config_data.get_child_section_keys("overrides" section))<line_sep>self.assertIsNotNone(keys)<line_sep>self.assertEqual(3 len(keys))<line_sep>self.assertTrue("allow_system_aiml"<in>keys)<line_sep>self.assertTrue("allow_learn_aiml"<in>keys)<line_sep>self.assertTrue("allow_learnf_aiml"<in>keys)<line_sep>self.assertIsNone(config_data.get_child_section_keys("missing" section))<line_sep>self.assertEqual(<true> config_data.get_option(child_section "allow_system_aiml"))<line_sep>self.assertEqual(<true> config_data.get_option(child_section "missing" missing_value=<true>))<line_sep>self.assertEqual(<true> config_data.get_bool_option(child_section "allow_system_aiml"))<line_sep>self.assertEqual(<false> config_data.get_bool_option(child_section "other_value"))<line_sep>self.assertEqual(0 config_data.get_int_option(child_section "other_value"))<block_end><def_stmt>test_load_from_file self<block_start>config=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config)<line_sep>configuration=config.load_from_file(os.path.dirname(__file__)+os.sep+"test_json.json" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>self.assert_configuration(configuration)<block_end><def_stmt>test_load_from_text_multis_one_value self<block_start>config=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config)<line_sep>configuration=config.load_from_text("""
{
"bot": {
"brain": "bot1"
}
}
""" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>self.assertEqual(1 len(configuration.client_configuration.configurations[0].configurations))<block_end><def_stmt>test_load_from_text_multis_multiple_values self<block_start>config=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config)<line_sep>configuration=config.load_from_text("""
{
"console": {
"bot": "bot"
},
"bot": {
"brain": ["bot1", "bot2"]
}
}
""" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>self.assertEqual(2 len(configuration.client_configuration.configurations[0].configurations))<block_end><def_stmt>test_load_from_text self<block_start>config=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config)<line_sep>configuration=config.load_from_text("""
{
"console": {
"bot": "bot",
"prompt": ">>>",
"scheduler": {
"name": "Scheduler1",
"debug_level": 50,
"add_listeners": false,
"remove_all_jobs": false
},
"storage": {
"entities": {
"users": "sql",
"linked_accounts": "sql",
"links": "sql",
"properties": "file",
"conversations": "file",
"categories": "file",
"maps": "file",
"sets": "file",
"rdf": "file",
"denormal": "file",
"normal": "file",
"gender": "file",
"person": "file",
"person2": "file",
"spelling_corpus": "file",
"license_keys": "file",
"nodes": "file",
"binaries": "file",
"braintree": "file",
"preprocessors": "file",
"postprocessors": "file",
"regex_templates": "file",
"usergroups": "file",
"learnf": "file"
},
"stores": {
"sql": {
"type": "sql",
"config": {
"url": "sqlite:///:memory",
"echo": false,
"encoding": "utf-8",
"create_db": true,
"drop_all_first": true
}
},
"mongo": {
"type": "mongo",
"config": {
"url": "mongodb://localhost:27017/",
"database": "programy",
"drop_all_first": true
}
},
"redis": {
"type": "redis",
"config": {
"host": "localhost",
"port": 6379,
"password": <PASSWORD>,
"db": 0,
"prefix": "programy",
"drop_all_first": true
}
},
"file": {
"type": "file",
"config": {
"category_storage": {
"files": "./storage/categories"
},
"conversations_storage": {
"files": "./storage/conversations"
},
"sets_storage": {
"files": "./storage/sets",
"extension": ".txt",
"directories": false
},
"maps_storage": {
"files": "./storage/maps",
"extension": ".txt",
"directories": false
},
"regex_templates": {
"files": "./storage/regex"
},
"lookups_storage": {
"files": "./storage/lookups",
"extension": ".txt",
"directories": false
},
"properties_storage": {
"file": "./storage/properties.txt"
},
"defaults_storage": {
"file": "./storage/defaults.txt"
},
"rdf_storage": {
"files": "./storage/rdfs",
"extension": ".txt",
"directories": true
},
"spelling_corpus": {
"file": "./storage/spelling/corpus.txt"
},
"license_keys": {
"file": "./storage/license.keys"
},
"nodes": {
"files": "./storage/nodes"
},
"binaries": {
"files": "./storage/binaries"
},
"braintree": {
"file": "./storage/braintree/braintree.xml",
"format": "xml"
},
"preprocessors": {
"file": "./storage/processing/preprocessors.txt"
},
"postprocessors": {
"file": "./storage/processing/postprocessing.txt"
},
"usergroups": {
"files": "./storage/security/usergroups.txt"
},
"learnf": {
"files": "./storage/categories/learnf"
}
}
}
}
},
"logger": {
"type": "logger",
"config": {
"conversation_logger": "conversation"
}
}
},
"voice": {
"license_keys": "$BOT_ROOT/config/license.keys",
"tts": "osx",
"stt": "azhang",
"osx": {
"classname": "talky.clients.voice.tts.osxsay.OSXSayTextToSpeach"
},
"pytts": {
"classname": "talky.clients.voice.tts.pyttssay.PyTTSSayTextToSpeach",
"rate_adjust": 10
},
"azhang": {
"classname": "talky.clients.voice.stt.azhang.AnthonyZhangSpeechToText",
"ambient_adjust": 3,
"service": "ibm"
}
},
"rest": {
"host": "0.0.0.0",
"port": 8989,
"debug": false,
"workers": 4,
"license_keys": "$BOT_ROOT/config/license.keys"
},
"webchat": {
"host": "0.0.0.0",
"port": 8090,
"debug": false,
"license_keys": "$BOT_ROOT/config/license.keys",
"api": "/api/web/v1.0/ask"
},
"twitter": {
"polling": true,
"polling_interval": 49,
"streaming": false,
"use_status": true,
"use_direct_message": true,
"auto_follow": true,
"storage": "file",
"welcome_message": "Thanks for following me, send me a message and I'll try and help",
"license_keys": "file"
},
"xmpp": {
"server": "talk.google.com",
"port": 5222,
"xep_0030": true,
"xep_0004": true,
"xep_0060": true,
"xep_0199": true,
"license_keys": "file"
},
"socket": {
"host": "127.0.0.1",
"port": 9999,
"queue": 5,
"debug": true,
"license_keys": "file"
},
"telegram": {
"unknown_command": "Sorry, that is not a command I have been taught yet!",
"license_keys": "file"
},
"facebook": {
"host": "127.0.0.1",
"port": 5000,
"debug": false,
"license_keys": "file"
},
"twilio": {
"host": "127.0.0.1",
"port": 5000,
"debug": false,
"license_keys": "file"
},
"slack": {
"polling_interval": 1,
"license_keys": "file"
},
"viber": {
"name": "Servusai",
"avatar": "http://viber.com/avatar.jpg",
"license_keys": "file"
},
"line": {
"host": "127.0.0.1",
"port": 8084,
"debug": false,
"license_keys": "file"
},
"kik": {
"bot_name": "servusai",
"webhook": "https://93638f7a.ngrok.io/api/kik/v1.0/ask",
"host": "127.0.0.1",
"port": 8082,
"debug": false,
"license_keys": "file"
},
"bot": {
"brain": "brain",
"initial_question": "Hi, how can I help you today?",
"initial_question_srai": "YINITIALQUESTION",
"default_response": "Sorry, I don't have an answer for that!",
"default_response_srai": "YEMPTY",
"empty_string": "YEMPTY",
"exit_response": "So long, and thanks for the fish!",
"exit_response_srai": "YEXITRESPONSE",
"override_properties": true,
"max_question_recursion": 1000,
"max_question_timeout": 60,
"max_search_depth": 100,
"max_search_timeout": 60,
"spelling": {
"load": true,
"classname": "programy.spelling.norvig.NorvigSpellingChecker",
"check_before": true,
"check_and_retry": true
},
"conversations": {
"max_histories": 100,
"restore_last_topic": false,
"initial_topic": "TOPIC1",
"empty_on_start": false
}
},
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
},
"defaults": {
"default-get": "unknown",
"default-property": "unknown",
"default-map": "unknown",
"learnf-path": "file"
},
"binaries": {
"save_binary": true,
"load_binary": true,
"load_aiml_on_binary_fail": true
},
"braintree": {
"create": true
},
"services": {
"REST": {
"classname": "programy.services.rest.GenericRESTService",
"method": "GET",
"host": "0.0.0.0",
"port": 8080
},
"Pannous": {
"classname": "programy.services.pannous.PannousService",
"url": "http://weannie.pannous.com/api"
}
},
"security": {
"authentication": {
"classname": "programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService",
"denied_srai": "AUTHENTICATION_FAILED"
},
"authorisation": {
"classname": "programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService",
"denied_srai": "AUTHORISATION_FAILED",
"usergroups": {
"storage": "file"
}
}
},
"oob": {
"default": {
"classname": "programy.oob.defaults.default.DefaultOutOfBandProcessor"
},
"alarm": {
"classname": "programy.oob.defaults.alarm.AlarmOutOfBandProcessor"
},
"camera": {
"classname": "programy.oob.defaults.camera.CameraOutOfBandProcessor"
},
"clear": {
"classname": "programy.oob.defaults.clear.ClearOutOfBandProcessor"
},
"dial": {
"classname": "programy.oob.defaults.dial.DialOutOfBandProcessor"
},
"dialog": {
"classname": "programy.oob.defaults.dialog.DialogOutOfBandProcessor"
},
"email": {
"classname": "programy.oob.defaults.email.EmailOutOfBandProcessor"
},
"geomap": {
"classname": "programy.oob.defaults.map.MapOutOfBandProcessor"
},
"schedule": {
"classname": "programy.oob.defaults.schedule.ScheduleOutOfBandProcessor"
},
"search": {
"classname": "programy.oob.defaults.search.SearchOutOfBandProcessor"
},
"sms": {
"classname": "programy.oob.defaults.sms.SMSOutOfBandProcessor"
},
"url": {
"classname": "programy.oob.defaults.url.URLOutOfBandProcessor"
},
"wifi": {
"classname": "programy.oob.defaults.wifi.WifiOutOfBandProcessor"
}
},
"dynamic": {
"variables": {
"gettime": "programy.dynamic.variables.datetime.GetTime"
},
"sets": {
"numeric": "programy.dynamic.sets.numeric.IsNumeric",
"roman": "programy.dynamic.sets.roman.IsRomanNumeral"
},
"maps": {
"romantodec": "programy.dynamic.maps.roman.MapRomanToDecimal",
"dectoroman": "programy.dynamic.maps.roman.MapDecimalToRoman"
}
}
}
}
""" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>self.assert_configuration(configuration)<block_end><def_stmt>test_load_additionals self<block_start>config=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config)<line_sep>configuration=config.load_from_text("""
{
"console": {
"bot": "bot"
},
"bot": {
"brain": "brain"
},
"brain": {
"security": {
"authentication": {
"classname": "programy.security.authenticate.passthrough.PassThroughAuthenticationService",
"denied_srai": "ACCESS_DENIED"
}
}
}
}
""" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>auth_service=configuration.client_configuration.configurations[0].configurations[0].security.authentication<line_sep>self.assertIsNotNone(auth_service)<line_sep>self.assertEqual("ACCESS_DENIED" auth_service.denied_srai)<block_end><def_stmt>test_load_with_subs self<block_start>subs=Substitutions()<line_sep>subs.add_substitute("$ALLOW_SYSTEM" <true>)<line_sep>config_data=JSONConfigurationFile()<line_sep>self.assertIsNotNone(config_data)<line_sep>configuration=config_data.load_from_text("""
{
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
}
}
}
""" ConsoleConfiguration() ".")<line_sep>self.assertIsNotNone(configuration)<line_sep>section=config_data.get_section("brainx")<line_sep>self.assertIsNone(section)<line_sep>section=config_data.get_section("brain")<line_sep>self.assertIsNotNone(section)<line_sep>child_section=config_data.get_section("overrides" section)<line_sep>self.assertIsNotNone(child_section)<line_sep>self.assertEqual(<true> config_data.get_option(child_section "allow_system_aiml"))<line_sep>self.assertEqual(<true> config_data.get_bool_option(child_section "allow_system_aiml"))<line_sep>self.assertEqual(<false> config_data.get_bool_option(child_section "other_value"))<block_end><block_end> |
"""Test Evil Genius Labs light."""<import_from_stmt>unittest.mock patch<import_stmt>pytest<import_from_stmt>homeassistant.components.light ATTR_COLOR_MODE ATTR_SUPPORTED_COLOR_MODES ColorMode LightEntityFeature <import_from_stmt>homeassistant.const ATTR_SUPPORTED_FEATURES<line_sep>@pytest.mark.parametrize("platforms" [("light" )])<async_keyword><def_stmt>test_works hass setup_evil_genius_labs<block_start>"""Test it works."""<line_sep>state=hass.states.get("light.fibonacci256_23d4")<assert_stmt>state<is><not><none><assert_stmt>state.state<eq>"on"<assert_stmt>state.attributes["brightness"]<eq>128<assert_stmt>state.attributes[ATTR_COLOR_MODE]<eq>ColorMode.RGB<assert_stmt>state.attributes[ATTR_SUPPORTED_COLOR_MODES]<eq>[ColorMode.RGB]<assert_stmt>state.attributes[ATTR_SUPPORTED_FEATURES]<eq>LightEntityFeature.EFFECT<block_end>@pytest.mark.parametrize("platforms" [("light" )])<async_keyword><def_stmt>test_turn_on_color hass setup_evil_genius_labs<block_start>"""Test turning on with a color."""<with_stmt>patch("pyevilgenius.EvilGeniusDevice.set_path_value")<as>mock_set_path_value patch("pyevilgenius.EvilGeniusDevice.set_rgb_color")<as>mock_set_rgb_color<block_start><await>hass.services.async_call("light" "turn_on" {"entity_id":"light.fibonacci256_23d4" "brightness":100 "rgb_color":(10 20 30) } blocking=<true> )<block_end><assert_stmt>len(mock_set_path_value.mock_calls)<eq>2<line_sep>mock_set_path_value.mock_calls[0][1]<eq>("brightness" 100)<line_sep>mock_set_path_value.mock_calls[1][1]<eq>("power" 1)<assert_stmt>len(mock_set_rgb_color.mock_calls)<eq>1<line_sep>mock_set_rgb_color.mock_calls[0][1]<eq>(10 20 30)<block_end>@pytest.mark.parametrize("platforms" [("light" )])<async_keyword><def_stmt>test_turn_on_effect hass setup_evil_genius_labs<block_start>"""Test turning on with an effect."""<with_stmt>patch("pyevilgenius.EvilGeniusDevice.set_path_value")<as>mock_set_path_value<block_start><await>hass.services.async_call("light" "turn_on" {"entity_id":"light.fibonacci256_23d4" "effect":"Pride Playground" } blocking=<true> )<block_end><assert_stmt>len(mock_set_path_value.mock_calls)<eq>2<line_sep>mock_set_path_value.mock_calls[0][1]<eq>("pattern" 4)<line_sep>mock_set_path_value.mock_calls[1][1]<eq>("power" 1)<block_end>@pytest.mark.parametrize("platforms" [("light" )])<async_keyword><def_stmt>test_turn_off hass setup_evil_genius_labs<block_start>"""Test turning off."""<with_stmt>patch("pyevilgenius.EvilGeniusDevice.set_path_value")<as>mock_set_path_value<block_start><await>hass.services.async_call("light" "turn_off" {"entity_id":"light.fibonacci256_23d4" } blocking=<true> )<block_end><assert_stmt>len(mock_set_path_value.mock_calls)<eq>1<line_sep>mock_set_path_value.mock_calls[0][1]<eq>("power" 0)<block_end> |
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str


def validate_email_with_name(value):
    """
    Validate email address.

    Both "<NAME> <<EMAIL>>" and "<EMAIL>" are valid.
    """
    value = force_str(value)

    recipient = value
    if '<' in value and '>' in value:
        start = value.find('<') + 1
        end = value.find('>')
        if start < end:
            recipient = value[start:end]

    validate_email(recipient)


def validate_comma_separated_emails(value):
    """
    Validate every email address in a comma separated list of emails.
    """
    if not isinstance(value, (tuple, list)):
        raise ValidationError('Email list must be a list/tuple.')

    for email in value:
        try:
            validate_email_with_name(email)
        except ValidationError:
            raise ValidationError('Invalid email: %s' % email, code='invalid')


def validate_template_syntax(source):
    """
    Basic Django Template syntax validation. This allows for more robust template
    authoring.
    """
    try:
        Template(source)
    except (TemplateSyntaxError, TemplateDoesNotExist) as err:
        raise ValidationError(str(err))
"""
Determine the number of bits required to convert integer A to integer B
Example
Given n = 31, m = 14,return 2
(31)10=(11111)2
(14)10=(01110)2
"""<line_sep>__author__='Danyang'<class_stmt>Solution<block_start><def_stmt>bitSwapRequired self a b<block_start>"""
:param a:
:param b:
:return: int
"""<line_sep>a=self.to_bin(a)<line_sep>b=self.to_bin(b)<line_sep>diff=len(a)-len(b)<line_sep>ret=0<if_stmt>diff<l>0<block_start>a,b=b a<line_sep>diff<augmul>-1<block_end>b="0"<times>diff+b<for_stmt>i xrange(len(b))<block_start><if_stmt>a[i]<ne>b[i]<block_start>ret<augadd>1<block_end><block_end><return>ret<block_end><def_stmt>to_bin self n<block_start>"""
2's complement
32-bit
:param n:
:return:
"""<line_sep>"""
:param n:
:return:
"""<line_sep>a=abs(n)<line_sep>lst=[]<while_stmt>a<g>0<block_start>lst.append(a%2)<line_sep>a<augdiv>2<block_end># 2's complement
<if_stmt>n<ge>0<block_start>lst.extend([0]<times>(32-len(lst)))<block_end><else_stmt><block_start>pivot=-1<for_stmt>i xrange(len(lst))<block_start><if_stmt>pivot<eq>-1<and>lst[i]<eq>1<block_start>pivot=i<line_sep><continue><block_end><if_stmt>pivot<ne>-1<block_start>lst[i]<augxor>1<block_end><block_end>lst.extend([1]<times>(32-len(lst)))<block_end><return>"".join(map(str reversed(lst)))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><assert_stmt>Solution().bitSwapRequired(1 -1)<eq>31<assert_stmt>Solution().bitSwapRequired(31 14)<eq>2<block_end> |
<import_stmt>contextlib<import_stmt>logging<import_stmt>typing<import_from_stmt>typing Any Dict Tuple<import_stmt>attr<import_from_stmt>dbnd._core.configuration get_dbnd_project_config<import_from_stmt>dbnd._core.constants RESULT_PARAM DbndTargetOperationStatus DbndTargetOperationType TaskRunState <import_from_stmt>dbnd._core.current current_task_run get_databand_run is_verbose try_get_current_task <import_from_stmt>dbnd._core.errors.errors_utils log_exception<import_from_stmt>dbnd._core.log.external_exception_logging log_exception_to_server<import_from_stmt>dbnd._core.parameter.parameter_definition ParameterDefinition<import_from_stmt>dbnd._core.parameter.parameter_value ParameterFilters<import_from_stmt>dbnd._core.settings TrackingConfig<import_from_stmt>dbnd._core.task.tracking_task TrackingTask<import_from_stmt>dbnd._core.task_build.task_context try_get_current_task<import_from_stmt>dbnd._core.task_build.task_definition TaskDefinition<import_from_stmt>dbnd._core.task_build.task_results FuncResultParameter<import_from_stmt>dbnd._core.task_run.task_run TaskRun<import_from_stmt>dbnd._core.task_run.task_run_error TaskRunError<import_from_stmt>dbnd._core.utils.callable_spec args_to_kwargs<import_from_stmt>dbnd._core.utils.timezone utcnow<import_from_stmt>targets InMemoryTarget Target<import_from_stmt>targets.value_meta ValueMetaConf<import_from_stmt>targets.values get_value_type_of_obj<if_stmt>typing.TYPE_CHECKING<block_start><import_from_stmt>dbnd._core.task_build.task_decorator TaskDecorator<block_end>logger=logging.getLogger(__name__)<line_sep>@attr.s<class_stmt>TrackedFuncCallWithResult(object)<block_start>call_args=attr.ib()# type: Tuple[Any]
call_kwargs=attr.ib()# type: Dict[str,Any]
callable=attr.ib()<line_sep>result=attr.ib(default=<none>)<def_stmt>set_result self value<block_start>self.result=value<line_sep><return>value<block_end><def_stmt>invoke self<block_start>func=self.callable<line_sep><return>func(*self.call_args **self.call_kwargs)<block_end><block_end><class_stmt>CallableTrackingManager(object)<block_start><def_stmt>__init__ self task_decorator# type: (CallableTrackingManager, TaskDecorator) -> None
<block_start>self.task_decorator=task_decorator<line_sep>self._tracking_task_definition=<none><line_sep>self._call_count=0<line_sep>self._call_as_func=<false><line_sep>self._max_call_count=get_dbnd_project_config().max_calls_per_run<block_end>@property<def_stmt>callable self<block_start><return>self.task_decorator.class_or_func<block_end><def_stmt>get_tracking_task_definition self<block_start><if_stmt><not>self._tracking_task_definition<block_start>self._tracking_task_definition=self._build_tracking_task_definition()<block_end><return>self._tracking_task_definition<block_end><def_stmt>_build_tracking_task_definition self<block_start><return>TaskDefinition.from_task_decorator(task_decorator=self.task_decorator)<block_end><def_stmt>_call_count_limit_exceeded self<block_start><if_stmt><not>self._call_as_func<block_start>self._call_count<augadd>1<if_stmt>self._call_count<g>self._max_call_count<block_start>logger.info("Reached maximum tracking limit of {} tasks. Running function regularly.".format(self._max_call_count))<line_sep>self._call_as_func=<true><block_end><block_end><return>self._call_as_func<block_end>@contextlib.contextmanager<def_stmt>tracking_context self call_args call_kwargs<block_start>user_code_called=<false># whether we got to executing of user code
user_code_finished=<false># whether we passed executing of user code
func_call=<none><try_stmt># 1. check that we don't have too many calls
<block_start><if_stmt>self._call_count_limit_exceeded()<block_start><yield>_do_nothing_decorator<line_sep><return><block_end># 2. Start or reuse existing "main tracking task" that is root for tracked tasks
<if_stmt><not>try_get_current_task()<block_start>"""
try to get existing task, and if not exists - try to get/create inplace_task_run
"""<import_from_stmt>dbnd._core.tracking.script_tracking_manager try_get_inplace_tracking_task_run <line_sep>inplace_tacking_task=try_get_inplace_tracking_task_run()<if_stmt><not>inplace_tacking_task# we didn't manage to start inplace tracking task run, we will not be able to track
<block_start><yield>_do_nothing_decorator<line_sep><return><block_end><block_end>tracking_task_definition=self.get_tracking_task_definition()<line_sep>callable_spec=tracking_task_definition.task_decorator.get_callable_spec()<line_sep>func_call=TrackedFuncCallWithResult(callable=self.callable call_args=tuple(call_args) # prevent original call_args modification
call_kwargs=dict(call_kwargs) # prevent original kwargs modification
)<line_sep># replace any position argument with kwarg if it possible
args,kwargs=args_to_kwargs(callable_spec.args func_call.call_args func_call.call_kwargs )<line_sep># instantiate inline task
task=TrackingTask.for_func(tracking_task_definition args kwargs)<line_sep># update upstream/downstream relations - needed for correct tracking
# we can have the task as upstream , as it was executed already
parent_task=current_task_run().task<if_stmt><not>parent_task.task_dag.has_upstream(task)<block_start>parent_task.set_upstream(task)<block_end># checking if any of the inputs are the outputs of previous task.
# we can add that task as upstream.
dbnd_run=get_databand_run()<line_sep>call_kwargs_as_targets=dbnd_run.target_origin.get_for_map(kwargs)<for_stmt>value_origin call_kwargs_as_targets.values()<block_start>up_task=value_origin.origin_target.task<line_sep>task.set_upstream(up_task)<block_end># creating task_run as a task we found mid-run
task_run=dbnd_run.create_task_run_at_execution_time(task task_engine=current_task_run().task_engine)<line_sep>should_capture_log=TrackingConfig.current().capture_tracking_log<with_stmt>task_run.runner.task_run_execution_context(handle_sigterm=<true> capture_log=should_capture_log)<block_start>task_run.set_task_run_state(state=TaskRunState.RUNNING)<line_sep>_log_inputs(task_run)<line_sep># if we reached this line, then all tracking initialization is
# finished successfully, and we're going to execute user code
user_code_called=<true><try_stmt># tracking_context is context manager - user code will run on yield
<block_start><yield>func_call.set_result<line_sep># if we reached this line, this means that user code finished
# successfully without any exceptions
user_code_finished=<true><block_end><except_stmt>Exception<as>ex<block_start>task_run.finished_time=utcnow()<line_sep>error=TaskRunError.build_from_ex(ex task_run)<line_sep>task_run.set_task_run_state(TaskRunState.FAILED error=error)<line_sep><raise><block_end><else_stmt><block_start>task_run.finished_time=utcnow()<line_sep># func_call.result should contain result, log it
_log_result(task_run func_call.result)<line_sep>task_run.set_task_run_state(TaskRunState.SUCCESS)<block_end><block_end><block_end><except_stmt>Exception<block_start><if_stmt>user_code_called<and><not>user_code_finished# if we started to call the user code and not got to user_code_finished
# line - it means there was user code exception - so just re-raise it
<block_start><raise><block_end># else it's either we didn't reached calling user code, or already passed it
# then it's some dbnd tracking error - just log it
<if_stmt>func_call<block_start>_handle_tracking_error("tracking-init" func_call)<block_end><else_stmt><block_start>log_exception_to_server()<block_end><block_end># if we didn't reached user_code_called=True line - there was an error during
# dbnd tracking initialization, so nothing is done - user function wasn't called yet
<if_stmt><not>user_code_called# tracking_context is context manager - user code will run on yield
<block_start><yield>_do_nothing_decorator<line_sep><return><block_end><block_end><block_end><def_stmt>_handle_tracking_error msg func_call=<none><block_start>log_exception_to_server()<line_sep>location=" for %s"%func_call.callable<if>func_call<else>""<line_sep>msg="Failed during dbnd %s for %s, ignoring, and continue without tracking"%(msg location )<if_stmt>is_verbose()<block_start>logger.warning(msg exc_info=<true> )<block_end><else_stmt><block_start>logger.info(msg)<block_end><block_end><def_stmt>_do_nothing_decorator f<block_start><return>f<block_end><def_stmt>_log_inputs task_run<block_start>"""
For tracking mode. Logs InMemoryTarget inputs.
"""<try_stmt><block_start>params=task_run.task._params<for_stmt>param_value params.get_param_values(ParameterFilters.INPUTS)<block_start>param,value=param_value.parameter param_value.value<if_stmt>isinstance(param_value InMemoryTarget)<block_start><try_stmt><block_start>param=param.modify(value_meta_conf=ValueMetaConf(log_preview=<true> log_schema=<true> ))<line_sep>task_run.tracker.log_parameter_data(parameter=param target=param_value value=value operation_type=DbndTargetOperationType.read operation_status=DbndTargetOperationStatus.OK )<block_end><except_stmt>Exception<as>ex<block_start>log_exception("Failed to log input param to tracking store." ex=ex non_critical=<true> )<block_end><block_end><block_end><block_end><except_stmt>Exception<as>ex<block_start>log_exception("Failed to log input params to tracking store." ex=ex non_critical=<true>)<block_end><block_end><def_stmt>_log_result task_run result# type: (TaskRun, Any) -> None
<block_start>"""
For tracking mode. Logs the task result and adds it to the target_origin map to support relationships between
dynamic tasks.
"""<try_stmt><block_start>result_param=task_run.task.task_params.get_param_value(RESULT_PARAM)<if_stmt><not>result_param<block_start>logger.debug("No result params to log for task {}".format(task_run.task_af_id))<line_sep><return><block_end># we now the parameter value is a target because this is an output param
# the target is created in the task creation
result_param_def,result_target=result_param.parameter result_param.value<line_sep># spread result into relevant fields.
<if_stmt>isinstance(result_param_def FuncResultParameter)# assign all returned values to relevant band Outputs
<block_start><if_stmt>result<is><none><block_start><return><block_end><for_stmt>result_name,value result_param_def.named_results(result)# we now the parameter value is a target because this is an output param
# the target is created in the task creation
<block_start>parameter_value=task_run.task.task_params.get_param_value(result_name)<line_sep>_log_parameter_value(task_run parameter_definition=parameter_value.parameter target=parameter_value.value value=value )<block_end><block_end><else_stmt><block_start>_log_parameter_value(task_run parameter_definition=result_param_def target=result_target value=result )<block_end><block_end><except_stmt>Exception<as>ex<block_start>log_exception("Failed to log result to tracking store." ex=ex non_critical=<true>)<block_end><block_end><def_stmt>_log_parameter_value task_run parameter_definition target value# type: (TaskRun, ParameterDefinition, Target, Any) -> None
# make sure it will be logged correctly
<block_start>parameter_definition=parameter_definition.modify(value_meta_conf=ValueMetaConf(log_preview=<true> log_schema=<true>))<try_stmt># case what if result is Proxy
<block_start>value_type=get_value_type_of_obj(value parameter_definition.value_type)<line_sep>task_run.run.target_origin.add(target value value_type)<block_end><except_stmt>Exception<as>ex<block_start>log_exception("Failed to register result to target tracking." ex=ex non_critical=<true>)<block_end><try_stmt><block_start>task_run.tracker.log_parameter_data(parameter=parameter_definition # was: task_run.task.task_definition.task_class.result,
target=target value=value operation_type=DbndTargetOperationType.write # is it write? (or log?)
operation_status=DbndTargetOperationStatus.OK )<block_end><except_stmt>Exception<as>ex<block_start>log_exception("Failed to log result to tracking store." ex=ex non_critical=<true>)<block_end><block_end> |
import os
import logging

import nose
import angr

l = logging.getLogger("angr.tests.test_bindiff")

test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')


# todo make a better test
def test_bindiff_x86_64():
    binary_path_1 = os.path.join(test_location, 'x86_64', 'bindiff_a')
    binary_path_2 = os.path.join(test_location, 'x86_64', 'bindiff_b')
    b = angr.Project(binary_path_1, load_options={"auto_load_libs": False})
    b2 = angr.Project(binary_path_2, load_options={"auto_load_libs": False})
    bindiff = b.analyses.BinDiff(b2)

    identical_functions = bindiff.identical_functions
    differing_functions = bindiff.differing_functions
    unmatched_functions = bindiff.unmatched_functions

    # check identical functions
    nose.tools.assert_in((0x40064c, 0x40066a), identical_functions)
    # check differing functions
    nose.tools.assert_in((0x400616, 0x400616), differing_functions)
    # check unmatched functions
    nose.tools.assert_less_equal(len(unmatched_functions[0]), 1)
    nose.tools.assert_less_equal(len(unmatched_functions[1]), 2)
    # check for no major regressions
    nose.tools.assert_greater(len(identical_functions), len(differing_functions))
    nose.tools.assert_less(len(differing_functions), 4)

    # check a function diff
    fdiff = bindiff.get_function_diff(0x400616, 0x400616)
    block_matches = {(a.addr, b.addr) for a, b in fdiff.block_matches}
    nose.tools.assert_in((0x40064a, 0x400668), block_matches)
    nose.tools.assert_in((0x400616, 0x400616), block_matches)
    nose.tools.assert_in((0x40061e, 0x40061e), block_matches)


def run_all():
    functions = globals()
    all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
    for f in sorted(all_functions.keys()):
        if hasattr(all_functions[f], '__call__'):
            all_functions[f]()


if __name__ == "__main__":
    logging.getLogger("angr.analyses.bindiff").setLevel(logging.DEBUG)

    import sys
    if len(sys.argv) > 1:
        globals()['test_' + sys.argv[1]]()
    else:
        run_all()
"""
Warning - this will reset all components back to a blank state before running the simulation
Runs node1, electrumx1 and electrumsv1 and loads the default wallet on the daemon (so that newly
submitted blocks will be synchronized by ElectrumSV).
reorged txid: 'a1fa9460ca105c1396cd338f7fa202bf79a9d244d730e91e19f6302a05b2f07a'
"""<import_stmt>asyncio<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>pytest<import_stmt>pytest_asyncio<import_from_stmt>electrumsv_node electrumsv_node<import_from_stmt>electrumsv_sdk utils<import_stmt>logging<import_stmt>requests<import_from_stmt>contrib.functional_tests.websocket_client TxStateWSClient<line_sep>MODULE_DIR=os.path.dirname(os.path.abspath(__file__))<line_sep>logging.basicConfig(level=logging.DEBUG)<line_sep>logger=logging.getLogger("simulate-fresh-reorg")<async_keyword><def_stmt>wait_for_reog_transaction_update reorged_txids reorg_height<block_start>MAX_WAIT_TIME=10# seconds
<async_keyword><with_stmt>TxStateWSClient()<as>ws_client<block_start><try_stmt><block_start><await>asyncio.wait_for(ws_client.block_until_confirmed_and_height_updated(reorged_txids reorg_height) MAX_WAIT_TIME)<block_end><except_stmt>asyncio.TimeoutError<block_start>logger.exception(f"timed out after {MAX_WAIT_TIME} seconds")<line_sep><raise><block_end><block_end><block_end><class_stmt>TestReorg<block_start>@classmethod<def_stmt>setup_class cls<block_start><pass><block_end>@classmethod<def_stmt>teardown_class cls<block_start><pass><block_end>@pytest.mark.asyncio<def_stmt>test_reorg self event_loop<block_start><async_keyword><def_stmt>test_reorg <block_start>payload={"password":"<PASSWORD>"}<line_sep>REORGED_TXIDS="a1fa9460ca105c1396cd338f7fa202bf79a9d244d730e91e19f6302a05b2f07a"<line_sep># Load the default wallet on ElectrumSV daemon
url=f"http://127.0.0.1:9999/v1/regtest/dapp/wallets/worker1.sqlite/load_wallet"<line_sep>result=requests.post(url json=payload)<line_sep>result.raise_for_status()<line_sep># Submit node1 blocks to node
<if_stmt>electrumsv_node.is_node_running()<block_start>utils.submit_blocks_from_file(node_id='node1' filepath=Path(MODULE_DIR).joinpath('../reorg_blocks/node1_blocks.dat'))<block_end><else_stmt><block_start>logger.exception("node unavailable")<block_end><try_stmt><block_start><await>wait_for_reog_transaction_update([REORGED_TXIDS] 201)<line_sep># Todo check state of get_balance; get_coin_state; get_transaction_history
# Submit node2 blocks to node
<if_stmt>electrumsv_node.is_node_running()<block_start>utils.submit_blocks_from_file(node_id='node1' filepath=Path(MODULE_DIR).joinpath('../reorg_blocks/node2_blocks.dat'))<block_end><else_stmt><block_start>logger.exception("node unavailable")<block_end><await>wait_for_reog_transaction_update([REORGED_TXIDS] 202)<block_end><except_stmt>asyncio.TimeoutError<block_start>pytest.xfail("work in progress alongside refactoring changes...")<block_end># Todo check state of get_balance; get_coin_state; get_transaction_history
<block_end>event_loop.run_until_complete(test_reorg())<block_end><block_end> |
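# Condensed sketch of the reorg flow exercised by TestReorg above, assuming the
# SDK components (node1, electrumx1, electrumsv1) are already running; the URL,
# wallet name and helper names are taken from the test, and the password value
# is illustrative only.
import asyncio
import requests
from electrumsv_sdk import utils
from contrib.functional_tests.websocket_client import TxStateWSClient

async def simulate_reorg(reorged_txid: str, block_file, new_height: int) -> None:
    # Load the default wallet so ElectrumSV synchronizes the submitted blocks.
    response = requests.post(
        "http://127.0.0.1:9999/v1/regtest/dapp/wallets/worker1.sqlite/load_wallet",
        json={"password": "example-password"})
    response.raise_for_status()
    # Submit the competing chain to the node.
    utils.submit_blocks_from_file(node_id='node1', filepath=block_file)
    # Block until the wallet reports the transaction confirmed at the new height.
    async with TxStateWSClient() as ws_client:
        await asyncio.wait_for(
            ws_client.block_until_confirmed_and_height_updated([reorged_txid], new_height),
            30)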
<import_stmt>torch<import_stmt>torch.overrides<import_stmt>linecache<import_from_stmt>typing Type Dict List Any Union<import_from_stmt>.graph Graph<import_stmt>copy<line_sep># normal exec loses the source code, however we can patch
# the linecache module to still recover it.
# using exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
_next_id=0<def_stmt>exec_with_source src:str globals:Dict[str Any]<block_start><global>_next_id<line_sep>key=f'<eval_with_key_{_next_id}>'<line_sep>_next_id<augadd>1<line_sep>_eval_cache[key]=[line+'\n'<for>line src.splitlines()]<line_sep>exec(compile(src key 'exec') globals)<block_end># patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache:Dict[str List[str]]={}<line_sep>_orig_getlines=linecache.getlines<def_stmt>patched_getline *args **kwargs<block_start><if_stmt>args[0]<in>_eval_cache<block_start><return>_eval_cache[args[0]]<block_end><return>_orig_getlines(*args **kwargs)<block_end>linecache.getlines=patched_getline<def_stmt>_forward_from_src src:str<block_start>gbls:Dict[str Any]={'torch':torch}<line_sep>exec_with_source(src gbls)<line_sep><return>gbls['forward']<block_end><def_stmt>deserialize_graphmodule body:dict<arrow>torch.nn.Module<block_start>"""
Deserialize a GraphModule given the dictionary of the original module,
using the code to reconstruct the graph. We delete the actual graph before
saving the dictionary so that changes to the in-memory graph format do not
get serialized.
"""<line_sep># We create a dummy class here because symbolic_trace pulls the forward()
# function off of the class, rather than the instance
<class_stmt>CodeOnlyModule(torch.nn.Module)<block_start><def_stmt>__init__ self body<block_start>super().__init__()<line_sep>self.__dict__=body<block_end><block_end>CodeOnlyModule.forward=_forward_from_src(body['code'])<import_from_stmt>.symbolic_trace Tracer<line_sep># we shouldn't trace into any of the submodules, they were not
# traced in the original GraphModule
<class_stmt>KeepModules(Tracer)<block_start><def_stmt>is_leaf_module self _:torch.nn.Module __:str<arrow>bool<block_start><return><true><block_end><block_end><return>KeepModules().trace(CodeOnlyModule(body))<block_end># copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
<def_stmt>_copy_attr from_module:torch.nn.Module to_module:torch.nn.Module target:str<block_start>*prefix,field=target.split('.')<for_stmt>item prefix<block_start>f=getattr(from_module item)<line_sep>t=getattr(to_module item <none>)<if_stmt>f<is>t# we have already installed one of its parents
# (e.g. target = root.linear.weight, but we have already installed root.linear)
# once we install a parent, we no longer need to copy the children
# since all the needed properties will already be present
<block_start><return><block_end><if_stmt>t<is><none><block_start>t=torch.nn.Module()<line_sep>setattr(to_module item t)<block_end>from_module,to_module=f t<block_end>setattr(to_module field getattr(from_module field))<block_end># Assign attribute 'from_obj' to the qualified name 'target' on 'to_module
# This installs empty Modules where none exist yet if they are subpaths of target
<def_stmt>_assign_attr from_obj:Any to_module:torch.nn.Module target:str<block_start>*prefix,field=target.split('.')<for_stmt>item prefix<block_start>t=getattr(to_module item <none>)<if_stmt>t<is><none><block_start>t=torch.nn.Module()<line_sep>setattr(to_module item t)<block_end>to_module=t<block_end>setattr(to_module field from_obj)<block_end><class_stmt>GraphModule(torch.nn.Module)<block_start>"""
GraphModule is an nn.Module generated from an fx.Graph. GraphModule has
important attributes:
graph : The graph from which this GraphModule was generated
code : The Python source code for the function generated from `graph`
forward : The Python method generated from `graph`
Note that when `graph` is reassigned, `code` and `forward` will be automatically
regenerated.
"""<def_stmt>__new__ cls:'Type[GraphModule]' *args **kwargs# each instance of a graph module needs its own forward method
# so create a new singleton class for each instance.
# it is a subclass of the user-defined class, the only difference
# is an extra layer to install the forward method
<block_start><class_stmt>GraphModuleImpl(cls)# type: ignore
<block_start><pass><block_end><return>super().__new__(GraphModuleImpl)<block_end><def_stmt>__init__ self root:Union[torch.nn.Module Dict[str Any]] graph:Graph<block_start>"""
Construct a GraphModule.
root - `root` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
- In the case that `root` is a Module, any references to Module-based objects (via qualified
name) in the Graph's Nodes' `target` field will be copied over from the respective place
within `root`'s Module hierarchy into the GraphModule's module hierarchy.
- In the case that `root` is a dict, the qualified name found in a Node's `target` will be
looked up directly in the dict's keys. The object mapped to by the Dict will be copied
over into the appropriate place within the GraphModule's module hierarchy.
graph - `graph` contains the nodes this GraphModule should use for code generation
"""<line_sep>super().__init__()<if_stmt>isinstance(root torch.nn.Module)<block_start><if_stmt>hasattr(root 'training')<block_start>self.training=root.training<block_end><for_stmt>node graph.nodes<block_start><if_stmt>node.op<in>['get_attr' 'call_module']<block_start><assert_stmt>isinstance(node.target str)<line_sep>_copy_attr(root self node.target)<block_end><block_end><block_end><elif_stmt>isinstance(root dict)<block_start>targets_to_copy=[]<for_stmt>node graph.nodes<block_start><if_stmt>node.op<in>['get_attr' 'call_module']<block_start><assert_stmt>isinstance(node.target str)<if_stmt>node.target<not><in>root<block_start><raise>RuntimeError('Node '+str(node)+' referenced target '+node.target+' but that target was not provided in `root`!')<block_end>targets_to_copy.append(node.target)<block_end><block_end># Sort targets in ascending order of the # of atoms.
# This will ensure that less deeply nested attributes are assigned
# before more deeply nested attributes. For example, foo.bar
# will be assigned before foo.bar.baz. Otherwise, we might assign
# the user-provided `foo.bar` and wipe out the previously-assigned
# `foo.bar.baz`
targets_to_copy.sort(key=<lambda>t:t.count('.'))<for_stmt>target_to_copy targets_to_copy<block_start>_assign_attr(root[target_to_copy] self target_to_copy)<block_end><block_end><else_stmt><block_start><raise>RuntimeError('Unsupported type '+str(root)+' passed for root!')<block_end>self.graph=graph<block_end># TorchScript breaks trying to compile the graph setter because of the
# continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
#
# Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
__jit_unused_properties__=['graph']<line_sep>@property<def_stmt>graph self<block_start><return>self._graph<block_end>@graph.setter<def_stmt>graph self val<arrow><none><block_start>self._graph=val<line_sep>body,result,free_variables=self._graph.python_code(root_module='self')<line_sep>body='\n'.join(' '+line<for>line body.split('\n'))+'\n'<line_sep>self.code=f"""\
def forward(self, {', '.join(free_variables)}):
{body}
return {result}
"""<line_sep>cls=type(self)<line_sep>cls.forward=_forward_from_src(self.code)<block_end><def_stmt>__reduce__ self<block_start>dict_without_graph=self.__dict__.copy()<del_stmt>dict_without_graph['_graph']<line_sep><return>(deserialize_graphmodule (dict_without_graph ))<block_end># because __reduce__ is defined for serialization,
# we need to define deepcopy otherwise it will call __reduce__
# and cause symbolic tracing to occur every time we try to copy the object
<def_stmt>__deepcopy__ self memo<block_start>fake_mod=torch.nn.Module()<line_sep>fake_mod.__dict__=copy.deepcopy(self.__dict__)<line_sep><return>GraphModule(fake_mod self.graph)<block_end><def_stmt>__copy__ self<block_start><return>GraphModule(self self.graph)<block_end><def_stmt>__str__ self<arrow>str<block_start>orig_str=super().__str__()<line_sep><return>'\n'.join([orig_str self.code])<block_end><block_end># workarounds for issues in __torch_function__
# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
# tensors = args[0]
# for t in tensors:
# if isinstance(t, Proxy):
# return t.__torch_function__(patched_cat, (), args, kwargs)
# return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
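# Minimal usage sketch for the GraphModule defined above, assuming the
# companion symbolic_trace entry point from the same package. Reassigning
# .graph is what triggers the code/forward regeneration described in the class
# docstring, and copy.deepcopy goes through the __deepcopy__ defined above.
import copy
import torch
from torch.fx import symbolic_trace  # assumed public entry point

class SmallNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.relu(self.linear(x))

gm = symbolic_trace(SmallNet())   # returns a GraphModule
print(gm.code)                    # Python source generated from gm.graph
gm_copy = copy.deepcopy(gm)       # builds a new GraphModule via __deepcopy__
gm.graph = gm_copy.graph          # setter regenerates gm.code and gm.forward
out = gm(torch.randn(2, 4))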
|
<import_from_stmt>hover.utils.metrics classification_accuracy<import_stmt>numpy<as>np<def_stmt>test_classification_accuracy <block_start>true=np.array([1 2 3 4 5 6 7 7])<line_sep>pred=np.array([1 2 3 4 5 6 7 8])<line_sep>accl=classification_accuracy(true pred)<line_sep>accr=classification_accuracy(pred true)<assert_stmt>np.allclose(accl 7/8)<assert_stmt>np.allclose(accr 7/8)<block_end> |
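# From the assertions above, classification_accuracy appears to return the
# fraction of positions where the two label arrays agree; a hypothetical
# cross-check against plain numpy:
import numpy as np
from hover.utils.metrics import classification_accuracy

true_labels = np.array([1, 2, 3, 4, 5, 6, 7, 7])
pred_labels = np.array([1, 2, 3, 4, 5, 6, 7, 8])
assert np.allclose(classification_accuracy(true_labels, pred_labels),
                   np.mean(true_labels == pred_labels))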
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script tests arbitrary payload of the RackHD API 2.0 OS bootstrap workflows.
The default case is running a minimum payload Windows OS install.
Other Windows-type OS install cases can be specified by creating a payload file and specifying it using the '-extra' argument.
This test takes 30-45 minutes to run.
Example payload file (installed in configuration dir):
{"bootstrap-payload":
{"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "<KEY>",
"username": "rackhduser",
"password": "<PASSWORD>",
"smbUser": "vagrant",
"smbPassword": "<PASSWORD>"}}}
}
Example command line using external payload file:
python run_tests.py -stack 4 -test tests/bootstrap/test_api20_windows_bootstrap.py -extra base_windows_2012_install.json
RackHD Windows installation workflow requires special configuration of the RackHD server:
- A customized WinPE environment installed on RackHD server as documented here:
https://github.com/RackHD/on-tools/tree/master/winpe
- Samba installed on the RackHD server and configured as documented here:
http://rackhd.readthedocs.io/en/latest/rackhd/install_os.html?highlight=os%20install
- Windows 2012 installation distro installed on RackHD server or equivalent NFS mount.
- Windows 2012 activation key in the installation payload file.
'''<import_stmt>fit_path# NOQA: unused import
<import_from_stmt>nose.plugins.attrib attr<import_stmt>fit_common<import_stmt>flogging<import_stmt>random<import_stmt>json<import_stmt>time<import_from_stmt>nosedep depends<import_from_stmt>datetime datetime<line_sep>log=flogging.get_loggers()<line_sep># sample default base payload
PAYLOAD={"name":"Graph.InstallWindowsServer" "options":{"defaults":{"version":"2012" "repo":"http://172.31.128.1:8080/repo/winpe" "smbRepo":"\\\\172.31.128.1\\windowsServer2012" "productkey":"<KEY>" "username":"rackhduser" "password":"<PASSWORD>" "smbUser":"vagrant" "smbPassword":"<PASSWORD>"}}}<line_sep># if an external payload file is specified, use that
config=fit_common.fitcfg().get('bootstrap-payload' <none>)<if_stmt>config<block_start>PAYLOAD=config<block_end># function to return the value of a field from the workflow response
<def_stmt>findall obj key<block_start><if_stmt>isinstance(obj dict)<block_start><for_stmt>k,v obj.items()<block_start><if_stmt>k<eq>key<block_start>log.error(" workflow error: %s" v)<block_end>findall(v key)<block_end><block_end><elif_stmt>isinstance(obj list)<block_start><for_stmt>item obj<block_start>findall(item key)<block_end><block_end><else_stmt><block_start><pass><block_end><block_end># this routine polls a workflow task ID for completion
<def_stmt>wait_for_workflow_complete instanceid start_time waittime=3200 cycle=30<block_start>log.info_1(" Workflow started at time: "+str(datetime.fromtimestamp(start_time)))<while_stmt>time.time()-start_time<l>waittime# limit test to waittime seconds
<block_start>result=fit_common.rackhdapi("/api/2.0/workflows/"+instanceid)<if_stmt>result['status']<ne>200<block_start>log.error(" HTTP error: "+result['text'])<line_sep><return><false><block_end><if_stmt>result['json']['status']<in>['running' 'pending']<block_start>log.info_5("{} workflow status: {}".format(result['json']['injectableName'] result['json']['status']))<line_sep>fit_common.time.sleep(cycle)<block_end><elif_stmt>result['json']['status']<eq>'succeeded'<block_start>log.info_1("{} workflow status: {}".format(result['json']['injectableName'] result['json']['status']))<line_sep>end_time=time.time()<line_sep>log.info_1(" Workflow completed at time: "+str(datetime.fromtimestamp(end_time)))<line_sep>log.info_1(" Workflow duration: "+str(end_time-start_time))<line_sep><return><true><block_end><else_stmt><block_start>end_time=time.time()<line_sep>log.info_1(" Workflow failed at time: "+str(datetime.fromtimestamp(end_time)))<line_sep>log.info_1(" Workflow duration: "+str(end_time-start_time))<try_stmt><block_start>res=json.loads(result['text'])<line_sep>findall(res "error")<block_end><except_stmt><block_start>res=result['text']<block_end>log.error(" Workflow failed: status: %s" result['json']['status'])<line_sep>log.error(" Data: %s" json.dumps(res indent=4 separators=(',' ':')))<line_sep><return><false><block_end><block_end><try_stmt><block_start>res=json.loads(result['text'])<block_end><except_stmt><block_start>res=result['text']<block_end>log.error(" Workflow Timeout: "+json.dumps(res indent=4 separators=(',' ':')))<line_sep><return><false><block_end># ------------------------ Tests -------------------------------------
@attr(all=<false>)<class_stmt>api20_bootstrap_windows(fit_common.unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls# Get the list of nodes
<block_start>NODECATALOG=fit_common.node_select()<assert_stmt>(len(NODECATALOG)<ne>0) "There are no nodes currently discovered"<line_sep># Select one node at random
cls.__NODE=NODECATALOG[random.randint(0 len(NODECATALOG)-1)]<line_sep># Print node Id, node BMC mac ,node type
nodeinfo=fit_common.rackhdapi('/api/2.0/nodes/'+cls.__NODE)['json']<line_sep>nodesku=fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']<line_sep>monurl="/api/2.0/nodes/"+cls.__NODE+"/catalogs/bmc"<line_sep>mondata=fit_common.rackhdapi(monurl action="get")<line_sep>catalog=mondata['json']<line_sep>bmcresult=mondata['status']<if_stmt>bmcresult<ne>200<block_start>log.info_1(" Node ID: "+cls.__NODE)<line_sep>log.info_1(" Error on catalog/bmc command")<block_end><else_stmt><block_start>log.info_1(" Node ID: "+cls.__NODE)<line_sep>log.info_1(" Node SKU: "+nodesku)<line_sep>log.info_1(" Node BMC Mac: %s" catalog.get('data')['MAC Address'])<line_sep>log.info_1(" Node BMC IP Addr: %s" catalog.get('data')['IP Address'])<line_sep>log.info_1(" Node BMC IP Addr Src: %s" catalog.get('data')['IP Address Source'])<block_end># delete active workflows for specified node
result=fit_common.cancel_active_workflows(cls.__NODE)<assert_stmt>(result<is><true>) "There are still some active workflows running against the node"<block_end><def_stmt>test01_node_check self# Log node data
<block_start>nodeinfo=fit_common.rackhdapi('/api/2.0/nodes/'+self.__class__.__NODE)['json']<line_sep>nodesku=fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']<line_sep>log.info_1(" Node ID: %s " self.__class__.__NODE)<line_sep>log.info_1(" Node SKU: %s " nodesku)<line_sep>log.info_1(" Graph Name: Graph.PowerOn.Node")<line_sep># Ensure the compute node is powered on and reachable
result=fit_common.rackhdapi('/api/2.0/nodes/'+self.__class__.__NODE+'/workflows' action='post' payload={"name":"Graph.PowerOn.Node"})<line_sep>self.assertEqual(result['status'] 201 "Node Power on workflow API failed, see logs.")<line_sep>self.assertTrue(wait_for_workflow_complete(result['json']['instanceId'] time.time() 50 5) "Node Power on workflow failed, see logs.")<block_end>@depends(after=test01_node_check)<def_stmt>test02_os_install self# Log node data
<block_start>nodeinfo=fit_common.rackhdapi('/api/2.0/nodes/'+self.__class__.__NODE)['json']<line_sep>nodesku=fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']<line_sep>log.info_1(" Node ID: "+self.__class__.__NODE)<line_sep>log.info_1(" Node SKU: "+nodesku)<line_sep>log.info_1(" Graph Name: Graph.InstallWindowsServer")<line_sep>log.info_1(" Payload: "+fit_common.json.dumps(PAYLOAD))<line_sep># launch workflow
workflowid=<none><line_sep>result=fit_common.rackhdapi('/api/2.0/nodes/'+self.__class__.__NODE+'/workflows' action='post' payload=PAYLOAD)<if_stmt>result['status']<eq>201# workflow running
<block_start>log.info_1(" InstanceID: "+result['json']['instanceId'])<line_sep>workflowid=result['json']['instanceId']<block_end><else_stmt># workflow failed with response code
<block_start>log.error(" InstanceID: "+result['text'])<line_sep>self.fail("Workflow failed with response code: "+result['status'])<block_end>self.assertTrue(wait_for_workflow_complete(workflowid time.time()) "OS Install workflow failed, see logs.")<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>fit_common.unittest.main()<block_end> |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>numpy<as>np<import_from_stmt>tensorflow.core.framework graph_pb2<import_from_stmt>tensorflow.python.client session<import_from_stmt>tensorflow.python.framework dtypes<import_from_stmt>tensorflow.python.framework graph_util<import_from_stmt>tensorflow.python.framework importer<import_from_stmt>tensorflow.python.framework ops<as>ops_lib<import_from_stmt>tensorflow.python.platform flags<as>flags_lib<import_from_stmt>tensorflow.python.platform test<import_from_stmt>tensorflow.python.platform tf_logging<import_from_stmt>tensorflow.tools.quantization quantize_graph<line_sep>flags=flags_lib<line_sep>FLAGS=flags.FLAGS<def_stmt>run_graph_def graph_def input_map outputs<block_start>graph=ops_lib.Graph()<with_stmt>graph.as_default()<block_start>importer.import_graph_def(graph_def input_map={} name="")<block_end><with_stmt>session.Session(graph=graph)<as>sess<block_start>results=sess.run(outputs feed_dict=input_map)<block_end><return>results<block_end><def_stmt>test_mat_mul m n k a b<block_start>"""Tests a MatMul replacement."""<line_sep>a_constant_name="a_constant"<line_sep>b_constant_name="b_constant"<line_sep>mat_mul_name="mat_mul"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>a_constant=quantize_graph.create_constant_node(a_constant_name value=a dtype=dtypes.float32 shape=[m k])<line_sep>float_graph_def.node.extend([a_constant])<line_sep>b_constant=quantize_graph.create_constant_node(b_constant_name value=b dtype=dtypes.float32 shape=[k n])<line_sep>float_graph_def.node.extend([b_constant])<line_sep>mat_mul_node=quantize_graph.create_node("MatMul" mat_mul_name [a_constant_name b_constant_name])<line_sep>quantize_graph.set_attr_dtype(mat_mul_node "T" dtypes.float32)<line_sep>quantize_graph.set_attr_bool(mat_mul_node "transpose_a" <false>)<line_sep>quantize_graph.set_attr_bool(mat_mul_node "transpose_b" <false>)<line_sep>float_graph_def.node.extend([mat_mul_node])<line_sep>test_graph(float_graph_def {} [mat_mul_name])<block_end><def_stmt>test_conv depth image_width image_height image_batch_count filter_size filter_count stride padding input_values filter_values<block_start>"""Tests a Conv replacement."""<line_sep>input_constant_name="input_constant"<line_sep>filter_constant_name="filter_constant"<line_sep>conv_name="conv"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=input_values dtype=dtypes.float32 shape=[image_batch_count image_height image_width depth])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>filter_constant=quantize_graph.create_constant_node(filter_constant_name value=filter_values dtype=dtypes.float32 shape=[filter_size filter_size depth filter_count])<line_sep>float_graph_def.node.extend([filter_constant])<line_sep>conv_node=quantize_graph.create_node("Conv2D" conv_name [input_constant_name filter_constant_name])<line_sep>quantize_graph.set_attr_dtype(conv_node "T" dtypes.float32)<line_sep>quantize_graph.set_attr_int_list(conv_node "strides" [1 stride stride 1])<line_sep>quantize_graph.set_attr_string(conv_node "padding" padding)<line_sep>float_graph_def.node.extend([conv_node])<line_sep>test_graph(float_graph_def {} [conv_name])<block_end><def_stmt>are_tensors_near a b tolerance<block_start>"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""<line_sep>flat_a=a.flatten()<line_sep>flat_b=b.flatten()<if_stmt>len(flat_a)<ne>len(flat_b)<block_start>tf_logging.info("Tensors are different sizes: "+str(len(flat_a))+" vs "+str(len(flat_b)))<line_sep><return><false><block_end>value_count=len(flat_a)<line_sep>how_many_different=0<line_sep>total_difference=0<line_sep>total_abs_difference=0<for_stmt>index range(value_count)<block_start>a_value=flat_a[index]<line_sep>b_value=flat_b[index]<line_sep>difference=a_value-b_value<line_sep>total_difference<augadd>difference<line_sep>total_abs_difference<augadd>abs(difference)<if_stmt>abs(difference)<g>tolerance<block_start>how_many_different<augadd>1<block_end><block_end>mean_difference=total_difference/value_count<line_sep>mean_abs_difference=total_abs_difference/value_count<line_sep>proportion_different=(how_many_different<times>1.0)/value_count<if_stmt>how_many_different<eq>0<block_start><return><true><block_end><else_stmt><block_start>tf_logging.info("Tensors have {0} different values ({1}%), with mean"<concat>" difference {2} and mean absolute difference {3}".format(how_many_different proportion_different<times>100 mean_difference mean_abs_difference))<line_sep><return><false><block_end><block_end><def_stmt>get_top_value input_values<block_start>max_value=<none><line_sep>max_index=<none><for_stmt>index,value enumerate(input_values.flatten())<block_start><if_stmt>max_value<is><none><or>value<g>max<block_start>max_value=value<line_sep>max_index=index<block_end><block_end><return>max_index max_value<block_end><def_stmt>test_graph float_graph_def input_map output_names log_graph=<false><block_start>"""Runs the float graph through the rewriter and tests the results."""<line_sep>float_results=run_graph_def(float_graph_def input_map [output_name+":0"<for>output_name output_names])<line_sep># TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" quantized_input_range=<none>)<line_sep>eightbit_graph_def=eightbit_rewriter.rewrite(output_names)<line_sep>eightbit_results=run_graph_def(eightbit_graph_def input_map [output_name+":0"<for>output_name output_names])<for_stmt>expected,result zip(float_results eightbit_results)<block_start><assert_stmt>are_tensors_near(expected result 1.0)<block_end><if_stmt>log_graph<block_start>tf_logging.info("8bit:\n%s" str(eightbit_graph_def))<block_end># Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter=quantize_graph.GraphRewriter(float_graph_def "weights_rounded" quantized_input_range=<none>)<line_sep>weights_rounded_graph_def=weights_rounded_rewriter.rewrite(output_names)<line_sep>weights_rounded_results=run_graph_def(weights_rounded_graph_def input_map [output_name+":0"<for>output_name output_names])<for_stmt>expected,result zip(float_results weights_rounded_results)<block_start><assert_stmt>are_tensors_near(expected result 1.0)<block_end><block_end><class_stmt>QuantizeGraphTest(test.TestCase)<block_start><def_stmt>test_negative_const_problem self<block_start>shape_constant_name="shape_constant"<line_sep>shape_constant=quantize_graph.create_constant_node(shape_constant_name value=-0.8 dtype=dtypes.float32 shape=[1])<line_sep>quantization_result=quantize_graph.quantize_weight_eightbit(shape_constant b"MIN_COMBINED")<line_sep>self.assertEqual(4 len(quantization_result))<block_end><def_stmt>test_odd_padding_problem self<block_start>"""Tests one error case we ran into in a real graph."""<line_sep>test_conv(1 4 4 1 3 1 2 b"SAME" [1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16] [1 2 3 4 5 6 7 8 9])<block_end><def_stmt>test_mat_mul_tiny self# These tests are added to test the generate case where
# min(matrix) == max(matrix), which used to cause problems.
<block_start>test_mat_mul(1 1 1 [2] [3])<line_sep>test_mat_mul(1 2 1 [1] [2 3])<line_sep>test_mat_mul(1 1 2 [1 1] [1 1])<line_sep>test_mat_mul(1 1 2 [0 0] [1 1])<line_sep># The general case.
test_mat_mul(1 1 2 [1 2] [1 2])<block_end><def_stmt>test_mat_mul_small self<block_start>test_mat_mul(2 4 3 [1 2 3 4 5 6] [7 8 9 10 11 12 13 14 15 16 17 18])<block_end><def_stmt>test_conv self<block_start>test_conv(1 4 3 1 3 1 1 b"SAME" [1 2 3 4 5 6 7 8 9 10 11 12] [1 4 7 2 5 8 3 6 9])<block_end><def_stmt>test_reshape self<block_start>"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""<def_stmt>make_matmul name a b<block_start>n=quantize_graph.create_node("MatMul" name [a.name b.name])<line_sep>quantize_graph.set_attr_dtype(n "T" dtypes.float32)<line_sep>quantize_graph.set_attr_bool(n "transpose_a" <false>)<line_sep>quantize_graph.set_attr_bool(n "transpose_b" <false>)<line_sep><return>n<block_end># matmul_1 = input*weight_1
input_node=quantize_graph.create_constant_node("input" value=[0 1 2 3] dtype=dtypes.float32 shape=[4 1])<line_sep>weight_1_node=quantize_graph.create_constant_node("weight_1" value=[.5 .6 .7 .8 .9] dtype=dtypes.float32 shape=[1 5])<line_sep>matmul_1_node=make_matmul("matmul_1" input_node weight_1_node)<line_sep># Reshape 4x5 to 10x2.
new_shape_node=quantize_graph.create_constant_node("new_shape_node" value=[10 2] dtype=dtypes.int32 shape=[2])<line_sep>reshape_node=quantize_graph.create_node("Reshape" "reshape" [matmul_1_node.name new_shape_node.name])<line_sep>quantize_graph.set_attr_dtype(reshape_node "T" dtypes.float32)<line_sep># matmul_2_node = reshape*weight_2
weight_2_node=quantize_graph.create_constant_node("weight_2" value=[1.5 2.5] dtype=dtypes.float32 shape=[2 1])<line_sep>matmul_2_node=make_matmul("matmul_2" reshape_node weight_2_node)<line_sep>g=graph_pb2.GraphDef()<line_sep>g.node.extend([input_node weight_1_node matmul_1_node new_shape_node reshape_node weight_2_node matmul_2_node])<line_sep># Test the graph
test_graph(g {} ["matmul_2"])<line_sep># Verify there is only one Quantize and one Requantize op.
eightbit_rewriter=quantize_graph.GraphRewriter(g "eightbit" quantized_input_range=<none>)<line_sep>eightbit_graph_def=eightbit_rewriter.rewrite(["matmul_2"])<line_sep>ops=[node.op<for>node eightbit_graph_def.node]<line_sep># No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0 ops.count("QuantizeV2")+ops.count("Quantize"))<line_sep>self.assertEqual(1 ops.count("QuantizedReshape"))<line_sep># One dequantize at the end.
self.assertEqual(1 ops.count("Dequantize"))<block_end><def_stmt>test_quantize_array self# Test invalid parameters (empty array, or 0 buckets.
<block_start>self.assertRaises(ValueError quantize_graph.quantize_array np.array([]) 2)<line_sep>self.assertRaises(ValueError quantize_graph.quantize_array np.array([1 2]) 0)<line_sep># Test input array of length 1.
arr=np.array([1])<line_sep>qarr=quantize_graph.quantize_array(arr 1)<line_sep>self.assertEqual(arr qarr)<line_sep>qarr=quantize_graph.quantize_array(arr 2)<line_sep>self.assertEqual(arr qarr)<line_sep># Test input array with all elements equal.
arr=np.array([1 1 1])<line_sep>qarr=quantize_graph.quantize_array(arr 10)<line_sep>self.assertTrue((np.array([1 1 1])<eq>qarr).all())<line_sep># Test "normal" input arrays.
arr=np.array([0 0.3 0.6 1])<line_sep>qarr=quantize_graph.quantize_array(arr 1)<line_sep>self.assertTrue((np.array([0.5 0.5 0.5 0.5])<eq>qarr).all())<line_sep>qarr=quantize_graph.quantize_array(arr 2)<line_sep>self.assertTrue((np.array([0.25 0.25 0.75 0.75])<eq>qarr).all())<line_sep>qarr=quantize_graph.quantize_array(arr.reshape((2 2)) 2)<line_sep>self.assertTrue((np.array([[0.25 0.25] [0.75 0.75]])<eq>qarr).all())<block_end><def_stmt>test_non_float_concat self<block_start>concat_dim=quantize_graph.create_constant_node("concat_dim" value=0 dtype=dtypes.int32 shape=[])<line_sep>a=quantize_graph.create_constant_node("a" value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.int32 shape=[2 2 3])<line_sep>b=quantize_graph.create_constant_node("b" value=[13 14 15 16 17 18 19 20 21 22 23 24] dtype=dtypes.int32 shape=[2 2 3])<line_sep>concat=quantize_graph.create_node("Concat" "concat" [concat_dim.name a.name b.name])<line_sep>quantize_graph.set_attr_int(concat "N" 2)<line_sep>quantize_graph.set_attr_dtype(concat "T" dtypes.int32)<line_sep>g=graph_pb2.GraphDef()<line_sep>g.node.extend([concat_dim a b concat])<line_sep>test_graph(g {} [concat.name])<block_end><def_stmt>test_non_float_reshape self<block_start>a=quantize_graph.create_constant_node("a" value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.int32 shape=[2 2 3])<line_sep>shape=quantize_graph.create_constant_node("shape" value=[12] dtype=dtypes.int32 shape=[1])<line_sep>reshape=quantize_graph.create_node("Reshape" "reshape" [a.name shape.name])<line_sep>quantize_graph.set_attr_dtype(reshape "T" dtypes.int32)<line_sep>g=graph_pb2.GraphDef()<line_sep>g.node.extend([a shape reshape])<line_sep>test_graph(g {} [reshape.name])<block_end><def_stmt>test_concat self<block_start>shape_constant_name="shape_constant"<line_sep>a_constant_name="a_constant"<line_sep>b_constant_name="b_constant"<line_sep>concat_name="concat"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>shape_constant=quantize_graph.create_constant_node(shape_constant_name value=0 dtype=dtypes.int32 shape=[])<line_sep>float_graph_def.node.extend([shape_constant])<line_sep>a_constant=quantize_graph.create_constant_node(a_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[2 2 3])<line_sep>float_graph_def.node.extend([a_constant])<line_sep>b_constant=quantize_graph.create_constant_node(b_constant_name value=[13 14 15 16 17 18 19 20 21 22 23 24] dtype=dtypes.float32 shape=[2 2 3])<line_sep>float_graph_def.node.extend([b_constant])<line_sep>concat_node=quantize_graph.create_node("Concat" concat_name [shape_constant_name a_constant_name b_constant_name])<line_sep>quantize_graph.set_attr_int(concat_node "N" 2)<line_sep>quantize_graph.set_attr_dtype(concat_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([concat_node])<line_sep>test_graph(float_graph_def {} [concat_name])<line_sep># Verify the concat is quantized.
eightbit_rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" quantized_input_range=<none>)<line_sep>eightbit_graph_def=eightbit_rewriter.rewrite([concat_name])<line_sep>ops=[node.op<for>node eightbit_graph_def.node]<line_sep>self.assertEqual(1 ops.count("QuantizedConcat"))<block_end><def_stmt>test_multiple_outputs self<block_start>input_constant_name="input_constant"<line_sep>split_constant_name="split_constant"<line_sep>split_name="split"<line_sep>concat_constant_name="concat_constant"<line_sep>concat_name="concat"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[2 6])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>split_constant=quantize_graph.create_constant_node(split_constant_name value=1 dtype=dtypes.int32 shape=[])<line_sep>float_graph_def.node.extend([split_constant])<line_sep>split_node=quantize_graph.create_node("Split" split_name [split_constant_name input_constant_name])<line_sep>quantize_graph.set_attr_int(split_node "num_split" 2)<line_sep>quantize_graph.set_attr_dtype(split_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([split_node])<line_sep>concat_constant=quantize_graph.create_constant_node(concat_constant_name value=1 dtype=dtypes.int32 shape=[])<line_sep>float_graph_def.node.extend([concat_constant])<line_sep>concat_node=quantize_graph.create_node("Concat" concat_name [concat_constant_name split_name+":0" split_name+":1"])<line_sep>quantize_graph.set_attr_int(concat_node "N" 2)<line_sep>quantize_graph.set_attr_dtype(concat_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([concat_node])<line_sep>test_graph(float_graph_def {} [concat_name])<block_end><def_stmt>test_node_name_from_input self<block_start>self.assertEqual("SomeName" quantize_graph.node_name_from_input("^SomeName:2"))<block_end><def_stmt>test_unique_node_name_from_input self<block_start>self.assertEqual("__hat__SomeName__port__2" quantize_graph.unique_node_name_from_input("^SomeName:2"))<block_end><def_stmt>test_identity self<block_start>input_constant_name="input_constant"<line_sep>identity_name="identity"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[2 6])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>identity_node=quantize_graph.create_node("Identity" identity_name [input_constant_name])<line_sep>quantize_graph.set_attr_dtype(identity_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([identity_node])<line_sep>mul_name="mul"<line_sep>mul_node=quantize_graph.create_node("Mul" mul_name [identity_name identity_name])<line_sep>quantize_graph.set_attr_dtype(mul_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([mul_node])<line_sep>test_graph(float_graph_def {} [mul_name])<block_end><def_stmt>test_keep_control_edges self<block_start>no_op_name="no_op"<line_sep>a_constant_name="a_constant"<line_sep>b_constant_name="b_constant"<line_sep>a_check_name="a_check"<line_sep>b_check_name="b_check"<line_sep>a_identity_name="a_identity"<line_sep>b_identity_name="b_identity"<line_sep>add_name="add"<line_sep>graph_def=graph_pb2.GraphDef()<line_sep>no_op=quantize_graph.create_node("NoOp" no_op_name [])<line_sep>graph_def.node.extend([no_op])<line_sep>a_constant=quantize_graph.create_constant_node(a_constant_name value=1 dtype=dtypes.float32 
shape=[])<line_sep>graph_def.node.extend([a_constant])<line_sep>a_check_node=quantize_graph.create_node("CheckNumerics" a_check_name [a_constant_name])<line_sep>graph_def.node.extend([a_check_node])<line_sep>a_identity_node=quantize_graph.create_node("Identity" a_identity_name [a_constant_name "^"+a_check_name "^"+no_op_name])<line_sep>graph_def.node.extend([a_identity_node])<line_sep>b_constant=quantize_graph.create_constant_node(b_constant_name value=1 dtype=dtypes.float32 shape=[])<line_sep>graph_def.node.extend([b_constant])<line_sep>b_check_node=quantize_graph.create_node("CheckNumerics" b_check_name [b_constant_name])<line_sep>graph_def.node.extend([b_check_node])<line_sep>b_identity_node=quantize_graph.create_node("Identity" b_identity_name [b_constant_name "^"+b_check_name])<line_sep>graph_def.node.extend([b_identity_node])<line_sep>add_node=quantize_graph.create_node("Add" add_name [a_identity_name b_identity_name])<line_sep>quantize_graph.set_attr_dtype(add_node "T" dtypes.float32)<line_sep>graph_def.node.extend([add_node])<line_sep>expected_output=graph_pb2.GraphDef()<line_sep>no_op=quantize_graph.create_node("NoOp" no_op_name [])<line_sep>expected_output.node.extend([no_op])<line_sep>a_constant=quantize_graph.create_constant_node(a_constant_name value=1 dtype=dtypes.float32 shape=[])<line_sep>expected_output.node.extend([a_constant])<line_sep>a_identity_node=quantize_graph.create_node("Identity" a_identity_name [a_constant_name "^"+no_op_name])<line_sep>expected_output.node.extend([a_identity_node])<line_sep>b_constant=quantize_graph.create_constant_node(b_constant_name value=1 dtype=dtypes.float32 shape=[])<line_sep>expected_output.node.extend([b_constant])<line_sep>add_node=quantize_graph.create_node("Add" add_name [a_identity_name b_constant_name])<line_sep>quantize_graph.set_attr_dtype(add_node "T" dtypes.float32)<line_sep>expected_output.node.extend([add_node])<line_sep>expected_output.versions.CopyFrom(graph_def.versions)<line_sep>expected_output.library.CopyFrom(graph_def.library)<line_sep>output=graph_util.remove_training_nodes(graph_def)<line_sep>stripped_output=graph_util.extract_sub_graph(output [add_name])<line_sep>self.assertProtoEquals(expected_output stripped_output)<block_end><def_stmt>test_batch_norm self<block_start>input_constant_name="input_constant"<line_sep>mean_constant_name="mean_constant"<line_sep>variance_constant_name="variance_constant"<line_sep>beta_constant_name="beta_constant"<line_sep>gamma_constant_name="gamma_constant"<line_sep>batch_norm_name="batch_norm"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 4 2 5 3 6 -1 -4 -2 -5 -3 -6] dtype=dtypes.float32 shape=[1 1 6 2])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>mean_constant=quantize_graph.create_constant_node(mean_constant_name value=[10 20] dtype=dtypes.float32 shape=[2])<line_sep>float_graph_def.node.extend([mean_constant])<line_sep>variance_constant=quantize_graph.create_constant_node(variance_constant_name value=[0.25 0.5] dtype=dtypes.float32 shape=[2])<line_sep>float_graph_def.node.extend([variance_constant])<line_sep>beta_constant=quantize_graph.create_constant_node(beta_constant_name value=[0.1 0.6] dtype=dtypes.float32 shape=[2])<line_sep>float_graph_def.node.extend([beta_constant])<line_sep>gamma_constant=quantize_graph.create_constant_node(gamma_constant_name value=[0 0] dtype=dtypes.float32 
shape=[2])<line_sep>float_graph_def.node.extend([gamma_constant])<line_sep>batch_norm_node=quantize_graph.create_node("BatchNormWithGlobalNormalization" batch_norm_name [input_constant_name mean_constant_name variance_constant_name beta_constant_name gamma_constant_name])<line_sep>quantize_graph.set_attr_dtype(batch_norm_node "T" dtypes.float32)<line_sep>quantize_graph.set_attr_bool(batch_norm_node "scale_after_normalization" <false>)<line_sep>quantize_graph.set_attr_float(batch_norm_node "variance_epsilon" 0.001)<line_sep>float_graph_def.node.extend([batch_norm_node])<line_sep>test_graph(float_graph_def {} [batch_norm_name])<block_end><def_stmt>test_max_pool self<block_start>input_constant_name="input_constant"<line_sep>max_pool_name="max_pool"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[1 2 6 1])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>max_pool_node=quantize_graph.create_node("MaxPool" max_pool_name [input_constant_name])<line_sep>quantize_graph.set_attr_int_list(max_pool_node "ksize" [1 2 2 1])<line_sep>quantize_graph.set_attr_int_list(max_pool_node "strides" [1 1 1 1])<line_sep>quantize_graph.set_attr_string(max_pool_node "padding" b"SAME")<line_sep>float_graph_def.node.extend([max_pool_node])<line_sep>test_graph(float_graph_def {} [max_pool_name])<block_end><def_stmt>test_avg_pool self<block_start>input_constant_name="input_constant"<line_sep>avg_pool_name="avg_pool"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[1 2 6 1])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>avg_pool_node=quantize_graph.create_node("AvgPool" avg_pool_name [input_constant_name])<line_sep>quantize_graph.set_attr_dtype(avg_pool_node "T" dtypes.float32)<line_sep>quantize_graph.set_attr_int_list(avg_pool_node "ksize" [1 2 2 1])<line_sep>quantize_graph.set_attr_int_list(avg_pool_node "strides" [1 1 1 1])<line_sep>quantize_graph.set_attr_string(avg_pool_node "padding" b"SAME")<line_sep>float_graph_def.node.extend([avg_pool_node])<line_sep>test_graph(float_graph_def {} [avg_pool_name])<block_end><def_stmt>test_relu self<block_start>input_constant_name="input_constant"<line_sep>relu_name="relu"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[1 2 6 1])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>relu_node=quantize_graph.create_node("Relu" relu_name [input_constant_name])<line_sep>quantize_graph.set_attr_dtype(relu_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([relu_node])<line_sep>test_graph(float_graph_def {} [relu_name])<block_end><def_stmt>test_relu_w_fake_quant_w_min_max_vars self<block_start>input_node=quantize_graph.create_constant_node("input" value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[1 2 6 1])<line_sep>relu_node=quantize_graph.create_node("Relu" "relu" [input_node.name])<line_sep>quantize_graph.set_attr_dtype(relu_node "T" dtypes.float32)<line_sep>min_node=quantize_graph.create_constant_node("min_bias_add" value=0 dtype=dtypes.float32 shape=[])<line_sep>max_node=quantize_graph.create_constant_node("max_bias_add" value=12 dtype=dtypes.float32 
shape=[])<line_sep>fake_quant_node=quantize_graph.create_node("FakeQuantWithMinMaxVars" "fake_quant" [relu_node.name min_node.name max_node.name])<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>float_graph_def.node.extend([input_node relu_node min_node max_node fake_quant_node])<line_sep>test_graph(float_graph_def {} [fake_quant_node.name] log_graph=<true>)<line_sep># Verify there is only one Quantize and one Requantize op.
eightbit_rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" quantized_input_range=<none>)<line_sep>eightbit_graph_def=eightbit_rewriter.rewrite([fake_quant_node.name])<line_sep>ops=[node.op<for>node eightbit_graph_def.node]<line_sep># No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0 ops.count("QuantizeV2")+ops.count("Quantize"))<line_sep># One dequantize at the end.
self.assertEqual(1 ops.count("Dequantize"))<block_end><def_stmt>test_relu6 self<block_start>input_constant_name="input_constant"<line_sep>relu6_name="relu6"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[1 2 6 1])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>relu6_node=quantize_graph.create_node("Relu6" relu6_name [input_constant_name])<line_sep>quantize_graph.set_attr_dtype(relu6_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([relu6_node])<line_sep>test_graph(float_graph_def {} [relu6_name])<block_end><def_stmt>test_bias_add self<block_start>input_constant_name="input_constant"<line_sep>offset_constant_name="offset_constant"<line_sep>bias_add_name="bias_add"<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>input_constant=quantize_graph.create_constant_node(input_constant_name value=[1 2 3 4 5 6 7 8 9 10 11 12] dtype=dtypes.float32 shape=[1 1 2 6])<line_sep>float_graph_def.node.extend([input_constant])<line_sep>offset_constant=quantize_graph.create_constant_node(offset_constant_name value=[1 2 3 4 5 6] dtype=dtypes.float32 shape=[6])<line_sep>float_graph_def.node.extend([offset_constant])<line_sep>bias_add_node=quantize_graph.create_node("BiasAdd" bias_add_name [input_constant_name offset_constant_name])<line_sep>quantize_graph.set_attr_dtype(bias_add_node "T" dtypes.float32)<line_sep>float_graph_def.node.extend([bias_add_node])<line_sep>test_graph(float_graph_def {} [bias_add_name])<block_end><def_stmt>test_quantized_input_range_errors self<block_start><with_stmt>self.assertRaises(ValueError)# Invalid mode.
<block_start>quantize_graph.GraphRewriter(graph_pb2.GraphDef() "weights_rounded" [0 1])<block_end><with_stmt>self.assertRaises(ValueError)# Invalid range.
<block_start>quantize_graph.GraphRewriter(graph_pb2.GraphDef() "eightbit" [0 -1])<block_end><block_end><def_stmt>test_quantized_input_range_bias_add self<block_start>input_shape=[1 1 2 6]<line_sep>input_n=quantize_graph.create_node("Placeholder" "input" [])<line_sep>quantize_graph.set_attr_dtype(input_n "dtype" dtypes.float32)<line_sep>quantize_graph.set_attr_shape(input_n "shape" input_shape)<line_sep>offset_n=quantize_graph.create_constant_node("offset" value=[1 2 3 4 5 6] dtype=dtypes.float32 shape=[6])<line_sep>bias_add_n=quantize_graph.create_node("BiasAdd" "bias_add" [input_n.name offset_n.name])<line_sep>quantize_graph.set_attr_dtype(bias_add_n "T" dtypes.float32)<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>float_graph_def.node.extend([input_n offset_n bias_add_n])<line_sep>input_map={input_n.name+":0":np.reshape([1 2 3 4 5 6 7 8 9 10 11 12] input_shape)}<line_sep>self._RunTestsForQuantizedInputRange(float_graph_def input_map [bias_add_n.name] [-1 20.])<line_sep>self._RunTestsForQuantizedInputRange(float_graph_def input_map [bias_add_n.name] [0 12.])<block_end><def_stmt>test_quantized_input_range_mat_mul self<block_start>shapes=[[3 2] [2 4]]<line_sep>inputs=[]<for_stmt>i,shape enumerate(shapes)<block_start>node=quantize_graph.create_node("Placeholder" "input_%s"%i [])<line_sep>quantize_graph.set_attr_dtype(node "dtype" dtypes.float32)<line_sep>quantize_graph.set_attr_shape(node "shape" shape)<line_sep>inputs.append(node)<block_end>mat_mul_node=quantize_graph.create_node("MatMul" "mat_mul" [n.name<for>n inputs])<line_sep>quantize_graph.set_attr_dtype(mat_mul_node "T" dtypes.float32)<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>float_graph_def.node.extend(inputs+[mat_mul_node])<line_sep>input_map={inputs[0].name+":0":np.reshape([1 2 3 4 5 6] shapes[0]) inputs[1].name+":0":np.reshape([.8 .7 .6 .5 .4 .3 .2 .1] shapes[1])}<line_sep>self._RunTestsForQuantizedInputRange(float_graph_def input_map [mat_mul_node.name] [-1 20.])<line_sep>self._RunTestsForQuantizedInputRange(float_graph_def input_map [mat_mul_node.name] [0 6.])<block_end><def_stmt>_RunTestsForQuantizedInputRange self float_graph_def input_map output_names input_range<block_start><if_stmt>sys.version_info[0]<eq>3# uint8->quint8 conversion for numpy is not working currently.
<block_start><return><block_end>quantized_input_map={}<for_stmt>k,v input_map.items()<block_start>arr=[int(round((n-input_range[0])<times>255/(input_range[1]-input_range[0])))<for>n v.flat]<line_sep>arr=np.array(arr np.uint8)<line_sep>arr=arr.reshape(v.shape)<line_sep>arr=arr.astype(dtypes.quint8.as_numpy_dtype)<line_sep>quantized_input_map[k]=arr<block_end>output_tensors=[output_name+":0"<for>output_name output_names]<line_sep>float_results=run_graph_def(float_graph_def input_map output_tensors)<line_sep># Quantize treating the input as quantized in range <input_range>.
rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" input_range)<line_sep>graph_def=rewriter.rewrite(output_names)<line_sep>results=run_graph_def(graph_def quantized_input_map output_tensors)<for_stmt>expected,result zip(float_results results)<block_start><assert_stmt>are_tensors_near(expected result .5)<block_end>ops=[node.op<for>node graph_def.node]<line_sep>self.assertEqual(0 ops.count("QuantizeV2")+ops.count("Quantize"))<line_sep>self.assertEqual(len(output_names) ops.count("Dequantize"))<line_sep># Quantize without treating input as quantized.
rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" quantized_input_range=<none>)<line_sep>graph_def=rewriter.rewrite(output_names)<line_sep>results=run_graph_def(graph_def input_map output_tensors)<for_stmt>expected,result zip(float_results results)<block_start><assert_stmt>are_tensors_near(expected result .5)<block_end>ops=[node.op<for>node graph_def.node]<line_sep>self.assertEqual(len(input_map) ops.count("QuantizeV2")+ops.count("Quantize"))<line_sep>self.assertEqual(len(output_names) ops.count("Dequantize"))<block_end><def_stmt>test_bias_add_w_fake_quant_w_min_max_vars self<block_start>input_node=quantize_graph.create_constant_node("input" value=[1 2 3 4 5 6 7 8 9 10] dtype=dtypes.float32 shape=[1 1 2 5])<line_sep>offset_node=quantize_graph.create_constant_node("offset" value=[1 2 3 4 5] dtype=dtypes.float32 shape=[5])<line_sep>bias_add_node=quantize_graph.create_node("BiasAdd" "bias_add" [input_node.name offset_node.name])<line_sep>quantize_graph.set_attr_dtype(bias_add_node "T" dtypes.float32)<line_sep>min_node=quantize_graph.create_constant_node("min_bias_add" value=-.5 dtype=dtypes.float32 shape=[])<line_sep>max_node=quantize_graph.create_constant_node("max_bias_add" value=15.5 dtype=dtypes.float32 shape=[])<line_sep>fake_quant_node=quantize_graph.create_node("FakeQuantWithMinMaxVars" "fake_quant" [bias_add_node.name min_node.name max_node.name])<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>float_graph_def.node.extend([input_node offset_node bias_add_node min_node max_node fake_quant_node])<line_sep>test_graph(float_graph_def {} [fake_quant_node.name] log_graph=<true>)<line_sep># Verify there is only one Quantize and one Requantize op.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" quantized_input_range=<none> fallback_quantization_range=[-100 100])<line_sep>eightbit_graph_def=eightbit_rewriter.rewrite([fake_quant_node.name])<line_sep>ops=[node.op<for>node eightbit_graph_def.node]<line_sep>node_names=[node.name<for>node eightbit_graph_def.node]<line_sep># No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0 ops.count("QuantizeV2")+ops.count("Quantize"))<line_sep># One dequantize at the end.
self.assertEqual(1 ops.count("Dequantize"))<line_sep># The fallback constants are not in the graph.
self.assertEqual(0 node_names.count("fallback_quantization_min_value"))<line_sep>self.assertEqual(0 node_names.count("fallback_quantization_max_value"))<block_end><def_stmt>test_bias_add_w_fallback_min_max_vars self<block_start>input_node=quantize_graph.create_constant_node("input" value=[1 2 3 4 5 6 7 8 9 10] dtype=dtypes.float32 shape=[1 1 2 5])<line_sep>offset_node=quantize_graph.create_constant_node("offset" value=[1 2 3 4 5] dtype=dtypes.float32 shape=[5])<line_sep>bias_add_node=quantize_graph.create_node("BiasAdd" "bias_add" [input_node.name offset_node.name])<line_sep>quantize_graph.set_attr_dtype(bias_add_node "T" dtypes.float32)<line_sep>float_graph_def=graph_pb2.GraphDef()<line_sep>float_graph_def.node.extend([input_node offset_node bias_add_node])<line_sep>test_graph(float_graph_def {} [bias_add_node.name] log_graph=<true>)<line_sep># Verify there is only one Quantize, one Requantize op, and no
# RequantizationRange op.
eightbit_rewriter=quantize_graph.GraphRewriter(float_graph_def "eightbit" quantized_input_range=<none> fallback_quantization_range=[-.5 15.5])<line_sep>eightbit_graph_def=eightbit_rewriter.rewrite([bias_add_node.name])<line_sep>ops=[node.op<for>node eightbit_graph_def.node]<line_sep>node_names=[node.name<for>node eightbit_graph_def.node]<line_sep># No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0 ops.count("QuantizeV2")+ops.count("Quantize"))<line_sep># One dequantize at the end.
self.assertEqual(1 ops.count("Dequantize"))<line_sep># No RequantizationRange
self.assertEqual(0 ops.count("RequantizationRange"))<line_sep># The fallback constants are in the graph.
self.assertEqual(1 node_names.count("fallback_quantization_min_value"))<line_sep>self.assertEqual(1 node_names.count("fallback_quantization_max_value"))<block_end><def_stmt>test_remove_redundant_quantization self<block_start>a_constant_name="a_constant"<line_sep>a_constant_min_name="a_constant_min"<line_sep>a_constant_max_name="a_constant_max"<line_sep>a_dequantize_name="a_dequantize"<line_sep>a_quantize_name="a_quantize"<line_sep>b_constant_name="b_constant"<line_sep>b_constant_min_name="b_constant_min"<line_sep>b_constant_max_name="b_constant_max"<line_sep>b_dequantize_name="b_dequantize"<line_sep>b_quantize_name="b_quantize"<line_sep>mat_mul_name="mat_mul"<line_sep>graph_def=graph_pb2.GraphDef()<line_sep>a_constant=quantize_graph.create_constant_node(a_constant_name value=(0 ) dtype=dtypes.quint8 shape=[])<line_sep>graph_def.node.extend([a_constant])<line_sep>a_constant_min=quantize_graph.create_constant_node(a_constant_min_name value=2 dtype=dtypes.float32 shape=[])<line_sep>graph_def.node.extend([a_constant_min])<line_sep>a_constant_max=quantize_graph.create_constant_node(a_constant_max_name value=2 dtype=dtypes.float32 shape=[])<line_sep>graph_def.node.extend([a_constant_max])<line_sep>a_dequantize_node=quantize_graph.create_node("Dequantize" a_dequantize_name [a_constant_name a_constant_min_name a_constant_max_name])<line_sep>quantize_graph.set_attr_dtype(a_dequantize_node "T" dtypes.uint8)<line_sep>graph_def.node.extend([a_dequantize_node])<line_sep>a_quantize_node=quantize_graph.create_node("QuantizeV2" a_quantize_name [a_dequantize_name a_dequantize_name+":1" a_dequantize_name+":2"])<line_sep>quantize_graph.set_attr_dtype(a_quantize_node "T" dtypes.uint8)<line_sep>graph_def.node.extend([a_quantize_node])<line_sep>b_constant=quantize_graph.create_constant_node(b_constant_name value=(0 ) dtype=dtypes.quint8 shape=[])<line_sep>graph_def.node.extend([b_constant])<line_sep>b_constant_min=quantize_graph.create_constant_node(b_constant_min_name value=3 dtype=dtypes.float32 shape=[])<line_sep>graph_def.node.extend([b_constant_min])<line_sep>b_constant_max=quantize_graph.create_constant_node(b_constant_max_name value=3 dtype=dtypes.float32 shape=[])<line_sep>graph_def.node.extend([b_constant_max])<line_sep>b_dequantize_node=quantize_graph.create_node("Dequantize" b_dequantize_name [b_constant_name b_constant_min_name b_constant_max_name])<line_sep>quantize_graph.set_attr_dtype(b_dequantize_node "T" dtypes.uint8)<line_sep>graph_def.node.extend([b_dequantize_node])<line_sep>b_quantize_node=quantize_graph.create_node("QuantizeV2" b_quantize_name [b_dequantize_name b_dequantize_name+":1" b_dequantize_name+":2"])<line_sep>quantize_graph.set_attr_dtype(b_quantize_node "T" dtypes.uint8)<line_sep>graph_def.node.extend([b_quantize_node])<line_sep>mat_mul_node=quantize_graph.create_node("QuantizedMatMul" mat_mul_name [a_quantize_name b_quantize_name a_quantize_name+":1" a_quantize_name+":2" b_quantize_name+":1" b_quantize_name+":2"])<line_sep>quantize_graph.set_attr_dtype(mat_mul_node "T1" dtypes.uint8)<line_sep>quantize_graph.set_attr_dtype(mat_mul_node "T2" dtypes.int32)<line_sep>graph_def.node.extend([mat_mul_node])<line_sep>expected_output=graph_pb2.GraphDef()<line_sep>a_constant=quantize_graph.create_constant_node(a_constant_name value=(0 ) dtype=dtypes.quint8 shape=[])<line_sep>expected_output.node.extend([a_constant])<line_sep>a_constant_min=quantize_graph.create_constant_node(a_constant_min_name value=2 dtype=dtypes.float32 
shape=[])<line_sep>expected_output.node.extend([a_constant_min])<line_sep>a_constant_max=quantize_graph.create_constant_node(a_constant_max_name value=2 dtype=dtypes.float32 shape=[])<line_sep>expected_output.node.extend([a_constant_max])<line_sep>b_constant=quantize_graph.create_constant_node(b_constant_name value=(0 ) dtype=dtypes.quint8 shape=[])<line_sep>expected_output.node.extend([b_constant])<line_sep>b_constant_min=quantize_graph.create_constant_node(b_constant_min_name value=3 dtype=dtypes.float32 shape=[])<line_sep>expected_output.node.extend([b_constant_min])<line_sep>b_constant_max=quantize_graph.create_constant_node(b_constant_max_name value=3 dtype=dtypes.float32 shape=[])<line_sep>expected_output.node.extend([b_constant_max])<line_sep>mat_mul_node=quantize_graph.create_node("QuantizedMatMul" mat_mul_name [a_constant_name b_constant_name a_constant_min_name a_constant_max_name b_constant_min_name b_constant_max_name])<line_sep>quantize_graph.set_attr_dtype(mat_mul_node "T1" dtypes.uint8)<line_sep>quantize_graph.set_attr_dtype(mat_mul_node "T2" dtypes.int32)<line_sep>expected_output.node.extend([mat_mul_node])<line_sep>expected_output.versions.CopyFrom(graph_def.versions)<line_sep>expected_output.library.CopyFrom(graph_def.library)<line_sep>rewriter=quantize_graph.GraphRewriter(graph_def [mat_mul_name] quantized_input_range=<none>)<line_sep>output=rewriter.remove_redundant_quantization(graph_def)<line_sep>stripped_output=graph_util.extract_sub_graph(output [mat_mul_name])<line_sep>self.assertProtoEquals(expected_output stripped_output)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test.main()<block_end> |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2017-2019 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
<import_from_stmt>litex.build.generic_platform *<import_from_stmt>litex.build.xilinx XilinxPlatform VivadoProgrammer<line_sep># IOs ----------------------------------------------------------------------------------------------
_io=[# Clk / Rst
("clk125" 0 Subsignal("p" Pins("G10") IOStandard("LVDS")) Subsignal("n" Pins("F10") IOStandard("LVDS"))) ("clk300" 0 Subsignal("p" Pins("AK17") IOStandard("DIFF_SSTL12")) Subsignal("n" Pins("AK16") IOStandard("DIFF_SSTL12"))) ("cpu_reset" 0 Pins("AN8") IOStandard("LVCMOS18")) # Leds
("user_led" 0 Pins("AP8") IOStandard("LVCMOS18")) ("user_led" 1 Pins("H23") IOStandard("LVCMOS18")) ("user_led" 2 Pins("P20") IOStandard("LVCMOS18")) ("user_led" 3 Pins("P21") IOStandard("LVCMOS18")) ("user_led" 4 Pins("N22") IOStandard("LVCMOS18")) ("user_led" 5 Pins("M22") IOStandard("LVCMOS18")) ("user_led" 6 Pins("R23") IOStandard("LVCMOS18")) ("user_led" 7 Pins("P23") IOStandard("LVCMOS18")) # Buttons
("user_btn_c" 0 Pins("AE10") IOStandard("LVCMOS18")) ("user_btn_n" 0 Pins("AD10") IOStandard("LVCMOS18")) ("user_btn_s" 0 Pins("AF8") IOStandard("LVCMOS18")) ("user_btn_w" 0 Pins("AF9") IOStandard("LVCMOS18")) ("user_btn_e" 0 Pins("AE8") IOStandard("LVCMOS18")) # Switches
("user_dip_btn" 0 Pins("AN16") IOStandard("LVCMOS12")) ("user_dip_btn" 1 Pins("AN19") IOStandard("LVCMOS12")) ("user_dip_btn" 2 Pins("AP18") IOStandard("LVCMOS12")) ("user_dip_btn" 3 Pins("AN14") IOStandard("LVCMOS12")) # SMA
("user_sma_clock" 0 Subsignal("p" Pins("D23") IOStandard("LVDS")) Subsignal("n" Pins("C23") IOStandard("LVDS"))) ("user_sma_clock_p" 0 Pins("D23") IOStandard("LVCMOS18")) ("user_sma_clock_n" 0 Pins("C23") IOStandard("LVCMOS18")) ("user_sma_gpio" 0 Subsignal("p" Pins("H27") IOStandard("LVDS")) Subsignal("n" Pins("G27") IOStandard("LVDS"))) ("user_sma_gpio_p" 0 Pins("H27") IOStandard("LVCMOS18")) ("user_sma_gpio_n" 0 Pins("G27") IOStandard("LVCMOS18")) # I2C
("i2c" 0 Subsignal("scl" Pins("J24")) Subsignal("sda" Pins("J25")) IOStandard("LVCMOS18")) # Serial
("serial" 0 Subsignal("cts" Pins("L23")) Subsignal("rts" Pins("K27")) Subsignal("tx" Pins("K26")) Subsignal("rx" Pins("G25")) IOStandard("LVCMOS18")) # SPIFlash
("spiflash" 0 # clock needs to be accessed through primitive
Subsignal("cs_n" Pins("U7")) Subsignal("dq" Pins("AC7 AB7 AA7 Y7")) IOStandard("LVCMOS18")) ("spiflash" 1 # clock needs to be accessed through primitive
Subsignal("cs_n" Pins("G26")) Subsignal("dq" Pins("M20 L20 R21 R22")) IOStandard("LVCMOS18")) # SDCard
("spisdcard" 0 Subsignal("clk" Pins("AL10")) Subsignal("cs_n" Pins("AH8")) Subsignal("mosi" Pins("AD9") Misc("PULLUP")) Subsignal("miso" Pins("AP9") Misc("PULLUP")) Misc("SLEW=FAST") IOStandard("LVCMOS18")) ("sdcard" 0 Subsignal("clk" Pins("AL10")) Subsignal("cmd" Pins("AD9") Misc("PULLUP True")) Subsignal("data" Pins("AP9 AN9 AH9 AH8") Misc("PULLUP True")) Misc("SLEW=FAST") IOStandard("LVCMOS18")) # Rotary Encoder
("rotary" 0 Subsignal("a" Pins("Y21")) Subsignal("b" Pins("AD26")) Subsignal("push" Pins("AF28")) IOStandard("LVCMOS18")) # HDMI
("hdmi" 0 Subsignal("d" Pins("AK11 AP11 AP13 AN13 AN11 AM11 AN12 AM12" "AL12 AK12 AL13 AK13 AD11 AH12 AG12 AJ11" "AG10 AK8")) Subsignal("de" Pins("AE11")) Subsignal("clk" Pins("AF13")) Subsignal("vsync" Pins("AH13")) Subsignal("hsync" Pins("AE13")) Subsignal("spdif" Pins("AE12")) Subsignal("spdif_out" Pins("AF12")) IOStandard("LVCMOS18")) # DDR4 SDRAM
("ddram" 0 Subsignal("a" Pins("AE17 AH17 AE18 AJ15 AG16 AL17 AK18 AG17" "AF18 AH19 AF15 AD19 AJ14 AG19") IOStandard("SSTL12_DCI")) Subsignal("ba" Pins("AF17 AL15") IOStandard("SSTL12_DCI")) Subsignal("bg" Pins("AG15") IOStandard("SSTL12_DCI")) Subsignal("ras_n" Pins("AF14") IOStandard("SSTL12_DCI")) # A16
Subsignal("cas_n" Pins("AG14") IOStandard("SSTL12_DCI")) # A15
Subsignal("we_n" Pins("AD16") IOStandard("SSTL12_DCI")) # A14
Subsignal("cs_n" Pins("AL19") IOStandard("SSTL12_DCI")) Subsignal("act_n" Pins("AH14") IOStandard("SSTL12_DCI")) #Subsignal("ten", Pins("AH16"), IOStandard("SSTL12_DCI")),
#Subsignal("alert_n", Pins("AJ16"), IOStandard("SSTL12_DCI")),
#Subsignal("par", Pins("AD18"), IOStandard("SSTL12_DCI")),
Subsignal("dm" Pins("AD21 AE25 AJ21 AM21 AH26 AN26 AJ29 AL32") IOStandard("POD12_DCI")) Subsignal("dq" Pins("AE23 AG20 AF22 AF20 AE22 AD20 AG22 AE20" "AJ24 AG24 AJ23 AF23 AH23 AF24 AH22 AG25" "AL22 AL25 AM20 AK23 AK22 AL24 AL20 AL23" "AM24 AN23 AN24 AP23 AP25 AN22 AP24 AM22" "AH28 AK26 AK28 AM27 AJ28 AH27 AK27 AM26" "AL30 AP29 AM30 AN28 AL29 AP28 AM29 AN27" "AH31 AH32 AJ34 AK31 AJ31 AJ30 AH34 AK32" "AN33 AP33 AM34 AP31 AM32 AN31 AL34 AN32") IOStandard("POD12_DCI") Misc("PRE_EMPHASIS=RDRV_240") Misc("EQUALIZATION=EQ_LEVEL2")) Subsignal("dqs_p" Pins("AG21 AH24 AJ20 AP20 AL27 AN29 AH33 AN34") IOStandard("DIFF_POD12_DCI") Misc("PRE_EMPHASIS=RDRV_240") Misc("EQUALIZATION=EQ_LEVEL2")) Subsignal("dqs_n" Pins("AH21 AJ25 AK20 AP21 AL28 AP30 AJ33 AP34") IOStandard("DIFF_POD12_DCI") Misc("PRE_EMPHASIS=RDRV_240") Misc("EQUALIZATION=EQ_LEVEL2")) Subsignal("clk_p" Pins("AE16") IOStandard("DIFF_SSTL12_DCI")) Subsignal("clk_n" Pins("AE15") IOStandard("DIFF_SSTL12_DCI")) Subsignal("cke" Pins("AD15") IOStandard("SSTL12_DCI")) Subsignal("odt" Pins("AJ18") IOStandard("SSTL12_DCI")) Subsignal("reset_n" Pins("AL18") IOStandard("LVCMOS12")) Misc("SLEW=FAST") ) # PCIe
("pcie_x1" 0 Subsignal("rst_n" Pins("K22") IOStandard("LVCMOS18")) Subsignal("clk_p" Pins("AB6")) Subsignal("clk_n" Pins("AB5")) Subsignal("rx_p" Pins("AB2")) Subsignal("rx_n" Pins("AB1")) Subsignal("tx_p" Pins("AC4")) Subsignal("tx_n" Pins("AC3"))) ("pcie_x2" 0 Subsignal("rst_n" Pins("K22") IOStandard("LVCMOS18")) Subsignal("clk_p" Pins("AB6")) Subsignal("clk_n" Pins("AB5")) Subsignal("rx_p" Pins("AB2 AD2")) Subsignal("rx_n" Pins("AB1 AD1")) Subsignal("tx_p" Pins("AC4 AE4")) Subsignal("tx_n" Pins("AC3 AE3"))) ("pcie_x4" 0 Subsignal("rst_n" Pins("K22") IOStandard("LVCMOS18")) Subsignal("clk_p" Pins("AB6")) Subsignal("clk_n" Pins("AB5")) Subsignal("rx_p" Pins("AB2 AD2 AF2 AH2")) Subsignal("rx_n" Pins("AB1 AD1 AF1 AH1")) Subsignal("tx_p" Pins("AC4 AE4 AG4 AH6")) Subsignal("tx_n" Pins("AC3 AE3 AG3 AH5"))) ("pcie_x8" 0 Subsignal("rst_n" Pins("K22") IOStandard("LVCMOS18")) Subsignal("clk_p" Pins("AB6")) Subsignal("clk_n" Pins("AB5")) Subsignal("rx_p" Pins("AB2 AD2 AF2 AH2 AJ4 AK2 AM2 AP2")) Subsignal("rx_n" Pins("AB1 AD1 AF1 AH1 AJ3 AK1 AM1 AP1")) Subsignal("tx_p" Pins("AC4 AE4 AG4 AH6 AK6 AL4 AM6 AN4")) Subsignal("tx_n" Pins("AC3 AE3 AG3 AH5 AK5 AL3 AM5 AN3"))) # SGMII Clk
("sgmii_clock" 0 Subsignal("p" Pins("P26") IOStandard("LVDS_25")) Subsignal("n" Pins("N26") IOStandard("LVDS_25"))) # SI570
("si570_refclk" 0 Subsignal("p" Pins("P6")) Subsignal("n" Pins("P5"))) # SMA
("user_sma_mgt_refclk" 0 Subsignal("p" Pins("V6")) Subsignal("n" Pins("V5"))) ("user_sma_mgt_tx" 0 Subsignal("p" Pins("R4")) Subsignal("n" Pins("R3"))) ("user_sma_mgt_rx" 0 Subsignal("p" Pins("P2")) Subsignal("n" Pins("P1"))) # SFP
("sfp" 0 Subsignal("txp" Pins("U4")) Subsignal("txn" Pins("U3")) Subsignal("rxp" Pins("T2")) Subsignal("rxn" Pins("T1"))) ("sfp_tx" 0 Subsignal("p" Pins("U4")) Subsignal("n" Pins("U3")) ) ("sfp_rx" 0 Subsignal("p" Pins("T2")) Subsignal("n" Pins("T1")) ) ("sfp_tx_disable_n" 0 Pins("AL8") IOStandard("LVCMOS18")) ("sfp" 1 Subsignal("txp" Pins("W4")) Subsignal("txn" Pins("W3")) Subsignal("rxp" Pins("V2")) Subsignal("rxn" Pins("V1"))) ("sfp_tx" 1 Subsignal("p" Pins("W4")) Subsignal("n" Pins("W3")) ) ("sfp_rx" 1 Subsignal("p" Pins("V2")) Subsignal("n" Pins("V1")) ) ("sfp_tx_disable_n" 1 Pins("D28") IOStandard("LVCMOS18")) ]<line_sep># Connectors ---------------------------------------------------------------------------------------
_connectors=[("HPC" {"DP0_C2M_P":"F6" "DP0_C2M_N":"F5" "DP0_M2C_P":"E4" "DP0_M2C_N":"E3" "DP1_C2M_P":"D6" "DP1_C2M_N":"D5" "DP1_M2C_P":"D2" "DP1_M2C_N":"D1" "DP2_C2M_P":"C4" "DP2_C2M_N":"C3" "DP2_M2C_P":"B2" "DP2_M2C_N":"B1" "DP3_C2M_P":"B6" "DP3_C2M_N":"B5" "DP3_M2C_P":"A4" "DP3_M2C_N":"A3" "DP4_C2M_P":"N4" "DP4_C2M_N":"N3" "DP4_M2C_P":"M2" "DP4_M2C_N":"M1" "DP5_C2M_P":"J4" "DP5_C2M_N":"J3" "DP5_M2C_P":"H2" "DP5_M2C_N":"H1" "DP6_C2M_P":"L4" "DP6_C2M_N":"L3" "DP6_M2C_P":"K2" "DP6_M2C_N":"K1" "DP7_C2M_P":"G4" "DP7_C2M_N":"G3" "DP7_M2C_P":"F2" "DP7_M2C_N":"F1" "LA06_P":"D13" "LA06_N":"C13" "LA10_P":"L8" "LA10_N":"K8" "LA14_P":"B10" "LA14_N":"A10" "LA18_CC_P":"E22" "LA18_CC_N":"E23" "LA27_P":"H21" "LA27_N":"G21" "HA01_CC_P":"E16" "HA01_CC_N":"D16" "HA05_P":"J15" "HA05_N":"J14" "HA09_P":"F18" "HA09_N":"F17" "HA13_P":"B14" "HA13_N":"A14" "HA16_P":"A19" "HA16_N":"A18" "HA20_P":"C19" "HA20_N":"B19" "CLK1_M2C_P":"E25" "CLK1_M2C_N":"D25" "LA00_CC_P":"H11" "LA00_CC_N":"G11" "LA03_P":"A13" "LA03_N":"A12" "LA08_P":"J8" "LA08_N":"H8" "LA12_P":"E10" "LA12_N":"D10" "LA16_P":"B9" "LA16_N":"A9" "LA20_P":"B24" "LA20_N":"A24" "LA22_P":"G24" "LA22_N":"F25" "LA25_P":"D20" "LA25_N":"D21" "LA29_P":"B20" "LA29_N":"A20" "LA31_P":"B25" "LA31_N":"A25" "LA33_P":"A27" "LA33_N":"A28" "HA03_P":"G15" "HA03_N":"G14" "HA07_P":"L19" "HA07_N":"L18" "HA11_P":"J19" "HA11_N":"J18" "HA14_P":"F15" "HA14_N":"F14" "HA18_P":"B17" "HA18_N":"B16" "HA22_P":"C18" "HA22_N":"C17" "GBTCLK1_M2C_P":"H6" "GBTCLK1_M2C_N":"H5" "GBTCLK0_M2C_P":"K6" "GBTCLK0_M2C_N":"K5" "LA01_CC_P":"G9" "LA01_CC_N":"F9" "LA05_P":"L13" "LA05_N":"K13" "LA09_P":"J9" "LA09_N":"H9" "LA13_P":"D9" "LA13_N":"C9" "LA17_CC_P":"D24" "LA17_CC_N":"C24" "LA23_P":"G22" "LA23_N":"F22" "LA26_P":"G20" "LA26_N":"F20" "PG_M2C":"L27" "HA00_CC_P":"G17" "HA00_CC_N":"G16" "HA04_P":"G19" "HA04_N":"F19" "HA08_P":"K18" "HA08_N":"K17" "HA12_P":"K16" "HA12_N":"J16" "HA15_P":"D14" "HA15_N":"C14" "HA19_P":"D19" "HA19_N":"D18" "PRSNT_M2C_B":"H24" "CLK0_M2C_P":"H12" "CLK0_M2C_N":"G12" "LA02_P":"K10" "LA02_N":"J10" "LA04_P":"L12" "LA04_N":"K12" "LA07_P":"F8" "LA07_N":"E8" "LA11_P":"K11" "LA11_N":"J11" "LA15_P":"D8" "LA15_N":"C8" "LA19_P":"C21" "LA19_N":"C22" "LA21_P":"F23" "LA21_N":"F24" "LA24_P":"E20" "LA24_N":"E21" "LA28_P":"B21" "LA28_N":"B22" "LA30_P":"C26" "LA30_N":"B26" "LA32_P":"E26" "LA32_N":"D26" "HA02_P":"H19" "HA02_N":"H18" "HA06_P":"L15" "HA06_N":"K15" "HA10_P":"H17" "HA10_N":"H16" "HA17_CC_P":"E18" "HA17_CC_N":"E17" "HA21_P":"E15" "HA21_N":"D15" "HA23_P":"B15" "HA23_N":"A15" }) ("LPC" {"GBTCLK0_M2C_P":"AA24" "GBTCLK0_M2C_N":"AA25" "LA01_CC_P":"W25" "LA01_CC_N":"Y25" "LA05_P":"V27" "LA05_N":"V28" "LA09_P":"V26" "LA09_N":"W26" "LA13_P":"AA20" "LA13_N":"AB20" "LA17_CC_P":"AA32" "LA17_CC_N":"AB32" "LA23_P":"AD30" "LA23_N":"AD31" "LA26_P":"AF33" "LA26_N":"AG34" "CLK0_M2C_P":"AA24" "CLK0_M2C_N":"AA25" "LA02_P":"AA22" "LA02_N":"AB22" "LA04_P":"U26" "LA04_N":"U27" "LA07_P":"V22" "LA07_N":"V23" "LA11_P":"V21" "LA11_N":"W21" "LA15_P":"AB25" "LA15_N":"AB26" "LA19_P":"AA29" "LA19_N":"AB29" "LA21_P":"AC33" "LA21_N":"AD33" "LA24_P":"AE32" "LA24_N":"AF32" "LA28_P":"V31" "LA28_N":"W31" "LA30_P":"Y31" "LA30_N":"Y32" "LA32_P":"W30" "LA32_N":"Y30" "LA06_P":"V29" "LA06_N":"W29" "LA10_P":"T22" "LA10_N":"T23" "LA14_P":"U21" "LA14_N":"U22" "LA18_CC_P":"AB30" "LA18_CC_N":"AB31" "LA27_P":"AG31" "LA27_N":"AG32" "CLK1_M2C_P":"AC31" "CLK1_M2C_N":"AC32" "LA00_CC_P":"W23" "LA00_CC_N":"W24" "LA03_P":"W28" "LA03_N":"Y28" "LA08_P":"U24" "LA08_N":"U25" "LA12_P":"AC22" "LA12_N":"AC23" "LA16_P":"AB21" "LA16_N":"AC21" 
"LA20_P":"AA34" "LA20_N":"AB34" "LA22_P":"AC34" "LA22_N":"AD34" "LA25_P":"AE33" "LA25_N":"AF34" "LA29_P":"U34" "LA29_N":"V34" "LA31_P":"V33" "LA31_N":"W34" "LA33_P":"W33" "LA33_N":"Y33" }) ("pmod0" "AK25 AN21 AH18 AM19 AE26 AF25 AE21 AM17") ("pmod1" "AL14 AM14 AP16 AP15 AM16 AM15 AN18 AN17") ]<line_sep># Platform -----------------------------------------------------------------------------------------
<class_stmt>Platform(XilinxPlatform)<block_start>default_clk_name="clk125"<line_sep>default_clk_period=1e9/125e6<def_stmt>__init__ self<block_start>XilinxPlatform.__init__(self "xcku040-ffva1156-2-e" _io _connectors toolchain="vivado")<block_end><def_stmt>create_programmer self<block_start><return>VivadoProgrammer()<block_end><def_stmt>do_finalize self fragment<block_start>XilinxPlatform.do_finalize(self fragment)<line_sep>self.add_period_constraint(self.lookup_request("clk125" loose=<true>) 1e9/125e6)<line_sep>self.add_period_constraint(self.lookup_request("clk300" loose=<true>) 1e9/300e6)<line_sep>self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 44]")<line_sep>self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 45]")<line_sep>self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 46]")<block_end><block_end> |
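# Usage sketch (assumption, not part of the board file above): instantiate the
# Platform, request pads declared in _io, and hand a small Migen module to the
# Vivado flow. The tie-off module and bitstream path are illustrative only.
from migen import Module

def example_build():
    platform = Platform()
    led = platform.request("user_led", 0)             # pin AP8, LVCMOS18

    class LedOn(Module):
        def __init__(self, led):
            self.comb += led.eq(1)                     # drive the LED constantly

    platform.build(LedOn(led))                         # generates sources and runs Vivado
    # platform.create_programmer().load_bitstream("build/top.bit")  # path is an assumption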
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>matplotlib.pyplot<as>plt<import_stmt>itertools<import_from_stmt>sklearn.datasets make_regression make_blobs load_digits fetch_openml load_diabetes <import_from_stmt>sklearn.preprocessing KBinsDiscretizer<import_from_stmt>dabl.preprocessing clean detect_types guess_ordinal<import_from_stmt>dabl.plot.supervised plot plot_classification_categorical plot_classification_continuous plot_regression_categorical plot_regression_continuous <import_from_stmt>dabl.utils data_df_from_bunch<import_from_stmt>dabl set_config<line_sep># FIXME: check that target is not y but a column name
@pytest.mark.filterwarnings('ignore:the matrix subclass')@pytest.mark.parametrize("continuous_features, categorical_features, task" itertools.product([0 1 3 100] [0 1 3 100] ['classification' 'regression']))<def_stmt>test_plots_smoke continuous_features categorical_features task# simple smoke test
# should be parametrized
<block_start>n_samples=100<line_sep>X_cont,y_cont=make_regression(n_samples=n_samples n_features=continuous_features n_informative=min(continuous_features 2))<line_sep>X_cat,y_cat=make_regression(n_samples=n_samples n_features=categorical_features n_informative=min(categorical_features 2))<if_stmt>X_cat.shape[1]<g>0<block_start>X_cat=KBinsDiscretizer(encode='ordinal').fit_transform(X_cat)<block_end>cont_columns=["asdf_%d_cont"%i<for>i range(continuous_features)]<line_sep>df_cont=pd.DataFrame(X_cont columns=cont_columns)<if_stmt>categorical_features<g>0<block_start>cat_columns=["asdf_%d_cat"%i<for>i range(categorical_features)]<line_sep>df_cat=pd.DataFrame(X_cat columns=cat_columns).astype('int')<line_sep>df_cat=df_cat.astype("category")<line_sep>X_df=pd.concat([df_cont df_cat] axis=1)<block_end><else_stmt><block_start>X_df=df_cont<block_end><assert_stmt>(X_df.shape[1]<eq>continuous_features+categorical_features)<line_sep>X_clean=clean(X_df.copy())<line_sep>y=y_cont+y_cat<if_stmt>X_df.shape[1]<eq>0<block_start>y=np.random.uniform(size=n_samples)<block_end><if_stmt>task<eq>"classification"<block_start>y=np.digitize(y np.percentile(y [5 10 60 85]))<block_end>X_clean['target']=y<if_stmt>task<eq>"classification"<block_start>X_clean['target']=X_clean['target'].astype('category')<block_end>types=detect_types(X_clean)<line_sep>column_types=types.T.idxmax()<assert_stmt>np.all(column_types[:continuous_features]<eq>'continuous')<assert_stmt>np.all(column_types[continuous_features:-1]<eq>'categorical')<if_stmt>task<eq>"classification"<block_start><assert_stmt>column_types[-1]<eq>'categorical'<block_end><else_stmt><block_start><assert_stmt>column_types[-1]<eq>'continuous'<block_end>plot(X_clean target_col='target')<line_sep>plt.close("all")<block_end>@pytest.mark.parametrize("add, feature_type, target_type" itertools.product([0 .1] ['continuous' 'categorical'] ['continuous' 'categorical']))<def_stmt>test_type_hints add feature_type target_type<block_start>X=pd.DataFrame(np.random.randint(4 size=100))+add<line_sep>X['target']=np.random.uniform(size=100)<line_sep>plot(X type_hints={0:feature_type 'target':target_type} target_col='target')<line_sep># get title of figure
text=plt.gcf()._suptitle.get_text()<assert_stmt>feature_type.capitalize()<in>text<line_sep>ax=plt.gca()<line_sep># one of the labels is 'target' iff regression
labels=ax.get_ylabel()+ax.get_xlabel()<assert_stmt>('target'<in>labels)<eq>(target_type<eq>'continuous')<line_sep>plt.close("all")<block_end><def_stmt>test_float_classification_target # check we can plot even if we do classification with a float target
<block_start>X,y=make_blobs()<line_sep>data=pd.DataFrame(X)<line_sep>data['target']=y.astype(np.float)<line_sep>types=detect_types(data)<assert_stmt>types.categorical['target']<line_sep>plot(data target_col='target')<line_sep># same with "actual float" - we need to specify classification for that :-/
data['target']=y.astype(np.float)+.2<line_sep>plot(data target_col='target' type_hints={'target':'categorical'})<line_sep>plt.close("all")<block_end>@pytest.mark.filterwarnings('ignore:Discarding near-constant')<def_stmt>test_plot_classification_n_classes <block_start>X,y=make_blobs()<line_sep>X=pd.DataFrame(X)<line_sep>X['target']=0<with_stmt>pytest.raises(ValueError match="Less than two classes")<block_start>plot_classification_categorical(X 'target')<block_end><with_stmt>pytest.raises(ValueError match="Less than two classes")<block_start>plot_classification_continuous(X 'target')<block_end><block_end><def_stmt>test_plot_wrong_target_type <block_start>X,y=make_blobs()<line_sep>X=pd.DataFrame(X)<line_sep>X['target']=y<with_stmt>pytest.raises(ValueError match="need continuous")<block_start>plot_regression_categorical(X 'target')<block_end><with_stmt>pytest.raises(ValueError match="need continuous")<block_start>plot_regression_continuous(X 'target')<block_end>X['target']=X[0]<with_stmt>pytest.raises(ValueError match="need categorical")<block_start>plot_classification_categorical(X 'target')<block_end><with_stmt>pytest.raises(ValueError match="need categorical")<block_start>plot_classification_continuous(X 'target')<block_end><block_end><def_stmt>test_plot_target_low_card_int <block_start>data=load_digits()<line_sep>df=data_df_from_bunch(data)<line_sep>plot(df[::10] target_col='target')<block_end><def_stmt>test_plot_X_y <block_start>X,y=make_blobs()<line_sep>X=pd.DataFrame(X)<line_sep>plot(X y)<block_end><def_stmt>test_plot_regression_numpy <block_start>X,y=make_regression()<line_sep>plot(X y)<block_end><def_stmt>test_plot_lda_binary <block_start>X,y=make_blobs(centers=2)<line_sep>X=pd.DataFrame(X)<line_sep>plot(X y univariate_plot='kde')<block_end><def_stmt>test_plot_int_column_name <block_start>X,y=make_blobs()<line_sep>X=pd.DataFrame(X)<line_sep>X[3]=y<line_sep>plot(X target_col=3)<block_end><def_stmt>test_negative_ordinal # check that a low card int with negative values is plotted correctly
<block_start>data=pd.DataFrame([np.random.randint(0 10 size=1000)-5 np.random.randint(0 2 size=1000)]).T<line_sep># ensure first column is low_card_int
<assert_stmt>(detect_types(data).T.idxmax()<eq>['low_card_int' 'categorical']).all()<assert_stmt>guess_ordinal(data[0])<line_sep># smoke test
plot(data target_col=1)<block_end><def_stmt>test_large_ordinal # check that large integers don't bring us down (bincount memory error)
# here are some random phone numbers
<block_start><assert_stmt><not>guess_ordinal(pd.Series([6786930208 2142878625 9106275431]))<block_end><def_stmt>test_plot_classification_continuous <block_start>data=fetch_openml('MiceProtein')<line_sep>df=data_df_from_bunch(data)<line_sep># only univariate plots
figures=plot_classification_continuous(df target_col='target' plot_pairwise=<false>)<assert_stmt>len(figures)<eq>1<line_sep># top 10 axes
<assert_stmt>len(figures[0].get_axes())<eq>10<line_sep># six is the minimum number of features for histograms
# (last column is target)
figures=plot_classification_continuous(df.iloc[: -7:] target_col='target' plot_pairwise=<false>)<assert_stmt>len(figures)<eq>1<assert_stmt>len(figures[0].get_axes())<eq>6<line_sep># for 5 features, do full pairplot
figures=plot_classification_continuous(df.iloc[: -6:] target_col='target' plot_pairwise=<false>)<assert_stmt>len(figures)<eq>1<line_sep># diagonal has twin axes
<assert_stmt>len(figures[0].get_axes())<eq>5<times>5+5<line_sep># also do pairwise plots
figures=plot_classification_continuous(df target_col='target' random_state=42)<line_sep># univariate, pairwise, pca, lda
<assert_stmt>len(figures)<eq>4<line_sep># univariate
axes=figures[0].get_axes()<assert_stmt>len(axes)<eq>10<line_sep># known result
<assert_stmt>axes[0].get_xlabel()<eq>"SOD1_N"<line_sep># bar plot never has ylabel
<assert_stmt>axes[0].get_ylabel()<eq>""<line_sep># pairwise
axes=figures[1].get_axes()<assert_stmt>len(axes)<eq>4<line_sep># known result
<assert_stmt>axes[0].get_xlabel()<eq>"SOD1_N"<assert_stmt>axes[0].get_ylabel()<eq>'S6_N'<line_sep># PCA
axes=figures[2].get_axes()<assert_stmt>len(axes)<eq>4<line_sep># known result
<assert_stmt>axes[0].get_xlabel()<eq>"PCA 1"<assert_stmt>axes[0].get_ylabel()<eq>'PCA 5'<line_sep># LDA
axes=figures[3].get_axes()<assert_stmt>len(axes)<eq>4<line_sep># known result
<assert_stmt>axes[0].get_xlabel()<eq>"LDA 0"<assert_stmt>axes[0].get_ylabel()<eq>'LDA 1'<block_end><def_stmt>test_plot_string_target <block_start>X,y=make_blobs(n_samples=30)<line_sep>data=pd.DataFrame(X)<line_sep>y=pd.Series(y)<line_sep>y[y<eq>0]='a'<line_sep>y[y<eq>1]='b'<line_sep>y[y<eq>2]='c'<line_sep>data['target']=y<line_sep>plot(data target_col='target')<block_end><def_stmt>test_na_vals_reg_plot_raise_warning <block_start>X,y=load_diabetes(return_X_y=<true>)<line_sep>X=pd.DataFrame(X)<line_sep>y[::50]=np.NaN<line_sep>X['target_col']=y<with_stmt>pytest.warns(UserWarning match="Missing values in target_col have "<concat>"been removed for regression")<block_start>plot(X 'target_col')<block_end><with_stmt>pytest.warns(UserWarning match="Missing values in target_col have "<concat>"been removed for regression")<block_start>plot_regression_continuous(X 'target_col')<block_end><with_stmt>pytest.warns(UserWarning match="Missing values in target_col have "<concat>"been removed for regression")<block_start>plot_regression_categorical(X 'target_col')<block_end><block_end><def_stmt>test_plot_regression_continuous_with_target_outliers <block_start>df=pd.DataFrame(data={"feature":np.random.randint(low=1 high=100 size=200) # target values are bound between 50 and 100
"target":np.random.randint(low=50 high=100 size=200)})<line_sep># append single outlier record with target value 0
df=df.append({"feature":50 "target":0} ignore_index=<true>)<with_stmt>pytest.warns(UserWarning match="Dropped 1 outliers in column target.")<block_start>plot_regression_continuous(df 'target')<block_end><block_end><def_stmt>test_plot_regression_categorical_missing_value <block_start>df=pd.DataFrame({'y':np.random.normal(size=300)})<line_sep>df.loc[100:200 'y']<augadd>1<line_sep>df.loc[200:300 'y']<augadd>2<line_sep>df['x']='a'<line_sep>df.loc[100:200 'x']='b'<line_sep>df.loc[200:300 'x']=np.NaN<line_sep>res=plot(df target_col='y')<assert_stmt>len(res[1][0 0].get_yticklabels())<eq>3<assert_stmt>res[1][0 0].get_yticklabels()[2].get_text()<eq>'dabl_mi...'<block_end><def_stmt>test_label_truncation <block_start>a=('a_really_long_name_that_would_mess_up_the_layout_a_lot'<concat>'_by_just_being_very_long')<line_sep>b=('the_target_that_has_an_equally_long_name_which_would_'<concat>'mess_up_everything_as_well_but_in_different_places')<line_sep>df=pd.DataFrame({a:np.random.uniform(0 1 1000)})<line_sep>df[b]=df[a]+np.random.uniform(0 0.1 1000)<line_sep>res=plot_regression_continuous(df target_col=b)<assert_stmt>res[0 0].get_ylabel()<eq>'the_target_that_h...'<assert_stmt>res[0 0].get_xlabel()<eq>'a_really_long_nam...'<line_sep>set_config(truncate_labels=<false>)<line_sep>res=plot_regression_continuous(df target_col=b)<assert_stmt>res[0 0].get_ylabel()<eq>b<assert_stmt>res[0 0].get_xlabel()<eq>a<line_sep>set_config(truncate_labels=<true>)<block_end> |
<import_from_stmt>._ffi.base string_types<import_from_stmt>._ffi.object register_object Object<import_from_stmt>._ffi.node register_node NodeBase<import_from_stmt>._ffi.node convert_to_node<as>_convert_to_node<import_from_stmt>._ffi.node_generic _scalar_type_inference<import_from_stmt>._ffi.function Function<import_from_stmt>._ffi.function _init_api register_func get_global_func extract_ext_funcs<import_from_stmt>._ffi.function convert_to_tvm_func<as>_convert_tvm_func<import_from_stmt>._ffi.runtime_ctypes TVMType<import_from_stmt>. _api_internal<import_from_stmt>. make<as>_make<import_from_stmt>. expr<as>_expr<import_from_stmt>. tensor<as>_tensor<import_from_stmt>. schedule<as>_schedule<import_from_stmt>. container<as>_container<import_from_stmt>. tag<as>_tag<line_sep>int8="int8"<line_sep>int32="int32"<line_sep>float32="float32"<line_sep>handle="handle"<def_stmt>min_value dtype<block_start><return>_api_internal._min_value(dtype)<block_end> |
<import_stmt>copy<import_stmt>torch<import_from_stmt>..attack Attack<class_stmt>MultiAttack(Attack)<block_start>r"""
MultiAttack is a class to attack a model with various attacks against the same images and labels.
Arguments:
model (nn.Module): model to attack.
attacks (list): list of attacks.
Examples::
>>> atk1 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
>>> atk2 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
>>> atk = torchattacks.MultiAttack([atk1, atk2])
>>> adv_images = attack(images, labels)
"""<def_stmt>__init__ self attacks verbose=<false># Check validity
<block_start>ids=[]<for_stmt>attack attacks<block_start>ids.append(id(attack.model))<block_end><if_stmt>len(set(ids))<ne>1<block_start><raise>ValueError("At least one of attacks is referencing a different model.")<block_end>super().__init__("MultiAttack" attack.model)<line_sep>self.attacks=attacks<line_sep>self.verbose=verbose<line_sep>self._accumulate_multi_atk_records=<false><line_sep>self._multi_atk_records=[0.0]<line_sep>self._supported_mode=['default']<block_end><def_stmt>forward self images labels<block_start>r"""
Overridden.
"""<line_sep>batch_size=images.shape[0]<line_sep>fails=torch.arange(batch_size).to(self.device)<line_sep>final_images=images.clone().detach().to(self.device)<line_sep>labels=labels.clone().detach().to(self.device)<line_sep>multi_atk_records=[batch_size]<for_stmt>_,attack enumerate(self.attacks)<block_start>adv_images=attack(images[fails] labels[fails])<line_sep>outputs=self.model(adv_images)<line_sep>_,pre=torch.max(outputs.data 1)<line_sep>corrects=(pre<eq>labels[fails])<line_sep>wrongs=~corrects<line_sep>succeeds=torch.masked_select(fails wrongs)<line_sep>succeeds_of_fails=torch.masked_select(torch.arange(fails.shape[0]).to(self.device) wrongs)<line_sep>final_images[succeeds]=adv_images[succeeds_of_fails]<line_sep>fails=torch.masked_select(fails corrects)<line_sep>multi_atk_records.append(len(fails))<if_stmt>len(fails)<eq>0<block_start><break><block_end><block_end><if_stmt>self.verbose<block_start>print(self._return_sr_record(multi_atk_records))<block_end><if_stmt>self._accumulate_multi_atk_records<block_start>self._update_multi_atk_records(multi_atk_records)<block_end><return>final_images<block_end><def_stmt>_clear_multi_atk_records self<block_start>self._multi_atk_records=[0.0]<block_end><def_stmt>_covert_to_success_rates self multi_atk_records<block_start>sr=[((1-multi_atk_records[i]/multi_atk_records[0])<times>100)<for>i range(1 len(multi_atk_records))]<line_sep><return>sr<block_end><def_stmt>_return_sr_record self multi_atk_records<block_start>sr=self._covert_to_success_rates(multi_atk_records)<line_sep><return>"Attack success rate: "+" | ".join(["%2.2f %%"%item<for>item sr])<block_end><def_stmt>_update_multi_atk_records self multi_atk_records<block_start><for_stmt>i,item enumerate(multi_atk_records)<block_start>self._multi_atk_records[i]<augadd>item<block_end><block_end><def_stmt>save self data_loader save_path=<none> verbose=<true> return_verbose=<false><block_start>r"""
Overridden.
"""<line_sep>self._clear_multi_atk_records()<line_sep>verbose=self.verbose<line_sep>self.verbose=<false><line_sep>self._accumulate_multi_atk_records=<true><for_stmt>i,attack enumerate(self.attacks)<block_start>self._multi_atk_records.append(0.0)<block_end>rob_acc,l2,elapsed_time=super().save(data_loader save_path verbose return_verbose)<line_sep>sr=self._covert_to_success_rates(self._multi_atk_records)<line_sep>self._clear_multi_atk_records()<line_sep>self._accumulate_multi_atk_records=<false><line_sep>self.verbose=verbose<if_stmt>return_verbose<block_start><return>rob_acc sr l2 elapsed_time<block_end><block_end><def_stmt>_save_print self progress rob_acc l2 elapsed_time end<block_start>r"""
Overridden.
"""<line_sep>print("- Save progress: %2.2f %% / Robust accuracy: %2.2f %%"%(progress rob_acc)+" / "+self._return_sr_record(self._multi_atk_records)+' / L2: %1.5f (%2.3f it/s) \t'%(l2 elapsed_time) end=end)<block_end><block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>os<import_stmt>torch.nn.functional<as>F<class_stmt>LDS(nn.Module)<block_start><def_stmt>__init__ self <block_start>super(LDS self).__init__()<line_sep>self.pool1=nn.MaxPool2d(kernel_size=(2 2) stride=2 padding=0)<line_sep>self.pool2=nn.MaxPool2d(kernel_size=(2 2) stride=2 padding=0)<line_sep>self.pool3=nn.MaxPool2d(kernel_size=(2 2) stride=2 padding=1)<block_end><def_stmt>forward self x<block_start>x_pool1=self.pool1(x)<line_sep>x_pool2=self.pool2(x_pool1)<line_sep>x_pool3=self.pool3(x_pool2)<line_sep><return>x_pool3<block_end><block_end><class_stmt>ConvBlock(nn.Module)<block_start><def_stmt>__init__ self in_planes out_planes kernel_size stride=1 padding=0 dilation=1 groups=1 relu=<true> bn=<true> bias=<false><block_start>super(ConvBlock self).__init__()<line_sep>self.out_channels=out_planes<line_sep>self.conv=nn.Conv2d(in_planes out_planes kernel_size=kernel_size stride=stride padding=padding dilation=dilation groups=groups bias=bias)<line_sep>self.bn=nn.BatchNorm2d(out_planes eps=1e-5 momentum=0.01 affine=<true>)<if>bn<else><none><line_sep>self.relu=nn.ReLU(inplace=<false>)<if>relu<else><none><block_end><def_stmt>forward self x<block_start>x=self.conv(x)<if_stmt>self.bn<is><not><none><block_start>x=self.bn(x)<block_end><if_stmt>self.relu<is><not><none><block_start>x=self.relu(x)<block_end><return>x<block_end><block_end><class_stmt>LSN_init(nn.Module)<block_start><def_stmt>__init__ self in_planes out_planes stride=1<block_start>super(LSN_init self).__init__()<line_sep>self.out_channels=out_planes<line_sep>inter_planes=out_planes<floordiv>4<line_sep>self.part_a=nn.Sequential(ConvBlock(in_planes inter_planes kernel_size=(3 3) stride=stride padding=1) ConvBlock(inter_planes inter_planes kernel_size=1 stride=1) ConvBlock(inter_planes inter_planes kernel_size=(3 3) stride=stride padding=1))<line_sep>self.part_b=ConvBlock(inter_planes out_planes kernel_size=1 stride=1 relu=<false>)<block_end><def_stmt>forward self x<block_start>out1=self.part_a(x)<line_sep>out2=self.part_b(out1)<line_sep><return>out1 out2<block_end><block_end><class_stmt>LSN_later(nn.Module)<block_start><def_stmt>__init__ self in_planes out_planes stride=1<block_start>super(LSN_later self).__init__()<line_sep>self.out_channels=out_planes<line_sep>inter_planes=out_planes<floordiv>4<line_sep>self.part_a=ConvBlock(in_planes inter_planes kernel_size=(3 3) stride=stride padding=1)<line_sep>self.part_b=ConvBlock(inter_planes out_planes kernel_size=1 stride=1 relu=<false>)<block_end><def_stmt>forward self x<block_start>out1=self.part_a(x)<line_sep>out2=self.part_b(out1)<line_sep><return>out1 out2<block_end><block_end><class_stmt>IBN(nn.Module)<block_start><def_stmt>__init__ self out_planes bn=<true><block_start>super(IBN self).__init__()<line_sep>self.out_channels=out_planes<line_sep>self.bn=nn.BatchNorm2d(out_planes eps=1e-5 momentum=0.01 affine=<true>)<if>bn<else><none><block_end><def_stmt>forward self x<block_start><if_stmt>self.bn<is><not><none><block_start>x=self.bn(x)<block_end><return>x<block_end><block_end><class_stmt>One_Three_Conv(nn.Module)<block_start><def_stmt>__init__ self in_planes out_planes stride=1<block_start>super(One_Three_Conv self).__init__()<line_sep>self.out_channels=out_planes<line_sep>inter_planes=in_planes<floordiv>4<line_sep>self.single_branch=nn.Sequential(ConvBlock(in_planes inter_planes kernel_size=1 stride=1) ConvBlock(inter_planes out_planes kernel_size=(3 3) stride=stride padding=1 relu=<false>))<block_end><def_stmt>forward self 
x<block_start>out=self.single_branch(x)<line_sep><return>out<block_end><block_end><class_stmt>Relu_Conv(nn.Module)<block_start><def_stmt>__init__ self in_planes out_planes stride=1<block_start>super(Relu_Conv self).__init__()<line_sep>self.out_channels=out_planes<line_sep>self.relu=nn.ReLU(inplace=<false>)<line_sep>self.single_branch=nn.Sequential(ConvBlock(in_planes out_planes kernel_size=(3 3) stride=stride padding=1))<block_end><def_stmt>forward self x<block_start>x=self.relu(x)<line_sep>out=self.single_branch(x)<line_sep><return>out<block_end><block_end><class_stmt>Ds_Conv(nn.Module)<block_start><def_stmt>__init__ self in_planes out_planes stride=1 padding=(1 1)<block_start>super(Ds_Conv self).__init__()<line_sep>self.out_channels=out_planes<line_sep>self.single_branch=nn.Sequential(ConvBlock(in_planes out_planes kernel_size=(3 3) stride=stride padding=padding relu=<false>))<block_end><def_stmt>forward self x<block_start>out=self.single_branch(x)<line_sep><return>out<block_end><block_end><class_stmt>LRFNet(nn.Module)<block_start>"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""<def_stmt>__init__ self phase size base extras head num_classes<block_start>super(LRFNet self).__init__()<line_sep>self.phase=phase<line_sep>self.num_classes=num_classes<line_sep>self.size=size<line_sep># vgg network
self.base=nn.ModuleList(base)<line_sep>self.lds=LDS()<line_sep># convs for merging the lsn and ssd features
self.Norm1=Relu_Conv(512 512 stride=1)<line_sep>self.Norm2=Relu_Conv(1024 1024 stride=1)<line_sep>self.Norm3=Relu_Conv(512 512 stride=1)<line_sep>self.Norm4=Relu_Conv(256 256 stride=1)<line_sep># convs for generate the lsn features
self.icn1=LSN_init(3 512 stride=1)<line_sep>self.icn2=LSN_later(128 1024 stride=2)<line_sep>self.icn3=LSN_later(256 512 stride=2)<line_sep># convs with s=2 to downsample the features
self.dsc1=Ds_Conv(512 1024 stride=2 padding=(1 1))<line_sep>self.dsc2=Ds_Conv(1024 512 stride=2 padding=(1 1))<line_sep>self.dsc3=Ds_Conv(512 256 stride=2 padding=(1 1))<line_sep># convs to reduce the feature dimensions of current level
self.agent1=ConvBlock(512 256 kernel_size=1 stride=1)<line_sep>self.agent2=ConvBlock(1024 512 kernel_size=1 stride=1)<line_sep>self.agent3=ConvBlock(512 256 kernel_size=1 stride=1)<line_sep># convs to reduce the feature dimensions of other levels
self.proj1=ConvBlock(1024 128 kernel_size=1 stride=1)<line_sep>self.proj2=ConvBlock(512 128 kernel_size=1 stride=1)<line_sep>self.proj3=ConvBlock(256 128 kernel_size=1 stride=1)<line_sep># convs to reduce the feature dimensions of other levels
self.convert1=ConvBlock(384 256 kernel_size=1)<line_sep>self.convert2=ConvBlock(256 512 kernel_size=1)<line_sep>self.convert3=ConvBlock(128 256 kernel_size=1)<line_sep># convs to merge the features of the current and higher level features
self.merge1=ConvBlock(512 512 kernel_size=3 stride=1 padding=1)<line_sep>self.merge2=ConvBlock(1024 1024 kernel_size=3 stride=1 padding=1)<line_sep>self.merge3=ConvBlock(512 512 kernel_size=3 stride=1 padding=1)<line_sep>self.ibn1=IBN(512 bn=<true>)<line_sep>self.ibn2=IBN(1024 bn=<true>)<line_sep>self.relu=nn.ReLU(inplace=<false>)<line_sep>self.extras=nn.ModuleList(extras)<line_sep>self.loc=nn.ModuleList(head[0])<line_sep>self.conf=nn.ModuleList(head[1])<if_stmt>self.phase<eq>'test'<block_start>self.softmax=nn.Softmax()<block_end><block_end><def_stmt>forward self x<block_start>"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""<line_sep>sources=list()<line_sep>loc=list()<line_sep>conf=list()<line_sep>new_sources=list()<line_sep># apply lds to the initial image
x_pool=self.lds(x)<line_sep># apply vgg up to conv4_3
<for_stmt>k range(22)<block_start>x=self.base[k](x)<block_end>conv4_3_bn=self.ibn1(x)<line_sep>x_pool1_skip,x_pool1_icn=self.icn1(x_pool)<line_sep>s=self.Norm1(conv4_3_bn<times>x_pool1_icn)<line_sep># apply vgg up to fc7
<for_stmt>k range(22 34)<block_start>x=self.base[k](x)<block_end>conv7_bn=self.ibn2(x)<line_sep>x_pool2_skip,x_pool2_icn=self.icn2(x_pool1_skip)<line_sep>p=self.Norm2(self.dsc1(s)+conv7_bn<times>x_pool2_icn)<line_sep>x=self.base[34](x)<line_sep># apply extra layers and cache source layer outputs
<for_stmt>k,v enumerate(self.extras)<block_start>x=v(x)<if_stmt>k<eq>0<block_start>x_pool3_skip,x_pool3_icn=self.icn3(x_pool2_skip)<line_sep>w=self.Norm3(self.dsc2(p)+x<times>x_pool3_icn)<block_end><elif_stmt>k<eq>2<block_start>q=self.Norm4(self.dsc3(w)+x)<line_sep>sources.append(q)<block_end><elif_stmt>k<eq>5<or>k<eq>7<block_start>sources.append(x)<block_end><else_stmt><block_start><pass><block_end><block_end># project the forward features into lower dimension.
tmp1=self.proj1(p)<line_sep>tmp2=self.proj2(w)<line_sep>tmp3=self.proj3(q)<line_sep># The conv4_3 level
proj1=F.upsample(tmp1 size=(38 38) mode='bilinear')<line_sep>proj2=F.upsample(tmp2 size=(38 38) mode='bilinear')<line_sep>proj3=F.upsample(tmp3 size=(38 38) mode='bilinear')<line_sep>proj=torch.cat([proj1 proj2 proj3] dim=1)<line_sep>agent1=self.agent1(s)<line_sep>convert1=self.convert1(proj)<line_sep>pred1=torch.cat([agent1 convert1] dim=1)<line_sep>pred1=self.merge1(pred1)<line_sep>new_sources.append(pred1)<line_sep># The fc_7 level
proj2=F.upsample(tmp2 size=(19 19) mode='bilinear')<line_sep>proj3=F.upsample(tmp3 size=(19 19) mode='bilinear')<line_sep>proj=torch.cat([proj2 proj3] dim=1)<line_sep>agent2=self.agent2(p)<line_sep>convert2=self.convert2(proj)<line_sep>pred2=torch.cat([agent2 convert2] dim=1)<line_sep>pred2=self.merge2(pred2)<line_sep>new_sources.append(pred2)<line_sep># The conv8 level
proj3=F.upsample(tmp3 size=(10 10) mode='bilinear')<line_sep>proj=proj3<line_sep>agent3=self.agent3(w)<line_sep>convert3=self.convert3(proj)<line_sep>pred3=torch.cat([agent3 convert3] dim=1)<line_sep>pred3=self.merge3(pred3)<line_sep>new_sources.append(pred3)<for_stmt>prediction sources<block_start>new_sources.append(prediction)<block_end># apply multibox head to source layers
<for_stmt>(x l c) zip(new_sources self.loc self.conf)<block_start>loc.append(l(x).permute(0 2 3 1).contiguous())<line_sep>conf.append(c(x).permute(0 2 3 1).contiguous())<block_end>loc=torch.cat([o.view(o.size(0) -1)<for>o loc] 1)<line_sep>conf=torch.cat([o.view(o.size(0) -1)<for>o conf] 1)<if_stmt>self.phase<eq>"test"<block_start>output=(loc.view(loc.size(0) -1 4) # loc preds
self.softmax(conf.view(-1 self.num_classes)) # conf preds
)<block_end><else_stmt><block_start>output=(loc.view(loc.size(0) -1 4) conf.view(conf.size(0) -1 self.num_classes) )<block_end><return>output<block_end><def_stmt>load_weights self base_file<block_start>other,ext=os.path.splitext(base_file)<if_stmt>ext<eq>'.pkl'<or>'.pth'<block_start>print('Loading weights into state dict...')<line_sep>self.load_state_dict(torch.load(base_file))<line_sep>print('Finished!')<block_end><else_stmt><block_start>print('Sorry only .pth and .pkl files supported.')<block_end><block_end><block_end><def_stmt>vgg cfg i batch_norm=<false><block_start>layers=[]<line_sep>in_channels=i<for_stmt>v cfg<block_start><if_stmt>v<eq>'M'<block_start>layers<augadd>[nn.MaxPool2d(kernel_size=2 stride=2)]<block_end><elif_stmt>v<eq>'C'<block_start>layers<augadd>[nn.MaxPool2d(kernel_size=2 stride=2 ceil_mode=<true>)]<block_end><else_stmt><block_start>conv2d=nn.Conv2d(in_channels v kernel_size=3 padding=1)<if_stmt>batch_norm<block_start>layers<augadd>[conv2d nn.BatchNorm2d(v) nn.ReLU(inplace=<false>)]<block_end><else_stmt><block_start>layers<augadd>[conv2d nn.ReLU(inplace=<false>)]<block_end>in_channels=v<block_end><block_end>pool5=nn.MaxPool2d(kernel_size=3 stride=1 padding=1)<line_sep>conv6=nn.Conv2d(512 1024 kernel_size=3 padding=6 dilation=6)<line_sep>conv7=nn.Conv2d(1024 1024 kernel_size=1)<line_sep>layers<augadd>[pool5 conv6 nn.ReLU(inplace=<false>) conv7 nn.ReLU(inplace=<false>)]<line_sep><return>layers<block_end>base={'300':[64 64 'M' 128 128 'M' 256 256 256 'C' 512 512 512 'M' 512 512 512]}<def_stmt>add_extras size cfg i batch_norm=<false># Extra layers added to VGG for feature scaling
<block_start>layers=[]<line_sep>in_channels=i<line_sep>flag=<false><for_stmt>k,v enumerate(cfg)<block_start><if_stmt>in_channels<ne>'S'<block_start><if_stmt>v<eq>'S'<block_start><if_stmt>in_channels<eq>256<and>size<eq>512<block_start>layers<augadd>[One_Three_Conv(in_channels cfg[k+1] stride=2) nn.ReLU(inplace=<false>)]<block_end><else_stmt><block_start>layers<augadd>[One_Three_Conv(in_channels cfg[k+1] stride=2) nn.ReLU(inplace=<false>)]<block_end><block_end><block_end>in_channels=v<block_end>layers<augadd>[ConvBlock(256 128 kernel_size=1 stride=1)]<line_sep>layers<augadd>[ConvBlock(128 256 kernel_size=3 stride=1)]<line_sep>layers<augadd>[ConvBlock(256 128 kernel_size=1 stride=1)]<line_sep>layers<augadd>[ConvBlock(128 256 kernel_size=3 stride=1)]<line_sep><return>layers<block_end>extras={'300':[1024 'S' 512 'S' 256]}<def_stmt>multibox size vgg extra_layers cfg num_classes<block_start>loc_layers=[]<line_sep>conf_layers=[]<line_sep>vgg_source=[1 -2]<for_stmt>k,v enumerate(vgg_source)<block_start><if_stmt>k<eq>0<block_start>loc_layers<augadd>[nn.Conv2d(512 cfg[k]<times>4 kernel_size=3 padding=1)]<line_sep>conf_layers<augadd>[nn.Conv2d(512 cfg[k]<times>num_classes kernel_size=3 padding=1)]<block_end><else_stmt><block_start>loc_layers<augadd>[nn.Conv2d(vgg[v].out_channels cfg[k]<times>4 kernel_size=3 padding=1)]<line_sep>conf_layers<augadd>[nn.Conv2d(vgg[v].out_channels cfg[k]<times>num_classes kernel_size=3 padding=1)]<block_end><block_end>i=2<line_sep>indicator=3<for_stmt>k,v enumerate(extra_layers)<block_start><if_stmt>(k<l>indicator+1<and>k%2<eq>0)<or>(k<g>indicator+1<and>k%2<ne>0)<block_start>loc_layers<augadd>[nn.Conv2d(v.out_channels cfg[i]<times>4 kernel_size=3 padding=1)]<line_sep>conf_layers<augadd>[nn.Conv2d(v.out_channels cfg[i]<times>num_classes kernel_size=3 padding=1)]<line_sep>i<augadd>1<block_end><block_end><return>vgg extra_layers (loc_layers conf_layers)<block_end>mbox={'300':[6 6 6 6 4 4]}<def_stmt>build_net phase size=300 num_classes=81<block_start><if_stmt>size<ne>300<block_start>print("Error: The input image size is not supported!")<line_sep><return><block_end><return>LRFNet(phase size *multibox(size vgg(base[str(size)] 3) add_extras(size extras[str(size)] 1024) mbox[str(size)] num_classes) num_classes)<block_end> |
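# Construction sketch (assumption): build the 300x300 detector and run a dummy
# forward pass; 81 classes matches the default COCO-style head above, and the
# weights path is purely illustrative.
import torch

net = build_net("train", size=300, num_classes=81)
x = torch.randn(1, 3, 300, 300)
loc, conf = net(x)
# loc:  (1, num_priors, 4)  localization offsets
# conf: (1, num_priors, 81) per-class scores (softmaxed only in the "test" phase)
# net.load_weights("weights/LRF_COCO_300.pth")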
{'targets':[{'target_name':'hiredis' 'sources':['src/hiredis.cc' 'src/reader.cc'] 'include_dirs':["<!(node -e \"require('nan')\")"] 'dependencies':['deps/hiredis.gyp:hiredis-c'] 'defines':['_GNU_SOURCE'] 'cflags':['-Wall' '-O3']}]}<line_sep> |
<import_stmt>os<import_from_stmt>conans ConanFile tools<import_from_stmt>conans.errors ConanInvalidConfiguration<class_stmt>CxxOptsConan(ConanFile)<block_start>name="cxxopts"<line_sep>homepage="https://github.com/jarro2783/cxxopts"<line_sep>url="https://github.com/conan-io/conan-center-index"<line_sep>description="Lightweight C++ option parser library, supporting the standard GNU style syntax for options."<line_sep>license="MIT"<line_sep>topics=("conan" "option-parser" "positional-arguments " "header-only")<line_sep>settings="compiler"<line_sep>options={"unicode":[<true> <false>]}<line_sep>default_options={"unicode":<false>}<line_sep>no_copy_source=<true><line_sep>@property<def_stmt>_source_subfolder self<block_start><return>"source_subfolder"<block_end>@property<def_stmt>_minimum_cpp_standard self<block_start><return>11<block_end>@property<def_stmt>_minimum_compilers_version self<block_start><return>{"Visual Studio":"14" "gcc":"5" "clang":"3.9" "apple-clang":"8" }<block_end><def_stmt>configure self<block_start><if_stmt>self.settings.compiler.get_safe("cppstd")<block_start>tools.check_min_cppstd(self self._minimum_cpp_standard)<block_end>min_version=self._minimum_compilers_version.get(str(self.settings.compiler))<if_stmt><not>min_version<block_start>self.output.warn("{} recipe lacks information about the {} compiler support.".format(self.name self.settings.compiler))<block_end><else_stmt><block_start><if_stmt>tools.Version(self.settings.compiler.version)<l>min_version<block_start><raise>ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(self.name self._minimum_cpp_standard self.settings.compiler self.settings.compiler.version))<block_end><block_end><block_end><def_stmt>requirements self<block_start><if_stmt>self.options.unicode<block_start>self.requires("icu/64.2")<block_end><block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version])<line_sep>os.rename("{}-{}".format(self.name self.version) self._source_subfolder)<block_end><def_stmt>package self<block_start>self.copy("LICENSE" dst="licenses" src=self._source_subfolder)<line_sep>self.copy("{}.hpp".format(self.name) dst="include" src=os.path.join(self._source_subfolder "include"))<block_end><def_stmt>package_id self<block_start>self.info.header_only()<block_end><def_stmt>package_info self<block_start><if_stmt>self.options.unicode<block_start>self.cpp_info.defines=["CXXOPTS_USE_UNICODE"]<block_end><block_end><block_end> |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
<import_stmt>torch<import_from_stmt>.active_rotating_filter active_rotating_filter<import_from_stmt>.active_rotating_filter ActiveRotatingFilter<import_from_stmt>.rotation_invariant_encoding rotation_invariant_encoding<import_from_stmt>.rotation_invariant_encoding RotationInvariantEncoding<import_from_stmt>.rotation_invariant_pooling RotationInvariantPooling<line_sep>__all__=['ActiveRotatingFilter' 'active_rotating_filter' 'rotation_invariant_encoding' 'RotationInvariantEncoding' 'RotationInvariantPooling']<line_sep> |
__author__='<NAME>'<import_from_stmt>setuptools setup<line_sep>setup(name="einops" version='0.3.2' description="A new flavour of deep learning operations" long_description=open('README.md' encoding='utf-8').read() long_description_content_type='text/markdown' url='https://github.com/arogozhnikov/einops' author='<NAME>' packages=['einops' 'einops.layers'] classifiers=['Intended Audience :: Science/Research' 'Programming Language :: Python :: 3 ' ] keywords='deep learning, neural networks, tensor manipulation, machine learning, '<concat>'scientific computations, einops' install_requires=[# no run-time or installation-time dependencies
] )<line_sep> |
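# Illustrative use of the packaged library (assumption, not part of setup.py):
# the rearrange/reduce operations referred to by the keywords above.
import numpy as np
from einops import rearrange, reduce

x = np.random.rand(2, 3, 4, 5)                  # batch, channel, height, width
flat = rearrange(x, 'b c h w -> b (c h w)')     # flatten each sample -> shape (2, 60)
pooled = reduce(x, 'b c h w -> b c', 'mean')    # global average pool -> shape (2, 3)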
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>shutil copy<import_stmt>subprocess<import_stmt>math<import_stmt>warnings<import_from_stmt>finn.custom_op.fpgadataflow.hlscustomop HLSCustomOp<import_from_stmt>finn.core.datatype DataType<import_from_stmt>onnx TensorProto helper<import_from_stmt>finn.util.data_packing npy_to_rtlsim_input rtlsim_output_to_npy<import_from_stmt>. templates<class_stmt>StreamingFIFO(HLSCustomOp)<block_start><def_stmt>__init__ self onnx_node<block_start>super().__init__(onnx_node)<line_sep>self.strm_fifo_wrapper=templates.strm_fifo_wrapper<block_end><def_stmt>get_nodeattr_types self<block_start>my_attrs={# FIFO depth
"depth":("i" <true> 0) # folded shape of input/output
"folded_shape":("ints" <true> []) # FINN DataTypes for inputs/outputs
"dataType":("s" <true> "") # Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style":("s" <false> "rtl" {"rtl" "vivado"}) # FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style":("s" <false> "auto" {"auto" "block" "distributed" "ultra"} ) }<line_sep>my_attrs.update(super().get_nodeattr_types())<line_sep><return>my_attrs<block_end><def_stmt>make_shape_compatible_op self model<block_start>exp_ishape=self.get_normal_input_shape()<line_sep>oshape=self.get_normal_output_shape()<line_sep>ishape=tuple(model.get_tensor_shape(self.onnx_node.input[0]))<assert_stmt>ishape<eq>tuple(exp_ishape) "Unexpect input shape for StreamingFIFO."<line_sep># implement tensor with correct shape
values=np.random.randn(*oshape).astype(np.float32)<line_sep><return>helper.make_node("Constant" inputs=[] outputs=[self.onnx_node.output[0]] value=helper.make_tensor(name="const_tensor" data_type=TensorProto.FLOAT dims=values.shape vals=values.flatten().astype(float) ) )<block_end><def_stmt>infer_node_datatype self model<block_start>node=self.onnx_node<line_sep>idt=model.get_tensor_datatype(node.input[0])<if_stmt>idt<ne>self.get_input_datatype()<block_start>warn_str="inputDataType changing for %s: %s -> %s "%(node.name str(self.get_input_datatype()) str(idt) )<line_sep>warnings.warn(warn_str)<block_end>self.set_nodeattr("dataType" idt.name)<line_sep># data type stays the same
model.set_tensor_datatype(node.output[0] idt)<block_end><def_stmt>verify_node self<block_start><pass><block_end><def_stmt>get_verilog_top_module_name self<block_start>"Return the Verilog top module name for this node."<line_sep>node=self.onnx_node<line_sep>prefixed_top_name="%s"%(node.name)<line_sep><return>prefixed_top_name<block_end><def_stmt>code_generation_ipgen self model fpgapart clk<block_start>code_gen_dir=self.get_nodeattr("code_gen_dir_ipgen")<line_sep>verilog_dir="{}/project_{}/sol1/impl/verilog".format(code_gen_dir self.onnx_node.name)<line_sep>os.makedirs(verilog_dir)<line_sep># copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir="/workspace/finn/finn-rtllib/memstream/hdl/"<line_sep>Q_file=os.path.join(memstream_dir "Q_srl.v")<line_sep>copy(Q_file verilog_dir)<line_sep># empty code gen dictionary for new entries
self.code_gen_dict.clear()<line_sep>self.code_gen_dict["$TOPNAME$"]=["{}".format(self.onnx_node.name)]<line_sep>self.code_gen_dict["$LAYER_NAME$"]=["{}_{}".format(self.onnx_node.name self.onnx_node.name)]<line_sep># make instream width a multiple of 8 for axi interface
in_width=self.get_instream_width_padded()<line_sep>count_width=int(self.get_nodeattr("depth")-1).bit_length()<line_sep>self.code_gen_dict["$COUNT_RANGE$"]=["[{}:0]".format(count_width-1)]<line_sep>self.code_gen_dict["$IN_RANGE$"]=["[{}:0]".format(in_width-1)]<line_sep>self.code_gen_dict["$OUT_RANGE$"]=["[{}:0]".format(in_width-1)]<line_sep>self.code_gen_dict["$WIDTH$"]=[str(in_width)]<line_sep>self.code_gen_dict["$DEPTH$"]=[str(self.get_nodeattr("depth"))]<line_sep>template=self.strm_fifo_wrapper<for_stmt>key self.code_gen_dict# transform list into long string separated by '\n'
<block_start>code_gen_line="\n".join(self.code_gen_dict[key])<line_sep>template=template.replace(key code_gen_line)<block_end>f=open(os.path.join(verilog_dir "{}.v".format(self.onnx_node.name)) "w")<line_sep>f.write(template)<line_sep>f.close()<line_sep>self.code_gen_dict.clear()<block_end><def_stmt>ipgen_singlenode_code self<block_start>code_gen_dir=self.get_nodeattr("code_gen_dir_ipgen")<line_sep>verilog_dir="{}/project_{}/sol1/impl/verilog".format(code_gen_dir self.onnx_node.name)<line_sep># prepare the IP packaging tcl template
template=templates.ip_package_tcl<line_sep>self.code_gen_dict.clear()<line_sep>self.code_gen_dict["$TOPNAME$"]=["{}".format(self.onnx_node.name)]<line_sep># note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"]=["."]<for_stmt>key self.code_gen_dict# transform list into long string separated by '\n'
<block_start>code_gen_line="\n".join(self.code_gen_dict[key])<line_sep>template=template.replace(key code_gen_line)<block_end>f=open(os.path.join(verilog_dir "package_ip.tcl") "w")<line_sep>f.write(template)<line_sep>f.close()<line_sep># create a shell script and call Vivado to invoke the IP pkg script
make_project_sh=verilog_dir+"/make_ip.sh"<line_sep>working_dir=os.environ["PWD"]<with_stmt>open(make_project_sh "w")<as>f<block_start>f.write("#!/bin/bash \n")<line_sep>f.write("cd {}\n".format(verilog_dir))<line_sep>f.write("vivado -mode batch -source package_ip.tcl\n")<line_sep>f.write("cd {}\n".format(working_dir))<block_end>bash_command=["bash" make_project_sh]<line_sep>process_compile=subprocess.Popen(bash_command stdout=subprocess.PIPE)<line_sep>process_compile.communicate()<line_sep># set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path" verilog_dir)<line_sep>self.set_nodeattr("ip_path" verilog_dir)<line_sep>vlnv="xilinx.com:hls:%s:1.0"%(self.onnx_node.name)<line_sep>self.set_nodeattr("ip_vlnv" vlnv)<line_sep>self.code_gen_dict.clear()<block_end><def_stmt>get_normal_input_shape self<block_start>depth=self.get_nodeattr("depth")<line_sep># depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
<assert_stmt>depth<ge>2 """Depth is too low"""<if_stmt>depth<g>256<and>self.get_nodeattr("impl_style")<eq>"rtl"<block_start>warnings.warn("Depth is high, set between 2 and 256 for efficient SRL implementation")<block_end># derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
# so to recover it, the two innermost dimensions are multiplied together
# and appended to all preceding dimensions,
# which gives the normal input shape
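# e.g. a folded shape of (1, 4, 16) with pe=16 and nf=4 corresponds to a normal shape of (1, 64)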
folded_shape=self.get_nodeattr("folded_shape")<line_sep># extract inner dimension
inner_dim=folded_shape[-1]<line_sep># multiply with the next inner dimension
folding_factor=folded_shape[-2]<times>inner_dim<line_sep>normal_ishape=[]<line_sep># create the normal_ishape
<for_stmt>i range(len(folded_shape)-2)<block_start>normal_ishape.append(folded_shape[i])<block_end>normal_ishape.append(folding_factor)<line_sep><return>normal_ishape<block_end><def_stmt>get_normal_output_shape self<block_start><return>self.get_normal_input_shape()<block_end><def_stmt>get_folded_input_shape self<block_start><return>self.get_nodeattr("folded_shape")<block_end><def_stmt>get_folded_output_shape self<block_start><return>self.get_nodeattr("folded_shape")<block_end><def_stmt>get_instream_width self<block_start>dtype=DataType[self.get_nodeattr("dataType")]<line_sep>folded_shape=self.get_nodeattr("folded_shape")<line_sep>in_width=folded_shape[-1]<times>dtype.bitwidth()<line_sep><return>in_width<block_end><def_stmt>get_outstream_width self<block_start>dtype=DataType[self.get_nodeattr("dataType")]<line_sep>folded_shape=self.get_nodeattr("folded_shape")<line_sep>in_width=folded_shape[-1]<times>dtype.bitwidth()<line_sep><return>in_width<block_end><def_stmt>execute_node self context graph<block_start>mode=self.get_nodeattr("exec_mode")<line_sep>node=self.onnx_node<line_sep>inp=context[node.input[0]]<line_sep>exp_shape=self.get_normal_input_shape()<if_stmt>mode<eq>"cppsim"<block_start>output=inp<line_sep>output=np.asarray([output] dtype=np.float32).reshape(*exp_shape)<line_sep>context[node.output[0]]=output<block_end><elif_stmt>mode<eq>"rtlsim"<block_start>code_gen_dir=self.get_nodeattr("code_gen_dir_ipgen")<line_sep># create a npy file for the input of the node
<assert_stmt>(str(inp.dtype)<eq>"float32") """Input datatype is
not float32 as expected."""<line_sep>expected_inp_shape=self.get_folded_input_shape()<line_sep>reshaped_input=inp.reshape(expected_inp_shape)<if_stmt>DataType[self.get_nodeattr("dataType")]<eq>DataType.BIPOLAR# store bipolar activations as binary
<block_start>reshaped_input=(reshaped_input+1)/2<line_sep>export_idt=DataType.BINARY<block_end><else_stmt><block_start>export_idt=DataType[self.get_nodeattr("dataType")]<block_end># make copy before saving the array
reshaped_input=reshaped_input.copy()<line_sep>np.save(os.path.join(code_gen_dir "input_0.npy") reshaped_input)<line_sep>sim=self.get_rtlsim()<line_sep>nbits=self.get_instream_width()<line_sep>inp=npy_to_rtlsim_input("{}/input_0.npy".format(code_gen_dir) export_idt nbits)<line_sep>super().reset_rtlsim(sim)<line_sep>super().toggle_clk(sim)<line_sep>output=self.rtlsim(sim inp)<line_sep>odt=DataType[self.get_nodeattr("dataType")]<line_sep>target_bits=odt.bitwidth()<line_sep>packed_bits=self.get_outstream_width()<line_sep>out_npy_path="{}/output.npy".format(code_gen_dir)<line_sep>out_shape=self.get_folded_output_shape()<line_sep>rtlsim_output_to_npy(output out_npy_path odt out_shape packed_bits target_bits)<line_sep># load and reshape output
output=np.load(out_npy_path)<line_sep>oshape=self.get_normal_output_shape()<line_sep>output=np.asarray([output] dtype=np.float32).reshape(*oshape)<line_sep>context[node.output[0]]=output<block_end><else_stmt><block_start><raise>Exception("""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(mode))<block_end><block_end><def_stmt>get_number_output_values self<block_start>folded_oshape=self.get_folded_output_shape()<line_sep><return>np.prod(folded_oshape[:-1])<block_end><def_stmt>global_includes self<block_start><pass><block_end><def_stmt>defines self var<block_start><pass><block_end><def_stmt>read_npy_data self<block_start><pass><block_end><def_stmt>strm_decl self<block_start><pass><block_end><def_stmt>docompute self<block_start><pass><block_end><def_stmt>dataoutstrm self<block_start><pass><block_end><def_stmt>save_as_npy self<block_start><pass><block_end><def_stmt>blackboxfunction self<block_start><pass><block_end><def_stmt>pragmas self<block_start><pass><block_end><def_stmt>code_generation_ipi self<block_start>impl_style=self.get_nodeattr("impl_style")<if_stmt>impl_style<eq>"rtl"<block_start><return>super().code_generation_ipi()<block_end><elif_stmt>impl_style<eq>"vivado"<block_start>cmd=[]<line_sep>node_name=self.onnx_node.name<line_sep>depth=self.get_nodeattr("depth")<line_sep>ram_style=self.get_nodeattr("ram_style")<line_sep># create a hierarchy for this layer, with the same port names
clk_name=self.get_verilog_top_module_intf_names()["clk"][0]<line_sep>rst_name=self.get_verilog_top_module_intf_names()["rst"][0]<line_sep>dout_name=self.get_verilog_top_module_intf_names()["m_axis"][0][0]<line_sep>din_name=self.get_verilog_top_module_intf_names()["s_axis"][0][0]<line_sep>cmd.append("create_bd_cell -type hier %s"%node_name)<line_sep>cmd.append("create_bd_pin -dir I -type clk /%s/%s"%(node_name clk_name))<line_sep>cmd.append("create_bd_pin -dir I -type rst /%s/%s"%(node_name rst_name))<line_sep>cmd.append("create_bd_intf_pin -mode Master "<concat>"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"%(node_name dout_name))<line_sep>cmd.append("create_bd_intf_pin -mode Slave "<concat>"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"%(node_name din_name))<line_sep># instantiate and configure the AXI Stream data FIFO
cmd.append("create_bd_cell -type ip "<concat>"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo"%node_name)<line_sep>cmd.append("set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "<concat>"[get_bd_cells /%s/fifo]"%(depth node_name))<line_sep>cmd.append("set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "<concat>"[get_bd_cells /%s/fifo]"%(ram_style node_name))<line_sep>cmd.append("set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "<concat>"[get_bd_cells /%s/fifo]"%(np.ceil(self.get_outstream_width()/8) node_name))<line_sep>cmd.append("connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "<concat>"[get_bd_intf_pins %s/%s]"%(node_name node_name dout_name))<line_sep>cmd.append("connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "<concat>"[get_bd_intf_pins %s/%s]"%(node_name node_name din_name))<line_sep>cmd.append("connect_bd_net [get_bd_pins %s/%s] "<concat>"[get_bd_pins %s/fifo/s_axis_aresetn]"%(node_name rst_name node_name))<line_sep>cmd.append("connect_bd_net [get_bd_pins %s/%s] "<concat>"[get_bd_pins %s/fifo/s_axis_aclk]"%(node_name clk_name node_name))<line_sep><return>cmd<block_end><else_stmt><block_start><raise>Exception("FIFO implementation style %s not supported, please use rtl or vivado"%impl_style)<block_end><block_end><def_stmt>bram_estimation self<block_start>"""Calculates resource estimation for BRAM"""<line_sep>impl=self.get_nodeattr("impl_style")<line_sep>ram_type=self.get_nodeattr("ram_style")<line_sep>depth=self.get_nodeattr("depth")<line_sep>W=self.get_instream_width()<if_stmt>impl<eq>"rtl"<or>(impl<eq>"vivado"<and>ram_type<ne>"block")# Non-BRAM based implementation
<block_start><return>0<block_end><if_stmt>W<eq>1<block_start><return>math.ceil(depth/16384)<block_end><elif_stmt>W<eq>2<block_start><return>math.ceil(depth/8192)<block_end><elif_stmt>W<le>4<block_start><return>(math.ceil(depth/4096))<times>(math.ceil(W/4))<block_end><elif_stmt>W<le>9<block_start><return>(math.ceil(depth/2048))<times>(math.ceil(W/9))<block_end><elif_stmt>W<le>18<or>depth<g>512<block_start><return>(math.ceil(depth/1024))<times>(math.ceil(W/18))<block_end><else_stmt><block_start><return>(math.ceil(depth/512))<times>(math.ceil(W/36))<block_end><block_end><def_stmt>uram_estimation self<block_start>"""Calculates resource estimation for URAM"""<line_sep>impl=self.get_nodeattr("impl_style")<line_sep>ram_type=self.get_nodeattr("ram_style")<line_sep>depth=self.get_nodeattr("depth")<line_sep>W=self.get_instream_width()<if_stmt>impl<eq>"rtl"<or>(impl<eq>"vivado"<and>ram_type<ne>"ultra")# Non-URAM based implementation
<block_start><return>0<block_end><else_stmt><block_start><return>(math.ceil(depth/4096))<times>(math.ceil(W/72))<block_end><block_end><def_stmt>bram_efficiency_estimation self<block_start>depth=self.get_nodeattr("depth")<line_sep>W=self.get_instream_width()<line_sep>bram16_est=self.bram_estimation()<if_stmt>bram16_est<eq>0<block_start><return>1<block_end>wbits=W<times>depth<line_sep>bram16_est_capacity=bram16_est<times>36<times>512<line_sep><return>wbits/bram16_est_capacity<block_end><def_stmt>lut_estimation self<block_start>"""Calculates resource estimations for LUTs"""<line_sep>impl=self.get_nodeattr("impl_style")<line_sep>ram_type=self.get_nodeattr("ram_style")<line_sep>depth=self.get_nodeattr("depth")<line_sep>W=self.get_instream_width()<line_sep>address_luts=2<times>math.ceil(math.log(depth 2))<if_stmt>impl<eq>"rtl"<or>(impl<eq>"vivado"<and>ram_type<eq>"distributed")<block_start>ram_luts=(math.ceil(depth/32))<times>(math.ceil(W/2))<block_end><else_stmt><block_start>ram_luts=0<block_end><return>int(address_luts+ram_luts)<block_end><def_stmt>prepare_rtlsim self<block_start><assert_stmt>self.get_nodeattr("impl_style")<ne>"vivado" ("StreamingFIFO impl_style "<concat>"cannot be vivado for rtlsim. Only impl_style=rtl supported.")<line_sep>super().prepare_rtlsim()<block_end><block_end> |
<import_from_stmt>django.contrib admin<import_from_stmt>django.contrib.auth.admin UserAdmin<import_from_stmt>.models CustomUser<line_sep>admin.site.register(CustomUser UserAdmin)<line_sep> |
<import_stmt>copy<import_stmt>os<import_stmt>re<import_stmt>string<import_stmt>sys<import_stmt>warnings<import_from_stmt>contextlib contextmanager<import_from_stmt>enum Enum<import_from_stmt>textwrap dedent<import_from_stmt>typing Any Dict Iterator List Optional Tuple Type Union get_type_hints <import_stmt>yaml<import_from_stmt>.errors ConfigIndexError ConfigTypeError ConfigValueError GrammarParseError OmegaConfBaseException ValidationError <import_from_stmt>.grammar_parser SIMPLE_INTERPOLATION_PATTERN parse<try_stmt><block_start><import_stmt>dataclasses<block_end><except_stmt>ImportError# pragma: no cover
<block_start>dataclasses=<none><block_end># type: ignore # pragma: no cover
<try_stmt><block_start><import_stmt>attr<block_end><except_stmt>ImportError# pragma: no cover
<block_start>attr=<none><block_end># type: ignore # pragma: no cover
# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposely *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
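# e.g. for "..a[c].d" the head match below is "..a"; for "[a].b" it is the empty string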
KEY_PATH_HEAD=re.compile(r"(\.)*[^.[]*")<line_sep># Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
# - `.` followed by anything except `.` or `[` (ex: .b, .d)
# - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER=re.compile(r"\.([^.[]*)|\[(.*?)\]")<line_sep># source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES=["y" "Y" "yes" "Yes" "YES" "n" "N" "no" "No" "NO" "true" "True" "TRUE" "false" "False" "FALSE" "on" "On" "ON" "off" "Off" "OFF" ]<class_stmt>Marker<block_start><def_stmt>__init__ self desc:str<block_start>self.desc=desc<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>self.desc<block_end><block_end># To be used as default value when `None` is not an option.
_DEFAULT_MARKER_:Any=Marker("_DEFAULT_MARKER_")<class_stmt>OmegaConfDumper(yaml.Dumper)# type: ignore
<block_start>str_representer_added=<false><line_sep>@staticmethod<def_stmt>str_representer dumper:yaml.Dumper data:str<arrow>yaml.ScalarNode<block_start>with_quotes=yaml_is_bool(data)<or>is_int(data)<or>is_float(data)<line_sep><return>dumper.represent_scalar(yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG data style=("'"<if>with_quotes<else><none>) )<block_end><block_end><def_stmt>get_omega_conf_dumper <arrow>Type[OmegaConfDumper]<block_start><if_stmt><not>OmegaConfDumper.str_representer_added<block_start>OmegaConfDumper.add_representer(str OmegaConfDumper.str_representer)<line_sep>OmegaConfDumper.str_representer_added=<true><block_end><return>OmegaConfDumper<block_end><def_stmt>yaml_is_bool b:str<arrow>bool<block_start><return>b<in>YAML_BOOL_TYPES<block_end><def_stmt>get_yaml_loader <arrow>Any<block_start><class_stmt>OmegaConfLoader(yaml.SafeLoader)# type: ignore
<block_start><def_stmt>construct_mapping self node:yaml.Node deep:bool=<false><arrow>Any<block_start>keys=set()<for_stmt>key_node,value_node node.value<block_start><if_stmt>key_node.tag<ne>yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG<block_start><continue><block_end><if_stmt>key_node.value<in>keys<block_start><raise>yaml.constructor.ConstructorError("while constructing a mapping" node.start_mark f"found duplicate key {key_node.value}" key_node.start_mark )<block_end>keys.add(key_node.value)<block_end><return>super().construct_mapping(node deep=deep)<block_end><block_end>loader=OmegaConfLoader<line_sep>loader.add_implicit_resolver("tag:yaml.org,2002:float" re.compile("""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""" re.X ) list("-+0123456789.") )<line_sep>loader.yaml_implicit_resolvers={key:[(tag regexp)<for>tag,regexp resolvers<if>tag<ne>"tag:yaml.org,2002:timestamp"]<for>key,resolvers loader.yaml_implicit_resolvers.items()}<line_sep><return>loader<block_end><def_stmt>_get_class path:str<arrow>type<block_start><import_from_stmt>importlib import_module<line_sep>module_path,_,class_name=path.rpartition(".")<line_sep>mod=import_module(module_path)<try_stmt><block_start>klass:type=getattr(mod class_name)<block_end><except_stmt>AttributeError<block_start><raise>ImportError(f"Class {class_name} is not in module {module_path}")<block_end><return>klass<block_end><def_stmt>_is_union type_:Any<arrow>bool<block_start><return>getattr(type_ "__origin__" <none>)<is>Union<block_end><def_stmt>_resolve_optional type_:Any<arrow>Tuple[bool Any]<block_start>"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""<if_stmt>getattr(type_ "__origin__" <none>)<is>Union<block_start>args=type_.__args__<if_stmt>len(args)<eq>2<and>args[1]<eq>type(<none>)# noqa E721
<block_start><return><true> args[0]<block_end><block_end><if_stmt>type_<is>Any<block_start><return><true> Any<block_end><return><false> type_<block_end><def_stmt>_is_optional obj:Any key:Optional[Union[int str]]=<none><arrow>bool<block_start>"""Check `obj` metadata to see if the given node is optional."""<import_from_stmt>.base Container Node<if_stmt>key<is><not><none><block_start><assert_stmt>isinstance(obj Container)<line_sep>obj=obj._get_node(key)<block_end><if_stmt>isinstance(obj Node)<block_start><return>obj._is_optional()<block_end><else_stmt># In case `obj` is not a Node, treat it as optional by default.
# This is used in `ListConfig.append` and `ListConfig.insert`
# where the appended/inserted value might or might not be a Node.
<block_start><return><true><block_end><block_end><def_stmt>_resolve_forward type_:Type[Any] module:str<arrow>Type[Any]<block_start><import_stmt>typing# lgtm [py/import-and-import-from]
forward=typing.ForwardRef<if>hasattr(typing "ForwardRef")<else>typing._ForwardRef# type: ignore
<if_stmt>type(type_)<is>forward<block_start><return>_get_class(f"{module}.{type_.__forward_arg__}")<block_end><else_stmt><block_start><if_stmt>is_dict_annotation(type_)<block_start>kt,vt=get_dict_key_value_types(type_)<if_stmt>kt<is><not><none><block_start>kt=_resolve_forward(kt module=module)<block_end><if_stmt>vt<is><not><none><block_start>vt=_resolve_forward(vt module=module)<block_end><return>Dict[kt vt]# type: ignore
<block_end><if_stmt>is_list_annotation(type_)<block_start>et=get_list_element_type(type_)<if_stmt>et<is><not><none><block_start>et=_resolve_forward(et module=module)<block_end><return>List[et]<block_end># type: ignore
<return>type_<block_end><block_end><def_stmt>extract_dict_subclass_data obj:Any parent:Any<arrow>Optional[Dict[str Any]]<block_start>"""Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""<import_from_stmt>omegaconf.omegaconf _maybe_wrap<line_sep>is_type=isinstance(obj type)<line_sep>obj_type=obj<if>is_type<else>type(obj)<line_sep>subclasses_dict=is_dict_subclass(obj_type)<if_stmt>subclasses_dict<block_start>warnings.warn(f"Class `{obj_type.__name__}` subclasses `Dict`."+" Subclassing `Dict` in Structured Config classes is deprecated,"+" see github.com/omry/omegaconf/issues/663" UserWarning stacklevel=9 )<block_end><if_stmt>is_type<block_start><return><none><block_end><elif_stmt>subclasses_dict<block_start>dict_subclass_data={}<line_sep>key_type,element_type=get_dict_key_value_types(obj_type)<for_stmt>name,value obj.items()<block_start>is_optional,type_=_resolve_optional(element_type)<line_sep>type_=_resolve_forward(type_ obj.__module__)<try_stmt><block_start>dict_subclass_data[name]=_maybe_wrap(ref_type=type_ is_optional=is_optional key=name value=value parent=parent )<block_end><except_stmt>ValidationError<as>ex<block_start>format_and_raise(node=<none> key=name value=value cause=ex msg=str(ex))<block_end><block_end><return>dict_subclass_data<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>get_attr_class_field_names obj:Any<arrow>List[str]<block_start>is_type=isinstance(obj type)<line_sep>obj_type=obj<if>is_type<else>type(obj)<line_sep><return>list(attr.fields_dict(obj_type))<block_end><def_stmt>get_attr_data obj:Any allow_objects:Optional[bool]=<none><arrow>Dict[str Any]<block_start><import_from_stmt>omegaconf.omegaconf OmegaConf _maybe_wrap<line_sep>flags={"allow_objects":allow_objects}<if>allow_objects<is><not><none><else>{}<import_from_stmt>omegaconf MISSING<line_sep>d={}<line_sep>is_type=isinstance(obj type)<line_sep>obj_type=obj<if>is_type<else>type(obj)<line_sep>dummy_parent=OmegaConf.create({} flags=flags)<line_sep>dummy_parent._metadata.object_type=obj_type<for_stmt>name,attrib attr.fields_dict(obj_type).items()<block_start>is_optional,type_=_resolve_optional(attrib.type)<line_sep>type_=_resolve_forward(type_ obj.__module__)<if_stmt><not>is_type<block_start>value=getattr(obj name)<block_end><else_stmt><block_start>value=attrib.default<if_stmt>value<eq>attr.NOTHING<block_start>value=MISSING<block_end><block_end><if_stmt>_is_union(type_)<block_start>e=ConfigValueError(f"Union types are not supported:\n{name}: {type_str(type_)}")<line_sep>format_and_raise(node=<none> key=<none> value=value cause=e msg=str(e))<block_end><try_stmt><block_start>d[name]=_maybe_wrap(ref_type=type_ is_optional=is_optional key=name value=value parent=dummy_parent )<block_end><except_stmt>(ValidationError GrammarParseError)<as>ex<block_start>format_and_raise(node=dummy_parent key=name value=value cause=ex msg=str(ex))<block_end>d[name]._set_parent(<none>)<block_end>dict_subclass_data=extract_dict_subclass_data(obj=obj parent=dummy_parent)<if_stmt>dict_subclass_data<is><not><none><block_start>d.update(dict_subclass_data)<block_end><return>d<block_end><def_stmt>get_dataclass_field_names obj:Any<arrow>List[str]<block_start><return>[field.name<for>field dataclasses.fields(obj)]<block_end><def_stmt>get_dataclass_data obj:Any allow_objects:Optional[bool]=<none><arrow>Dict[str Any]<block_start><import_from_stmt>omegaconf.omegaconf MISSING OmegaConf 
_maybe_wrap<line_sep>flags={"allow_objects":allow_objects}<if>allow_objects<is><not><none><else>{}<line_sep>d={}<line_sep>obj_type=get_type_of(obj)<line_sep>dummy_parent=OmegaConf.create({} flags=flags)<line_sep>dummy_parent._metadata.object_type=obj_type<line_sep>resolved_hints=get_type_hints(obj_type)<for_stmt>field dataclasses.fields(obj)<block_start>name=field.name<line_sep>is_optional,type_=_resolve_optional(resolved_hints[field.name])<line_sep>type_=_resolve_forward(type_ obj.__module__)<if_stmt>hasattr(obj name)<block_start>value=getattr(obj name)<if_stmt>value<eq>dataclasses.MISSING<block_start>value=MISSING<block_end><block_end><else_stmt><block_start><if_stmt>field.default_factory<eq>dataclasses.MISSING# type: ignore
<block_start>value=MISSING<block_end><else_stmt><block_start>value=field.default_factory()<block_end><block_end># type: ignore
<if_stmt>_is_union(type_)<block_start>e=ConfigValueError(f"Union types are not supported:\n{name}: {type_str(type_)}")<line_sep>format_and_raise(node=<none> key=<none> value=value cause=e msg=str(e))<block_end><try_stmt><block_start>d[name]=_maybe_wrap(ref_type=type_ is_optional=is_optional key=name value=value parent=dummy_parent )<block_end><except_stmt>(ValidationError GrammarParseError)<as>ex<block_start>format_and_raise(node=dummy_parent key=name value=value cause=ex msg=str(ex))<block_end>d[name]._set_parent(<none>)<block_end>dict_subclass_data=extract_dict_subclass_data(obj=obj parent=dummy_parent)<if_stmt>dict_subclass_data<is><not><none><block_start>d.update(dict_subclass_data)<block_end><return>d<block_end><def_stmt>is_dataclass obj:Any<arrow>bool<block_start><import_from_stmt>omegaconf.base Node<if_stmt>dataclasses<is><none><or>isinstance(obj Node)<block_start><return><false><block_end><return>dataclasses.is_dataclass(obj)<block_end><def_stmt>is_attr_class obj:Any<arrow>bool<block_start><import_from_stmt>omegaconf.base Node<if_stmt>attr<is><none><or>isinstance(obj Node)<block_start><return><false><block_end><return>attr.has(obj)<block_end><def_stmt>is_structured_config obj:Any<arrow>bool<block_start><return>is_attr_class(obj)<or>is_dataclass(obj)<block_end><def_stmt>is_dataclass_frozen type_:Any<arrow>bool<block_start><return>type_.__dataclass_params__.frozen<block_end># type: ignore
<def_stmt>is_attr_frozen type_:type<arrow>bool# This is very hacky and probably fragile as well.
# Unfortunately currently there isn't an official API in attr that can detect that.
# noinspection PyProtectedMember
<block_start><return>type_.__setattr__<eq>attr._make._frozen_setattrs<block_end># type: ignore
<def_stmt>get_type_of class_or_object:Any<arrow>Type[Any]<block_start>type_=class_or_object<if_stmt><not>isinstance(type_ type)<block_start>type_=type(class_or_object)<block_end><assert_stmt>isinstance(type_ type)<line_sep><return>type_<block_end><def_stmt>is_structured_config_frozen obj:Any<arrow>bool<block_start>type_=get_type_of(obj)<if_stmt>is_dataclass(type_)<block_start><return>is_dataclass_frozen(type_)<block_end><if_stmt>is_attr_class(type_)<block_start><return>is_attr_frozen(type_)<block_end><return><false><block_end><def_stmt>get_structured_config_field_names obj:Any<arrow>List[str]<block_start><if_stmt>is_dataclass(obj)<block_start><return>get_dataclass_field_names(obj)<block_end><elif_stmt>is_attr_class(obj)<block_start><return>get_attr_class_field_names(obj)<block_end><else_stmt><block_start><raise>ValueError(f"Unsupported type: {type(obj).__name__}")<block_end><block_end><def_stmt>get_structured_config_data obj:Any allow_objects:Optional[bool]=<none><arrow>Dict[str Any]<block_start><if_stmt>is_dataclass(obj)<block_start><return>get_dataclass_data(obj allow_objects=allow_objects)<block_end><elif_stmt>is_attr_class(obj)<block_start><return>get_attr_data(obj allow_objects=allow_objects)<block_end><else_stmt><block_start><raise>ValueError(f"Unsupported type: {type(obj).__name__}")<block_end><block_end><class_stmt>ValueKind(Enum)<block_start>VALUE=0<line_sep>MANDATORY_MISSING=1<line_sep>INTERPOLATION=2<block_end><def_stmt>_is_missing_value value:Any<arrow>bool<block_start><import_from_stmt>omegaconf Node<if_stmt>isinstance(value Node)<block_start>value=value._value()<block_end><return>_is_missing_literal(value)<block_end><def_stmt>_is_missing_literal value:Any<arrow>bool# Uses literal '???' instead of the MISSING const for performance reasons.
<block_start><return>isinstance(value str)<and>value<eq>"???"<block_end><def_stmt>_is_none value:Any resolve:bool=<false> throw_on_resolution_failure:bool=<true><arrow>bool<block_start><import_from_stmt>omegaconf Node<if_stmt><not>isinstance(value Node)<block_start><return>value<is><none><block_end><if_stmt>resolve<block_start>value=value._maybe_dereference_node(throw_on_resolution_failure=throw_on_resolution_failure)<if_stmt><not>throw_on_resolution_failure<and>value<is><none># Resolution failure: consider that it is *not* None.
<block_start><return><false><block_end><assert_stmt>isinstance(value Node)<block_end><return>value._is_none()<block_end><def_stmt>get_value_kind value:Any strict_interpolation_validation:bool=<false><arrow>ValueKind<block_start>"""
Determine the kind of a value
Examples:
VALUE: "10", "20", True
MANDATORY_MISSING: "???"
INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
"ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"
:param value: Input to classify.
:param strict_interpolation_validation: If `True`, then when `value` is a string
containing "${", it is parsed to validate the interpolation syntax. If `False`,
this parsing step is skipped: this is more efficient, but will not detect errors.
"""<if_stmt>_is_missing_value(value)<block_start><return>ValueKind.MANDATORY_MISSING<block_end>value=_get_value(value)<line_sep># We identify potential interpolations by the presence of "${" in the string.
# Note that escaped interpolations (ex: "esc: \${bar}") are identified as
# interpolations: this is intended, since they must be processed as interpolations
# for the string to be properly un-escaped.
# Keep in mind that invalid interpolations will only be detected when
# `strict_interpolation_validation` is True.
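# e.g. "${foo.bar}" and the escaped form "\${foo.bar}" both contain "${" and are classified as INTERPOLATION here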
<if_stmt>isinstance(value str)<and>"${"<in>value<block_start><if_stmt>strict_interpolation_validation# First try the cheap regex matching that detects common interpolations.
<block_start><if_stmt>SIMPLE_INTERPOLATION_PATTERN.match(value)<is><none># If no match, do the more expensive grammar parsing to detect errors.
<block_start>parse(value)<block_end><block_end><return>ValueKind.INTERPOLATION<block_end><else_stmt><block_start><return>ValueKind.VALUE<block_end><block_end># DEPRECATED: remove in 2.2
<def_stmt>is_bool st:str<arrow>bool<block_start>st=str.lower(st)<line_sep><return>st<eq>"true"<or>st<eq>"false"<block_end><def_stmt>is_float st:str<arrow>bool<block_start><try_stmt><block_start>float(st)<line_sep><return><true><block_end><except_stmt>ValueError<block_start><return><false><block_end><block_end><def_stmt>is_int st:str<arrow>bool<block_start><try_stmt><block_start>int(st)<line_sep><return><true><block_end><except_stmt>ValueError<block_start><return><false><block_end><block_end># DEPRECATED: remove in 2.2
<def_stmt>decode_primitive s:str<arrow>Any<block_start><if_stmt>is_bool(s)<block_start><return>str.lower(s)<eq>"true"<block_end><if_stmt>is_int(s)<block_start><return>int(s)<block_end><if_stmt>is_float(s)<block_start><return>float(s)<block_end><return>s<block_end><def_stmt>is_primitive_list obj:Any<arrow>bool<block_start><import_from_stmt>.base Container<line_sep><return><not>isinstance(obj Container)<and>isinstance(obj (list tuple))<block_end><def_stmt>is_primitive_dict obj:Any<arrow>bool<block_start>t=get_type_of(obj)<line_sep><return>t<is>dict<block_end><def_stmt>is_dict_annotation type_:Any<arrow>bool<block_start>origin=getattr(type_ "__origin__" <none>)<if_stmt>sys.version_info<l>(3 7 0)<block_start><return>origin<is>Dict<or>type_<is>Dict# pragma: no cover
<block_end><else_stmt># pragma: no cover
# TypedDict is a bit hard to detect.
# This support is tentative; if it eventually causes issues in other areas it may be dropped.
<block_start>typed_dict=hasattr(type_ "__base__")<and>type_.__base__<eq>dict<line_sep><return>origin<is>dict<or>typed_dict<block_end><block_end><def_stmt>is_list_annotation type_:Any<arrow>bool<block_start>origin=getattr(type_ "__origin__" <none>)<if_stmt>sys.version_info<l>(3 7 0)<block_start><return>origin<is>List<or>type_<is>List# pragma: no cover
<block_end><else_stmt><block_start><return>origin<is>list<block_end><block_end># pragma: no cover
<def_stmt>is_tuple_annotation type_:Any<arrow>bool<block_start>origin=getattr(type_ "__origin__" <none>)<if_stmt>sys.version_info<l>(3 7 0)<block_start><return>origin<is>Tuple<or>type_<is>Tuple# pragma: no cover
<block_end><else_stmt><block_start><return>origin<is>tuple<block_end><block_end># pragma: no cover
<def_stmt>is_dict_subclass type_:Any<arrow>bool<block_start><return>type_<is><not><none><and>isinstance(type_ type)<and>issubclass(type_ Dict)<block_end><def_stmt>is_dict obj:Any<arrow>bool<block_start><return>is_primitive_dict(obj)<or>is_dict_annotation(obj)<or>is_dict_subclass(obj)<block_end><def_stmt>is_primitive_container obj:Any<arrow>bool<block_start><return>is_primitive_list(obj)<or>is_primitive_dict(obj)<block_end><def_stmt>get_list_element_type ref_type:Optional[Type[Any]]<arrow>Any<block_start>args=getattr(ref_type "__args__" <none>)<if_stmt>ref_type<is><not>List<and>args<is><not><none><and>args[0]<block_start>element_type=args[0]<block_end><else_stmt><block_start>element_type=Any<block_end><return>element_type<block_end><def_stmt>get_dict_key_value_types ref_type:Any<arrow>Tuple[Any Any]<block_start>args=getattr(ref_type "__args__" <none>)<if_stmt>args<is><none><block_start>bases=getattr(ref_type "__orig_bases__" <none>)<if_stmt>bases<is><not><none><and>len(bases)<g>0<block_start>args=getattr(bases[0] "__args__" <none>)<block_end><block_end>key_type:Any<line_sep>element_type:Any<if_stmt>ref_type<is><none><or>ref_type<eq>Dict<block_start>key_type=Any<line_sep>element_type=Any<block_end><else_stmt><block_start><if_stmt>args<is><not><none><block_start>key_type=args[0]<line_sep>element_type=args[1]<block_end><else_stmt><block_start>key_type=Any<line_sep>element_type=Any<block_end><block_end><return>key_type element_type<block_end><def_stmt>valid_value_annotation_type type_:Any<arrow>bool<block_start><return>type_<is>Any<or>is_primitive_type(type_)<or>is_structured_config(type_)<block_end><def_stmt>_valid_dict_key_annotation_type type_:Any<arrow>bool<block_start><import_from_stmt>omegaconf DictKeyType<line_sep><return>type_<is><none><or>type_<is>Any<or>issubclass(type_ DictKeyType.__args__)<block_end># type: ignore
<def_stmt>is_primitive_type type_:Any<arrow>bool<block_start>type_=get_type_of(type_)<line_sep><return>issubclass(type_ Enum)<or>type_<in>(int float bool str type(<none>))<block_end><def_stmt>_is_interpolation v:Any strict_interpolation_validation:bool=<false><arrow>bool<block_start><if_stmt>isinstance(v str)<block_start>ret=(get_value_kind(v strict_interpolation_validation)<eq>ValueKind.INTERPOLATION)<assert_stmt>isinstance(ret bool)<line_sep><return>ret<block_end><return><false><block_end><def_stmt>_get_value value:Any<arrow>Any<block_start><import_from_stmt>.base Container<import_from_stmt>.nodes ValueNode<if_stmt>isinstance(value ValueNode)<block_start><return>value._value()<block_end><elif_stmt>isinstance(value Container)<block_start>boxed=value._value()<if_stmt>boxed<is><none><or>_is_missing_literal(boxed)<or>_is_interpolation(boxed)<block_start><return>boxed<block_end><block_end># return primitives and regular OmegaConf Containers as is
<return>value<block_end><def_stmt>get_ref_type obj:Any key:Any=<none><arrow>Optional[Type[Any]]<block_start><import_from_stmt>omegaconf Container Node<if_stmt>isinstance(obj Container)<block_start><if_stmt>key<is><not><none><block_start>obj=obj._get_node(key)<block_end><block_end><else_stmt><block_start><if_stmt>key<is><not><none><block_start><raise>ValueError("Key must only be provided when obj is a container")<block_end><block_end><if_stmt>isinstance(obj Node)<block_start>ref_type=obj._metadata.ref_type<if_stmt>obj._is_optional()<and>ref_type<is><not>Any<block_start><return>Optional[ref_type]# type: ignore
<block_end><else_stmt><block_start><return>ref_type<block_end><block_end><else_stmt><block_start><return>Any<block_end><block_end># type: ignore
<def_stmt>_raise ex:Exception cause:Exception<arrow><none># Set the environment variable OC_CAUSE=1 to get a stacktrace that includes the
# causing exception.
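# e.g. running the program with OC_CAUSE=1 set in the environment keeps the causing exception chained below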
<block_start>env_var=os.environ["OC_CAUSE"]<if>"OC_CAUSE"<in>os.environ<else><none><line_sep>debugging=sys.gettrace()<is><not><none><line_sep>full_backtrace=(debugging<and><not>env_var<eq>"0")<or>(env_var<eq>"1")<if_stmt>full_backtrace<block_start>ex.__cause__=cause<block_end><else_stmt><block_start>ex.__cause__=<none><block_end><raise>ex.with_traceback(sys.exc_info()[2])<block_end># set env OC_CAUSE=1 for full backtrace
<def_stmt>format_and_raise node:Any key:Any value:Any msg:str cause:Exception type_override:Any=<none> <arrow><none><block_start><import_from_stmt>omegaconf OmegaConf<import_from_stmt>omegaconf.base Node<if_stmt>isinstance(cause AssertionError)<block_start><raise><block_end><if_stmt>isinstance(cause OmegaConfBaseException)<and>cause._initialized<block_start>ex=cause<if_stmt>type_override<is><not><none><block_start>ex=type_override(str(cause))<line_sep>ex.__dict__=copy.deepcopy(cause.__dict__)<block_end>_raise(ex cause)<block_end>object_type:Optional[Type[Any]]<line_sep>object_type_str:Optional[str]=<none><line_sep>ref_type:Optional[Type[Any]]<line_sep>ref_type_str:Optional[str]<line_sep>child_node:Optional[Node]=<none><if_stmt>node<is><none><block_start>full_key=key<if>key<is><not><none><else>""<line_sep>object_type=<none><line_sep>ref_type=<none><line_sep>ref_type_str=<none><block_end><else_stmt><block_start><if_stmt>key<is><not><none><and><not>node._is_none()<block_start>child_node=node._get_node(key validate_access=<false>)<block_end><try_stmt><block_start>full_key=node._get_full_key(key=key)<block_end><except_stmt>Exception<as>exc# Since we are handling an exception, raising a different one here would
# be misleading. Instead, we display it in the key.
<block_start>full_key=f"<unresolvable due to {type(exc).__name__}: {exc}>"<block_end>object_type=OmegaConf.get_type(node)<line_sep>object_type_str=type_str(object_type)<line_sep>ref_type=get_ref_type(node)<line_sep>ref_type_str=type_str(ref_type)<block_end>msg=string.Template(msg).safe_substitute(REF_TYPE=ref_type_str OBJECT_TYPE=object_type_str KEY=key FULL_KEY=full_key VALUE=value VALUE_TYPE=type_str(type(value) include_module_name=<true>) KEY_TYPE=f"{type(key).__name__}" )<if_stmt>ref_type<not><in>(<none> Any)<block_start>template=dedent("""\
$MSG
full_key: $FULL_KEY
reference_type=$REF_TYPE
object_type=$OBJECT_TYPE""")<block_end><else_stmt><block_start>template=dedent("""\
$MSG
full_key: $FULL_KEY
object_type=$OBJECT_TYPE""")<block_end>s=string.Template(template=template)<line_sep>message=s.substitute(REF_TYPE=ref_type_str OBJECT_TYPE=object_type_str MSG=msg FULL_KEY=full_key)<line_sep>exception_type=type(cause)<if>type_override<is><none><else>type_override<if_stmt>exception_type<eq>TypeError<block_start>exception_type=ConfigTypeError<block_end><elif_stmt>exception_type<eq>IndexError<block_start>exception_type=ConfigIndexError<block_end>ex=exception_type(f"{message}")<if_stmt>issubclass(exception_type OmegaConfBaseException)<block_start>ex._initialized=<true><line_sep>ex.msg=message<line_sep>ex.parent_node=node<line_sep>ex.child_node=child_node<line_sep>ex.key=key<line_sep>ex.full_key=full_key<line_sep>ex.value=value<line_sep>ex.object_type=object_type<line_sep>ex.object_type_str=object_type_str<line_sep>ex.ref_type=ref_type<line_sep>ex.ref_type_str=ref_type_str<block_end>_raise(ex cause)<block_end><def_stmt>type_str t:Any include_module_name:bool=<false><arrow>str<block_start>is_optional,t=_resolve_optional(t)<if_stmt>t<is><none><block_start><return>type(t).__name__<block_end><if_stmt>t<is>Any<block_start><return>"Any"<block_end><if_stmt>t<is><ellipsis><block_start><return>"..."<block_end><if_stmt>sys.version_info<l>(3 7 0)# pragma: no cover
# Python 3.6
<block_start><if_stmt>hasattr(t "__name__")<block_start>name=str(t.__name__)<block_end><else_stmt><block_start><if_stmt>t.__origin__<is><not><none><block_start>name=type_str(t.__origin__)<block_end><else_stmt><block_start>name=str(t)<if_stmt>name.startswith("typing.")<block_start>name=name[len("typing."):]<block_end><block_end><block_end><block_end><else_stmt># pragma: no cover
# Python >= 3.7
<block_start><if_stmt>hasattr(t "__name__")<block_start>name=str(t.__name__)<block_end><else_stmt><block_start><if_stmt>t._name<is><none><block_start><if_stmt>t.__origin__<is><not><none><block_start>name=type_str(t.__origin__ include_module_name=include_module_name)<block_end><block_end><else_stmt><block_start>name=str(t._name)<block_end><block_end><block_end>args=getattr(t "__args__" <none>)<if_stmt>args<is><not><none><block_start>args=", ".join([type_str(t include_module_name=include_module_name)<for>t t.__args__])<line_sep>ret=f"{name}[{args}]"<block_end><else_stmt><block_start>ret=name<block_end><if_stmt>include_module_name<block_start><if_stmt>(hasattr(t "__module__")<and>t.__module__<ne>"builtins"<and>t.__module__<ne>"typing"<and><not>t.__module__.startswith("omegaconf."))<block_start>module_prefix=t.__module__+"."<block_end><else_stmt><block_start>module_prefix=""<block_end>ret=module_prefix+ret<block_end><if_stmt>is_optional<block_start><return>f"Optional[{ret}]"<block_end><else_stmt><block_start><return>ret<block_end><block_end><def_stmt>_ensure_container target:Any flags:Optional[Dict[str bool]]=<none><arrow>Any<block_start><import_from_stmt>omegaconf OmegaConf<if_stmt>is_primitive_container(target)<block_start><assert_stmt>isinstance(target (list dict))<line_sep>target=OmegaConf.create(target flags=flags)<block_end><elif_stmt>is_structured_config(target)<block_start>target=OmegaConf.structured(target flags=flags)<block_end><elif_stmt><not>OmegaConf.is_config(target)<block_start><raise>ValueError("Invalid input. Supports one of "+"[dict,list,DictConfig,ListConfig,dataclass,dataclass instance,attr class,attr class instance]")<block_end><return>target<block_end><def_stmt>is_generic_list type_:Any<arrow>bool<block_start>"""
Checks if a type is a generic list, for example:
list returns False
typing.List returns False
typing.List[T] returns True
:param type_: variable type
:return: bool
"""<line_sep><return>is_list_annotation(type_)<and>get_list_element_type(type_)<is><not><none><block_end><def_stmt>is_generic_dict type_:Any<arrow>bool<block_start>"""
Checks if a type is a generic dict, for example:
dict returns False
typing.Dict returns False
typing.Dict[K, V] returns True
:param type_: variable type
:return: bool
"""<line_sep><return>is_dict_annotation(type_)<and>len(get_dict_key_value_types(type_))<g>0<block_end><def_stmt>is_container_annotation type_:Any<arrow>bool<block_start><return>is_list_annotation(type_)<or>is_dict_annotation(type_)<block_end><def_stmt>split_key key:str<arrow>List[str]<block_start>"""
Split a full key path into its individual components.
This is similar to `key.split(".")` but also works with the getitem syntax:
"a.b" -> ["a", "b"]
"a[b]" -> ["a, "b"]
".a.b[c].d" -> ["", "a", "b", "c", "d"]
"[a].b" -> ["a", "b"]
"""<line_sep># Obtain the first part of the key (in docstring examples: a, a, .a, '')
first=KEY_PATH_HEAD.match(key)<assert_stmt>first<is><not><none><line_sep>first_stop=first.span()[1]<line_sep># `tokens` will contain all elements composing the key.
tokens=key[0:first_stop].split(".")<line_sep># Optimization in case `key` has no other component: we are done.
<if_stmt>first_stop<eq>len(key)<block_start><return>tokens<block_end><if_stmt>key[first_stop]<eq>"["<and><not>tokens[-1]# This is a special case where the first key starts with brackets, e.g.
# [a] or ..[a]. In that case there is an extra "" in `tokens` that we
# need to get rid of:
# [a] -> tokens = [""] but we would like []
# ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
<block_start>tokens.pop()<block_end># Identify other key elements (in docstring examples: b, b, b/c/d, b)
others=KEY_PATH_OTHER.findall(key[first_stop:])<line_sep># There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
# with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
# Only one group can be non-empty.
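# e.g. for the remaining part ".b[c].d" findall returns [('b', ''), ('', 'c'), ('d', '')]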
tokens<augadd>[dot_key<if>dot_key<else>bracket_key<for>dot_key,bracket_key others]<line_sep><return>tokens<block_end># Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
@contextmanager<def_stmt>nullcontext enter_result:Any=<none><arrow>Iterator[Any]<block_start><yield>enter_result<block_end> |
<import_stmt>unittest<import_from_stmt>test support<import_stmt>base64<import_stmt>binascii<import_stmt>os<import_stmt>sys<import_stmt>subprocess<class_stmt>LegacyBase64TestCase(unittest.TestCase)<block_start><def_stmt>test_encodebytes self<block_start>eq=self.assertEqual<line_sep>eq(base64.encodebytes(b"www.python.org") b"d3d3LnB5dGhvbi5vcmc=\n")<line_sep>eq(base64.encodebytes(b"a") b"YQ==\n")<line_sep>eq(base64.encodebytes(b"ab") b"YWI=\n")<line_sep>eq(base64.encodebytes(b"abc") b"YWJj\n")<line_sep>eq(base64.encodebytes(b"") b"")<line_sep>eq(base64.encodebytes(b"abcdefghijklmnopqrstuvwxyz"<concat>b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"<concat>b"0123456789!@#0^&*();:<>,. []{}") b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"<concat>b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"<concat>b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")<line_sep># Non-bytes
eq(base64.encodebytes(bytearray(b'abc')) b'YWJj\n')<line_sep>self.assertRaises(TypeError base64.encodebytes "")<block_end><def_stmt>test_decodebytes self<block_start>eq=self.assertEqual<line_sep>eq(base64.decodebytes(b"d3d3LnB5dGhvbi5vcmc=\n") b"www.python.org")<line_sep>eq(base64.decodebytes(b"YQ==\n") b"a")<line_sep>eq(base64.decodebytes(b"YWI=\n") b"ab")<line_sep>eq(base64.decodebytes(b"YWJj\n") b"abc")<line_sep>eq(base64.decodebytes(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"<concat>b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"<concat>b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n") b"abcdefghijklmnopqrstuvwxyz"<concat>b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"<concat>b"0123456789!@#0^&*();:<>,. []{}")<line_sep>eq(base64.decodebytes(b'') b'')<line_sep># Non-bytes
eq(base64.decodebytes(bytearray(b'YWJj\n')) b'abc')<line_sep>self.assertRaises(TypeError base64.decodebytes "")<block_end><def_stmt>test_encode self<block_start>eq=self.assertEqual<import_from_stmt>io BytesIO StringIO<line_sep>infp=BytesIO(b'abcdefghijklmnopqrstuvwxyz'<concat>b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'<concat>b'0123456789!@#0^&*();:<>,. []{}')<line_sep>outfp=BytesIO()<line_sep>base64.encode(infp outfp)<line_sep>eq(outfp.getvalue() b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'<concat>b'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'<concat>b'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')<line_sep># Non-binary files
self.assertRaises(TypeError base64.encode StringIO('abc') BytesIO())<line_sep>self.assertRaises(TypeError base64.encode BytesIO(b'abc') StringIO())<line_sep>self.assertRaises(TypeError base64.encode StringIO('abc') StringIO())<block_end><def_stmt>test_decode self<block_start><import_from_stmt>io BytesIO StringIO<line_sep>infp=BytesIO(b'd3d3LnB5dGhvbi5vcmc=')<line_sep>outfp=BytesIO()<line_sep>base64.decode(infp outfp)<line_sep>self.assertEqual(outfp.getvalue() b'www.python.org')<line_sep># Non-binary files
self.assertRaises(TypeError base64.encode StringIO('YWJj\n') BytesIO())<line_sep>self.assertRaises(TypeError base64.encode BytesIO(b'YWJj\n') StringIO())<line_sep>self.assertRaises(TypeError base64.encode StringIO('YWJj\n') StringIO())<block_end><block_end><class_stmt>BaseXYTestCase(unittest.TestCase)<block_start><def_stmt>test_b64encode self<block_start>eq=self.assertEqual<line_sep># Test default alphabet
eq(base64.b64encode(b"www.python.org") b"d3d3LnB5dGhvbi5vcmc=")<line_sep>eq(base64.b64encode(b'\x00') b'AA==')<line_sep>eq(base64.b64encode(b"a") b"YQ==")<line_sep>eq(base64.b64encode(b"ab") b"YWI=")<line_sep>eq(base64.b64encode(b"abc") b"YWJj")<line_sep>eq(base64.b64encode(b"") b"")<line_sep>eq(base64.b64encode(b"abcdefghijklmnopqrstuvwxyz"<concat>b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"<concat>b"0123456789!@#0^&*();:<>,. []{}") b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"<concat>b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"<concat>b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")<line_sep># Test with arbitrary alternative characters
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d' altchars=b'*$') b'01a*b$cd')<line_sep># Non-bytes
eq(base64.b64encode(bytearray(b'abcd')) b'YWJjZA==')<line_sep>eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d' altchars=bytearray(b'*$')) b'01a*b$cd')<line_sep># Check if passing a str object raises an error
self.assertRaises(TypeError base64.b64encode "")<line_sep>self.assertRaises(TypeError base64.b64encode b"" altchars="")<line_sep># Test standard alphabet
eq(base64.standard_b64encode(b"www.python.org") b"d3d3LnB5dGhvbi5vcmc=")<line_sep>eq(base64.standard_b64encode(b"a") b"YQ==")<line_sep>eq(base64.standard_b64encode(b"ab") b"YWI=")<line_sep>eq(base64.standard_b64encode(b"abc") b"YWJj")<line_sep>eq(base64.standard_b64encode(b"") b"")<line_sep>eq(base64.standard_b64encode(b"abcdefghijklmnopqrstuvwxyz"<concat>b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"<concat>b"0123456789!@#0^&*();:<>,. []{}") b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"<concat>b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"<concat>b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")<line_sep># Non-bytes
eq(base64.standard_b64encode(bytearray(b'abcd')) b'YWJjZA==')<line_sep># Check if passing a str object raises an error
self.assertRaises(TypeError base64.standard_b64encode "")<line_sep># Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode(b'\xd3V\xbeo\xf7\x1d') b'01a-b_cd')<line_sep># Non-bytes
eq(base64.urlsafe_b64encode(bytearray(b'\xd3V\xbeo\xf7\x1d')) b'01a-b_cd')<line_sep># Check if passing a str object raises an error
self.assertRaises(TypeError base64.urlsafe_b64encode "")<block_end><def_stmt>test_b64decode self<block_start>eq=self.assertEqual<line_sep>tests={b"d3d3LnB5dGhvbi5vcmc=":b"www.python.org" b'AA==':b'\x00' b"YQ==":b"a" b"YWI=":b"ab" b"YWJj":b"abc" b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"<concat>b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"<concat>b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":b"abcdefghijklmnopqrstuvwxyz"<concat>b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"<concat>b"0123456789!@#0^&*();:<>,. []{}" b'':b'' }<for_stmt>data,res tests.items()<block_start>eq(base64.b64decode(data) res)<line_sep>eq(base64.b64decode(data.decode('ascii')) res)<block_end># Non-bytes
eq(base64.b64decode(bytearray(b"YWJj")) b"abc")<line_sep># Test with arbitrary alternative characters
tests_altchars={(b'01a*b$cd' b'*$'):b'\xd3V\xbeo\xf7\x1d' }<for_stmt>(data altchars),res tests_altchars.items()<block_start>data_str=data.decode('ascii')<line_sep>altchars_str=altchars.decode('ascii')<line_sep>eq(base64.b64decode(data altchars=altchars) res)<line_sep>eq(base64.b64decode(data_str altchars=altchars) res)<line_sep>eq(base64.b64decode(data altchars=altchars_str) res)<line_sep>eq(base64.b64decode(data_str altchars=altchars_str) res)<block_end># Test standard alphabet
<for_stmt>data,res tests.items()<block_start>eq(base64.standard_b64decode(data) res)<line_sep>eq(base64.standard_b64decode(data.decode('ascii')) res)<block_end># Non-bytes
eq(base64.standard_b64decode(bytearray(b"YWJj")) b"abc")<line_sep># Test with 'URL safe' alternative characters
tests_urlsafe={b'01a-b_cd':b'\xd3V\xbeo\xf7\x1d' b'':b'' }<for_stmt>data,res tests_urlsafe.items()<block_start>eq(base64.urlsafe_b64decode(data) res)<line_sep>eq(base64.urlsafe_b64decode(data.decode('ascii')) res)<block_end># Non-bytes
eq(base64.urlsafe_b64decode(bytearray(b'01a-b_cd')) b'\xd3V\xbeo\xf7\x1d')<block_end><def_stmt>test_b64decode_padding_error self<block_start>self.assertRaises(binascii.Error base64.b64decode b'abc')<line_sep>self.assertRaises(binascii.Error base64.b64decode 'abc')<block_end><def_stmt>test_b64decode_invalid_chars self# issue 1466065: Test some invalid characters.
<block_start>tests=((b'%3d==' b'\xdd') (b'$3d==' b'\xdd') (b'[==' b'') (b'YW]3=' b'am') (b'3{d==' b'\xdd') (b'3d}==' b'\xdd') (b'@@' b'') (b'!' b'') (b'YWJj\nYWI=' b'abcab'))<for_stmt>bstr,res tests<block_start>self.assertEqual(base64.b64decode(bstr) res)<line_sep>self.assertEqual(base64.b64decode(bstr.decode('ascii')) res)<with_stmt>self.assertRaises(binascii.Error)<block_start>base64.b64decode(bstr validate=<true>)<block_end><with_stmt>self.assertRaises(binascii.Error)<block_start>base64.b64decode(bstr.decode('ascii') validate=<true>)<block_end><block_end><block_end><def_stmt>test_b32encode self<block_start>eq=self.assertEqual<line_sep>eq(base64.b32encode(b'') b'')<line_sep>eq(base64.b32encode(b'\x00') b'AA======')<line_sep>eq(base64.b32encode(b'a') b'ME======')<line_sep>eq(base64.b32encode(b'ab') b'MFRA====')<line_sep>eq(base64.b32encode(b'abc') b'MFRGG===')<line_sep>eq(base64.b32encode(b'abcd') b'MFRGGZA=')<line_sep>eq(base64.b32encode(b'abcde') b'MFRGGZDF')<line_sep># Non-bytes
eq(base64.b32encode(bytearray(b'abcd')) b'MFRGGZA=')<line_sep>self.assertRaises(TypeError base64.b32encode "")<block_end><def_stmt>test_b32decode self<block_start>eq=self.assertEqual<line_sep>tests={b'':b'' b'AA======':b'\x00' b'ME======':b'a' b'MFRA====':b'ab' b'MFRGG===':b'abc' b'MFRGGZA=':b'abcd' b'MFRGGZDF':b'abcde' }<for_stmt>data,res tests.items()<block_start>eq(base64.b32decode(data) res)<line_sep>eq(base64.b32decode(data.decode('ascii')) res)<block_end># Non-bytes
eq(base64.b32decode(bytearray(b'MFRGG===')) b'abc')<block_end><def_stmt>test_b32decode_casefold self<block_start>eq=self.assertEqual<line_sep>tests={b'':b'' b'ME======':b'a' b'MFRA====':b'ab' b'MFRGG===':b'abc' b'MFRGGZA=':b'abcd' b'MFRGGZDF':b'abcde' # Lower cases
b'me======':b'a' b'mfra====':b'ab' b'mfrgg===':b'abc' b'mfrggza=':b'abcd' b'mfrggzdf':b'abcde' }<for_stmt>data,res tests.items()<block_start>eq(base64.b32decode(data <true>) res)<line_sep>eq(base64.b32decode(data.decode('ascii') <true>) res)<block_end>self.assertRaises(binascii.Error base64.b32decode b'me======')<line_sep>self.assertRaises(binascii.Error base64.b32decode 'me======')<line_sep># Mapping zero and one
eq(base64.b32decode(b'MLO23456') b'b\xdd\xad\xf3\xbe')<line_sep>eq(base64.b32decode('MLO23456') b'b\xdd\xad\xf3\xbe')<line_sep>map_tests={(b'M1023456' b'L'):b'b\xdd\xad\xf3\xbe' (b'M1023456' b'I'):b'b\x1d\xad\xf3\xbe' }<for_stmt>(data map01),res map_tests.items()<block_start>data_str=data.decode('ascii')<line_sep>map01_str=map01.decode('ascii')<line_sep>eq(base64.b32decode(data map01=map01) res)<line_sep>eq(base64.b32decode(data_str map01=map01) res)<line_sep>eq(base64.b32decode(data map01=map01_str) res)<line_sep>eq(base64.b32decode(data_str map01=map01_str) res)<line_sep>self.assertRaises(binascii.Error base64.b32decode data)<line_sep>self.assertRaises(binascii.Error base64.b32decode data_str)<block_end><block_end><def_stmt>test_b32decode_error self<block_start><for_stmt>data [b'abc' b'ABCDEF==' b'==ABCDEF']<block_start><with_stmt>self.assertRaises(binascii.Error)<block_start>base64.b32decode(data)<block_end><with_stmt>self.assertRaises(binascii.Error)<block_start>base64.b32decode(data.decode('ascii'))<block_end><block_end><block_end><def_stmt>test_b16encode self<block_start>eq=self.assertEqual<line_sep>eq(base64.b16encode(b'\x01\x02\xab\xcd\xef') b'0102ABCDEF')<line_sep>eq(base64.b16encode(b'\x00') b'00')<line_sep># Non-bytes
eq(base64.b16encode(bytearray(b'\x01\x02\xab\xcd\xef')) b'0102ABCDEF')<line_sep>self.assertRaises(TypeError base64.b16encode "")<block_end><def_stmt>test_b16decode self<block_start>eq=self.assertEqual<line_sep>eq(base64.b16decode(b'0102ABCDEF') b'\x01\x02\xab\xcd\xef')<line_sep>eq(base64.b16decode('0102ABCDEF') b'\x01\x02\xab\xcd\xef')<line_sep>eq(base64.b16decode(b'00') b'\x00')<line_sep>eq(base64.b16decode('00') b'\x00')<line_sep># Lower case is not allowed without a flag
self.assertRaises(binascii.Error base64.b16decode b'0102abcdef')<line_sep>self.assertRaises(binascii.Error base64.b16decode '0102abcdef')<line_sep># Case fold
eq(base64.b16decode(b'0102abcdef' <true>) b'\x01\x02\xab\xcd\xef')<line_sep>eq(base64.b16decode('0102abcdef' <true>) b'\x01\x02\xab\xcd\xef')<line_sep># Non-bytes
eq(base64.b16decode(bytearray(b"0102ABCDEF")) b'\x01\x02\xab\xcd\xef')<block_end><def_stmt>test_decode_nonascii_str self<block_start>decode_funcs=(base64.b64decode base64.standard_b64decode base64.urlsafe_b64decode base64.b32decode base64.b16decode)<for_stmt>f decode_funcs<block_start>self.assertRaises(ValueError f 'with non-ascii \xcb')<block_end><block_end><def_stmt>test_ErrorHeritage self<block_start>self.assertTrue(issubclass(binascii.Error ValueError))<block_end><block_end><class_stmt>TestMain(unittest.TestCase)<block_start><def_stmt>tearDown self<block_start><if_stmt>os.path.exists(support.TESTFN)<block_start>os.unlink(support.TESTFN)<block_end><block_end><def_stmt>get_output self *args **options<block_start>args=(sys.executable '-m' 'base64')+args<line_sep><return>subprocess.check_output(args **options)<block_end><def_stmt>test_encode_decode self<block_start>output=self.get_output('-t')<line_sep>self.assertSequenceEqual(output.splitlines() (b"b'Aladdin:open sesame'" br"b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'" b"b'Aladdin:open sesame'" ))<block_end><def_stmt>test_encode_file self<block_start><with_stmt>open(support.TESTFN 'wb')<as>fp<block_start>fp.write(b'a\xffb\n')<block_end>output=self.get_output('-e' support.TESTFN)<line_sep>self.assertEqual(output.rstrip() b'Yf9iCg==')<with_stmt>open(support.TESTFN 'rb')<as>fp<block_start>output=self.get_output('-e' stdin=fp)<block_end>self.assertEqual(output.rstrip() b'Yf9iCg==')<block_end><def_stmt>test_decode self<block_start><with_stmt>open(support.TESTFN 'wb')<as>fp<block_start>fp.write(b'Yf9iCg==')<block_end>output=self.get_output('-d' support.TESTFN)<line_sep>self.assertEqual(output.rstrip() b'a\xffb')<block_end><block_end><def_stmt>test_main <block_start>support.run_unittest(__name__)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_main()<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>sklearn metrics<import_from_stmt>neupy algorithms<import_from_stmt>base BaseTestCase<class_stmt>CMACTestCase(BaseTestCase)<block_start><def_stmt>test_cmac self<block_start>X_train=np.reshape(np.linspace(0 2<times>np.pi 100) (100 1))<line_sep>X_train_before=X_train.copy()<line_sep>X_test=np.reshape(np.linspace(np.pi 2<times>np.pi 50) (50 1))<line_sep>y_train=np.sin(X_train)<line_sep>y_train_before=y_train.copy()<line_sep>y_test=np.sin(X_test)<line_sep>cmac=algorithms.CMAC(quantization=100 associative_unit_size=32 step=0.2 verbose=<false> )<line_sep>cmac.train(X_train y_train epochs=100)<line_sep>predicted_test=cmac.predict(X_test)<line_sep>predicted_test=predicted_test.reshape((len(predicted_test) 1))<line_sep>error=metrics.mean_absolute_error(y_test predicted_test)<line_sep>self.assertAlmostEqual(error 0.0024 places=4)<line_sep># Test that algorithm didn't modify data samples
np.testing.assert_array_equal(X_train X_train_before)<line_sep>np.testing.assert_array_equal(X_train X_train_before)<line_sep>np.testing.assert_array_equal(y_train y_train_before)<line_sep>self.assertPickledNetwork(cmac X_train)<block_end><def_stmt>test_train_different_inputs self<block_start>self.assertInvalidVectorTrain(network=algorithms.CMAC() input_vector=np.array([1 2 3]) target=np.array([1 2 3]))<block_end><def_stmt>test_predict_different_inputs self<block_start>cmac=algorithms.CMAC()<line_sep>data=np.array([[1 2 3]]).T<line_sep>target=np.array([[1 2 3]]).T<line_sep>cmac.train(data target epochs=100)<line_sep>self.assertInvalidVectorPred(network=cmac input_vector=np.array([1 2 3]) target=target decimal=2)<block_end><def_stmt>test_cmac_multi_output self<block_start>X_train=np.linspace(0 2<times>np.pi 100)<line_sep>X_train=np.vstack([X_train X_train])<line_sep>X_test=np.linspace(0 2<times>np.pi 100)<line_sep>X_test=np.vstack([X_test X_test])<line_sep>y_train=np.sin(X_train)<line_sep>y_test=np.sin(X_test)<line_sep>cmac=algorithms.CMAC(quantization=100 associative_unit_size=32 step=0.2 )<line_sep>cmac.train(X_train y_train X_test y_test epochs=100)<line_sep>predicted_test=cmac.predict(X_test)<line_sep>error=metrics.mean_absolute_error(y_test predicted_test)<line_sep>self.assertAlmostEqual(error 0 places=6)<block_end><def_stmt>test_cmac_training_exceptions self<block_start>cmac=algorithms.CMAC(quantization=100 associative_unit_size=32 step=0.2 )<with_stmt>self.assertRaises(ValueError)<block_start>cmac.train(X_train=<true> y_train=<true> X_test=<none> y_test=<true>)<block_end><block_end><block_end> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
<import_from_stmt>azure.cli.core.decorators Completer<import_from_stmt>azure.cli.core.commands.client_factory get_subscription_id<import_from_stmt>._client_factory cf_policy_insights<line_sep>@Completer<def_stmt>get_policy_remediation_completion_list cmd prefix namespace **kwargs# pylint: disable=unused-argument
<block_start>client=cf_policy_insights(cmd.cli_ctx)<line_sep>sub=get_subscription_id(cmd.cli_ctx)<line_sep>rg=getattr(namespace 'resource_group_name' <none>)<line_sep>management_group=getattr(namespace 'management_group_name' <none>)<if_stmt>rg<block_start>result=client.remediations.list_for_resource_group(subscription_id=sub resource_group_name=rg)<block_end><elif_stmt>management_group<block_start>result=client.remediations.list_for_management_group(management_group_id=management_group)<block_end><else_stmt><block_start>result=client.remediations.list_for_subscription(subscription_id=sub)<block_end><return>[i.name<for>i result]<block_end>@Completer<def_stmt>get_policy_metadata_completion_list cmd prefix namespace **kwargs# pylint: disable=unused-argument
<block_start>client=cf_policy_insights(cmd.cli_ctx).policy_metadata<import_from_stmt>azure.mgmt.policyinsights.models QueryOptions<line_sep>query_options=QueryOptions(top=2000)<line_sep><return>[metadata.name<for>metadata client.list(query_options)<if>metadata.name.startswith(prefix)]<block_end> |
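The completers above all follow the same contract: a callable decorated with @Completer that receives the command object, the prefix typed so far, and the parsed namespace, and returns a list of candidate strings. Below is a self-contained sketch of that contract with stand-in data; the names returned here are invented for illustration, whereas the real completers query the Policy Insights client.

from types import SimpleNamespace

def fake_remediation_completer(cmd, prefix, namespace, **kwargs):
    # Stand-in for the client.remediations.list_* calls in the real completer.
    names = ["fix-vm-tags", "fix-storage-https", "fix-sql-audit"]
    return [name for name in names if name.startswith(prefix)]

# Only candidates matching the typed prefix are offered for completion.
print(fake_remediation_completer(None, "fix-s",
                                 SimpleNamespace(resource_group_name=None)))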
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-25 22:22
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>django.utils.timezone<import_stmt>django_smalluuid.models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("hordak" "0010_auto_20161216_1202")]<line_sep>operations=[migrations.CreateModel(name="TransactionImport" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID") ) ("uuid" django_smalluuid.models.SmallUUIDField(default=django_smalluuid.models.UUIDDefault() editable=<false> unique=<true>) ) ("timestamp" models.DateTimeField(default=django.utils.timezone.now editable=<false>) ) ("has_headings" models.BooleanField(default=<true> verbose_name="First line of file contains headings") ) ("file" models.FileField(upload_to="transaction_imports" verbose_name="CSV file to import") ) ("state" models.CharField(choices=[("pending" "Pending") ("uploaded" "Uploaded, ready to import") ("done" "Import complete") ] default="pending" max_length=20 ) ) ("date_format" models.CharField(choices=[("%d-%m-%Y" "dd-mm-yyyy") ("%d/%m/%Y" "dd/mm/yyyy") ("%d.%m.%Y" "dd.mm.yyyy") ("%d-%Y-%m" "dd-yyyy-mm") ("%d/%Y/%m" "dd/yyyy/mm") ("%d.%Y.%m" "dd.yyyy.mm") ("%m-%d-%Y" "mm-dd-yyyy") ("%m/%d/%Y" "mm/dd/yyyy") ("%m.%d.%Y" "mm.dd.yyyy") ("%m-%Y-%d" "mm-yyyy-dd") ("%m/%Y/%d" "mm/yyyy/dd") ("%m.%Y.%d" "mm.yyyy.dd") ("%Y-%d-%m" "yyyy-dd-mm") ("%Y/%d/%m" "yyyy/dd/mm") ("%Y.%d.%m" "yyyy.dd.mm") ("%Y-%m-%d" "yyyy-mm-dd") ("%Y/%m/%d" "yyyy/mm/dd") ("%Y.%m.%d" "yyyy.mm.dd") ("%d-%m-%y" "dd-mm-yy") ("%d/%m/%y" "dd/mm/yy") ("%d.%m.%y" "dd.mm.yy") ("%d-%y-%m" "dd-yy-mm") ("%d/%y/%m" "dd/yy/mm") ("%d.%y.%m" "dd.yy.mm") ("%m-%d-%y" "mm-dd-yy") ("%m/%d/%y" "mm/dd/yy") ("%m.%d.%y" "mm.dd.yy") ("%m-%y-%d" "mm-yy-dd") ("%m/%y/%d" "mm/yy/dd") ("%m.%y.%d" "mm.yy.dd") ("%y-%d-%m" "yy-dd-mm") ("%y/%d/%m" "yy/dd/mm") ("%y.%d.%m" "yy.dd.mm") ("%y-%m-%d" "yy-mm-dd") ("%y/%m/%d" "yy/mm/dd") ("%y.%m.%d" "yy.mm.dd") ] default="%d-%m-%Y" max_length=50 ) ) ("hordak_import" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to="hordak.StatementImport") ) ] ) migrations.CreateModel(name="TransactionImportColumn" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID") ) ("column_number" models.PositiveSmallIntegerField()) ("column_heading" models.CharField(blank=<true> default="" max_length=100 verbose_name="Column") ) ("to_field" models.CharField(blank=<true> choices=[(<none> "-- Do not import --") ("date" "Date") ("amount" "Amount") ("amount_out" "Amount (money in only)") ("amount_in" "Amount (money out only)") ("description" "Description / Notes") ] default=<none> max_length=20 null=<true> verbose_name="Is" ) ) ("example" models.CharField(blank=<true> default="" max_length=200)) ("transaction_import" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="columns" to="hordak.TransactionImport" ) ) ] options={"ordering":["transaction_import" "column_number"]} ) migrations.AlterUniqueTogether(name="transactionimportcolumn" unique_together=set([("transaction_import" "column_number") ("transaction_import" "to_field")]) ) ]<block_end> |
<import_stmt>asyncio<import_stmt>uuid<import_stmt>pytest<import_from_stmt>aiomisc_pytest.pytest_plugin TCPProxy<import_stmt>aiormq<async_keyword><def_stmt>test_simple amqp_channel:aiormq.Channel<block_start><await>amqp_channel.basic_qos(prefetch_count=1)<assert_stmt>amqp_channel.number<line_sep>queue=asyncio.Queue()<line_sep>deaclare_ok=<await>amqp_channel.queue_declare(auto_delete=<true>)<line_sep>consume_ok=<await>amqp_channel.basic_consume(deaclare_ok.queue queue.put)<line_sep><await>amqp_channel.basic_publish(b"foo" routing_key=deaclare_ok.queue properties=aiormq.spec.Basic.Properties(message_id="123") )<line_sep>message=<await>queue.get()# type: DeliveredMessage
<assert_stmt>message.body<eq>b"foo"<line_sep>cancel_ok=<await>amqp_channel.basic_cancel(consume_ok.consumer_tag)<assert_stmt>cancel_ok.consumer_tag<eq>consume_ok.consumer_tag<assert_stmt>cancel_ok.consumer_tag<not><in>amqp_channel.consumers<line_sep><await>amqp_channel.queue_delete(deaclare_ok.queue)<line_sep>deaclare_ok=<await>amqp_channel.queue_declare(auto_delete=<true>)<line_sep><await>amqp_channel.basic_publish(b"foo bar" routing_key=deaclare_ok.queue)<line_sep>message=<await>amqp_channel.basic_get(deaclare_ok.queue no_ack=<true>)<assert_stmt>message.body<eq>b"foo bar"<block_end><async_keyword><def_stmt>test_blank_body amqp_channel:aiormq.Channel<block_start><await>amqp_channel.basic_qos(prefetch_count=1)<assert_stmt>amqp_channel.number<line_sep>queue=asyncio.Queue()<line_sep>deaclare_ok=<await>amqp_channel.queue_declare(auto_delete=<true>)<line_sep>consume_ok=<await>amqp_channel.basic_consume(deaclare_ok.queue queue.put)<line_sep><await>amqp_channel.basic_publish(b"" routing_key=deaclare_ok.queue properties=aiormq.spec.Basic.Properties(message_id="123") )<line_sep>message=<await>queue.get()# type: DeliveredMessage
<assert_stmt>message.body<eq>b""<line_sep>cancel_ok=<await>amqp_channel.basic_cancel(consume_ok.consumer_tag)<assert_stmt>cancel_ok.consumer_tag<eq>consume_ok.consumer_tag<assert_stmt>cancel_ok.consumer_tag<not><in>amqp_channel.consumers<line_sep><await>amqp_channel.queue_delete(deaclare_ok.queue)<line_sep>deaclare_ok=<await>amqp_channel.queue_declare(auto_delete=<true>)<line_sep><await>amqp_channel.basic_publish(b"foo bar" routing_key=deaclare_ok.queue)<line_sep>message=<await>amqp_channel.basic_get(deaclare_ok.queue no_ack=<true>)<assert_stmt>message.body<eq>b"foo bar"<block_end>@pytest.mark.no_catch_loop_exceptions<async_keyword><def_stmt>test_bad_consumer amqp_channel:aiormq.Channel loop<block_start>channel=amqp_channel# type: aiormq.Channel
<await>channel.basic_qos(prefetch_count=1)<line_sep>declare_ok=<await>channel.queue_declare()<line_sep>future=loop.create_future()<line_sep><await>channel.basic_publish(b"urgent" routing_key=declare_ok.queue)<line_sep>consumer_tag=loop.create_future()<async_keyword><def_stmt>bad_consumer message<block_start><await>channel.basic_cancel(<await>consumer_tag)<line_sep>future.set_result(message)<line_sep><raise>Exception<block_end>consume_ok=<await>channel.basic_consume(declare_ok.queue bad_consumer no_ack=<false> )<line_sep>consumer_tag.set_result(consume_ok.consumer_tag)<line_sep>message=<await>future<line_sep><await>channel.basic_reject(message.delivery.delivery_tag requeue=<true>)<assert_stmt>message.body<eq>b"urgent"<line_sep>future=loop.create_future()<line_sep><await>channel.basic_consume(declare_ok.queue future.set_result no_ack=<true> )<line_sep>message=<await>future<assert_stmt>message.body<eq>b"urgent"<block_end><async_keyword><def_stmt>test_ack_nack_reject amqp_channel:aiormq.Channel<block_start>channel=amqp_channel# type: aiormq.Channel
<await>channel.basic_qos(prefetch_count=1)<line_sep>declare_ok=<await>channel.queue_declare(auto_delete=<true>)<line_sep>queue=asyncio.Queue()<line_sep><await>channel.basic_consume(declare_ok.queue queue.put no_ack=<false>)<line_sep><await>channel.basic_publish(b"rejected" routing_key=declare_ok.queue)<line_sep>message=<await>queue.get()<assert_stmt>message.body<eq>b"rejected"<line_sep><await>channel.basic_reject(message.delivery.delivery_tag requeue=<false>)<line_sep><await>channel.basic_publish(b"nacked" routing_key=declare_ok.queue)<line_sep>message=<await>queue.get()<assert_stmt>message.body<eq>b"nacked"<line_sep><await>channel.basic_nack(message.delivery.delivery_tag requeue=<false>)<line_sep><await>channel.basic_publish(b"acked" routing_key=declare_ok.queue)<line_sep>message=<await>queue.get()<assert_stmt>message.body<eq>b"acked"<line_sep><await>channel.basic_ack(message.delivery.delivery_tag)<block_end><async_keyword><def_stmt>test_confirm_multiple amqp_channel:aiormq.Channel<block_start>"""
RabbitMQ has been observed to send confirmations in a strange pattern
when several messages are published simultaneously and only some of them are routed
to a queue. It sends acks like this: 1 2 4 5 (multiple, confirming also 3).
This test is probably inconsequential without publisher_confirms.
This is a regression test for https://github.com/mosquito/aiormq/issues/10
"""<line_sep>channel=amqp_channel# type: aiormq.Channel
exchange=uuid.uuid4().hex<line_sep><await>channel.exchange_declare(exchange exchange_type="topic")<try_stmt><block_start>declare_ok=<await>channel.queue_declare(exclusive=<true>)<line_sep><await>channel.queue_bind(declare_ok.queue exchange routing_key="test.5" )<for_stmt>i range(10)<block_start>messages=[asyncio.ensure_future(channel.basic_publish(b"test" exchange=exchange routing_key="test.{}".format(i) ))<for>i range(10)]<line_sep>_,pending=<await>asyncio.wait(messages timeout=0.2)<assert_stmt><not>pending "not all publishes were completed (confirmed)"<line_sep><await>asyncio.sleep(0.05)<block_end><block_end><finally_stmt><block_start><await>channel.exchange_delete(exchange)<block_end><block_end><async_keyword><def_stmt>test_exclusive_queue_locked amqp_connection<block_start>channel0=<await>amqp_connection.channel()<line_sep>channel1=<await>amqp_connection.channel()<line_sep>qname=str(uuid.uuid4())<line_sep><await>channel0.queue_declare(qname exclusive=<true>)<try_stmt><block_start><await>channel0.basic_consume(qname print exclusive=<true>)<with_stmt>pytest.raises(aiormq.exceptions.ChannelLockedResource)<block_start><await>channel1.queue_declare(qname)<line_sep><await>channel1.basic_consume(qname print exclusive=<true>)<block_end><block_end><finally_stmt><block_start><await>channel0.queue_delete(qname)<block_end><block_end><async_keyword><def_stmt>test_remove_writer_when_closed amqp_channel:aiormq.Channel<block_start><with_stmt>pytest.raises(aiormq.exceptions.ChannelClosed)<block_start><await>amqp_channel.queue_declare("amq.forbidden_queue_name" auto_delete=<true> )<block_end><with_stmt>pytest.raises(aiormq.exceptions.ChannelInvalidStateError)<block_start><await>amqp_channel.queue_delete("amq.forbidden_queue_name")<block_end><block_end><async_keyword><def_stmt>test_proxy_connection proxy_connection proxy:TCPProxy<block_start>channel=<await>proxy_connection.channel()# type: aiormq.Channel
<await>channel.queue_declare(auto_delete=<true>)<block_end><async_keyword><def_stmt>test_declare_queue_timeout proxy_connection proxy:TCPProxy<block_start><for_stmt>_ range(3)<block_start>channel=<await>proxy_connection.channel()# type: aiormq.Channel
qname=str(uuid.uuid4())<with_stmt>proxy.slowdown(read_delay=5 write_delay=0)<block_start><with_stmt>pytest.raises(asyncio.TimeoutError)<block_start><await>channel.queue_declare(qname auto_delete=<true> timeout=0.5)<block_end><block_end><block_end><block_end> |
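The tests above exercise publish, consume, ack/nack, and publisher confirms through pytest fixtures. Below is a minimal standalone sketch of the same publish-and-get flow with aiormq, assuming a RabbitMQ broker is reachable at the default local URL; the URL and queue name are assumptions made for illustration.

import asyncio
import aiormq

async def main():
    # Connect and open a channel (the test fixtures above do this behind the scenes).
    connection = await aiormq.connect("amqp://guest:guest@localhost//")
    channel = await connection.channel()

    declare_ok = await channel.queue_declare("demo-queue", auto_delete=True)
    await channel.basic_publish(b"hello", routing_key=declare_ok.queue)

    # basic_get mirrors the no_ack path used in test_simple above.
    message = await channel.basic_get(declare_ok.queue, no_ack=True)
    print(message.body)  # b'hello'

    await connection.close()

asyncio.run(main())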
# -*- coding: utf-8 -*-
"""Parser for the CCleaner Registry key."""<import_stmt>re<import_from_stmt>dfdatetime time_elements<as>dfdatetime_time_elements<import_from_stmt>plaso.containers events<import_from_stmt>plaso.containers time_events<import_from_stmt>plaso.lib definitions<import_from_stmt>plaso.parsers winreg_parser<import_from_stmt>plaso.parsers.winreg_plugins interface<class_stmt>CCleanerConfigurationEventData(events.EventData)<block_start>"""CCleaner configuration event data.
Attributes:
configuration (str): CCleaner configuration.
key_path (str): Windows Registry key path.
"""<line_sep>DATA_TYPE='ccleaner:configuration'<def_stmt>__init__ self<block_start>"""Initializes event data."""<line_sep>super(CCleanerConfigurationEventData self).__init__(data_type=self.DATA_TYPE)<line_sep>self.configuration=<none><line_sep>self.key_path=<none><block_end><block_end><class_stmt>CCleanerUpdateEventData(events.EventData)<block_start>"""CCleaner update event data.
Attributes:
key_path (str): Windows Registry key path.
"""<line_sep>DATA_TYPE='ccleaner:update'<def_stmt>__init__ self<block_start>"""Initializes event data."""<line_sep>super(CCleanerUpdateEventData self).__init__(data_type=self.DATA_TYPE)<line_sep>self.key_path=<none><block_end><block_end><class_stmt>CCleanerPlugin(interface.WindowsRegistryPlugin)<block_start>"""Gathers the CCleaner Keys for NTUSER hive.
Known Windows Registry values within the CCleaner key:
* (App)Cookies [REG_SZ], contains "True" if the cookies should be cleaned;
* (App)Delete Index.dat files [REG_SZ]
* (App)History [REG_SZ]
* (App)Last Download Location [REG_SZ]
* (App)Other Explorer MRUs [REG_SZ]
* (App)Recent Documents [REG_SZ]
* (App)Recently Typed URLs [REG_SZ]
* (App)Run (in Start Menu) [REG_SZ]
* (App)Temporary Internet Files [REG_SZ]
* (App)Thumbnail Cache [REG_SZ]
* CookiesToSave [REG_SZ]
* UpdateKey [REG_SZ], contains a date and time formatted as:
"MM/DD/YYYY hh:mm:ss [A|P]M", for example "07/13/2013 10:03:14 AM";
* WINDOW_HEIGHT [REG_SZ], contains the windows height in number of pixels;
* WINDOW_LEFT [REG_SZ]
* WINDOW_MAX [REG_SZ]
* WINDOW_TOP [REG_SZ]
* WINDOW_WIDTH [REG_SZ], contains the windows width in number of pixels;
Also see:
http://cheeky4n6monkey.blogspot.com/2012/02/writing-ccleaner-regripper-plugin-part_05.html
"""<line_sep>NAME='ccleaner'<line_sep>DATA_FORMAT='CCleaner Registry data'<line_sep>FILTERS=frozenset([interface.WindowsRegistryKeyPathFilter('HKEY_CURRENT_USER\\Software\\Piriform\\CCleaner')])<line_sep># Date and time string formatted as: "MM/DD/YYYY hh:mm:ss [A|P]M"
# for example "07/13/2013 10:03:14 AM"
# TODO: determine if this is true for other locales.
_UPDATE_DATE_TIME_RE=re.compile(r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '<concat>r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)')<def_stmt>_ParseUpdateKeyValue self parser_mediator registry_value<block_start>"""Parses the UpdateKey value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""<if_stmt><not>registry_value.DataIsString()<block_start>parser_mediator.ProduceExtractionWarning('unsupported UpdateKey value data type: {0:s}'.format(registry_value.data_type_string))<line_sep><return><none><block_end>date_time_string=registry_value.GetDataAsObject()<if_stmt><not>date_time_string<block_start>parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')<line_sep><return><none><block_end>re_match=self._UPDATE_DATE_TIME_RE.match(date_time_string)<if_stmt><not>re_match<block_start>parser_mediator.ProduceExtractionWarning('unsupported UpdateKey value data: {0!s}'.format(date_time_string))<line_sep><return><none><block_end>month,day_of_month,year,hours,minutes,seconds,part_of_day=(re_match.groups())<try_stmt><block_start>year=int(year 10)<line_sep>month=int(month 10)<line_sep>day_of_month=int(day_of_month 10)<line_sep>hours=int(hours 10)<line_sep>minutes=int(minutes 10)<line_sep>seconds=int(seconds 10)<block_end><except_stmt>(TypeError ValueError)<block_start>parser_mediator.ProduceExtractionWarning('invalid UpdateKey date time value: {0!s}'.format(date_time_string))<line_sep><return><none><block_end><if_stmt>part_of_day<eq>'PM'<block_start>hours<augadd>12<block_end>time_elements_tuple=(year month day_of_month hours minutes seconds)<try_stmt><block_start>date_time=dfdatetime_time_elements.TimeElements(time_elements_tuple=time_elements_tuple)<line_sep>date_time.is_local_time=<true><block_end><except_stmt>ValueError<block_start>parser_mediator.ProduceExtractionWarning('invalid UpdateKey date time value: {0!s}'.format(time_elements_tuple))<line_sep><return><none><block_end><return>date_time<block_end><def_stmt>ExtractEvents self parser_mediator registry_key **kwargs<block_start>"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""<line_sep>configuration=[]<line_sep>date_time=<none><for_stmt>registry_value registry_key.GetValues()<block_start><if_stmt><not>registry_value.name<or><not>registry_value.data<block_start><continue><block_end><if_stmt>registry_value.name<eq>'UpdateKey'<block_start>date_time=self._ParseUpdateKeyValue(parser_mediator registry_value)<block_end><else_stmt><block_start>value=registry_value.GetDataAsObject()<line_sep>configuration.append('{0:s}: {1!s}'.format(registry_value.name value))<block_end><block_end><if_stmt>date_time<block_start>event_data=CCleanerUpdateEventData()<line_sep>event_data.key_path=registry_key.path<line_sep>event=time_events.DateTimeValuesEvent(date_time definitions.TIME_DESCRIPTION_UPDATE time_zone=parser_mediator.timezone)<line_sep>parser_mediator.ProduceEventWithEventData(event event_data)<block_end>event_data=CCleanerConfigurationEventData()<line_sep>event_data.configuration=' '.join(sorted(configuration))<or><none><line_sep>event_data.key_path=registry_key.path<line_sep>event=time_events.DateTimeValuesEvent(registry_key.last_written_time definitions.TIME_DESCRIPTION_WRITTEN)<line_sep>parser_mediator.ProduceEventWithEventData(event event_data)<block_end><block_end>winreg_parser.WinRegistryParser.RegisterPlugin(CCleanerPlugin)<line_sep> |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluate lazy slot filling results."""<import_stmt>codecs<import_stmt>collections<import_stmt>gzip<import_stmt>json<import_stmt>random<import_stmt>re<import_stmt>string<import_stmt>unicodedata<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>bert tokenization<import_from_stmt>language.labs.drkit input_fns<import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<line_sep>PUNCTUATION=frozenset(string.punctuation)<line_sep>FLAGS=flags.FLAGS<line_sep>## Required parameters
flags.DEFINE_string("ground_truth_file" <none> "File with ground truth answers.")<line_sep>flags.DEFINE_string("predicted_answers_file" <none> "File with predicted answers from model.")<line_sep>flags.DEFINE_string("relation_counts_file" <none> "JSON file with relation counts.")<class_stmt>NumpyEncoder(json.JSONEncoder)<block_start>"""Special json encoder for numpy types."""<def_stmt>default self obj<block_start><if_stmt>isinstance(obj (np.int_ np.intc np.intp np.int8 np.int16 np.int32 np.int64 np.uint8 np.uint16 np.uint32 np.uint64))<block_start><return>int(obj)<block_end><elif_stmt>isinstance(obj (np.float_ np.float16 np.float32 np.float64))<block_start><return>float(obj)<block_end><elif_stmt>isinstance(obj (np.ndarray ))# This is the fix
<block_start><return>obj.tolist()<block_end><return>json.JSONEncoder.default(self obj)<block_end><block_end><def_stmt>wikimovie_eval_fn dataset results name_map output_prediction_file **kwargs<block_start>"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""<del_stmt>kwargs<line_sep># Collect ground truth answers.
gt_answer={ex.qas_id:ex.answer_entity<for>ex dataset.examples}<line_sep>gt_ques={ex.qas_id:ex.question_text<for>ex dataset.examples}<line_sep>gt_entity={ex.qas_id:ex.subject_entity[0]<for>ex dataset.examples}<line_sep>inf_chain={ex.qas_id:ex.inference_chain<for>ex dataset.examples}<line_sep># Compute basic metrics.
num_correct=0.<line_sep>all_predictions={}<line_sep>chain2stats={ch:[0. 0.]<for>ch inf_chain.values()}<line_sep>incorrect_results,correct_results=[] []<for_stmt>result results<block_start>qas_id=result["qas_ids"]<line_sep>prediction=result["predictions"]<if_stmt>prediction<in>gt_answer[qas_id]<block_start>num_correct<augadd>1<line_sep>chain2stats[inf_chain[qas_id]][0]<augadd>1<line_sep>correct_results.append({"qas_id":result["qas_ids"] "question":gt_ques[qas_id] "answers":gt_answer[qas_id] "subject":gt_entity[qas_id] "inf-chain":inf_chain[qas_id] "predictions":result["predictions"] })<for_stmt>hop range(3)<block_start><if_stmt>"sparse_%d"%hop<in>result<block_start>correct_results[-1].update({"sparse_%d"%hop:result["sparse_%d"%hop] "dense_%d"%hop:result["dense_%d"%hop] "mention_%d"%hop:result["mention_%d"%hop] "entity_%d"%hop:result["entity_%d"%hop] "sparse_scores_%d"%hop:result["sparse_scores_%d"%hop] "dense_scores_%d"%hop:result["dense_scores_%d"%hop] "mention_scores_%d"%hop:result["mention_scores_%d"%hop] "entity_scores_%d"%hop:result["entity_scores_%d"%hop] })<block_end><block_end><block_end><else_stmt><block_start>incorrect_results.append({"qas_id":result["qas_ids"] "question":gt_ques[qas_id] "answers":gt_answer[qas_id] "subject":gt_entity[qas_id] "inf-chain":inf_chain[qas_id] "predictions":result["predictions"] })<for_stmt>hop range(3)<block_start><if_stmt>"sparse_%d"%hop<in>result<block_start>incorrect_results[-1].update({"sparse_%d"%hop:result["sparse_%d"%hop] "dense_%d"%hop:result["dense_%d"%hop] "mention_%d"%hop:result["mention_%d"%hop] "entity_%d"%hop:result["entity_%d"%hop] "sparse_scores_%d"%hop:result["sparse_scores_%d"%hop] "dense_scores_%d"%hop:result["dense_scores_%d"%hop] "mention_scores_%d"%hop:result["mention_scores_%d"%hop] "entity_scores_%d"%hop:result["entity_scores_%d"%hop] })<block_end><block_end><block_end>chain2stats[inf_chain[qas_id]][1]<augadd>1<line_sep>all_predictions[qas_id]=name_map[str(prediction)]<block_end>accuracy=num_correct/len(all_predictions)<line_sep>json.dump(all_predictions tf.gfile.Open(output_prediction_file "w"))<line_sep>json.dump(random.sample(incorrect_results 100) tf.gfile.Open(output_prediction_file+".incorrect" "w") cls=NumpyEncoder)<line_sep>json.dump(random.sample(correct_results 100) tf.gfile.Open(output_prediction_file+".correct" "w") cls=NumpyEncoder)<line_sep># Return metrics.
metrics={"accuracy":accuracy }<for_stmt>ch,stats chain2stats.items()<block_start>metrics["inference-chains-acc/"+ch]=stats[0]/stats[1]<block_end><return>metrics<block_end><def_stmt>multihop_eval_fn dataset results name_map output_prediction_file supervision="mention" **kwargs<block_start>"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
supervision: Type of supervision used in the model.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""<del_stmt>kwargs<line_sep># Collect ground truth answers.
gt_mentions={ex.qas_id:ex.answer_mention[0]<for>ex dataset.examples}<if_stmt>supervision<eq>"mention"<block_start>gt_answer=gt_mentions<block_end><else_stmt><block_start>gt_answer={ex.qas_id:ex.answer_entity[0]<for>ex dataset.examples}<block_end># Compute basic metrics.
num_correct=0.<line_sep>all_predictions={}<for_stmt>result results<block_start>qas_id=result["qas_ids"]<line_sep>prediction=result["predictions"]<if_stmt>prediction<eq>gt_answer[qas_id]<block_start>num_correct<augadd>1<block_end>all_predictions[qas_id]=name_map[str(prediction)]<block_end>accuracy=num_correct/len(all_predictions)<line_sep># Compute advanced metrics.
json.dump(all_predictions tf.gfile.Open(output_prediction_file "w"))<line_sep>micro,macro,_,_=compute_scores(dataset.gt_file output_prediction_file)<line_sep># Return metrics.
metrics={"accuracy":accuracy "micro-p":micro[0] "micro-r":micro[1] "micro-f":micro[2] "macro-p":macro[0] "macro-r":macro[1] "macro-f":macro[2] }<line_sep><return>metrics<block_end><def_stmt>hotpot_eval_fn dataset results name_map output_prediction_file **kwargs<block_start>"""Compute evaluation metrics for HotpotQADataset.
Args:
dataset: An object of type HotpotQADataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""<del_stmt>kwargs<line_sep># Collect ground truth answers.
gt_answer={ex.qas_id:ex.answer_entity<for>ex dataset.examples}<line_sep>gt_types={ex.qas_id:ex.inference_chain<for>ex dataset.examples}<line_sep># Compute basic metrics.
num_correct={2:0. 5:0. 10:0. 20:0.}<line_sep>aps=[]<line_sep>no_answer=0.<line_sep>all_predictions={}<line_sep>bridge_acc,comp_acc=0. 0.<line_sep>bridge_tot,comp_tot=0 0<line_sep>single_acc=0.<line_sep>layer_weights=np.zeros_like(results[0]["layer_probs"])<line_sep>num_layer_entities={i:0.<for>i range(layer_weights.shape[0])}<line_sep>num_new_entities={i:0.<for>i range(layer_weights.shape[0])}<for_stmt>result results<block_start>qas_id=result["qas_ids"].decode("utf-8")<line_sep>preds=result["top_idx"]<line_sep>scores=result["top_vals"]<line_sep>ans=gt_answer[qas_id]<line_sep>my_type=gt_types[qas_id]<if_stmt>my_type<eq>"bridge"<block_start>bridge_tot<augadd>1<block_end><else_stmt><block_start>comp_tot<augadd>1<block_end>ranks=np.where(np.in1d(preds ans))[0]<line_sep>ranks=np.sort(ranks)<line_sep>ap=0.<line_sep>cnt=0.<if_stmt>any(rr<l>10<for>rr ranks)<block_start>single_acc<augadd>1<block_end><if_stmt>ranks.shape[0]<eq>0<block_start>no_answer<augadd>1<block_end><for_stmt>rr ranks<block_start>cnt<augadd>1<line_sep>ap<augadd>cnt/(rr+1)<block_end><if_stmt>ans<block_start>aps.append(ap/len(ans))<block_end><else_stmt><block_start>aps.append(0.)<block_end>found=<false><for_stmt>key [2 5 10 20]<block_start><if_stmt>found<or>np.in1d(ans preds[:key]).all()<block_start>num_correct[key]<augadd>1<line_sep>found=<true><if_stmt>key<eq>10<block_start><if_stmt>my_type<eq>"bridge"<block_start>bridge_acc<augadd>1<block_end><else_stmt><block_start>comp_acc<augadd>1<block_end><block_end><block_end><block_end># Non-accuracy stats
layer_weights<augadd>result["layer_probs"]<line_sep>layer_entities={i:set()<for>i range(layer_weights.shape[0])}<line_sep>all_predictions[qas_id]={}<for_stmt>i range(layer_weights.shape[0])<block_start>layer_entities[i]=set([ee<for>ee result["layer_%d_ent"%i]<if>ee<ne>-1])<line_sep>num_layer_entities[i]<augadd>len(layer_entities[i])<line_sep>num_new_entities[i]<augadd>len(layer_entities[i]-layer_entities[0])<line_sep># all_predictions[qas_id]["layer_%d" % i] = [
# name_map[str(ee)] for ee in layer_entities[i]]
<block_end>all_predictions[qas_id]["predictions"]=[(name_map[str(pred)] str(scores[i]))<for>i,pred enumerate(preds)]<block_end>tf.logging.info("Evaluated %d items" len(all_predictions))<line_sep>accuracy={key:(num_correct[key]/len(all_predictions))<for>key num_correct}<line_sep># Compute advanced metrics.
json.dump(all_predictions tf.gfile.Open(output_prediction_file "w"))<line_sep># Return metrics.
metrics={"eval/@%d"%key:accuracy[key]<for>key accuracy}<line_sep>metrics["accuracy"]=accuracy[10]<line_sep>metrics["eval/map"]=sum(aps)/len(all_predictions)<line_sep>metrics["eval/bridge_accuracy"]=bridge_acc/bridge_tot<line_sep>metrics["eval/comparison_accuracy"]=comp_acc/comp_tot<line_sep>metrics["analysis/single_accuracy"]=single_acc/len(all_predictions)<line_sep>metrics["analysis/no_answers"]=no_answer/len(all_predictions)<for_stmt>i range(layer_weights.shape[0])<block_start>metrics["analysis/layer_weight_%d"%i]=layer_weights[i]/len(all_predictions)<line_sep>metrics["analysis/num_entities_%d"%i]=num_layer_entities[i]/len(all_predictions)<line_sep>metrics["analysis/num_new_entities_%d"%i]=num_new_entities[i]/len(all_predictions)<block_end><return>metrics<block_end><def_stmt>normalize_answer s<block_start>"""Lower text and remove punctuation, articles and extra whitespace."""<def_stmt>remove_articles text<block_start><return>re.sub(r"\b(a|an|the)\b" " " text)<block_end><def_stmt>white_space_fix text<block_start><return>" ".join(text.split())<block_end><def_stmt>remove_punc text<block_start>exclude=set(string.punctuation)<line_sep><return>"".join(ch<for>ch text<if>ch<not><in>exclude)<block_end><def_stmt>lower text<block_start><return>text.lower()<block_end><return>white_space_fix(remove_articles(remove_punc(lower(s))))<block_end><def_stmt>f1_score prediction ground_truth<block_start>"""Compute F1 score."""<line_sep>prediction_tokens=normalize_answer(prediction).split()<line_sep>ground_truth_tokens=normalize_answer(ground_truth).split()<line_sep>common=collections.Counter(prediction_tokens)&collections.Counter(ground_truth_tokens)<line_sep>num_same=sum(common.values())<if_stmt>num_same<eq>0<block_start><return>0<block_end>precision=1.0<times>num_same/len(prediction_tokens)<line_sep>recall=1.0<times>num_same/len(ground_truth_tokens)<line_sep>f1=(2<times>precision<times>recall)/(precision+recall)<line_sep><return>f1<block_end><def_stmt>exact_match_score prediction ground_truth<block_start>"""Compute EM score."""<line_sep><return>normalize_answer(prediction)<eq>normalize_answer(ground_truth)<block_end><def_stmt>metric_max_over_ground_truths metric_fn prediction ground_truths<block_start>scores_for_ground_truths=[]<for_stmt>ground_truth ground_truths<block_start>my_score=metric_fn(prediction ground_truth)<line_sep>scores_for_ground_truths.append(my_score)<block_end><return>max(scores_for_ground_truths)<block_end><def_stmt>read_predictions prediction_file<block_start><with_stmt>tf.gfile.Open(prediction_file)<as>f<block_start>predictions=json.load(f)<block_end><return>predictions<block_end><def_stmt>read_answers gold_file<block_start>"""Read ground truth answers."""<line_sep>answers={}<line_sep>f=tf.gfile.Open(gold_file)<if_stmt>gold_file.endswith(".gz")<block_start>f=gzip.GzipFile(fileobj=f)<block_end><for_stmt>i,line enumerate(f)<block_start>example=json.loads(line)<if_stmt>i<eq>0<and>"header"<in>example<block_start><continue><block_end><for_stmt>qa example["qas"]<block_start>answers[qa["qid"]]=qa["answers"]<block_end><block_end>f.close()<line_sep><return>answers<block_end><def_stmt>evaluate answers predictions skip_no_answer=<false><block_start>"""Compute F1 and EM scores."""<line_sep>f1=exact_match=total=0<for_stmt>qid,ground_truths answers.items()<block_start><if_stmt>qid<not><in>predictions<block_start><if_stmt><not>skip_no_answer<block_start>message="Unanswered question %s will receive score 
0."%qid<line_sep>print(message)<line_sep>total<augadd>1<block_end><continue><block_end>total<augadd>1<line_sep>prediction=predictions[qid]<line_sep>exact_match<augadd>metric_max_over_ground_truths(exact_match_score prediction ground_truths)<line_sep>f1<augadd>metric_max_over_ground_truths(f1_score prediction ground_truths)<block_end>exact_match=100.0<times>exact_match/total<line_sep>f1=100.0<times>f1/total<line_sep><return>{"exact_match":exact_match "f1":f1}<block_end><def_stmt>mrqa_eval_fn dataset_file predictions_file skip_no_answer=<true><block_start>answers=read_answers(dataset_file)<line_sep>predictions=read_predictions(predictions_file)<line_sep><return>evaluate(answers predictions skip_no_answer)<block_end><def_stmt>compute_scores ground_truth_file predicted_answers_file<block_start>"""Read predictions and ground truth and return P, R, F."""<line_sep>telemetry,incorrect=read_results(ground_truth_file predicted_answers_file)<line_sep>micro=aprf(telemetry)<line_sep>relationwise=aprf_relationwise(telemetry)<line_sep>macro=sum([val[0]<for>_,val relationwise.items()])<line_sep>macro=macro/len(relationwise)<line_sep><return>micro macro relationwise incorrect<block_end><def_stmt>read_results ground_truth_file predicted_answers_file<block_start>"""Read results and ground truth and return data structure with stats."""<with_stmt>codecs.getreader("utf-8")(tf.gfile.GFile(ground_truth_file "r"))<as>read<block_start>data_={}<for_stmt>line read<block_start>item=json.loads(line.strip())<if_stmt>isinstance(item["relation"] dict)<block_start>relation=item["relation"]["wikidata_id"]<block_end><elif_stmt>isinstance(item["relation"] list)<block_start>relation=(item["relation"][0]["wikidata_id"]+"_"+item["relation"][1]["wikidata_id"])<block_end>data_[item["id"]]=[relation item["subject"]["wikidata_id"]]<if_stmt>"is_impossible"<in>item<and>item["is_impossible"]<block_start><continue><block_end><if_stmt>item["object"]<is><none><block_start><continue><block_end><if_stmt>isinstance(item["object"]["mention"] dict)<block_start>data_[item["id"]]<augadd>[item["object"]["mention"]["text"]]<block_end><if_stmt>"name"<in>item["object"]<block_start>data_[item["id"]]<augadd>[item["object"]["name"]]<block_end><if_stmt>"aliases"<in>item["object"]<block_start>data_[item["id"]]<augadd>item["object"]["aliases"].keys()<block_end><block_end><block_end><with_stmt>codecs.getreader("utf-8")(tf.gfile.GFile(predicted_answers_file "r"))<as>fin<block_start>predictions=json.load(fin)<line_sep>telemetry,incorrect=[] []<line_sep>n=0<for_stmt>key data_<block_start><if_stmt>key<not><in>predictions<block_start><continue><block_end>g=data_[key][2:]<line_sep>a=predictions[key]<line_sep>m=data_[key][:2]<line_sep>stats=score(g a)<line_sep>telemetry.append([m[0] m[1] g a stats])<if_stmt>stats[0]<eq>0.<and>stats[3]<g>0.<block_start>incorrect.append(key)<block_end>n<augadd>1<block_end><return>telemetry incorrect<block_end><block_end><def_stmt>aprf_relationwise g<block_start>"""Returns precision, recall and F score for each relation."""<line_sep>rel_to_stats=collections.defaultdict(list)<for_stmt>item g<block_start>rel_to_stats[item[0]].append(item)<block_end>rel_to_scores={}<for_stmt>rel,stats rel_to_stats.items()<block_start>rel_to_scores[rel]=[aprf(stats) len(stats)]<block_end><return>rel_to_scores<block_end><def_stmt>aprf g<block_start>"""Returns precision, recall and F of the given statistics."""<line_sep>tp,_,sys_pos,real_pos=sum([x[-1]<for>x 
g])<if_stmt>tp<eq>0<block_start>p=r=f=0.0<block_end><else_stmt><block_start>p=tp/float(sys_pos)<if>sys_pos<g>0<else>0.<line_sep>r=tp/float(real_pos)<if>real_pos<g>0<else>0.<line_sep>f=2<times>p<times>r/(p+r)<block_end><return>np.asarray([p r f])<block_end><def_stmt>score gold answer<block_start>"""Compares answer to ground truth to return TP / FP stats."""<if_stmt>gold<block_start>gold=set([simplify(g)<for>g gold])<block_end>answer=simplify(answer)<line_sep>result=np.zeros(4)<if_stmt>gold<block_start>result[3]<augadd>1<if_stmt>answer<in>gold<block_start>result[0]<augadd>1<block_end><block_end><else_stmt><block_start><if_stmt><not>answer<block_start>result[1]<augadd>1<block_end><block_end><if_stmt>answer<block_start>result[2]<augadd>1<block_end><return>result<block_end><def_stmt>strip_accents_and_punct text<block_start>"""Strips accents from a piece of text."""<line_sep>text=unicodedata.normalize("NFD" text)<line_sep>output=[]<for_stmt>char text<block_start><if_stmt>char<in>PUNCTUATION<block_start><continue><block_end>cat=unicodedata.category(char)<if_stmt>cat<eq>"Mn"<block_start><continue><block_end>output.append(char)<block_end><return>"".join(output)<block_end><def_stmt>simplify answer<block_start>"""Pre-process answer string."""<line_sep>toks=[]<line_sep>articles={"the" "a" "an" "and" ""}<for_stmt>t answer.strip().lower().split()<block_start>tok=strip_accents_and_punct(t)<if_stmt>tok<not><in>articles<block_start>toks.append(tok)<block_end><block_end><return>"".join(toks)<block_end><def_stmt>rare_relation_scores relationwise relation2counts<block_start>"""Print statistics of rare relations for different thresholds."""<for_stmt>thresh [5 100 500 1000]<block_start>freq_stats,freq_total=np.array([0. 0. 0.]) 0<line_sep>rare_stats,rare_total=np.array([0. 0. 0.]) 0<for_stmt>relation,(stats _) relationwise.items()<block_start><if_stmt>relation2counts.get(relation 0)<l>thresh<block_start>rare_stats<augadd>stats<line_sep>rare_total<augadd>1<block_end><else_stmt><block_start>freq_stats<augadd>stats<line_sep>freq_total<augadd>1<block_end><block_end>rare_stats<augdiv>rare_total<line_sep>freq_stats<augdiv>freq_total<line_sep>print("Threshold =" thresh "rare" rare_total "Micro-P %.3f Micro-R %.3f Micro-F %.3f"%(rare_stats[0] rare_stats[1] rare_stats[2]) "freq" freq_total "Micro-P %.3f Micro-R %.3f Micro-F %.3f"%(freq_stats[0] freq_stats[1] freq_stats[2]))<block_end><block_end><def_stmt>main _<block_start>eval_type="hotpot"<if_stmt>eval_type<eq>"hotpot"<block_start>test_hotpot_eval()<block_end><else_stmt><block_start>micro,macro,rwise,_=compute_scores(FLAGS.ground_truth_file FLAGS.predicted_answers_file)<line_sep>print("Micro" micro)<line_sep>print("Macro" macro)<if_stmt>FLAGS.relation_counts_file<is><not><none><block_start>r2c=json.load(tf.gfile.Open(FLAGS.relation_counts_file))<line_sep>rare_relation_scores(rwise r2c)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(main)<block_end> |
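The evaluation code above computes token-level precision, recall, and F1 over normalized answers (f1_score) and aggregates true-positive counts into micro and macro scores (aprf). A short worked sketch of the token-level F1 computation follows, using the same normalization steps; the example strings are illustrative.

import collections
import re
import string

def normalize(text):
    # Lower-case, drop punctuation, drop articles, collapse whitespace, as above.
    text = ''.join(ch for ch in text.lower() if ch not in set(string.punctuation))
    text = re.sub(r'\b(a|an|the)\b', ' ', text)
    return ' '.join(text.split())

def token_f1(prediction, ground_truth):
    pred = normalize(prediction).split()
    gold = normalize(ground_truth).split()
    common = collections.Counter(pred) & collections.Counter(gold)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred)
    recall = num_same / len(gold)
    return 2 * precision * recall / (precision + recall)

# Worked example: 2 shared tokens, precision 2/3, recall 2/2, so F1 = 0.8.
print(token_f1("the Eiffel Tower Paris", "Eiffel Tower"))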
# tests/test_provider_Mongey_kafka-connect.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:11 UTC)
<def_stmt>test_provider_import <block_start><import_stmt>terrascript.provider.Mongey.kafka_connect<block_end><def_stmt>test_resource_import <block_start><import_from_stmt>terrascript.resource.Mongey.kafka_connect kafka_connect_connector<block_end># TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.Mongey.kafka_connect
#
# t = terrascript.provider.Mongey.kafka_connect.kafka_connect()
# s = str(t)
#
# assert 'https://github.com/Mongey/terraform-provider-kafka-connect' in s
# assert '0.2.3' in s
|
# -*- encoding: utf-8 -*-
<import_stmt>json<import_stmt>os<import_stmt>shutil<import_stmt>tempfile<import_from_stmt>collections OrderedDict<import_from_stmt>datetime timedelta<import_from_stmt>pyparsing ParseBaseException ParseException ParseSyntaxException<import_stmt>mock<import_stmt>pytest<import_from_stmt>pyhocon ConfigFactory ConfigParser ConfigSubstitutionException ConfigTree <import_from_stmt>pyhocon.exceptions ConfigException ConfigMissingException ConfigWrongTypeException <try_stmt><block_start><import_from_stmt>dateutil.relativedelta relativedelta<as>period<block_end><except_stmt>Exception<block_start><import_from_stmt>datetime timedelta<as>period<block_end><class_stmt>TestConfigParser(object)<block_start><def_stmt>test_parse_simple_value self<block_start>config=ConfigFactory.parse_string("""t = {
c = 5
"d" = true
e.y = {
f: 7
g: "hey dude!"
h: hey man
i = \"\"\"
"first line"
"second" line
\"\"\"
}
j = [1, 2, 3]
u = 192.168.1.3/32
g = null
}
""")<assert_stmt>config.get_string('t.c')<eq>'5'<assert_stmt>config.get_int('t.c')<eq>5<assert_stmt>config.get_float('t.c')<eq>5.0<assert_stmt>config.get('t.e.y.f')<eq>7<assert_stmt>config.get('t.e.y.g')<eq>'hey dude!'<assert_stmt>config.get('t.e.y.h')<eq>'hey man'<assert_stmt>[v.strip()<for>v config.get('t.e.y.i').split('\n')]<eq>['' '"first line"' '"second" line' '']<assert_stmt>config.get_bool('t.d')<is><true><assert_stmt>config.get_int('t.e.y.f')<eq>7<assert_stmt>config.get('t.j')<eq>[1 2 3]<assert_stmt>config.get('t.u')<eq>'192.168.1.3/32'<assert_stmt>config.get_int('t.g')<is><none><assert_stmt>config.get_float('t.g')<is><none><assert_stmt>config.get_string('t.g')<is><none><assert_stmt>config.get_bool('t.g')<is><none><assert_stmt>config.get_list('t.g')<is><none><assert_stmt>config.get_config('t.g')<is><none><block_end>@pytest.mark.parametrize('forbidden_char' ['+' '`' '^' '?' '!' '@' '*' '&'])<def_stmt>test_fail_parse_forbidden_characters self forbidden_char<block_start><with_stmt>pytest.raises(ParseBaseException)<block_start>ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))<block_end><block_end>@pytest.mark.parametrize('forbidden_char' ['$' '"'])<def_stmt>test_fail_parse_forbidden_characters_in_context self forbidden_char<block_start><with_stmt>pytest.raises(ParseException)<block_start>ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))<block_end><block_end>@pytest.mark.parametrize('forbidden_char' ['+' '`' '^' '?' '!' '@' '*' '&'])<def_stmt>test_parse_forbidden_characters_quoted self forbidden_char<block_start>value="hey man{}".format(forbidden_char)<line_sep>config=ConfigFactory.parse_string('a: "{}"'.format(value))<assert_stmt>config.get_string("a")<eq>value<block_end><def_stmt>test_parse_with_enclosing_brace self<block_start>config=ConfigFactory.parse_string("""
{
a: {
b: 5
}
}
""")<assert_stmt>config.get_string('a.b')<eq>'5'<block_end>@pytest.mark.parametrize('data_set' [('a: 1 minutes' period(minutes=1)) ('a: 1minutes' period(minutes=1)) ('a: 2 minute' period(minutes=2)) ('a: 3 m' period(minutes=3)) ('a: 3m' period(minutes=3)) ('a: 3 min' '3 min') ('a: 4 seconds' period(seconds=4)) ('a: 5 second' period(seconds=5)) ('a: 6 s' period(seconds=6)) ('a: 6 sec' '6 sec') ('a: 7 hours' period(hours=7)) ('a: 8 hour' period(hours=8)) ('a: 9 h' period(hours=9)) ('a: 10 weeks' period(weeks=10)) ('a: 11 week' period(weeks=11)) ('a: 12 w' period(weeks=12)) ('a: 10 days' period(days=10)) ('a: 11 day' period(days=11)) ('a: 12 d' period(days=12)) ('a: 110 microseconds' period(microseconds=110)) ('a: 111 microsecond' period(microseconds=111)) ('a: 112 micros' period(microseconds=112)) ('a: 113 micro' period(microseconds=113)) ('a: 114 us' period(microseconds=114)) ('a: 110 milliseconds' timedelta(milliseconds=110)) ('a: 111 millisecond' timedelta(milliseconds=111)) ('a: 112 millis' timedelta(milliseconds=112)) ('a: 113 milli' timedelta(milliseconds=113)) ('a: 114 ms' timedelta(milliseconds=114)) ('a: 110 nanoseconds' period(microseconds=0)) ('a: 11000 nanoseconds' period(microseconds=11)) ('a: 1110000 nanosecond' period(microseconds=1110)) ('a: 1120000 nanos' period(microseconds=1120)) ('a: 1130000 nano' period(microseconds=1130)) ('a: 1140000 ns' period(microseconds=1140)) ])<def_stmt>test_parse_string_with_duration self data_set<block_start>config=ConfigFactory.parse_string(data_set[0])<assert_stmt>config['a']<eq>data_set[1]<block_end><def_stmt>test_parse_string_with_duration_with_long_unit_name self<block_start>config=ConfigFactory.parse_string("""
a: foo
b: 10 weeks
c: bar
""")<assert_stmt>config['b']<eq>period(weeks=10)<block_end><def_stmt>test_parse_with_list_mixed_types_with_durations_and_trailing_comma self<block_start>config=ConfigFactory.parse_string("""
a: foo
b: [a, 1, 10 weeks, 5 minutes,]
c: bar
""")<assert_stmt>config['b']<eq>['a' 1 period(weeks=10) period(minutes=5)]<block_end><def_stmt>test_parse_with_enclosing_square_bracket self<block_start>config=ConfigFactory.parse_string("[1, 2, 3]")<assert_stmt>config<eq>[1 2 3]<block_end><def_stmt>test_quoted_key_with_dots self<block_start>config=ConfigFactory.parse_string("""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
""")<assert_stmt>config['"a.b.c.d"']<eq>3<assert_stmt>config['t.d.c']<eq>5<assert_stmt>config['k."b.f.d"']<eq>7<block_end><def_stmt>test_dotted_notation_merge self<block_start>config=ConfigFactory.parse_string("""
a {
b = foo
c = bar
}
a.c = ${a.b}" "${a.b}
a.d = baz
""")<assert_stmt>config['a.b']<eq>"foo"<assert_stmt>config['a.c']<eq>"foo foo"<assert_stmt>config['a.d']<eq>"baz"<block_end><def_stmt>test_comma_to_separate_expr self<block_start>config=ConfigFactory.parse_string("""
a=1,
b="abc",
c=the man,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
""")<assert_stmt>config.get('a')<eq>1<assert_stmt>config.get('b')<eq>'abc'<assert_stmt>config.get('c')<eq>'the man'<assert_stmt>config.get('d')<eq>'woof'<assert_stmt>config.get('a-b-c-d')<eq>'test'<assert_stmt>config.get('a b c d')<eq>'test2'<assert_stmt>config.get('a b c d e')<eq>'test3'<block_end><def_stmt>test_dict_merge self<block_start>config=ConfigFactory.parse_string("""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")<line_sep>expected_result={"a":{"d":{"g":{"h":{"j":{"u":5} "d":4 "k":"f d"}}} "h":{"i":{"m":7 "d":5 "e":65}}}}<assert_stmt>expected_result<eq>config<block_end><def_stmt>test_parse_with_comments self<block_start>config=ConfigFactory.parse_string("""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
""")<assert_stmt>config.get('c')<eq>'test'<assert_stmt>config.get('g')<eq>'6 test'<assert_stmt>config.get('a.b')<eq>'test'<assert_stmt>config.get_string('a.b')<eq>'test'<assert_stmt>config.get('t')<eq>[1 2 3]<block_end><def_stmt>test_missing_config self<block_start>config=ConfigFactory.parse_string("""
a = 5
""")<line_sep># b is not set so show raise an exception
<with_stmt>pytest.raises(ConfigMissingException)<block_start>config.get('b')<block_end><block_end><def_stmt>test_parse_null self<block_start>config=ConfigFactory.parse_string("""
a = null
b = [null]
""")<assert_stmt>config.get('a')<is><none><assert_stmt>config.get('b')[0]<is><none><block_end><def_stmt>test_parse_override self<block_start>config=ConfigFactory.parse_string("""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
""")<assert_stmt>config.get('a.b.c')<eq>7<assert_stmt>config.get('a.b.d')<eq>8<block_end><def_stmt>test_concat_dict self<block_start>config=ConfigFactory.parse_string("""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
""")<assert_stmt>config.get('a.b')<eq>1<assert_stmt>config.get('a.c')<eq>2<assert_stmt>config.get('b.c')<eq>5<assert_stmt>config.get('b.d')<eq>4<block_end><def_stmt>test_concat_string self<block_start>config=ConfigFactory.parse_string("""
a = a b c
b = 5 b
c = b 7
""")<assert_stmt>config.get('a')<eq>'a b c'<assert_stmt>config.get('b')<eq>'5 b'<assert_stmt>config.get('c')<eq>'b 7'<block_end><def_stmt>test_concat_list self<block_start>config=ConfigFactory.parse_string("""
a = [1, 2] [3, 4] [
5,
6
]
""")<assert_stmt>config.get('a')<eq>[1 2 3 4 5 6]<assert_stmt>config.get_list('a')<eq>[1 2 3 4 5 6]<block_end><def_stmt>test_bad_concat self<block_start>ConfigFactory.parse_string('a = 45\n')<with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string('a = [4] "4"')<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string('a = "4" [5]')<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string('a = {b: 5} "4"')<block_end><block_end><def_stmt>test_string_substitutions self<block_start>config1=ConfigFactory.parse_string("""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
""")<assert_stmt>config1.get('a.b.c')<eq>'str'<assert_stmt>config1.get('d')<eq>'str'<assert_stmt>config1.get('f')<eq>'str '<line_sep>config2=ConfigFactory.parse_string("""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
""")<assert_stmt>config2.get('a.b.c')<eq>'str'<assert_stmt>config2.get('d')<eq>'test str'<assert_stmt>config2.get('f')<eq>'test str '<line_sep>config3=ConfigFactory.parse_string(u"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
""")<assert_stmt>config3.get('a.b.c')<eq>'str'<assert_stmt>config3.get('d')<eq>'test str me'<assert_stmt>config3.get('f')<eq>'test str me'<block_end><def_stmt>test_string_substitutions_with_no_space self<block_start>config=ConfigFactory.parse_string("""
app.heap_size = 128
app.java_opts = [
-Xms${app.heap_size}m
-Xmx${app.heap_size}m
]
""")<assert_stmt>config.get('app.java_opts')<eq>['-Xms128m' '-Xmx128m']<block_end><def_stmt>test_int_substitutions self<block_start>config1=ConfigFactory.parse_string("""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
""")<assert_stmt>config1.get('a.b.c')<eq>5<assert_stmt>config1.get('d')<eq>5<line_sep>config2=ConfigFactory.parse_string("""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
""")<assert_stmt>config2.get('a.b.c')<eq>5<assert_stmt>config2.get('d')<eq>'test 5'<line_sep>config3=ConfigFactory.parse_string("""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
""")<assert_stmt>config3.get('a.b.c')<eq>5<assert_stmt>config3.get('d')<eq>'test 5 me'<block_end><def_stmt>test_cascade_string_substitutions self<block_start>config=ConfigFactory.parse_string("""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
""")<assert_stmt>config.get('a.b.c')<eq>7<assert_stmt>config.get('d')<eq>'test 7 me'<block_end><def_stmt>test_multiple_substitutions self<block_start>config=ConfigFactory.parse_string("""
a = 5
b=${a}${a}
c=${a} ${a}
""")<assert_stmt>config<eq>{'a':5 'b':'55' 'c':'5 5'}<block_end><def_stmt>test_dict_substitutions self<block_start>config=ConfigFactory.parse_string("""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
""")<assert_stmt>config.get('data-center-east.cluster-size')<eq>6<assert_stmt>config.get('data-center-east.name')<eq>'east'<line_sep>config2=ConfigFactory.parse_string("""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
""")<assert_stmt>config2.get('data-center-east.cluster-size')<eq>6<assert_stmt>config2.get('data-center-east.name')<eq>'east'<line_sep>config3=ConfigFactory.parse_string("""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
""")<assert_stmt>config3.get('data-center-east.cluster-size')<eq>9<assert_stmt>config3.get('data-center-east.name')<eq>'east'<assert_stmt>config3.get('data-center-east.opts')<eq>'-Xmx4g'<line_sep>config4=ConfigFactory.parse_string("""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
""")<assert_stmt>config4.get('data-center-east.cluster-size')<eq>6<assert_stmt>config4.get('data-center-east.name')<eq>'east'<assert_stmt>config4.get('data-center-east-prod.cluster-size')<eq>6<assert_stmt>config4.get('data-center-east-prod.tmpDir')<eq>'/tmp'<line_sep>config5=ConfigFactory.parse_string("""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic}
data-center-east = { name = "east" }
""")<assert_stmt>config5['data-center-east']<eq>{'name':'east' 'cluster-size':6}<line_sep>config6=ConfigFactory.parse_string("""
data-center-generic = { cluster-size = 6 }
data-center-east = { name = "east" }
data-center-east = ${data-center-generic}
""")<assert_stmt>config6['data-center-east']<eq>{'name':'east' 'cluster-size':6}<block_end><def_stmt>test_dos_chars_with_unquoted_string_noeol self<block_start>config=ConfigFactory.parse_string("foo = bar")<assert_stmt>config['foo']<eq>'bar'<block_end><def_stmt>test_dos_chars_with_quoted_string_noeol self<block_start>config=ConfigFactory.parse_string('foo = "5"')<assert_stmt>config['foo']<eq>'5'<block_end><def_stmt>test_dos_chars_with_triple_quoted_string_noeol self<block_start>config=ConfigFactory.parse_string('foo = """5"""')<assert_stmt>config['foo']<eq>'5'<block_end><def_stmt>test_dos_chars_with_int_noeol self<block_start>config=ConfigFactory.parse_string("foo = 5")<assert_stmt>config['foo']<eq>5<block_end><def_stmt>test_dos_chars_with_float_noeol self<block_start>config=ConfigFactory.parse_string("foo = 5.0")<assert_stmt>config['foo']<eq>5.0<block_end><def_stmt>test_list_substitutions self<block_start>config=ConfigFactory.parse_string("""
common_modules = [php, python]
host_modules = ${common_modules} [java]
""")<assert_stmt>config.get('host_modules')<eq>['php' 'python' 'java']<line_sep>config2=ConfigFactory.parse_string("""
common_modules = [php, python]
host_modules = [java] ${common_modules}
""")<assert_stmt>config2.get('host_modules')<eq>['java' 'php' 'python']<line_sep>config3=ConfigFactory.parse_string("""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
""")<assert_stmt>config3.get('common_modules')<eq>['php' 'python']<assert_stmt>config3.get('host_modules')<eq>['java' 'php' 'python' 'perl']<line_sep>config4=ConfigFactory.parse_string("""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
""")<assert_stmt>config4.get('common_modules')<eq>['php' 'python']<assert_stmt>config4.get('host_modules')<eq>['java' 'php' 'python' 'perl']<assert_stmt>config4.get('full_modules')<eq>['java' 'php' 'python' 'perl' 'c' 'go']<block_end><def_stmt>test_list_element_substitution self<block_start>config=ConfigFactory.parse_string("""
main_language = php
languages = [java, ${main_language}]
""")<assert_stmt>config.get('languages')<eq>['java' 'php']<block_end><def_stmt>test_substitution_list_with_append self<block_start>config=ConfigFactory.parse_string("""
application.foo = 128mm
application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]
""")<assert_stmt>config["application.large-jvm-opts"]<eq>['-XX:+UseParNewGC' '-Xm16g' '128mm']<assert_stmt>config["application.large-jvm-opts2"]<eq>['-Xm16g' '128mm' '-XX:+UseParNewGC' ]<block_end><def_stmt>test_substitution_list_with_append_substitution self<block_start>config=ConfigFactory.parse_string("""
application.foo = 128mm
application.default-jvm-opts = ["-XX:+UseParNewGC"]
application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}
""")<assert_stmt>config["application.large-jvm-opts"]<eq>['-XX:+UseParNewGC' '-Xm16g' '128mm']<assert_stmt>config["application.large-jvm-opts2"]<eq>['-Xm16g' '128mm' '-XX:+UseParNewGC']<block_end><def_stmt>test_non_existent_substitution self<block_start><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
common_modules = ${non_existent}
""")<block_end><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
common_modules = abc ${non_existent}
""")<block_end><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
common_modules = ${non_existent} abc
""")<block_end><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
common_modules = abc ${non_existent} def
""")<block_end><block_end><def_stmt>test_non_compatible_substitution self<block_start><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string("""
common_modules = [perl]
host_modules = 55 ${common_modules}
""")<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string("""
common_modules = [perl]
host_modules = ${common_modules} 55
""")<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string("""
common_modules = [perl]
host_modules = aa ${common_modules} bb
""")<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string("""
common_modules = [perl]
host_modules = aa ${common_modules}
""")<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string("""
common_modules = [perl]
host_modules = ${common_modules} aa
""")<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>ConfigFactory.parse_string("""
common_modules = [perl]
host_modules = aa ${common_modules} bb
""")<block_end><block_end><def_stmt>test_self_ref_substitution_array self<block_start>config=ConfigFactory.parse_string("""
x = [1,2]
x = ${x} [3,4]
x = [-1, 0] ${x} [5, 6]
x = [-3, -2] ${x}
""")<assert_stmt>config.get("x")<eq>[-3 -2 -1 0 1 2 3 4 5 6]<block_end><def_stmt>test_self_append_array self<block_start>config=ConfigFactory.parse_string("""
x = [1,2]
x += [3,4]
""")<assert_stmt>config.get("x")<eq>[1 2 3 4]<block_end><def_stmt>test_self_append_string self<block_start>'''
Should be equivalent to
x = abc
x = ${?x} def
'''<line_sep>config=ConfigFactory.parse_string("""
x = abc
x += def
""")<assert_stmt>config.get("x")<eq>"abc def"<block_end><def_stmt>test_self_append_non_existent_string self<block_start>'''
Should be equivalent to x = ${?x} def
'''<line_sep>config=ConfigFactory.parse_string("""
x += def
""")<assert_stmt>config.get("x")<eq>" def"<block_end><def_stmt>test_self_append_nonexistent_array self<block_start>config=ConfigFactory.parse_string("""
x += [1,2]
""")<assert_stmt>config.get("x")<eq>[1 2]<block_end><def_stmt>test_self_append_object self<block_start>config=ConfigFactory.parse_string("""
x = {a: 1}
x += {b: 2}
""")<assert_stmt>config.get("x")<eq>{'a':1 'b':2}<block_end><def_stmt>test_self_append_nonexistent_object self<block_start>config=ConfigFactory.parse_string("""
x += {a: 1}
""")<assert_stmt>config.get("x")<eq>{'a':1}<block_end><def_stmt>test_self_ref_substitution_array_to_dict self<block_start>config=ConfigFactory.parse_string("""
x = [1,2]
x = {x: [3,4]}
x = {y: [5,6]}
x = {z: ${x}}
""")<assert_stmt>config.get("x.x")<eq>[3 4]<assert_stmt>config.get("x.y")<eq>[5 6]<assert_stmt>config.get("x.z")<eq>{'x':[3 4] 'y':[5 6]}<block_end><def_stmt>test_self_ref_substitiotion_dict_in_array self<block_start>config=ConfigFactory.parse_string("""
x = {x: [3,4]}
x = [${x}, 2, 3]
""")<line_sep>(one two three)=config.get("x")<assert_stmt>one<eq>{'x':[3 4]}<assert_stmt>two<eq>2<assert_stmt>three<eq>3<block_end><def_stmt>test_self_ref_substitution_dict_path self<block_start>config=ConfigFactory.parse_string("""
x = {y: {z: 1}}
x = ${x.y}
""")<assert_stmt>config.get("x.y")<eq>{'z':1}<assert_stmt>config.get("x.z")<eq>1<assert_stmt>set(config.get("x").keys())<eq>set(['y' 'z'])<block_end><def_stmt>test_self_ref_substitution_dict_path_hide self<block_start>config=ConfigFactory.parse_string("""
x = {y: {y: 1}}
x = ${x.y}
""")<assert_stmt>config.get("x.y")<eq>1<assert_stmt>set(config.get("x").keys())<eq>set(['y'])<block_end><def_stmt>test_self_ref_substitution_dict_recurse self<block_start><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
x = ${x}
""")<block_end><block_end><def_stmt>test_self_ref_substitution_dict_recurse2 self<block_start><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
x = ${x}
x = ${x}
""")<block_end><block_end><def_stmt>test_self_ref_substitution_dict_merge self<block_start>'''
Example from HOCON spec
'''<line_sep>config=ConfigFactory.parse_string("""
foo : { a : { c : 1 } }
foo : ${foo.a}
foo : { a : 2 }
""")<assert_stmt>config.get('foo')<eq>{'a':2 'c':1}<assert_stmt>set(config.keys())<eq>set(['foo'])<block_end><def_stmt>test_self_ref_substitution_dict_otherfield self<block_start>'''
Example from HOCON spec
'''<line_sep>config=ConfigFactory.parse_string("""
bar : {
foo : 42,
baz : ${bar.foo}
}
""")<assert_stmt>config.get("bar")<eq>{'foo':42 'baz':42}<assert_stmt>set(config.keys())<eq>set(['bar'])<block_end><def_stmt>test_self_ref_substitution_dict_otherfield_merged_in self<block_start>'''
Example from HOCON spec
'''<line_sep>config=ConfigFactory.parse_string("""
bar : {
foo : 42,
baz : ${bar.foo}
}
bar : { foo : 43 }
""")<assert_stmt>config.get("bar")<eq>{'foo':43 'baz':43}<assert_stmt>set(config.keys())<eq>set(['bar'])<block_end><def_stmt>test_self_ref_substitution_dict_otherfield_merged_in_mutual self<block_start>'''
Example from HOCON spec
'''<line_sep>config=ConfigFactory.parse_string("""
// bar.a should end up as 4
bar : { a : ${foo.d}, b : 1 }
bar.b = 3
// foo.c should end up as 3
foo : { c : ${bar.b}, d : 2 }
foo.d = 4
""")<assert_stmt>config.get("bar")<eq>{'a':4 'b':3}<assert_stmt>config.get("foo")<eq>{'c':3 'd':4}<assert_stmt>set(config.keys())<eq>set(['bar' 'foo'])<block_end><def_stmt>test_self_ref_substitution_string_opt_concat self<block_start>'''
Example from HOCON spec
'''<line_sep>config=ConfigFactory.parse_string("""
a = ${?a}foo
""")<assert_stmt>config.get("a")<eq>'foo'<assert_stmt>set(config.keys())<eq>set(['a'])<block_end><def_stmt>test_self_ref_substitution_dict_recurse_part self<block_start><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
x = ${x} {y: 1}
x = ${x.y}
""")<block_end><block_end><def_stmt>test_self_ref_substitution_object self<block_start>config=ConfigFactory.parse_string("""
x = {a: 1, b: 2}
x = ${x} {c: 3}
x = {z: 0} ${x}
x = {y: -1} ${x} {d: 4}
""")<assert_stmt>config.get("x")<eq>{'a':1 'b':2 'c':3 'z':0 'y':-1 'd':4}<block_end><def_stmt>test_self_ref_child self<block_start>config=ConfigFactory.parse_string("""
a.b = 3
a.b = ${a.b}
a.b = ${a.b}
a.c = [1,2]
a.c = ${a.c}
a.d = {foo: bar}
a.d = ${a.d}
""")<assert_stmt>config.get("a")<eq>{'b':3 'c':[1 2] 'd':{'foo':'bar'}}<block_end><def_stmt>test_concat_multi_line_string self<block_start>config=ConfigFactory.parse_string("""
common_modules = perl \
java \
python
""")<assert_stmt>[x.strip()<for>x config['common_modules'].split()<if>x.strip(' ')<ne>'']<eq>['perl' 'java' 'python']<block_end><def_stmt>test_concat_multi_line_list self<block_start>config=ConfigFactory.parse_string("""
common_modules = [perl] \
[java] \
[python]
""")<assert_stmt>config['common_modules']<eq>['perl' 'java' 'python']<block_end><def_stmt>test_concat_multi_line_dict self<block_start>config=ConfigFactory.parse_string("""
common_modules = {a:perl} \
{b:java} \
{c:python}
""")<assert_stmt>config['common_modules']<eq>{'a':'perl' 'b':'java' 'c':'python'}<block_end><def_stmt>test_parse_URL_from_samples self<block_start>config=ConfigFactory.parse_URL("file:samples/aws.conf")<assert_stmt>config.get('data-center-generic.cluster-size')<eq>6<assert_stmt>config.get('large-jvm-opts')<eq>['-XX:+UseParNewGC' '-Xm16g']<block_end><def_stmt>test_parse_URL_from_invalid self<block_start>config=ConfigFactory.parse_URL("https://nosuchurl")<assert_stmt>config<eq>[]<block_end><def_stmt>test_include_dict_from_samples self<block_start>config=ConfigFactory.parse_file("samples/animals.conf")<assert_stmt>config.get('cat.garfield.say')<eq>'meow'<assert_stmt>config.get('dog.mutt.hates.garfield.say')<eq>'meow'<block_end><def_stmt>test_include_glob_dict_from_samples self<block_start>config=ConfigFactory.parse_file("samples/all_animals.conf")<assert_stmt>config.get('animals.garfield.say')<eq>'meow'<assert_stmt>config.get('animals.mutt.hates.garfield.say')<eq>'meow'<block_end><def_stmt>test_include_glob_list_from_samples self<block_start>config=ConfigFactory.parse_file("samples/all_bars.conf")<line_sep>bars=config.get_list('bars')<assert_stmt>len(bars)<eq>10<line_sep>names={bar['name']<for>bar bars}<line_sep>types={bar['type']<for>bar bars<if>'type'<in>bar}<line_sep>print(types '(((((')<assert_stmt>'<NAME>'<in>names<assert_stmt>'Homer\'s favorite coffee'<in>names<assert_stmt>'milk'<in>types<block_end><def_stmt>test_list_of_dicts self<block_start>config=ConfigFactory.parse_string("""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
""")<assert_stmt>config['a']<eq>[{'a':1 'b':2} {'a':3 'c':4}]<block_end><def_stmt>test_list_of_lists self<block_start>config=ConfigFactory.parse_string("""
a: [
[1, 2]
[3, 4]
]
""")<assert_stmt>config['a']<eq>[[1 2] [3 4]]<block_end><def_stmt>test_list_of_dicts_with_merge self<block_start>config=ConfigFactory.parse_string("""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
""")<assert_stmt>config['a']<eq>[{'a':1 'b':2 'f':4} {'a':3 'c':4 'f':4} {'a':3 'c':6 'f':4}]<block_end><def_stmt>test_list_of_lists_with_merge self<block_start>config=ConfigFactory.parse_string("""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
""")<assert_stmt>config['a']<eq>[[5 6 1 2] [3 4 5 6] [1 2 5 6 7 8]]<block_end><def_stmt>test_invalid_assignment self<block_start><with_stmt>pytest.raises(ParseSyntaxException)<block_start>ConfigFactory.parse_string('common_modules [perl]')<block_end><with_stmt>pytest.raises(ParseException)<block_start>ConfigFactory.parse_string('common_modules {} {perl: 1}')<block_end><with_stmt>pytest.raises(ParseSyntaxException)<block_start>ConfigFactory.parse_string("""
a = {f: 5}
common_modules ${a} {perl: 1}
""")<block_end><block_end><def_stmt>test_invalid_dict self<block_start><with_stmt>pytest.raises(ParseSyntaxException)<block_start>ConfigFactory.parse_string("""
a = {
f: 5
g
}
""")<block_end><with_stmt>pytest.raises(ParseSyntaxException)<block_start>ConfigFactory.parse_string('a = {g}')<block_end><block_end><def_stmt>test_include_file self<block_start><with_stmt>tempfile.NamedTemporaryFile('w')<as>fdin<block_start>fdin.write('[1, 2]')<line_sep>fdin.flush()<line_sep>config1=ConfigFactory.parse_string("""
a: [
include "{tmp_file}"
]
""".format(tmp_file=fdin.name))<assert_stmt>config1['a']<eq>[1 2]<line_sep>config2=ConfigFactory.parse_string("""
a: [
include file("{tmp_file}")
]
""".format(tmp_file=fdin.name))<assert_stmt>config2['a']<eq>[1 2]<line_sep>config3=ConfigFactory.parse_string("""
a: [
include url("file://{tmp_file}")
]
""".format(tmp_file=fdin.name))<assert_stmt>config3['a']<eq>[1 2]<block_end><block_end><def_stmt>test_include_missing_file self<block_start>config1=ConfigFactory.parse_string("""
a: [
include "dummy.txt"
3
4
]
""")<assert_stmt>config1['a']<eq>[3 4]<block_end><def_stmt>test_include_required_file self<block_start>config=ConfigFactory.parse_string("""
a {
include required("samples/animals.d/cat.conf")
t = 2
}
""")<line_sep>expected={'a':{'garfield':{'say':'meow'} 't':2}}<assert_stmt>expected<eq>config<line_sep>config2=ConfigFactory.parse_string("""
a {
include required(file("samples/animals.d/cat.conf"))
t = 2
}
""")<assert_stmt>expected<eq>config2<block_end><def_stmt>test_include_missing_required_file self<block_start><with_stmt>pytest.raises(IOError)<block_start>ConfigFactory.parse_string("""
a: [
include required("dummy.txt")
3
4
]
""")<block_end><block_end><def_stmt>test_resolve_package_path self<block_start>path=ConfigParser.resolve_package_path("pyhocon:config_parser.py")<assert_stmt>os.path.exists(path)<block_end><def_stmt>test_resolve_package_path_format self<block_start><with_stmt>pytest.raises(ValueError)<block_start>ConfigParser.resolve_package_path("pyhocon/config_parser.py")<block_end><block_end><def_stmt>test_resolve_package_path_missing self<block_start><with_stmt>pytest.raises(ImportError)<block_start>ConfigParser.resolve_package_path("non_existent_module:foo.py")<block_end><block_end><def_stmt>test_include_package_file self monkeypatch<block_start>temp_dir=tempfile.mkdtemp()<try_stmt><block_start>module_dir=os.path.join(temp_dir 'my_module')<line_sep>module_conf=os.path.join(module_dir 'my.conf')<line_sep># create the module folder and necessary files (__init__ and config)
os.mkdir(module_dir)<line_sep>open(os.path.join(module_dir '__init__.py') 'a').close()<with_stmt>open(module_conf 'w')<as>fdin<block_start>fdin.write("{c: 3}")<block_end># add the temp dir to sys.path so that 'my_module' can be discovered
monkeypatch.syspath_prepend(temp_dir)<line_sep># load the config and include the other config file from 'my_module'
config=ConfigFactory.parse_string("""
a: 1
b: 2
include package("my_module:my.conf")
""")<line_sep># check that the contents of both config files are available
<assert_stmt>dict(config.as_plain_ordered_dict())<eq>{'a':1 'b':2 'c':3}<block_end><finally_stmt><block_start>shutil.rmtree(temp_dir ignore_errors=<true>)<block_end><block_end><def_stmt>test_include_dict self<block_start>expected_res={'a':1 'b':2 'c':3 'd':4}<with_stmt>tempfile.NamedTemporaryFile('w')<as>fdin<block_start>fdin.write('{a: 1, b: 2}')<line_sep>fdin.flush()<line_sep>config1=ConfigFactory.parse_string("""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name))<assert_stmt>config1['a']<eq>expected_res<line_sep>config2=ConfigFactory.parse_string("""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name))<assert_stmt>config2['a']<eq>expected_res<line_sep>config3=ConfigFactory.parse_string("""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name))<assert_stmt>config3['a']<eq>expected_res<block_end><block_end><def_stmt>test_include_substitution self<block_start><with_stmt>tempfile.NamedTemporaryFile('w')<as>fdin<block_start>fdin.write('y = ${x}')<line_sep>fdin.flush()<line_sep>config=ConfigFactory.parse_string("""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name))<assert_stmt>config['x']<eq>42<assert_stmt>config['y']<eq>42<block_end><block_end>@pytest.mark.xfail<def_stmt>test_include_substitution2 self<block_start><with_stmt>tempfile.NamedTemporaryFile('w')<as>fdin<block_start>fdin.write('{ x : 10, y : ${x} }')<line_sep>fdin.flush()<line_sep>config=ConfigFactory.parse_string("""
{
a : { include """+'"'+fdin.name+"""" }
a : { x : 42 }
}
""")<assert_stmt>config['a']['x']<eq>42<assert_stmt>config['a']['y']<eq>42<block_end><block_end><def_stmt>test_var_with_include_keyword self<block_start>config=ConfigFactory.parse_string("""
include-database=true
""")<assert_stmt>config<eq>{'include-database':<true>}<block_end><def_stmt>test_substitution_override self<block_start>config=ConfigFactory.parse_string("""
database {
host = localhost
port = 5432
user = people
name = peopledb
pass = <PASSWORD>
}
user=test_user
pass=<PASSWORD>
database {
user = ${user}
pass = ${pass}
}
""")<assert_stmt>config['database.user']<eq>'test_user'<assert_stmt>config['database.pass']<eq>'<PASSWORD>'<block_end><def_stmt>test_substitution_flat_override self<block_start>config=ConfigFactory.parse_string("""
database {
name = peopledb
pass = <PASSWORD>
name = ${?NOT_EXISTS}
pass = ${?NOT_EXISTS}
}
""")<assert_stmt>config['database.name']<eq>'peopledb'<assert_stmt>config['database.pass']<eq>'<PASSWORD>'<block_end><def_stmt>test_substitution_multiple_override self<block_start>config=ConfigFactory.parse_string("""
a: 1
b: foo
c: ${a} ${b}
c: ${b} ${a}
d: ${a} ${b}
d: ${a} bar
""")<assert_stmt>config['c']<eq>'foo 1'<assert_stmt>config['d']<eq>'1 bar'<block_end><def_stmt>test_substitution_nested_override self<block_start>config=ConfigFactory.parse_string("""
database {
name = peopledb
pass = <PASSWORD>
}
database {
name = ${?user}
pass = ${?pass}
}
""")<assert_stmt>config['database.name']<eq>'peopledb'<assert_stmt>config['database.pass']<eq>'<PASSWORD>'<block_end><def_stmt>test_optional_with_merge self<block_start>unresolved=ConfigFactory.parse_string("""
foo: 42
foo: ${?a}
""" resolve=<false>)<line_sep>source=ConfigFactory.parse_string("""
b: 14
""")<line_sep>config=unresolved.with_fallback(source)<assert_stmt>config['foo']<eq>42<line_sep>config=source.with_fallback(unresolved)<assert_stmt>config['foo']<eq>42<block_end><def_stmt>test_fallback_with_resolve self<block_start>config3=ConfigFactory.parse_string("c=5")<line_sep>config2=ConfigFactory.parse_string("b=${c}" resolve=<false>)<line_sep>config1=ConfigFactory.parse_string("a=${b}" resolve=<false>).with_fallback(config2 resolve=<false>).with_fallback(config3)<assert_stmt>{'a':5 'b':5 'c':5}<eq>config1<block_end><def_stmt>test_optional_substitution self<block_start>config=ConfigFactory.parse_string("""
a = 45
b = ${?c}
d = ${?c} 4
e = ${?a}
g = ${?c1} ${?c2}
h = ${?c1} ${?c2} 1
""")<assert_stmt>'b'<not><in>config<assert_stmt>config['d']<eq>4<assert_stmt>config['e']<eq>45<assert_stmt>'g'<not><in>config<assert_stmt>config['h']<eq>1<block_end><def_stmt>test_cascade_optional_substitution self<block_start>config=ConfigFactory.parse_string("""
num = 3
retries_msg = You have ${num} retries
retries_msg = ${?CUSTOM_MSG}
""")<assert_stmt>config<eq>{'num':3 'retries_msg':'You have 3 retries'}<block_end><def_stmt>test_substitution_cycle self<block_start><with_stmt>pytest.raises(ConfigSubstitutionException)<block_start>ConfigFactory.parse_string("""
a = ${b}
b = ${c}
c = ${a}
""")<block_end><block_end><def_stmt>test_assign_number_with_eol self<block_start>config=ConfigFactory.parse_string("""
a =
4
b = # test
# test2
5
c =
6
""")<assert_stmt>config['a']<eq>4<assert_stmt>config['b']<eq>5<assert_stmt>config['c']<eq>6<block_end><def_stmt>test_assign_int self<block_start>config=ConfigFactory.parse_string("""
short = 12
long = 12321321837612378126213217321
negative = -15
""")<line_sep># on python 3 long will be an int but on python 2 long with be a long
<assert_stmt>config['short']<eq>12<assert_stmt>isinstance(config['short'] int)<assert_stmt>config['long']<eq>12321321837612378126213217321<assert_stmt>isinstance(config['negative'] int)<assert_stmt>config['negative']<eq>-15<block_end><def_stmt>test_assign_float self<block_start>config=ConfigFactory.parse_string("""
a = 121.22
b = -121.22
c = .54
d = -.54
""")<line_sep># on python 3 long will be an int but on python 2 long with be a long
<assert_stmt>config['a']<eq>121.22<assert_stmt>config['b']<eq>-121.22<assert_stmt>config['c']<eq>.54<assert_stmt>config['d']<eq>-.54<block_end><def_stmt>test_sci_real self<block_start>"""
Test scientific expression of number
"""<line_sep>config=ConfigFactory.parse_string("""
short = 12.12321
long1 = 121.22E3423432
neg_long1 = 121.22E-1
long2 = 121.22e3423432
neg_long2 = 121.22e-3
""")<line_sep># on python 3 long will be an int but on python 2 long with be a long
<assert_stmt>config['short']<eq>12.12321<assert_stmt>config['long1']<eq>121.22E3423432<assert_stmt>config['neg_long1']<eq>121.22E-1<assert_stmt>config['long2']<eq>121.22E3423432<assert_stmt>config['neg_long2']<eq>121.22E-3<block_end><def_stmt>test_assign_strings_with_eol self<block_start>config=ConfigFactory.parse_string("""
a =
"a"
b = # test
# test2
"b"
c =
"c"
""")<assert_stmt>config['a']<eq>'a'<assert_stmt>config['b']<eq>'b'<assert_stmt>config['c']<eq>'c'<block_end><def_stmt>test_assign_list_numbers_with_eol self<block_start>config=ConfigFactory.parse_string("""
a =
[
1,
2,
]
b = # test
# test2
[
3,
4,]
c =
[
5,
6
]
""")<assert_stmt>config['a']<eq>[1 2]<assert_stmt>config['b']<eq>[3 4]<assert_stmt>config['c']<eq>[5 6]<block_end><def_stmt>test_assign_list_strings_with_eol self<block_start>config=ConfigFactory.parse_string("""
a =
[
"a",
"b",
]
b = # test
# test2
[
"c",
"d",]
c =
[
"e",
"f"
]
""")<assert_stmt>config['a']<eq>['a' 'b']<assert_stmt>config['b']<eq>['c' 'd']<assert_stmt>config['c']<eq>['e' 'f']<block_end><def_stmt>test_assign_dict_strings_with_equal_sign_with_eol self<block_start>config=ConfigFactory.parse_string("""
a =
{
a: 1,
b: 2,
}
b = # test
# test2
{
c: 3,
d: 4,}
c =
{
e: 5,
f: 6
}
""")<assert_stmt>config['a']<eq>{'a':1 'b':2}<assert_stmt>config['b']<eq>{'c':3 'd':4}<assert_stmt>config['c']<eq>{'e':5 'f':6}<block_end><def_stmt>test_assign_dict_strings_no_equal_sign_with_eol self<block_start>config=ConfigFactory.parse_string("""
a
{
a: 1,
b: 2,
}
b # test
# test2
{
c: 3,
d: 4,}
c
{
e: 5,
f: 6
}
""")<assert_stmt>config['a']<eq>{'a':1 'b':2}<assert_stmt>config['b']<eq>{'c':3 'd':4}<assert_stmt>config['c']<eq>{'e':5 'f':6}<block_end><def_stmt>test_substitutions_overwrite self<block_start>config1=ConfigFactory.parse_string("""
a = 123
a = ${?test}
a = 5
""")<assert_stmt>config1['a']<eq>5<line_sep>config2=ConfigFactory.parse_string("""
{
database {
host = "localhost"
port = 8000
url = ${database.host}":"${database.port}
}
database {
host = ${?DB_HOST}
}
database {
host = "other.host.net"
port = 433
}
}
""")<assert_stmt>config2['database']['host']<eq>'other.host.net'<assert_stmt>config2['database']['port']<eq>433<assert_stmt>config2['database']['url']<eq>'other.host.net:433'<block_end><def_stmt>test_fallback_substitutions_overwrite self<block_start>config1=ConfigFactory.parse_string("""
a = {
b: 1
c: 2
}
""")<line_sep>config2=ConfigFactory.parse_string("""
a.b = 4
a.d = 3
""")<line_sep>config3=config1.with_fallback(config2)<assert_stmt>config3['a']<eq>{'b':1 'c':2 'd':3}<line_sep>config4=ConfigFactory.parse_string("""
name: foo
""")<line_sep>config5=ConfigFactory.parse_string(u"""
longName: "long "${?name}
""" resolve=<false>)<line_sep>config6=config4.with_fallback(config5)<assert_stmt>config6<eq>{'longName':'long foo' 'name':'foo'}<block_end><def_stmt>test_fallback_substitutions_overwrite_file self<block_start>config1=ConfigFactory.parse_string("""
{
data-center-generic = { cluster-size: 8 }
misc = "mist"
}
""")<line_sep># use unicode path here for regression testing https://github.com/chimpler/pyhocon/issues/44
config2=config1.with_fallback(u'samples/aws.conf')<assert_stmt>config2<eq>{'data-center-generic':{'cluster-size':8} 'data-center-east':{'cluster-size':8 'name':'east'} 'misc':'mist' 'default-jvm-opts':['-XX:+UseParNewGC'] 'large-jvm-opts':['-XX:+UseParNewGC' '-Xm16g']}<block_end><def_stmt>test_fallback_self_ref_substitutions_append self<block_start>config1=ConfigFactory.parse_string("""
list = [ 1, 2, 3 ]
""")<line_sep>config2=ConfigFactory.parse_string("""
list = ${list} [ 4, 5, 6 ]
""" resolve=<false>)<line_sep>config2=config2.with_fallback(config1)<assert_stmt>config2.get("list")<eq>[1 2 3 4 5 6]<block_end><def_stmt>test_fallback_self_ref_substitutions_append_plus_equals self<block_start>config1=ConfigFactory.parse_string("""
list = [ 1, 2, 3 ]
""")<line_sep>config2=ConfigFactory.parse_string("""
list += [ 4, 5, 6 ]
""" resolve=<false>)<line_sep>config2=config2.with_fallback(config1)<assert_stmt>config2.get("list")<eq>[1 2 3 4 5 6]<block_end><def_stmt>test_self_merge_ref_substitutions_object self<block_start>config1=ConfigFactory.parse_string("""
a : { }
b : 1
c : ${a} { d : [ ${b} ] }
""" resolve=<false>)<line_sep>config2=ConfigFactory.parse_string("""
e : ${a} {
}
""" resolve=<false>)<line_sep>merged=ConfigTree.merge_configs(config1 config2)<line_sep>ConfigParser.resolve_substitutions(merged)<assert_stmt>merged.get("c.d")<eq>[1]<block_end><def_stmt>test_self_merge_ref_substitutions_object2 self<block_start>config1=ConfigFactory.parse_string("""
x : { v1: 1 }
b1 : {v2: 2 }
b = [${b1}]
""" resolve=<false>)<line_sep>config2=ConfigFactory.parse_string("""
b2 : ${x} {v2: 3}
b += [${b2}]
""" resolve=<false>)<line_sep>merged=ConfigTree.merge_configs(config1 config2)<line_sep>ConfigParser.resolve_substitutions(merged)<line_sep>b=merged.get("b")<assert_stmt>len(b)<eq>2<assert_stmt>b[0]<eq>{'v2':2}<assert_stmt>b[1]<eq>{'v1':1 'v2':3}<block_end><def_stmt>test_self_merge_ref_substitutions_object3 self<block_start>config1=ConfigFactory.parse_string("""
b1 : { v1: 1 }
b = [${b1}]
""" resolve=<false>)<line_sep>config2=ConfigFactory.parse_string("""
b1 : { v1: 2, v2: 3 }
""" resolve=<false>)<line_sep>merged=ConfigTree.merge_configs(config1 config2)<line_sep>ConfigParser.resolve_substitutions(merged)<assert_stmt>merged.get("b1")<eq>{"v1":2 "v2":3}<line_sep>b=merged.get("b")<assert_stmt>len(b)<eq>1<assert_stmt>b[0]<eq>{"v1":2 "v2":3}<block_end><def_stmt>test_fallback_self_ref_substitutions_merge self<block_start>config1=ConfigFactory.parse_string("""
dict = { x: 1 }
""")<line_sep>config2=ConfigFactory.parse_string("""
dict = ${dict} { y: 2 }
""" resolve=<false>)<line_sep>config2=config2.with_fallback(config1)<assert_stmt>config2.get("dict")<eq>{'x':1 'y':2}<block_end><def_stmt>test_fallback_self_ref_substitutions_concat_string self<block_start>config1=ConfigFactory.parse_string("""
string = abc
""")<line_sep>config2=ConfigFactory.parse_string("""
string = ${string}def
""" resolve=<false>)<line_sep>result=config2.with_fallback(config1)<assert_stmt>result.get("string")<eq>'abcdef'<line_sep># test no mutation on config1
<assert_stmt>result<is><not>config1<line_sep># test no mutation on config2
<assert_stmt>"abc"<not><in>str(config2)<block_end><def_stmt>test_fallback_non_root self<block_start>root=ConfigFactory.parse_string("""
a = 1
mid.b = 1
""")<line_sep>config=root.get_config("mid").with_fallback(root)<assert_stmt>config['a']<eq>1<and>config['b']<eq>1<block_end><def_stmt>test_object_field_substitution self<block_start>config=ConfigFactory.parse_string("""
A = ${Test}
Test {
field1 = 1
field2 = ${Test.field1}"2"
field3 = ${Test.field2}"3"
}
""")<assert_stmt>config.get_string("A.field1")<eq>"1"<assert_stmt>config.get_string("A.field2")<eq>"12"<assert_stmt>config.get_string("A.field3")<eq>"123"<assert_stmt>config.get_string("Test.field1")<eq>"1"<assert_stmt>config.get_string("Test.field2")<eq>"12"<assert_stmt>config.get_string("Test.field3")<eq>"123"<block_end><def_stmt>test_one_line_quote_escape self<block_start>config=ConfigFactory.parse_string("""
test_no_quotes: abc\\n\\n
test_quotes: "abc\\n\\n"
""")<assert_stmt>config<eq>{'test_no_quotes':'abc\n\n' 'test_quotes':'abc\n\n'}<block_end><def_stmt>test_multi_line_escape self<block_start>config=ConfigFactory.parse_string("""
with-escaped-backslash: \"\"\"
\\\\
\"\"\"
with-newline-escape-sequence: \"\"\"
\\n
\"\"\"
with-escaped-newline-escape-sequence: \"\"\"
\\\\n
\"\"\"
""")<assert_stmt>config['with-escaped-backslash']<eq>'\n\\\\\n'<assert_stmt>config['with-newline-escape-sequence']<eq>'\n\\n\n'<assert_stmt>config['with-escaped-newline-escape-sequence']<eq>'\n\\\\n\n'<block_end><def_stmt>test_multiline_with_backslash self<block_start>config=ConfigFactory.parse_string("""
test = line1 \
line2
test2 = test
""")<assert_stmt>config<eq>{'test':'line1 line2' 'test2':'test'}<block_end><def_stmt>test_from_dict_with_dict self<block_start>d={'banana':3 'apple':4 'pear':1 'orange':2 }<line_sep>config=ConfigFactory.from_dict(d)<assert_stmt>config<eq>d<block_end><def_stmt>test_from_dict_with_ordered_dict self<block_start>d=OrderedDict()<line_sep>d['banana']=3<line_sep>d['apple']=4<line_sep>d['pear']=1<line_sep>d['orange']=2<line_sep>config=ConfigFactory.from_dict(d)<assert_stmt>config<eq>d<block_end><def_stmt>test_from_dict_with_nested_dict self<block_start>d=OrderedDict()<line_sep>d['banana']=3<line_sep>d['apple']=4<line_sep>d['pear']=1<line_sep>d['tree']={'a':'abc\ntest\n' 'b':[1 2 3]}<line_sep>config=ConfigFactory.from_dict(d)<assert_stmt>config<eq>d<block_end><def_stmt>test_object_concat self<block_start>config=ConfigFactory.parse_string("""o1 = {
foo : {
a : 1
b : 2
}
}
o2 = {
foo : {
b : 3
c : 4
}
}
o3 = ${o1} ${o2}
""")<assert_stmt>config.get_int('o1.foo.b')<eq>2<assert_stmt>config.get_int('o2.foo.b')<eq>3<assert_stmt>config.get_int('o3.foo.b')<eq>3<assert_stmt>config.get_int('o1.foo.c' default=42)<eq>42<assert_stmt>config.get_int('o3.foo.a')<eq>1<assert_stmt>config.get_int('o3.foo.c')<eq>4<block_end><def_stmt>test_issue_75 self<block_start>config=ConfigFactory.parse_string("""base : {
bar: ["a"]
}
sub : ${base} {
baz: ${base.bar} ["b"]
}
sub2: ${sub}
""")<assert_stmt>config.get_list('base.bar')<eq>["a"]<assert_stmt>config.get_list('sub.baz')<eq>["a" "b"]<assert_stmt>config.get_list('sub2.baz')<eq>["a" "b"]<block_end><def_stmt>test_plain_ordered_dict self<block_start>config=ConfigFactory.parse_string("""
e : ${a} {
}
""" resolve=<false>)<with_stmt>pytest.raises(ConfigException)<block_start>config.as_plain_ordered_dict()<block_end><block_end><def_stmt>test_quoted_strings_with_ws self<block_start>config=ConfigFactory.parse_string("""
no_trailing_ws = "foo" "bar "
trailing_ws = "foo" "bar "{ws}
trailing_ws_with_comment = "foo" "bar "{ws}// comment
""".format(ws=' '))<assert_stmt>config<eq>{'no_trailing_ws':"foo bar " 'trailing_ws':"foo bar " 'trailing_ws_with_comment':"foo bar "}<block_end><def_stmt>test_unquoted_strings_with_ws self<block_start>config=ConfigFactory.parse_string("""
a = foo bar
""")<assert_stmt>config<eq>{'a':'foo bar'}<block_end><def_stmt>test_quoted_unquoted_strings_with_ws self<block_start>config=ConfigFactory.parse_string("""
a = foo "bar" dummy
""")<assert_stmt>config<eq>{'a':'foo bar dummy'}<block_end><def_stmt>test_quoted_unquoted_strings_with_ws_substitutions self<block_start>config=ConfigFactory.parse_string("""
x = 5
b = test
a = foo "bar" ${b} dummy
c = foo ${x} bv
d = foo ${x} 43
""")<assert_stmt>config<eq>{'x':5 'b':'test' 'a':'foo bar test dummy' 'c':'foo 5 bv' 'd':'foo 5 43'}<block_end><def_stmt>test_complex_substitutions self<block_start>config=ConfigFactory.parse_string("""
a: 1
b: ${c} {
pa: [${a}]
pb: ${b.pa}
}
c: { }
d: { pc: ${b.pa} }
e: ${b}
""" resolve=<true>)<assert_stmt>config<eq>{'a':1 'b':{'pa':[1] 'pb':[1]} 'c':{} 'd':{'pc':[1]} 'e':{'pa':[1] 'pb':[1]}}<block_end><def_stmt>test_assign_next_line self<block_start>config=ConfigFactory.parse_string("""
a = // abc
abc
c =
5
""")<assert_stmt>config<eq>{'a':'abc' 'c':5}<block_end>@mock.patch.dict(os.environ STRING_VAR='value_from_environment')<def_stmt>test_string_from_environment self<block_start>config=ConfigFactory.parse_string("""
string_from_env = ${STRING_VAR}
""")<assert_stmt>config<eq>{'string_from_env':'value_from_environment'}<block_end>@mock.patch.dict(os.environ STRING_VAR='value_from_environment')<def_stmt>test_string_from_environment_self_ref self<block_start>config=ConfigFactory.parse_string("""
STRING_VAR = ${STRING_VAR}
""")<assert_stmt>config<eq>{'STRING_VAR':'value_from_environment'}<block_end>@mock.patch.dict(os.environ STRING_VAR='value_from_environment')<def_stmt>test_string_from_environment_self_ref_optional self<block_start>config=ConfigFactory.parse_string("""
STRING_VAR = ${?STRING_VAR}
""")<assert_stmt>config<eq>{'STRING_VAR':'value_from_environment'}<block_end>@mock.patch.dict(os.environ TRUE_OR_FALSE='false')<def_stmt>test_bool_from_environment self<block_start>config=ConfigFactory.parse_string("""
bool_from_env = ${TRUE_OR_FALSE}
""")<assert_stmt>config<eq>{'bool_from_env':'false'}<assert_stmt>config.get_bool('bool_from_env')<is><false><block_end>@mock.patch.dict(os.environ INT_VAR='5')<def_stmt>test_int_from_environment self<block_start>config=ConfigFactory.parse_string("""
int_from_env = ${INT_VAR}
""")<assert_stmt>config<eq>{'int_from_env':'5'}<assert_stmt>config.get_int('int_from_env')<eq>5<block_end><def_stmt>test_unicode_dict_key self<block_start>input_string=u"""
www.sample.com {
us {
name = "first domain"
}
}
www.example-ö.com {
us {
name = "second domain"
}
}
"""<line_sep>config=ConfigFactory.parse_string(input_string)<assert_stmt>config.get_string(u'www.sample.com.us.name')<eq>'first domain'<assert_stmt>config.get_string(u'www.example-ö.com.us.name')<eq>'second domain'<with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>config.put(u'www.example-ö' 'append_failure' append=<true>)<block_end><with_stmt>pytest.raises(ConfigMissingException)<block_start>config.get_string(u'missing_unicode_key_ö')<block_end><with_stmt>pytest.raises(ConfigException)<block_start>config.get_bool(u'www.example-ö.com.us.name')<block_end><with_stmt>pytest.raises(ConfigException)<block_start>config.get_list(u'www.example-ö.com.us.name')<block_end><with_stmt>pytest.raises(ConfigException)<block_start>config.get_config(u'www.example-ö.com.us.name')<block_end><with_stmt>pytest.raises(ConfigWrongTypeException)<block_start>config.get_string(u'www.example-ö.com.us.name.missing')<block_end><block_end><def_stmt>test_with_comment_on_last_line self# Adress issue #102
<block_start>config_tree=ConfigFactory.parse_string("""
foo: "1"
bar: "2"
# DO NOT CHANGE ANY OF THE ABOVE SETTINGS!""")<assert_stmt>config_tree<eq>{'foo':'1' 'bar':'2'}<block_end><def_stmt>test_triple_quotes_same_line self<block_start>config_tree=ConfigFactory.parse_string('a:["""foo"""", "bar"]')<assert_stmt>config_tree<eq>{'a':['foo"' "bar"]}<block_end><def_stmt>test_pop self<block_start>config_tree=ConfigFactory.parse_string('a:{b: 3, d: 6}')<assert_stmt>3<eq>config_tree.pop('a.b' 5)<assert_stmt>5<eq>config_tree.pop('a.c' 5)<line_sep>expected={'a':{'d':6}}<assert_stmt>expected<eq>config_tree<block_end><def_stmt>test_merge_overriden self# Adress issue #110
# ConfigValues must merge with its .overriden_value
# if both are ConfigTree
<block_start>config_tree=ConfigFactory.parse_string("""
foo: ${bar}
foo: ${baz}
bar: {r: 1, s: 2}
baz: {s: 3, t: 4}
""")<assert_stmt>'r'<in>config_tree['foo']<and>'t'<in>config_tree['foo']<and>config_tree['foo']['s']<eq>3<block_end><def_stmt>test_attr_syntax self<block_start>config=ConfigFactory.parse_string("""
a: 1
b: {
pb: 5
}
""")<assert_stmt>5<eq>config.b.pb<block_end><def_stmt>test_escape_quote self<block_start>config=ConfigFactory.parse_string("""
quoted: "abc\\"test"
unquoted: abc\\"test
""")<assert_stmt>'abc"test'<eq>config['quoted']<assert_stmt>'abc"test'<eq>config['unquoted']<block_end><def_stmt>test_escape_quote_complex self<block_start>config=ConfigFactory.parse_string("""
value: "{\\"critical\\":\\"0.00\\",\\"warning\\":\\"99.99\\"}"
""")<assert_stmt>'{"critical":"0.00","warning":"99.99"}'<eq>config['value']<block_end><def_stmt>test_keys_with_slash self<block_start>config=ConfigFactory.parse_string("""
/abc/cde1: abc
"/abc/cde2": "cde"
/abc/cde3: "fgh"
""")<assert_stmt>'abc'<eq>config['/abc/cde1']<assert_stmt>'cde'<eq>config['/abc/cde2']<assert_stmt>'fgh'<eq>config['/abc/cde3']<block_end><def_stmt>test_mutation_values self<block_start>config=ConfigFactory.parse_string("""
common : {
}
b1 = []
var = "wrong"
compilerCommon : ${common} {
VAR : ${var}
}
substrate-suite: {
VAR : "right"
}
b1 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
b2 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
""")<assert_stmt>config.get("b1")[1]['VAR']<eq>'right'<assert_stmt>config.get("b2")[1]['VAR']<eq>'right'<block_end><def_stmt>test_escape_sequences_json_equivalence self<block_start>"""
Quoted strings are in the same format as JSON strings,
See: https://github.com/lightbend/config/blob/master/HOCON.md#unchanged-from-json
"""<line_sep>source=r"""
{
"plain-backslash": "\\",
"tab": "\t",
"no-tab": "\\t",
"newline": "\n",
"no-newline": "\\n",
"cr": "\r",
"no-cr": "\\r",
"windows": "c:\\temp"
}
"""<line_sep>expected={'plain-backslash':'\\' 'tab':'\t' 'no-tab':'\\t' 'newline':'\n' 'no-newline':'\\n' 'cr':'\r' 'no-cr':'\\r' 'windows':'c:\\temp' }<line_sep>config=ConfigFactory.parse_string(source)<assert_stmt>config<eq>expected<assert_stmt>config<eq>json.loads(source)<block_end><block_end><try_stmt><block_start><import_from_stmt>dateutil.relativedelta relativedelta<line_sep>@pytest.mark.parametrize('data_set' [('a: 1 months' relativedelta(months=1)) ('a: 1months' relativedelta(months=1)) ('a: 2 month' relativedelta(months=2)) ('a: 3 mo' relativedelta(months=3)) ('a: 3mo' relativedelta(months=3)) ('a: 3 mon' '3 mon') ('a: 1 years' relativedelta(years=1)) ('a: 1years' relativedelta(years=1)) ('a: 2 year' relativedelta(years=2)) ('a: 3 y' relativedelta(years=3)) ('a: 3y' relativedelta(years=3)) ])<def_stmt>test_parse_string_with_duration_optional_units data_set<block_start>config=ConfigFactory.parse_string(data_set[0])<assert_stmt>config['a']<eq>data_set[1]<block_end><block_end><except_stmt>Exception<block_start><pass><block_end> |
<import_stmt>mock<import_stmt>pytest<import_stmt>py_zipkin.storage<line_sep>@pytest.fixture(autouse=<true> scope="module")<def_stmt>create_zipkin_attrs # The following tests all expect _thread_local.zipkin_attrs to exist: if it
# doesn't, mock.patch will fail.
<block_start>py_zipkin.storage.ThreadLocalStack().get()<block_end><def_stmt>test_get_zipkin_attrs_returns_none_if_no_zipkin_attrs <block_start>tracer=py_zipkin.storage.get_default_tracer()<with_stmt>mock.patch.object(tracer._context_stack "_storage" [])<block_start><assert_stmt><not>py_zipkin.storage.ThreadLocalStack().get()<assert_stmt><not>py_zipkin.storage.ThreadLocalStack().get()<block_end><block_end><def_stmt>test_get_zipkin_attrs_with_context_returns_none_if_no_zipkin_attrs <block_start><with_stmt>mock.patch.object(py_zipkin.storage.log "warning" autospec=<true>)<as>log<block_start><assert_stmt><not>py_zipkin.storage.Stack([]).get()<assert_stmt>log.call_count<eq>1<block_end><block_end><def_stmt>test_storage_stack_still_works_if_you_dont_pass_in_storage # Let's make sure this still works if we don't pass in a custom storage.
<block_start><assert_stmt><not>py_zipkin.storage.Stack().get()<block_end><def_stmt>test_get_zipkin_attrs_returns_the_last_of_the_list <block_start>tracer=py_zipkin.storage.get_default_tracer()<with_stmt>mock.patch.object(tracer._context_stack "_storage" ["foo"])<block_start><assert_stmt>"foo"<eq>py_zipkin.storage.ThreadLocalStack().get()<block_end><block_end><def_stmt>test_get_zipkin_attrs_with_context_returns_the_last_of_the_list <block_start><assert_stmt>"foo"<eq>py_zipkin.storage.Stack(["bar" "foo"]).get()<block_end><def_stmt>test_pop_zipkin_attrs_does_nothing_if_no_requests <block_start>tracer=py_zipkin.storage.get_default_tracer()<with_stmt>mock.patch.object(tracer._context_stack "_storage" [])<block_start><assert_stmt><not>py_zipkin.storage.ThreadLocalStack().pop()<block_end><block_end><def_stmt>test_pop_zipkin_attrs_with_context_does_nothing_if_no_requests <block_start><assert_stmt><not>py_zipkin.storage.Stack([]).pop()<block_end><def_stmt>test_pop_zipkin_attrs_removes_the_last_zipkin_attrs <block_start>tracer=py_zipkin.storage.get_default_tracer()<with_stmt>mock.patch.object(tracer._context_stack "_storage" ["foo" "bar"])<block_start><assert_stmt>"bar"<eq>py_zipkin.storage.ThreadLocalStack().pop()<assert_stmt>"foo"<eq>py_zipkin.storage.ThreadLocalStack().get()<block_end><block_end><def_stmt>test_pop_zipkin_attrs_with_context_removes_the_last_zipkin_attrs <block_start>context_stack=py_zipkin.storage.Stack(["foo" "bar"])<assert_stmt>"bar"<eq>context_stack.pop()<assert_stmt>"foo"<eq>context_stack.get()<block_end><def_stmt>test_push_zipkin_attrs_adds_new_zipkin_attrs_to_list <block_start>tracer=py_zipkin.storage.get_default_tracer()<with_stmt>mock.patch.object(tracer._context_stack "_storage" ["foo"])<block_start><assert_stmt>"foo"<eq>py_zipkin.storage.ThreadLocalStack().get()<line_sep>py_zipkin.storage.ThreadLocalStack().push("bar")<assert_stmt>"bar"<eq>py_zipkin.storage.ThreadLocalStack().get()<block_end><block_end><def_stmt>test_push_zipkin_attrs_with_context_adds_new_zipkin_attrs_to_list <block_start>stack=py_zipkin.storage.Stack(["foo"])<assert_stmt>"foo"<eq>stack.get()<line_sep>stack.push("bar")<assert_stmt>"bar"<eq>stack.get()<block_end><def_stmt>test_stack_copy <block_start>stack=py_zipkin.storage.Stack()<line_sep>stack.push("a")<line_sep>stack.push("b")<line_sep>the_copy=stack.copy()<line_sep>the_copy.push("c")<line_sep>stack.push("d")<assert_stmt>["a" "b" "c"]<eq>the_copy._storage<assert_stmt>["a" "b" "d"]<eq>stack._storage<block_end> |
<import_from_stmt>desktop_local_tests.local_packet_capture_test_case_with_disrupter LocalPacketCaptureTestCaseWithDisrupter<import_from_stmt>desktop_local_tests.windows.windows_dns_force_public_dns_servers_disrupter WindowsDNSForcePublicDNSServersDisrupter<class_stmt>TestWindowsPacketCaptureDisruptForcePublicDNSServers(LocalPacketCaptureTestCaseWithDisrupter)# TODO: Make the packet capture here DNS specific?
<block_start><def_stmt>__init__ self devices parameters<block_start>super().__init__(WindowsDNSForcePublicDNSServersDisrupter devices parameters)<block_end><block_end> |
"""Lighting channels module for Zigbee Home Automation."""<import_from_future_stmt> annotations<import_from_stmt>contextlib suppress<import_from_stmt>zigpy.zcl.clusters lighting<import_from_stmt>.. registries<import_from_stmt>..const REPORT_CONFIG_DEFAULT<import_from_stmt>.base ClientChannel ZigbeeChannel<line_sep>@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Ballast.cluster_id)<class_stmt>Ballast(ZigbeeChannel)<block_start>"""Ballast channel."""<block_end>@registries.CLIENT_CHANNELS_REGISTRY.register(lighting.Color.cluster_id)<class_stmt>ColorClientChannel(ClientChannel)<block_start>"""Color client channel."""<block_end>@registries.BINDABLE_CLUSTERS.register(lighting.Color.cluster_id)@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Color.cluster_id)<class_stmt>ColorChannel(ZigbeeChannel)<block_start>"""Color channel."""<line_sep>CAPABILITIES_COLOR_XY=0x08<line_sep>CAPABILITIES_COLOR_TEMP=0x10<line_sep>UNSUPPORTED_ATTRIBUTE=0x86<line_sep>REPORT_CONFIG=({"attr":"current_x" "config":REPORT_CONFIG_DEFAULT} {"attr":"current_y" "config":REPORT_CONFIG_DEFAULT} {"attr":"color_temperature" "config":REPORT_CONFIG_DEFAULT} )<line_sep>MAX_MIREDS:int=500<line_sep>MIN_MIREDS:int=153<line_sep>ZCL_INIT_ATTRS={"color_mode":<false> "color_temp_physical_min":<true> "color_temp_physical_max":<true> "color_capabilities":<true> "color_loop_active":<false> }<line_sep>@property<def_stmt>color_capabilities self<arrow>int<block_start>"""Return color capabilities of the light."""<with_stmt>suppress(KeyError)<block_start><return>self.cluster["color_capabilities"]<block_end><if_stmt>self.cluster.get("color_temperature")<is><not><none><block_start><return>self.CAPABILITIES_COLOR_XY|self.CAPABILITIES_COLOR_TEMP<block_end><return>self.CAPABILITIES_COLOR_XY<block_end>@property<def_stmt>color_mode self<arrow>int|<none><block_start>"""Return cached value of the color_mode attribute."""<line_sep><return>self.cluster.get("color_mode")<block_end>@property<def_stmt>color_loop_active self<arrow>int|<none><block_start>"""Return cached value of the color_loop_active attribute."""<line_sep><return>self.cluster.get("color_loop_active")<block_end>@property<def_stmt>color_temperature self<arrow>int|<none><block_start>"""Return cached value of color temperature."""<line_sep><return>self.cluster.get("color_temperature")<block_end>@property<def_stmt>current_x self<arrow>int|<none><block_start>"""Return cached value of the current_x attribute."""<line_sep><return>self.cluster.get("current_x")<block_end>@property<def_stmt>current_y self<arrow>int|<none><block_start>"""Return cached value of the current_y attribute."""<line_sep><return>self.cluster.get("current_y")<block_end>@property<def_stmt>min_mireds self<arrow>int<block_start>"""Return the coldest color_temp that this channel supports."""<line_sep><return>self.cluster.get("color_temp_physical_min" self.MIN_MIREDS)<block_end>@property<def_stmt>max_mireds self<arrow>int<block_start>"""Return the warmest color_temp that this channel supports."""<line_sep><return>self.cluster.get("color_temp_physical_max" self.MAX_MIREDS)<block_end><block_end> |
<import_from_stmt>querybuilder.fields RankField RowNumberField DenseRankField PercentRankField CumeDistField NTileField LagField LeadField FirstValueField LastValueField NthValueField NumStdDevField <import_from_stmt>querybuilder.query QueryWindow Query<import_from_stmt>querybuilder.tests.models Order<import_from_stmt>querybuilder.tests.query_tests QueryTestCase get_comparison_str<class_stmt>QueryWindowTest(QueryTestCase)<block_start><def_stmt>test_query_window self<block_start>query_window=QueryWindow()<line_sep>query_str=query_window.get_sql()<line_sep>expected_query='OVER ()'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_query_window_partition self<block_start>query_window=QueryWindow().partition_by('field_one')<line_sep>query_str=query_window.get_sql()<line_sep>expected_query='OVER (PARTITION BY field_one)'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_query_window_order self<block_start>query_window=QueryWindow().order_by('field_one')<line_sep>query_str=query_window.get_sql()<line_sep>expected_query='OVER (ORDER BY field_one ASC)'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_query_window_partition_order self<block_start>query_window=QueryWindow().partition_by('field_one').order_by('field_one')<line_sep>query_str=query_window.get_sql()<line_sep>expected_query='OVER (PARTITION BY field_one ORDER BY field_one ASC)'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_query_window_partition_order_many self<block_start>query_window=QueryWindow().partition_by('field_one').partition_by('field_two').order_by('field_one').order_by('-field_two')<line_sep>query_str=query_window.get_sql()<line_sep>expected_query='OVER (PARTITION BY field_one, field_two ORDER BY field_one ASC, field_two DESC)'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><block_end><class_stmt>WindowFunctionTest(QueryTestCase)<block_start><def_stmt>test_rank_no_over self<block_start>query=Query().from_table(table=Order fields=[RankField()])<line_sep>query_str=query.get_sql()<line_sep>expected_query='SELECT RANK() AS "rank" FROM querybuilder_tests_order'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_rank_over self<block_start>query=Query().from_table(table=Order fields=[RankField(over=QueryWindow())])<line_sep>query_str=query.get_sql()<line_sep>expected_query='SELECT RANK() OVER () AS "rank" FROM querybuilder_tests_order'<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_rank_over_order self<block_start>query=Query().from_table(table=Order fields=['id' RankField(over=QueryWindow().order_by('id'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.id, RANK() OVER (ORDER BY id ASC) AS "rank" FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_rank_over_partition self<block_start>query=Query().from_table(table=Order fields=['id' RankField(over=QueryWindow().partition_by('account_id'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.id, RANK() OVER 
(PARTITION BY account_id) AS "rank" FROM '<concat>'querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_row_number self<block_start>query=Query().from_table(table=Order fields=['*' RowNumberField(over=QueryWindow().order_by('-margin'))]).order_by('row_number')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'ROW_NUMBER() OVER (ORDER BY margin DESC) AS "row_number" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY row_number '<concat>'ASC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_rank self<block_start>query=Query().from_table(table=Order fields=['id' RankField(over=QueryWindow().partition_by('account_id').order_by('id'))]).order_by('-rank')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.id, '<concat>'RANK() OVER (PARTITION BY account_id ORDER BY id ASC) AS "rank" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY rank '<concat>'DESC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_dense_rank self<block_start>query=Query().from_table(table=Order fields=['*' DenseRankField(over=QueryWindow().order_by('-margin'))]).order_by('dense_rank')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'DENSE_RANK() OVER (ORDER BY margin DESC) AS "dense_rank" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY dense_rank '<concat>'ASC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_rank_percent self<block_start>query=Query().from_table(table=Order fields=['*' PercentRankField(over=QueryWindow().order_by('-margin'))]).order_by('percent_rank')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'PERCENT_RANK() OVER (ORDER BY margin DESC) AS "percent_rank" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY percent_rank '<concat>'ASC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_cume_dist self<block_start>query=Query().from_table(table=Order fields=['*' CumeDistField(over=QueryWindow().order_by('-margin'))]).order_by('cume_dist')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'CUME_DIST() OVER (ORDER BY margin DESC) AS "cume_dist" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY cume_dist '<concat>'ASC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_ntile self<block_start>query=Query().from_table(table=Order fields=['*' NTileField(num_buckets=2 over=QueryWindow().order_by('-margin'))]).order_by('ntile')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'NTILE(2) OVER (ORDER BY margin DESC) AS "ntile" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY ntile '<concat>'ASC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_lag self<block_start>query=Query().from_table(table=Order fields=['*' LagField('margin' 
over=QueryWindow().order_by('-margin'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'LAG(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lag" '<concat>'FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_lag_default self<block_start>query=Query().from_table(table=Order fields=['*' LagField('margin' default=0 over=QueryWindow().order_by('-margin'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'LAG(querybuilder_tests_order.margin, 1, \'0\') OVER (ORDER BY margin DESC) AS "margin_lag" '<concat>'FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_lead self<block_start>query=Query().from_table(table=Order fields=['*' LeadField('margin' over=QueryWindow().order_by('-margin'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'LEAD(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lead" '<concat>'FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_first_value self<block_start>query=Query().from_table(table=Order fields=['*' FirstValueField('margin' over=QueryWindow().order_by('-margin'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'FIRST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin DESC) AS "margin_first_value" '<concat>'FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_last_value self<block_start>query=Query().from_table(table=Order fields=['*' LastValueField('margin' over=QueryWindow().order_by('margin'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'LAST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin ASC) AS "margin_last_value" '<concat>'FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_nth_value self<block_start>query=Query().from_table(table=Order fields=['*' NthValueField('margin' n=2 over=QueryWindow().order_by('-margin'))])<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'NTH_VALUE(querybuilder_tests_order.margin, 2) OVER (ORDER BY margin DESC) AS "margin_nth_value" '<concat>'FROM querybuilder_tests_order')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><def_stmt>test_num_stddev self<block_start>query=Query().from_table(table=Order fields=['*' NumStdDevField('margin' over=QueryWindow())]).order_by('-margin_num_stddev')<line_sep>query_str=query.get_sql()<line_sep>expected_query=('SELECT querybuilder_tests_order.*, '<concat>'(CASE WHEN (STDDEV(querybuilder_tests_order.margin) OVER ()) <> 0 '<concat>'THEN ((querybuilder_tests_order.margin - ('<concat>'AVG(querybuilder_tests_order.margin) OVER ())) / (STDDEV(querybuilder_tests_order.margin) OVER ())) '<concat>'ELSE 0 '<concat>'END) '<concat>'AS "margin_num_stddev" '<concat>'FROM querybuilder_tests_order '<concat>'ORDER BY margin_num_stddev 
'<concat>'DESC')<line_sep>self.assertEqual(query_str expected_query get_comparison_str(query_str expected_query))<block_end><block_end> |
<import_from_stmt>datetime timedelta<import_from_stmt>dateutil.relativedelta relativedelta<import_from_stmt>django.core.management.base BaseCommand CommandError<import_from_stmt>django.utils timezone<import_from_stmt>...models Request<line_sep>DURATION_OPTIONS={'hours':<lambda>amount:timezone.now()-timedelta(hours=amount) 'days':<lambda>amount:timezone.now()-timedelta(days=amount) 'weeks':<lambda>amount:timezone.now()-timedelta(weeks=amount) 'months':<lambda>amount:timezone.now()+relativedelta(months=-amount) 'years':<lambda>amount:timezone.now()+relativedelta(years=-amount) }<try_stmt># to keep backward Python 2 compatibility
<block_start>input=raw_input<block_end><except_stmt>NameError<block_start><pass><block_end><class_stmt>Command(BaseCommand)<block_start>help='Purge old requests.'<def_stmt>add_arguments self parser<block_start>parser.add_argument('amount' type=int )<line_sep>parser.add_argument('duration')<line_sep>parser.add_argument('--noinput' action='store_false' dest='interactive' default=<true> help='Tells Django to NOT prompt the user for input of any kind.')<block_end><def_stmt>handle self *args **options<block_start>amount=options['amount']<line_sep>duration=options['duration']<line_sep># Check we have the correct values
<if_stmt>duration[-1]<ne>'s'# If its not plural, make it plural
<block_start>duration_plural='{0}s'.format(duration)<block_end><else_stmt><block_start>duration_plural=duration<block_end><if_stmt>duration_plural<not><in>DURATION_OPTIONS<block_start><raise>CommandError('Amount must be {0}'.format(', '.join(DURATION_OPTIONS)))<block_end>qs=Request.objects.filter(time__lte=DURATION_OPTIONS[duration_plural](amount))<line_sep>count=qs.count()<if_stmt>count<eq>0<block_start>print('There are no requests to delete.')<line_sep><return><block_end><if_stmt>options.get('interactive')<block_start>confirm=input('''
You have requested a purge of old requests.
This will IRREVERSIBLY DESTROY any
requests created before {0} {1} ago.
That is a total of {2} requests.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel:'''.format(amount duration count))<block_end><else_stmt><block_start>confirm='yes'<block_end><if_stmt>confirm<eq>'yes'<block_start>qs.delete()<block_end><else_stmt><block_start>print('Purge cancelled')<block_end><block_end><block_end> |
<def_stmt>getRoot config<block_start><if_stmt><not>config.parent<block_start><return>config<block_end><return>getRoot(config.parent)<block_end>root=getRoot(config)<line_sep># We only run a small set of tests on Windows for now.
# Override the parent directory's "unsupported" decision until we can handle
# all of its tests.
<if_stmt>root.host_os<in>['Windows']<block_start>config.unsupported=<false><block_end><else_stmt><block_start>config.unsupported=<true><block_end> |
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_from_stmt>urllib.parse urlparse<import_stmt>click<import_stmt>uvicorn<import_from_stmt>starlette.applications Starlette<import_from_stmt>starlette.middleware Middleware<import_from_stmt>starlette.responses PlainTextResponse<import_from_stmt>starlette.routing Mount Route WebSocketRoute<import_from_stmt>starlette.staticfiles StaticFiles<import_from_stmt>starlette.status HTTP_403_FORBIDDEN<import_from_stmt>platformio.commands.home.rpc.handlers.account AccountRPC<import_from_stmt>platformio.commands.home.rpc.handlers.app AppRPC<import_from_stmt>platformio.commands.home.rpc.handlers.ide IDERPC<import_from_stmt>platformio.commands.home.rpc.handlers.misc MiscRPC<import_from_stmt>platformio.commands.home.rpc.handlers.os OSRPC<import_from_stmt>platformio.commands.home.rpc.handlers.piocore PIOCoreRPC<import_from_stmt>platformio.commands.home.rpc.handlers.project ProjectRPC<import_from_stmt>platformio.commands.home.rpc.server WebSocketJSONRPCServerFactory<import_from_stmt>platformio.compat aio_get_running_loop<import_from_stmt>platformio.exception PlatformioException<import_from_stmt>platformio.package.manager.core get_core_package_dir<import_from_stmt>platformio.proc force_exit<class_stmt>ShutdownMiddleware<block_start><def_stmt>__init__ self app<block_start>self.app=app<block_end><async_keyword><def_stmt>__call__ self scope receive send<block_start><if_stmt>scope["type"]<eq>"http"<and>b"__shutdown__"<in>scope.get("query_string" {})<block_start><await>shutdown_server()<block_end><await>self.app(scope receive send)<block_end><block_end><async_keyword><def_stmt>shutdown_server _=<none><block_start>aio_get_running_loop().call_later(0.5 force_exit)<line_sep><return>PlainTextResponse("Server has been shutdown!")<block_end><async_keyword><def_stmt>protected_page _<block_start><return>PlainTextResponse("Protected PlatformIO Home session" status_code=HTTP_403_FORBIDDEN)<block_end><def_stmt>run_server host port no_open shutdown_timeout home_url<block_start>contrib_dir=get_core_package_dir("contrib-piohome")<if_stmt><not>os.path.isdir(contrib_dir)<block_start><raise>PlatformioException("Invalid path to PIO Home Contrib")<block_end>ws_rpc_factory=WebSocketJSONRPCServerFactory(shutdown_timeout)<line_sep>ws_rpc_factory.addObjectHandler(AccountRPC() namespace="account")<line_sep>ws_rpc_factory.addObjectHandler(AppRPC() namespace="app")<line_sep>ws_rpc_factory.addObjectHandler(IDERPC() namespace="ide")<line_sep>ws_rpc_factory.addObjectHandler(MiscRPC() namespace="misc")<line_sep>ws_rpc_factory.addObjectHandler(OSRPC() namespace="os")<line_sep>ws_rpc_factory.addObjectHandler(PIOCoreRPC() namespace="core")<line_sep>ws_rpc_factory.addObjectHandler(ProjectRPC() namespace="project")<line_sep>path=urlparse(home_url).path<line_sep>routes=[WebSocketRoute(path+"wsrpc" ws_rpc_factory name="wsrpc") Route(path+"__shutdown__" shutdown_server methods=["POST"]) Mount(path StaticFiles(directory=contrib_dir html=<true>) name="static") ]<if_stmt>path<ne>"/"<block_start>routes.append(Route("/" protected_page))<block_end>uvicorn.run(Starlette(middleware=[Middleware(ShutdownMiddleware)] routes=routes on_startup=[<lambda>:click.echo("PIO Home has been started. Press Ctrl+C to shutdown.") <lambda>:<none><if>no_open<else>click.launch(home_url) ] ) host=host port=port log_level="warning" )<block_end> |
<import_stmt>pyredner<import_stmt>numpy<as>np<import_stmt>torch<line_sep>cam=pyredner.Camera(position=torch.tensor([0.0 0.0 -5.0]) look_at=torch.tensor([0.0 0.0 0.0]) up=torch.tensor([0.0 1.0 0.0]) fov=torch.tensor([45.0]) # in degree
clip_near=1e-2 # needs to be > 0
resolution=(256 256) fisheye=<false>)<line_sep>mat_grey=pyredner.Material(diffuse_reflectance=torch.tensor([0.5 0.5 0.5] device=pyredner.get_device()))<line_sep>materials=[mat_grey]<line_sep>shape_triangle=pyredner.Shape(vertices=torch.tensor([[-1.7 1.0 0.0] [1.0 1.0 0.0] [-0.5 -1.0 0.0]] device=pyredner.get_device()) indices=torch.tensor([[0 1 2]] dtype=torch.int32 device=pyredner.get_device()) uvs=<none> normals=<none> material_id=0)<line_sep>shape_light=pyredner.Shape(vertices=torch.tensor([[-1.0 -1.0 -7.0] [1.0 -1.0 -7.0] [-1.0 1.0 -7.0] [1.0 1.0 -7.0]] device=pyredner.get_device()) indices=torch.tensor([[0 1 2] [1 3 2]] dtype=torch.int32 device=pyredner.get_device()) uvs=<none> normals=<none> material_id=0)<line_sep>shapes=[shape_triangle shape_light]<line_sep>light=pyredner.AreaLight(shape_id=1 intensity=torch.tensor([20.0 20.0 20.0]))<line_sep>area_lights=[light]<line_sep>scene=pyredner.Scene(cam shapes materials area_lights)<line_sep>scene_state_dict=scene.state_dict()<line_sep>scene=pyredner.Scene.load_state_dict(scene_state_dict)<line_sep>scene_args=pyredner.RenderFunction.serialize_scene(scene=scene num_samples=16 max_bounces=1)<line_sep>render=pyredner.RenderFunction.apply<line_sep>img=render(0 *scene_args)<line_sep>pyredner.imwrite(img.cpu() 'results/test_serialize/img.exr')<line_sep> |
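# A possible follow-up check for the serialization round-trip above; pyredner.imread and
# the output path are assumptions, and small numerical differences are to be expected.
#
# img_reloaded = pyredner.imread('results/test_serialize/img.exr')
# print('max abs diff:', (img.cpu() - img_reloaded).abs().max().item())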
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
<import_stmt>maya.cmds<import_stmt>IECore<import_stmt>IECoreImage<import_stmt>IECoreMaya<class_stmt>ImageConverterTest(IECoreMaya.TestCase)<block_start><def_stmt>test self<block_start>imageA=IECore.Reader.create("test/IECoreImage/data/exr/colorBarsWithAlpha.exr").read()<line_sep>toMaya=IECoreMaya.ToMayaImageConverter(imageA)<line_sep>mImage=maya.OpenMaya.MImage()<line_sep>toMaya.convert(mImage)<line_sep>fromMaya=IECoreMaya.FromMayaImageConverter(mImage)<line_sep>imageB=fromMaya.convert()<line_sep>self.assertFalse(IECoreImage.ImageDiffOp()(imageA=imageA imageB=imageB maxError=1.0/256).value)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>IECoreMaya.TestProgram()<block_end> |
"""Types for the Todoist component."""<import_from_future_stmt> annotations<import_from_stmt>typing TypedDict<class_stmt>DueDate(TypedDict)<block_start>"""Dict representing a due date in a todoist api response."""<line_sep>date:str<line_sep>is_recurring:bool<line_sep>lang:str<line_sep>string:str<line_sep>timezone:str|<none><block_end> |
<import_from_stmt>abc ABC abstractmethod<import_from_stmt>contextlib contextmanager<import_from_stmt>uuid uuid4<import_stmt>pytest<import_from_stmt>sqlalchemy delete select UniqueConstraint <class_stmt>AbstractBaseTest(ABC)<block_start>@pytest.fixture<def_stmt>cls_ self<block_start>"""
Return class under test.
Assumptions: if the class under test is Foo, then the class grouping
the tests should be a subclass of BaseTest, named TestFoo.
"""<line_sep>prefix=len("Test")<line_sep>class_name=self.__class__.__name__[prefix:]<line_sep><return>getattr(self.get_model() class_name)<block_end>@abstractmethod<def_stmt>get_model self<block_start><pass><block_end><block_end><def_stmt>dbcleanup_wrapper session obj where_clause=<none><block_start><with_stmt>dbcleanup(session obj where_clause)<block_start><yield>obj<block_end><block_end>@contextmanager<def_stmt>dbcleanup session obj where_clause=<none><block_start>"""
Use the session to store obj in database; delete from database on exit, bypassing the session.
If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
a custom select statement.
"""<line_sep>return_id=where_clause<is><none><try_stmt><block_start>obj_id=persist(session obj return_id)<line_sep><yield>obj_id<block_end><finally_stmt><block_start>table=obj.__table__<if_stmt>where_clause<is><none><block_start>where_clause=_get_default_where_clause(type(obj) obj_id)<block_end>stmt=delete(table).where(where_clause)<line_sep>session.execute(stmt)<block_end><block_end><def_stmt>persist session obj return_id=<true><block_start>"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""<line_sep>session.add(obj)<line_sep>session.flush()<line_sep>obj_id=obj.id<if>return_id<else><none># save this before obj is expunged
session.expunge(obj)<line_sep><return>obj_id<block_end><def_stmt>delete_from_database session objects<block_start>"""
Delete each object in objects from database.
May be called at the end of a test if use of a context manager is impractical.
(Assume all objects have the id field as their primary key.)
"""<line_sep># Ensure we have a list of objects (check for list explicitly: a model can be iterable)
<if_stmt><not>isinstance(objects list)<block_start>objects=[objects]<block_end><for_stmt>obj objects<block_start>table=obj.__table__<line_sep>stmt=delete(table).where(table.c.id<eq>obj.id)<line_sep>session.execute(stmt)<block_end><block_end><def_stmt>get_stored_obj session cls obj_id=<none> where_clause=<none> unique=<false># Either obj_id or where_clause must be provided, but not both
<block_start><assert_stmt>bool(obj_id)^(where_clause<is><not><none>)<if_stmt>where_clause<is><none><block_start>where_clause=_get_default_where_clause(cls obj_id)<block_end>stmt=select(cls).where(where_clause)<line_sep>result=session.execute(stmt)<line_sep># unique() is required if result contains joint eager loads against collections
# https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
<if_stmt>unique<block_start>result=result.unique()<block_end><return>result.scalar_one()<block_end><def_stmt>has_unique_constraint table fields<block_start><for_stmt>constraint table.constraints<block_start><if_stmt>isinstance(constraint UniqueConstraint)<block_start>col_names={c.name<for>c constraint.columns}<if_stmt>set(fields)<eq>col_names<block_start><return><true><block_end><block_end><block_end><block_end><def_stmt>has_index table fields<block_start><for_stmt>index table.indexes<block_start>col_names={c.name<for>c index.columns}<if_stmt>set(fields)<eq>col_names<block_start><return><true><block_end><block_end><block_end><def_stmt>collection_consists_of_objects collection *objects<block_start>"""
Returns True iff list(collection) == list(objects), where object equality is determined
by primary key equality: object1.id == object2.id.
"""<if_stmt>len(collection)<ne>len(objects)# False if lengths are different
<block_start><return><false><block_end><if_stmt><not>collection# True if both are empty
<block_start><return><true><block_end># Sort, then compare each member by its 'id' attribute, which must be its primary key.
collection.sort(key=<lambda>item:item.id)<line_sep>objects_l=list(objects)<line_sep>objects_l.sort(key=<lambda>item:item.id)<for_stmt>item1,item2 zip(collection objects_l)<block_start><if_stmt>item1.id<is><none><or>item2.id<is><none><or>item1.id<ne>item2.id<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>get_unique_value <block_start>"""Generate unique values to accommodate unique constraints."""<line_sep><return>uuid4().hex<block_end><def_stmt>_get_default_where_clause cls obj_id<block_start>where_clause=cls.__table__.c.id<eq>obj_id<line_sep><return>where_clause<block_end> |
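# A minimal sketch of how the helpers above combine in a test; ``session`` and the ``Foo``
# model (with an ``id`` primary key and a unique ``name`` column) are hypothetical.
#
# def test_foo_roundtrip(session):
#     foo = Foo(name=get_unique_value())        # unique value to satisfy constraints
#     with dbcleanup(session, foo) as foo_id:   # row is removed from the DB on exit
#         stored = get_stored_obj(session, Foo, foo_id)
#         assert stored.name == foo.name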
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
<import_stmt>argparse<def_stmt>get_name_or_id_validator dest child_type=<none> resource_type='Frontdoors' resource_namespace='Microsoft.Network' resource_name_dest='front_door_name'<block_start><def_stmt>_validate_name_or_id cmd namespace<block_start><import_from_stmt>azure.cli.core.commands.client_factory get_subscription_id<import_from_stmt>msrestazure.tools is_valid_resource_id resource_id<line_sep>subscription_id=get_subscription_id(cmd.cli_ctx)<line_sep>resource_group=namespace.resource_group_name<line_sep>names_or_ids=getattr(namespace dest)<line_sep>is_list=<true><line_sep># treat single values as a list, but convert back in the end
<if_stmt><not>isinstance(names_or_ids list)<block_start>is_list=<false><line_sep>names_or_ids=[names_or_ids]<block_end><if_stmt>names_or_ids<eq>[<none>]<or><not>names_or_ids<block_start><return><block_end>ids=[]<for_stmt>val names_or_ids<block_start>id_params={'subscription':subscription_id 'resource_group':resource_group 'namespace':resource_namespace 'type':resource_type 'name':getattr(namespace resource_name_dest)<if>child_type<else>val 'child_type_1':child_type 'child_name_1':val<if>child_type<else><none>}<if_stmt><not>is_valid_resource_id(val)<block_start>val=resource_id(**id_params)<block_end>ids.append(val)<block_end>setattr(namespace dest ids<if>is_list<else>ids[0])<block_end><return>_validate_name_or_id<block_end><def_stmt>validate_waf_policy cmd namespace<block_start>get_name_or_id_validator(dest='waf_policy' resource_type='WebApplicationFirewallPolicy')(cmd namespace)<block_end><def_stmt>validate_keyvault cmd namespace<block_start>get_name_or_id_validator(dest='vault' resource_type='vaults' resource_namespace='Microsoft.Keyvault')(cmd namespace)<block_end><def_stmt>validate_load_balancing_settings cmd namespace<block_start>get_name_or_id_validator('load_balancing_settings' 'loadBalancingSettings')(cmd namespace)<block_end><def_stmt>validate_probe_settings cmd namespace<block_start>get_name_or_id_validator('probe_settings' 'healthProbeSettings')(cmd namespace)<block_end><def_stmt>validate_frontend_endpoints cmd namespace<block_start>get_name_or_id_validator('frontend_endpoints' 'frontendEndpoints')(cmd namespace)<block_end><def_stmt>validate_backend_pool cmd namespace<block_start>get_name_or_id_validator('backend_pool' 'backendPools')(cmd namespace)<block_end><def_stmt>validate_rules_engine cmd namespace<block_start>get_name_or_id_validator('rules_engine' 'rulesEngines')(cmd namespace)<block_end># pylint: disable=protected-access
<class_stmt>MatchConditionAction(argparse._AppendAction)# pylint: disable=no-self-use
<block_start><def_stmt>parse_match_condition self values<block_start><import_from_stmt>azext_front_door.vendored_sdks.models MatchCondition<if_stmt><not>isinstance(values list)<block_start>values=values.split(' ')<block_end><try_stmt><block_start><return>MatchCondition(match_variable=values[0] operator=values[1] match_value=values[2:])<block_end><except_stmt>IndexError<block_start><import_from_stmt>knack.util CLIError<line_sep><raise>CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')<block_end><block_end><def_stmt>__call__ self parser namespace values option_string=<none><block_start>match_condition=self.parse_match_condition(values)<line_sep>super(MatchConditionAction self).__call__(parser namespace match_condition option_string)<block_end><block_end> |
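# A rough sketch of what the validators above produce: a bare name is expanded into a full
# ARM resource ID via msrestazure. The subscription, group, and resource names are made up.
#
# from msrestazure.tools import resource_id
# rid = resource_id(
#     subscription='00000000-0000-0000-0000-000000000000',
#     resource_group='my-rg',
#     namespace='Microsoft.Network',
#     type='Frontdoors',
#     name='my-frontdoor',
#     child_type_1='frontendEndpoints',
#     child_name_1='my-endpoint',
# )
# # roughly '/subscriptions/.../resourceGroups/my-rg/providers/Microsoft.Network/Frontdoors/my-frontdoor/frontendEndpoints/my-endpoint'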
"""Various utility functions.
.. todo::
Reorganize this package in a more meaningful way.
"""<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<line_sep># from builtins import str
# from builtins import range
<import_stmt>torch<import_from_stmt>torch.nn.parameter Parameter<import_from_stmt>torch.autograd Variable<import_from_stmt>.libraries.modules.stn_nd STN_ND_BCXYZ<import_from_stmt>.data_wrapper AdaptVal<import_from_stmt>.data_wrapper MyTensor<import_from_stmt>. smoother_factory<as>sf<import_from_stmt>.data_wrapper USE_CUDA<import_stmt>numpy<as>np<import_from_stmt>. finite_differences<as>fd<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.init<as>init<import_from_stmt>. module_parameters<as>pars<import_from_stmt>.spline_interpolation SplineInterpolation_ND_BCXYZ<import_stmt>os<try_stmt><block_start><import_from_stmt>.libraries.functions.nn_interpolation get_nn_interpolation<block_end><except_stmt>ImportError<block_start>print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '<concat>'Some functionality may not be available.')<block_end><def_stmt>my_hasnan x<block_start>"""Check if any input elements are NaNs.
:param x: numpy array
:return: True if NaNs are present, False else
"""<line_sep><return>(x<ne>x).any()<block_end><def_stmt>create_symlink_with_correct_ext sf tf<block_start>abs_s=os.path.abspath(sf)<line_sep>ext_s=os.path.splitext(abs_s)[1]<line_sep>abs_t=os.path.abspath(tf)<line_sep>root_t,ext_t=os.path.splitext(abs_t)<line_sep>abs_t_with_right_ext=root_t+ext_s<if_stmt>os.path.isfile(abs_t_with_right_ext)<block_start><if_stmt>os.path.samefile(abs_s abs_t_with_right_ext)# nothing to do here, these are already the same file
<block_start><return><block_end><else_stmt><block_start>os.remove(abs_t_with_right_ext)<block_end><block_end># now we can do the symlink
os.symlink(abs_s abs_t_with_right_ext)<block_end><def_stmt>combine_dict d1 d2<block_start>"""Creates a dictionary which has entries from both of them.
:param d1: dictionary 1
:param d2: dictionary 2
:return: resulting dictionary
"""<line_sep>d=d1.copy()<line_sep>d.update(d2)<line_sep><return>d<block_end><def_stmt>get_parameter_list_from_parameter_dict pd<block_start>"""Takes a dictionary which contains key value pairs for model parameters and converts it into a list of
parameters that can be used as an input to an optimizer.
:param pd: parameter dictionary
:return: list of parameters
"""<line_sep>pl=[]<for_stmt>key pd<block_start>pl.append(pd[key])<block_end><return>pl<block_end><def_stmt>get_parameter_list_and_par_to_name_dict_from_parameter_dict pd<block_start>"""Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys
based on memory id.
:param pd: parameter dictionary
:return: tuple of (parameter_list, name_dictionary)
"""<line_sep>par_to_name_dict=dict()<line_sep>pl=[]<for_stmt>key pd<block_start>pl.append(pd[key])<line_sep>par_to_name_dict[pd[key]]=key<block_end><return>pl par_to_name_dict<block_end><def_stmt>remove_infs_from_variable v# 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor
# 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor
# 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor
# todo: maybe find a cleaner way of handling this
# this is to make sure that subsequent sums work (hence will be smaller than it could be,
# but values of this size should not occur in practice anyway
<block_start>sz=v.size()<line_sep>reduction_factor=np.prod(np.array(sz))<line_sep>condition=<true><if_stmt>type(v.data)<eq>torch.cuda.FloatTensor<or>v.data.dtype<eq>torch.float32<block_start><return>torch.clamp(v min=(np.asscalar(np.finfo('float32').min))/reduction_factor max=(np.asscalar(np.finfo('float32').max))/reduction_factor)<block_end><elif_stmt>v.data.dtype<eq>torch.DoubleTensor<or>type(v.data)<eq>torch.cuda.DoubleTensor<block_start><return>torch.clamp(v min=(np.asscalar(np.finfo('float64').min))/reduction_factor max=(np.asscalar(np.finfo('float64').max))/reduction_factor)<block_end><elif_stmt>v.data.dtype<eq>torch.HalfTensor<or>type(v.data)<eq>torch.cuda.HalfTensor<block_start><return>torch.clamp(v min=(np.asscalar(np.finfo('float16').min))/reduction_factor max=(np.asscalar(np.finfo('float16').max))/reduction_factor)<block_end><else_stmt><block_start><raise>ValueError('Unknown data type: '+str(type(v.data)))<block_end><block_end><def_stmt>lift_to_dimension A dim<block_start>"""Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""<line_sep>current_dim=len(A.shape)<if_stmt>current_dim<g>dim<block_start><raise>ValueError('Can only add dimensions, but not remove them')<block_end><if_stmt>current_dim<eq>dim<block_start><return>A<block_end><else_stmt><block_start><return>A.reshape([1]<times>(dim-current_dim)+list(A.shape))<block_end><block_end><def_stmt>get_dim_of_affine_transform Ab<block_start>"""Returns the number of dimensions corresponding to an affine transformation of the
form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply
[a1;a2;a3;b], i.e., all columns stacked on top of each other.
:param Ab: parameter vector
:return: dimensionality of transform (1,2,or 3)
"""<line_sep>nr=len(Ab)<if_stmt>nr<eq>2<block_start><return>1<block_end><elif_stmt>nr<eq>6<block_start><return>2<block_end><elif_stmt>nr<eq>12<block_start><return>3<block_end><else_stmt><block_start><raise>ValueError('Only supports dimensions 1, 2, and 3.')<block_end><block_end><def_stmt>set_affine_transform_to_identity Ab<block_start>"""Sets the affine transformation as given by the column vector Ab to the identity transform.
:param Ab: Affine parameter vector (will be overwritten with the identity transform)
:return:
"""<line_sep>dim=get_dim_of_affine_transform(Ab)<if_stmt>dim<eq>1<block_start>Ab.zero_()<line_sep>Ab[0]=1.<block_end><elif_stmt>dim<eq>2<block_start>Ab.zero_()<line_sep>Ab[0]=1.<line_sep>Ab[3]=1.<block_end><elif_stmt>dim<eq>3<block_start>Ab.zero_()<line_sep>Ab[0]=1.<line_sep>Ab[4]=1.<line_sep>Ab[8]=1.<block_end><else_stmt><block_start><raise>ValueError('Only supports dimensions 1, 2, and 3.')<block_end><block_end><def_stmt>set_affine_transform_to_identity_multiN Ab<block_start>"""Set the affine transforms to the identity (in the case of arbitrary batch size).
:param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans.
:return:
"""<line_sep>sz=Ab.size()<line_sep>nr_of_images=sz[0]<for_stmt>nrI range(nr_of_images)<block_start>set_affine_transform_to_identity(Ab[nrI :])<block_end><block_end><def_stmt>get_inverse_affine_param Ab<block_start>"""Computes inverse of affine transformation.
Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb
:param Ab: B x pars (batch size x param. vector)
:return: Inverse of affine parameters
"""<line_sep>dim=0<if_stmt>Ab.shape[1]<eq>2<block_start>dim=1<block_end><elif_stmt>Ab.shape[1]<eq>6<block_start>dim=2<block_end><elif_stmt>Ab.shape[1]<eq>12<block_start>dim=3<block_end><if_stmt>dim<not><in>[1 2 3]<block_start><raise>ValueError('Only supports dimensions 1, 2, and 3.')<block_end>Ab=Ab.view(Ab.shape[0] dim+1 dim).transpose(1 2)<line_sep>Ab_inv=torch.zeros_like(Ab)<for_stmt>n range(Ab.shape[0])<block_start>tm_inv=torch.inverse(Ab[n : :dim])<line_sep>Ab_inv[n : :dim]=tm_inv<line_sep>Ab_inv[n : dim]=-torch.matmul(tm_inv Ab[n : dim])<block_end>inv_affine_param=Ab_inv.transpose(1 2).contiguous().view(Ab.shape[0] -1)<line_sep><return>inv_affine_param<block_end><def_stmt>update_affine_param Ab Cd<block_start>"""Update affine parameters.
Formally: C(Ax+b)+d = CAx+Cb+d
:param Ab: B x pars (batch size x param. vector)
:return: Updated affine parameters
"""<line_sep>dim=0<if_stmt>Ab.shape[1]<eq>2<block_start>dim=1<block_end><elif_stmt>Ab.shape[1]<eq>6<block_start>dim=2<block_end><elif_stmt>Ab.shape[1]<eq>12<block_start>dim=3<block_end><if_stmt>dim<not><in>[1 2 3]<block_start><raise>ValueError('Only supports dimensions 1, 2, and 3.')<block_end>Ab=Ab.view(Ab.shape[0] dim+1 dim).transpose(1 2)<line_sep>Cd=Cd.view(Cd.shape[0] dim+1 dim).transpose(1 2)<line_sep>updated_param=torch.zeros_like(Ab)<for_stmt>n range(Ab.shape[0])<block_start>tm_param=torch.matmul(Cd[n : :dim] Ab[n : :dim])<line_sep>updated_param[n : :dim]=tm_param<line_sep>updated_param[n : dim]=torch.matmul(Cd[n : :dim] Ab[n : dim])+Cd[n : dim]<line_sep>updated_param=updated_param.transpose(1 2).contiguous().view(Ab.shape[0] -1)<block_end><return>updated_param<block_end><def_stmt>apply_affine_transform_to_map Ab phi<block_start>"""Applies an affine transform to a map.
:param Ab: affine transform parameter column vector
:param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed map
"""<line_sep>sz=phi.size()<line_sep>dim=len(sz)-1<if_stmt>dim<not><in>[1 2 3]<block_start><raise>ValueError('Only supports dimensions 1, 2, and 3.')<block_end>phiR=MyTensor(sz).zero_().type_as(phi)<if_stmt>dim<eq>1<block_start>phiR=phi<times>Ab[0]+Ab[1]<block_end><elif_stmt>dim<eq>2<block_start>phiR[0 <ellipsis>]=Ab[0]<times>phi[0 <ellipsis>]+Ab[2]<times>phi[1 <ellipsis>]+Ab[4]# a_11x+a_21y+b1
phiR[1 <ellipsis>]=Ab[1]<times>phi[0 <ellipsis>]+Ab[3]<times>phi[1 <ellipsis>]+Ab[5]# a_12x+a_22y+b2
<block_end><elif_stmt>dim<eq>3<block_start>phiR[0 <ellipsis>]=Ab[0]<times>phi[0 <ellipsis>]+Ab[3]<times>phi[1 <ellipsis>]+Ab[6]<times>phi[2 <ellipsis>]+Ab[9]<line_sep>phiR[1 <ellipsis>]=Ab[1]<times>phi[0 <ellipsis>]+Ab[4]<times>phi[1 <ellipsis>]+Ab[7]<times>phi[2 <ellipsis>]+Ab[10]<line_sep>phiR[2 <ellipsis>]=Ab[2]<times>phi[0 <ellipsis>]+Ab[5]<times>phi[1 <ellipsis>]+Ab[8]<times>phi[2 <ellipsis>]+Ab[11]<block_end><else_stmt><block_start><raise>ValueError('Only supports dimensions 1, 2, and 3.')<block_end><return>phiR<block_end><def_stmt>apply_affine_transform_to_map_multiNC Ab phi<block_start>"""Applies an affine transform to maps (for arbitrary batch size).
:param Ab: affine transform parameter column vectors (batch size x param. vector)
:param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed maps
"""<line_sep>sz=phi.size()<line_sep>dim=get_dim_of_affine_transform(Ab[0 :])<line_sep>nr_of_images=Ab.size()[0]<if_stmt>nr_of_images<ne>sz[0]<block_start><raise>ValueError('Incompatible number of affine transforms')<block_end><if_stmt>dim<ne>len(sz)-2<block_start><raise>ValueError('Incompatible number of affine transforms')<block_end>phiR=MyTensor(sz).zero_().type_as(phi)<for_stmt>nrI range(nr_of_images)<block_start>phiR[nrI <ellipsis>]=apply_affine_transform_to_map(Ab[nrI :] phi[nrI <ellipsis>])<block_end><return>phiR<block_end><def_stmt>compute_normalized_gaussian X mu sig<block_start>"""Computes a normalized Gaussian.
:param X: map with coordinates at which to evaluate
:param mu: array indicating the mean
:param sig: array indicating the standard deviations for the different dimensions
:return: Normalized Gaussian evaluated at coordinates in X
Example::
>>> mu, sig = [1,1], [1,1]
>>> X = [0,0]
>>> print(compute_normalized_gaussian(X, mu, sig))
"""<line_sep>dim=len(mu)<if_stmt>dim<eq>1<block_start>g=np.exp(-np.power(X[0 :]-mu[0] 2.)/(2<times>np.power(sig[0] 2.)))<line_sep>g=g/g.sum()<line_sep><return>g<block_end><elif_stmt>dim<eq>2<block_start>g=np.exp(-np.power(X[0 : :]-mu[0] 2.)/(2<times>np.power(sig[0] 2.))-np.power(X[1 : :]-mu[1] 2.)/(2<times>np.power(sig[1] 2.)))<line_sep>g=g/g.sum()<line_sep><return>g<block_end><elif_stmt>dim<eq>3<block_start>g=np.exp(-np.power(X[0 : : :]-mu[0] 2.)/(2<times>np.power(sig[0] 2.))-np.power(X[1 : : :]-mu[1] 2.)/(2<times>np.power(sig[1] 2.))-np.power(X[2 : : :]-mu[2] 2.)/(2<times>np.power(sig[2] 2.)))<line_sep>g=g/g.sum()<line_sep><return>g<block_end><else_stmt><block_start><raise>ValueError('Can only compute Gaussians in dimensions 1-3')<block_end><block_end><def_stmt>_compute_warped_image_multiNC_1d I0 phi spacing spline_order zero_boundary=<false> use_01_input=<true><block_start><if_stmt>spline_order<not><in>[0 1 2 3 4 5 6 7 8 9]<block_start><raise>ValueError('Currently only orders 0 to 9 are supported')<block_end><if_stmt>spline_order<eq>0<block_start>stn=STN_ND_BCXYZ(spacing zero_boundary use_bilinear=<false> use_01_input=use_01_input)<block_end><elif_stmt>spline_order<eq>1<block_start>stn=STN_ND_BCXYZ(spacing zero_boundary use_bilinear=<true> use_01_input=use_01_input)<block_end><else_stmt><block_start>stn=SplineInterpolation_ND_BCXYZ(spacing spline_order)<block_end>I1_warped=stn(I0 phi)<line_sep><return>I1_warped<block_end><def_stmt>_compute_warped_image_multiNC_2d I0 phi spacing spline_order zero_boundary=<false> use_01_input=<true><block_start><if_stmt>spline_order<not><in>[0 1 2 3 4 5 6 7 8 9]<block_start><raise>ValueError('Currently only orders 0 to 9 are supported')<block_end><if_stmt>spline_order<eq>0<block_start>stn=STN_ND_BCXYZ(spacing zero_boundary use_bilinear=<false> use_01_input=use_01_input)<block_end><elif_stmt>spline_order<eq>1<block_start>stn=STN_ND_BCXYZ(spacing zero_boundary use_bilinear=<true> use_01_input=use_01_input)<block_end><else_stmt><block_start>stn=SplineInterpolation_ND_BCXYZ(spacing spline_order)<block_end>I1_warped=stn(I0 phi)<line_sep><return>I1_warped<block_end><def_stmt>_compute_warped_image_multiNC_3d I0 phi spacing spline_order zero_boundary=<false> use_01_input=<true><block_start><if_stmt>spline_order<not><in>[0 1 2 3 4 5 6 7 8 9]<block_start><raise>ValueError('Currently only orders 0 to 9 are supported')<block_end><if_stmt>spline_order<eq>0# return get_warped_label_map(I0,phi,spacing)
<block_start>stn=STN_ND_BCXYZ(spacing zero_boundary use_bilinear=<false> use_01_input=use_01_input)<block_end><elif_stmt>spline_order<eq>1<block_start>stn=STN_ND_BCXYZ(spacing zero_boundary use_bilinear=<true> use_01_input=use_01_input)<block_end><else_stmt><block_start>stn=SplineInterpolation_ND_BCXYZ(spacing spline_order)<block_end>I1_warped=stn(I0 phi)<line_sep><return>I1_warped<block_end><def_stmt>compute_warped_image I0 phi spacing spline_order zero_boundary=<false> use_01_input=<true><block_start>"""Warps image.
:param I0: image to warp, image size XxYxZ
:param phi: map for the warping, size dimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size XxYxZ
"""<line_sep># implements this by creating a different view (effectively adding dimensions)
Iw=compute_warped_image_multiNC(I0.view(torch.Size([1 1]+list(I0.size()))) phi.view(torch.Size([1]+list(phi.size()))) spacing spline_order zero_boundary use_01_input)<line_sep><return>Iw.view(I0.size())<block_end><def_stmt>compute_warped_image_multiNC I0 phi spacing spline_order zero_boundary=<false> use_01_input=<true><block_start>"""Warps image.
:param I0: image to warp, image size BxCxXxYxZ
:param phi: map for the warping, size BxdimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size BxCxXxYxZ
"""<line_sep>dim=I0.dim()-2<if_stmt>dim<eq>1<block_start><return>_compute_warped_image_multiNC_1d(I0 phi spacing spline_order zero_boundary use_01_input=use_01_input)<block_end><elif_stmt>dim<eq>2<block_start><return>_compute_warped_image_multiNC_2d(I0 phi spacing spline_order zero_boundary use_01_input=use_01_input)<block_end><elif_stmt>dim<eq>3<block_start><return>_compute_warped_image_multiNC_3d(I0 phi spacing spline_order zero_boundary use_01_input=use_01_input)<block_end><else_stmt><block_start><raise>ValueError('Images can only be warped in dimensions 1 to 3')<block_end><block_end><def_stmt>_get_low_res_spacing_from_spacing spacing sz lowResSize<block_start>"""Computes spacing for the low-res parametrization from image spacing.
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of the low-res parameterization
:return: returns spacing of low res parameterization
"""<line_sep>#todo: check that this is the correct way of doing it
<return>spacing<times>(np.array(sz[2::])-1)/(np.array(lowResSize[2::])-1)<block_end><def_stmt>_get_low_res_size_from_size sz factor<block_start>"""Returns the corresponding low-res size from a (high-res) sz.
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""<if_stmt>(factor<is><none>)<or>(factor<ge>1)<block_start>print('WARNING: Could not compute low_res_size as factor was '+str(factor))<line_sep><return>np.array(sz)<block_end><else_stmt><block_start>low_res_sz=np.array(sz)<line_sep>low_res_sz[2::]=(np.ceil((np.array(sz[2::])<times>factor))).astype('int16')<line_sep><return>low_res_sz<block_end><block_end><def_stmt>_compute_low_res_image I spacing low_res_size spline_order<block_start><import_stmt>mermaid.image_sampling<as>IS<line_sep>sampler=IS.ResampleImage()<line_sep>low_res_image,_=sampler.downsample_image_to_size(I spacing low_res_size[2::] spline_order)<line_sep><return>low_res_image<block_end><def_stmt>individual_parameters_to_model_parameters ind_pars<block_start>model_pars=dict()<if_stmt>type(ind_pars)<eq>type(dict())# should already be in the right format
<block_start>model_pars=ind_pars<block_end><else_stmt># if ind_pars is not a dictionary assume that they come from the optimizer
# (i.e., list and each list element has a dictionary with keys 'name' and 'model_params'
<block_start><for_stmt>par ind_pars<block_start>model_pars[par['name']]=par['model_params']<block_end><block_end><return>model_pars<block_end><def_stmt>compute_vector_momentum_from_scalar_momentum_multiNC lam I sz spacing<block_start>"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, BxCxXxYxZ
:param I: image, BxCxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""<line_sep>nrOfI=sz[0]# number of images
m=create_ND_vector_field_variable_multiN(sz[2::] nrOfI)# attention that the second dimension here is image dim, not nrOfC
nrOfC=sz[1]<for_stmt>c range(nrOfC)# loop over all the channels and add the results
<block_start>m=m+compute_vector_momentum_from_scalar_momentum_multiN(lam[: c <ellipsis>] I[: c <ellipsis>] nrOfI sz[2::] spacing)<block_end><return>m<block_end><def_stmt>compute_vector_momentum_from_scalar_momentum_multiN lam I nrOfI sz spacing<block_start>"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, batchxXxYxZ
:param I: image, batchxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""<line_sep>fdt=fd.FD_torch(spacing)<line_sep>dim=len(sz)<line_sep>m=create_ND_vector_field_variable_multiN(sz nrOfI)<if_stmt>dim<eq>1<block_start>m[: 0 :]=fdt.dXc(I)<times>lam<block_end><elif_stmt>dim<eq>2<block_start>m[: 0 : :]=fdt.dXc(I)<times>lam<line_sep>m[: 1 : :]=fdt.dYc(I)<times>lam<block_end><elif_stmt>dim<eq>3<block_start>m[: 0 : : :]=fdt.dXc(I)<times>lam<line_sep>m[: 1 : : :]=fdt.dYc(I)<times>lam<line_sep>m[: 2 : : :]=fdt.dZc(I)<times>lam<block_end><else_stmt><block_start><raise>ValueError('Can only convert scalar to vector momentum in dimensions 1-3')<block_end><return>m<block_end><def_stmt>create_ND_vector_field_variable_multiN sz nr_of_images=1<block_start>"""
Create vector field torch Variable of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nr_of_images: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""<line_sep>dim=len(sz)<line_sep>csz=np.array(sz)# just to make sure it is a numpy array
csz=np.array([nr_of_images dim]+list(csz))<line_sep><return>MyTensor(*(csz.tolist())).normal_(0. 1e-7)<block_end><def_stmt>create_ND_vector_field_variable sz<block_start>"""Create vector field torch Variable of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:return: returns vector field of size dimxXxYxZ
"""<line_sep>dim=len(sz)<line_sep>csz=np.array(sz)# just to make sure it is a numpy array
csz=np.array([dim]+list(csz))<line_sep><return>MyTensor(*(csz.tolist())).normal_(0. 1e-7)<block_end><def_stmt>create_vector_parameter nr_of_elements<block_start>"""Creates a vector parameters with a specified number of elements.
:param nr_of_elements: number of vector elements
:return: returns the parameter vector
"""<line_sep><return>Parameter(MyTensor(nr_of_elements).normal_(0. 1e-7))<block_end><def_stmt>create_ND_vector_field_parameter_multiN sz nrOfI=1 get_field_from_external_network=<false><block_start>"""Create vector field torch Parameter of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""<line_sep>dim=len(sz)<line_sep>csz=np.array(sz)# just to make sure it is a numpy array
csz=np.array([nrOfI dim]+list(csz))<if_stmt>get_field_from_external_network<block_start>tmp=MyTensor(*(csz.tolist())).normal_(0. 1e-7)<line_sep>tmp.requires_grad=<true><block_end><else_stmt><block_start>tmp=Parameter(MyTensor(*(csz.tolist())).normal_(0. 1e-7))<block_end><return>tmp<block_end><def_stmt>create_local_filter_weights_parameter_multiN sz gaussian_std_weights nrOfI=1 sched='w_K_w' get_preweight_from_network=<false><block_start>"""
Create a torch Parameter holding local multi-Gaussian filter weights of a given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param gaussian_std_weights: multi-Gaussian standard-deviation weights used to initialize the field
:param nrOfI: number of images
:return: returns the weight field of size nrOfIxnrOfWeightsxXxYxZ
"""<line_sep>nr_of_mg_weights=len(gaussian_std_weights)<line_sep>csz=np.array(sz)# just to make sure it is a numpy array
csz=np.array([nrOfI nr_of_mg_weights]+list(csz))<line_sep>weights=torch.empty(*csz)<line_sep># set the default
<if_stmt>sched<eq>'w_K_w'<block_start>gaussian_std_weights=[torch.sqrt(std_w)<for>std_w gaussian_std_weights]<block_end><for_stmt>g range(nr_of_mg_weights)<block_start>weights[: g <ellipsis>]=gaussian_std_weights[g]<block_end>tmp=AdaptVal(weights)<if_stmt>get_preweight_from_network<block_start>tmp.requires_grad=<true><block_end><else_stmt><block_start>tmp=Parameter(tmp)<block_end><return>tmp<block_end><def_stmt>create_ND_scalar_field_parameter_multiNC sz nrOfI=1 nrOfC=1<block_start>"""
Create a scalar field torch Parameter of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:param nrOfC: number of channels
:return: returns the scalar field of size nrOfIxnrOfCxXxYxZ
"""<line_sep>csz=np.array(sz)# just to make sure it is a numpy array
csz=np.array([nrOfI nrOfC]+list(csz))<line_sep><return>Parameter(MyTensor(*(csz.tolist())).normal_(0. 1e-7))<block_end><def_stmt>centered_identity_map_multiN sz spacing dtype='float32'<block_start>"""
Create a centered identity map (shifted so it is centered around 0)
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""<line_sep>dim=len(sz)-2<line_sep>nrOfI=sz[0]<if_stmt>dim<eq>1<block_start>id=np.zeros([nrOfI 1 sz[2]] dtype=dtype)<block_end><elif_stmt>dim<eq>2<block_start>id=np.zeros([nrOfI 2 sz[2] sz[3]] dtype=dtype)<block_end><elif_stmt>dim<eq>3<block_start>id=np.zeros([nrOfI 3 sz[2] sz[3] sz[4]] dtype=dtype)<block_end><else_stmt><block_start><raise>ValueError('Only dimensions 1-3 are currently supported for the identity map')<block_end><for_stmt>n range(nrOfI)<block_start>id[n <ellipsis>]=centered_identity_map(sz[2::] spacing dtype=dtype)<block_end><return>id<block_end><def_stmt>identity_map_multiN sz spacing dtype='float32'<block_start>"""
Create an identity map
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""<line_sep>dim=len(sz)-2<line_sep>nrOfI=int(sz[0])<if_stmt>dim<eq>1<block_start>id=np.zeros([nrOfI 1 sz[2]] dtype=dtype)<block_end><elif_stmt>dim<eq>2<block_start>id=np.zeros([nrOfI 2 sz[2] sz[3]] dtype=dtype)<block_end><elif_stmt>dim<eq>3<block_start>id=np.zeros([nrOfI 3 sz[2] sz[3] sz[4]] dtype=dtype)<block_end><else_stmt><block_start><raise>ValueError('Only dimensions 1-3 are currently supported for the identity map')<block_end><for_stmt>n range(nrOfI)<block_start>id[n <ellipsis>]=identity_map(sz[2::] spacing dtype=dtype)<block_end><return>id<block_end><def_stmt>centered_identity_map sz spacing dtype='float32'<block_start>"""
Returns a centered identity map (with 0 in the middle) if sz is odd;
otherwise everything is shifted by 0.5*spacing.
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""<line_sep>dim=len(sz)<if_stmt>dim<eq>1<block_start>id=np.mgrid[0:sz[0]]<block_end><elif_stmt>dim<eq>2<block_start>id=np.mgrid[0:sz[0] 0:sz[1]]<block_end><elif_stmt>dim<eq>3<block_start>id=np.mgrid[0:sz[0] 0:sz[1] 0:sz[2]]<block_end><else_stmt><block_start><raise>ValueError('Only dimensions 1-3 are currently supported for the identity map')<block_end># now get it into range [0,(sz-1)*spacing]^d
id=np.array(id.astype(dtype))<if_stmt>dim<eq>1<block_start>id=id.reshape(1 sz[0])<block_end># add a dummy first index
<for_stmt>d range(dim)<block_start>id[d]<augmul>spacing[d]<if_stmt>sz[d]%2<eq>0#even
<block_start>id[d]<augsub>spacing[d]<times>(sz[d]<floordiv>2)<block_end><else_stmt>#odd
<block_start>id[d]<augsub>spacing[d]<times>((sz[d]+1)<floordiv>2)<block_end><block_end># and now store it in a dim+1 array
<if_stmt>dim<eq>1<block_start>idnp=np.zeros([1 sz[0]] dtype=dtype)<line_sep>idnp[0 :]=id[0]<block_end><elif_stmt>dim<eq>2<block_start>idnp=np.zeros([2 sz[0] sz[1]] dtype=dtype)<line_sep>idnp[0 : :]=id[0]<line_sep>idnp[1 : :]=id[1]<block_end><elif_stmt>dim<eq>3<block_start>idnp=np.zeros([3 sz[0] sz[1] sz[2]] dtype=dtype)<line_sep>idnp[0 : : :]=id[0]<line_sep>idnp[1 : : :]=id[1]<line_sep>idnp[2 : : :]=id[2]<block_end><else_stmt><block_start><raise>ValueError('Only dimensions 1-3 are currently supported for the centered identity map')<block_end><return>idnp<block_end>#
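# A small usage sketch for the identity-map helpers above; the size and spacing values are
# made up for illustration.
#
# sz = np.array([1, 1, 5, 5])                               # BxCxXxY
# spacing = np.array([1.0, 1.0])
# id_centered = centered_identity_map_multiN(sz, spacing)   # coordinates shifted around 0
# id_plain = identity_map_multiN(sz, spacing)               # coordinates in [0, (sz-1)*spacing]
# print(id_centered.shape, id_plain.shape)                  # both (1, 2, 5, 5)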
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
<def_stmt>identity_map sz spacing dtype='float32'<block_start>"""
Returns an identity map.
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""<line_sep>dim=len(sz)<if_stmt>dim<eq>1<block_start>id=np.mgrid[0:sz[0]]<block_end><elif_stmt>dim<eq>2<block_start>id=np.mgrid[0:sz[0] 0:sz[1]]<block_end><elif_stmt>dim<eq>3<block_start>id=np.mgrid[0:sz[0] 0:sz[1] 0:sz[2]]<block_end><else_stmt><block_start><raise>ValueError('Only dimensions 1-3 are currently supported for the identity map')<block_end># now get it into range [0,(sz-1)*spacing]^d
id=np.array(id.astype(dtype))<if_stmt>dim<eq>1<block_start>id=id.reshape(1 sz[0])<block_end># add a dummy first index
<for_stmt>d range(dim)<block_start>id[d]<augmul>spacing[d]<line_sep>#id[d]*=2./(sz[d]-1)
#id[d]-=1.
<block_end># and now store it in a dim+1 array
<if_stmt>dim<eq>1<block_start>idnp=np.zeros([1 sz[0]] dtype=dtype)<line_sep>idnp[0 :]=id[0]<block_end><elif_stmt>dim<eq>2<block_start>idnp=np.zeros([2 sz[0] sz[1]] dtype=dtype)<line_sep>idnp[0 : :]=id[0]<line_sep>idnp[1 : :]=id[1]<block_end><elif_stmt>dim<eq>3<block_start>idnp=np.zeros([3 sz[0] sz[1] sz[2]] dtype=dtype)<line_sep>idnp[0 : : :]=id[0]<line_sep>idnp[1 : : :]=id[1]<line_sep>idnp[2 : : :]=id[2]<block_end><else_stmt><block_start><raise>ValueError('Only dimensions 1-3 are currently supported for the identity map')<block_end><return>idnp<block_end><def_stmt>omt_boundary_weight_mask img_sz spacing mask_range=5 mask_value=5 smoother_std=0.05<block_start>"""generate a smooth weight mask for the omt """<line_sep>dim=len(img_sz)<line_sep>mask_sz=[1 1]+list(img_sz)<line_sep>mask=AdaptVal(torch.ones(*mask_sz))<times>mask_value<if_stmt>dim<eq>2<block_start>mask[: : mask_range:-mask_range mask_range:-mask_range]=1<block_end><elif_stmt>dim<eq>3<block_start>mask[: : mask_range:-mask_range mask_range:-mask_range mask_range:-mask_range]=1<block_end>sm=get_single_gaussian_smoother(smoother_std img_sz spacing)<line_sep>mask=sm.smooth(mask)<line_sep><return>mask.detach()<block_end><def_stmt>momentum_boundary_weight_mask img_sz spacing mask_range=5 smoother_std=0.05 pow=2<block_start>"""generate a smooth weight mask for the omt """<line_sep>dim=len(img_sz)<line_sep>mask_sz=[1 1]+list(img_sz)<line_sep>mask=AdaptVal(torch.zeros(*mask_sz))<if_stmt>dim<eq>2<block_start>mask[: : mask_range:-mask_range mask_range:-mask_range]=1<block_end><elif_stmt>dim<eq>3<block_start>mask[: : mask_range:-mask_range mask_range:-mask_range mask_range:-mask_range]=1<block_end>sm=get_single_gaussian_smoother(smoother_std img_sz spacing)<line_sep>mask=sm.smooth(mask)<if_stmt>pow<eq>2<block_start>mask=mask<power>2<block_end><if_stmt>pow<eq>3<block_start>mask=mask<times>mask<times>mask<block_end><return>mask<block_end># def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
<def_stmt>get_single_gaussian_smoother gaussian_std sz spacing<block_start>s_m_params=pars.ParameterDict()<line_sep>s_m_params['smoother']['type']='gaussian'<line_sep>s_m_params['smoother']['gaussian_std']=gaussian_std<line_sep>s_m=sf.SmootherFactory(sz spacing).create_smoother(s_m_params)<line_sep><return>s_m<block_end><def_stmt>get_warped_label_map label_map phi spacing sched='nn'<block_start><if_stmt>sched<eq>'nn'<block_start>warped_label_map=compute_warped_image_multiNC(label_map phi spacing spline_order=0 zero_boundary=<true>)<line_sep># check if here should be add assert
<assert_stmt>abs(torch.sum(warped_label_map.data-warped_label_map.data.round()))<l>0.1 "nn interpolation is not precise"<block_end><else_stmt><block_start><raise>ValueError(" the label warping method is not implemented")<block_end><return>warped_label_map<block_end><def_stmt>t2np v<block_start>"""
Takes a torch array and returns it as a numpy array on the cpu
:param v: torch array
:return: numpy array
"""<line_sep><return>(v.detach()).cpu().numpy()<block_end><def_stmt>cxyz_to_xyzc v<block_start>"""
Permutes a tensor from BxCxXxY(xZ) (channels-first) to BxXxY(xZ)xC (channels-last) layout.
:param v: torch tensor in BxCxXxY(xZ) format
:return: permuted tensor in BxXxY(xZ)xC format
"""<line_sep>dim=len(v.shape)-2<if_stmt>dim<eq>2<block_start>v=v.permute(0 2 3 1)<block_end><if_stmt>dim<eq>3<block_start>v=v.permute(0 2 3 4 1)<block_end><return>v<block_end><def_stmt>get_scalar v<block_start><if_stmt>isinstance(v float)<block_start><return>v<block_end><elif_stmt>isinstance(v np.ndarray)<and>v.size<eq>1<block_start><return>float(v)<block_end><block_end><def_stmt>checkNan x<block_start>""""
Input should be a list of Variables; returns the number of NaN entries for each element.
"""<line_sep><return>[len(np.argwhere(np.isnan(elem.detach().cpu().numpy())))<for>elem x]<block_end><def_stmt>noramlized_spacing_to_smallest spacing<block_start>min_sp=np.min(spacing)<line_sep>spacing[spacing<g>min_sp]=min_sp<line_sep><return>spacing<block_end><def_stmt>time_warped_function f<block_start><def_stmt>__time_warped_function input=<none><block_start>start=torch.cuda.Event(enable_timing=<true>)<line_sep>end=torch.cuda.Event(enable_timing=<true>)<line_sep>start.record()<line_sep>output=f(input)<line_sep>end.record()<line_sep># Waits for everything to finish running
torch.cuda.synchronize()<line_sep>print(start.elapsed_time(end))<line_sep><return>output<block_end><return>__time_warped_function<block_end><def_stmt>interoplate_boundary_right tensor<block_start>dim=len(tensor.shape)-2<if_stmt>dim<eq>1<block_start>tensor[: : -1]=tensor[: :-2]+tensor[: :-2]-tensor[: :-3]<block_end><if_stmt>dim<eq>2<block_start>tensor[: : -1 :]=tensor[: : -2 :]+tensor[: : -2 :]-tensor[: : -3 :]<line_sep>tensor[: : : -1]=tensor[: : : -2]+tensor[: : : -2]-tensor[: : : -3]<block_end><if_stmt>dim<eq>3<block_start>tensor[: : : -1 : :]=tensor[: : -2 :]+tensor[: : -2 :]-tensor[: : -3 :]<line_sep>tensor[: : : : -1 :]=tensor[: : : -2]+tensor[: : : -2]-tensor[: : : -3]<line_sep>tensor[: : : : : -1]=tensor[: : : -2]+tensor[: : : -2]-tensor[: : : -3]<block_end><block_end><def_stmt>get_resampled_image I spacing desiredSize spline_order=1 zero_boundary=<false> identity_map=<none><block_start>"""
:param I: B C X Y Z
:param spacing: spx spy spz
:param desiredSize: B C X Y Z
:param spline_order: spline order used for the interpolation
:param zero_boundary: if True, a zero boundary condition is used for the interpolation
:param identity_map: optional precomputed identity map used for the resampling
:return: the resampled image of size desiredSize
"""<if_stmt>spacing<is><none><block_start>img_sz=I.shape[2:]<line_sep>spacing=1./(np.array(img_sz)-1)<block_end><if_stmt>identity_map<is><not><none># todo will remove, currently fix for symmetric training
<block_start><if_stmt>I.shape[0]<ne>identity_map.shape[0]<block_start>n_batch=I.shape[0]<line_sep>desiredSize=desiredSize.copy()<line_sep>desiredSize[0]=n_batch<line_sep>identity_map=identity_map[:n_batch]<block_end><block_end>resampled,new_spacing=resample_image(I spacing desiredSize spline_order=spline_order zero_boundary=zero_boundary identity_map=identity_map)<line_sep><return>resampled<block_end><def_stmt>resample_image I spacing desiredSize spline_order=1 zero_boundary=<false> identity_map=<none><block_start>"""
Resample an image to a given desired size
:param I: Input image (expected to be of BxCxXxYxZ format)
:param spacing: array describing the spatial spacing
:param desiredSize: array for the desired size (including B and C; the spatial sizes are taken from entry 2 onward)
:return: returns a tuple: the downsampled image, the new spacing after downsampling
"""<line_sep>desiredSize=desiredSize[2:]<line_sep>is_numpy=<false><if_stmt><not>isinstance(I torch.Tensor)<block_start>I=torch.Tensor(I)<line_sep>is_numpy=<true><block_end>sz=np.array(list(I.size()))<line_sep># check that the batch size and the number of channels is the same
nrOfI=sz[0]<line_sep>nrOfC=sz[1]<line_sep>desiredSizeNC=np.array([nrOfI nrOfC]+list(desiredSize))<line_sep>newspacing=spacing<times>((sz[2::].astype('float')-1.)/(desiredSizeNC[2::].astype('float')-1.))<line_sep>###########################################
<if_stmt>identity_map<is><not><none><block_start>idDes=identity_map<block_end><else_stmt><block_start>idDes=AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC newspacing)))<block_end># now use this map for resampling
ID=compute_warped_image_multiNC(I idDes newspacing spline_order zero_boundary)<line_sep><return>ID<if><not>is_numpy<else>ID.numpy() newspacing<block_end><def_stmt>get_res_size_from_size sz factor<block_start>"""
Returns the corresponding low-res size from a (high-res) sz
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""<if_stmt>(factor<is><none>)<block_start>print('WARNING: Could not compute low_res_size as factor was '+str(factor))<line_sep><return>sz<block_end><else_stmt><block_start>lowResSize=np.array(sz)<if_stmt><not>isinstance(factor list)<block_start>lowResSize[2::]=(np.ceil((np.array(sz[2:])<times>factor))).astype('int16')<block_end><else_stmt><block_start>lowResSize[2::]=(np.ceil((np.array(sz[2:])<times>np.array(factor)))).astype('int16')<block_end><if_stmt>lowResSize[-1]%2<ne>0<block_start>lowResSize[-1]<augsub>1<line_sep>print('\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')<block_end><return>lowResSize<block_end><block_end><def_stmt>get_res_spacing_from_spacing spacing sz lowResSize<block_start>"""
Computes spacing for the low-res parameterization from image spacing
:param spacing: image spacing
:param sz: size of image
:param lowResSize: size of the low-res parameterization
:return: returns spacing of low res parameterization
"""<line_sep># todo: check that this is the correct way of doing it
<return>spacing<times>(np.array(sz[2::])-1)/(np.array(lowResSize[2::])-1)<block_end>########################################## Adaptive Net ###################################################3
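# A worked example for get_res_size_from_size / get_res_spacing_from_spacing above, with
# made-up values: a 64x64 image downsampled by a factor of 0.5.
#
# sz = [1, 1, 64, 64]
# spacing = np.array([1.0, 1.0])
# low_sz = get_res_size_from_size(sz, 0.5)                         # -> [1, 1, 32, 32]
# low_spacing = get_res_spacing_from_spacing(spacing, sz, low_sz)  # -> spacing * 63/31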
<def_stmt>space_normal tensors std=0.1<block_start>"""
Spatially normalized initialization of the network kernels (each kernel is set to a normalized Gaussian).
:param tensors: weight tensor(s) to initialize
:param std: standard deviation of the Gaussian
:return: the initialized tensors (when called with a Variable)
"""<if_stmt>isinstance(tensors Variable)<block_start>space_normal(tensors.data std=std)<line_sep><return>tensors<block_end><for_stmt>n range(tensors.size()[0])<block_start><for_stmt>c range(tensors.size()[1])<block_start>dim=tensors[n][c].dim()<line_sep>sz=tensors[n][c].size()<line_sep>mus=np.zeros(dim)<line_sep>stds=std<times>np.ones(dim)<line_sep>print('WARNING: What should the spacing be here? Needed for new identity map code')<line_sep><raise>ValueError('Double check the spacing here before running this code')<line_sep>spacing=np.ones(dim)<line_sep>centered_id=centered_identity_map(sz spacing)<line_sep>g=compute_normalized_gaussian(centered_id mus stds)<line_sep>tensors[n c]=torch.from_numpy(g)<block_end><block_end><block_end><def_stmt>weights_init_uniform m<block_start>classname=m.__class__.__name__<line_sep># print(classname)
<if_stmt>classname.find('Conv')<ne>-1<block_start>init.uniform(m.weight.data 0.038 0.042)<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>init.uniform(m.weight.data 0.0 0.02)<block_end><elif_stmt>classname.find('BatchNorm2d')<ne>-1<block_start>init.uniform(m.weight.data 1.0 0.02)<line_sep>init.constant(m.bias.data 0.0)<block_end><block_end><def_stmt>weights_init_normal m<block_start>classname=m.__class__.__name__<line_sep># print(classname)
<if_stmt>classname.find('Conv')<ne>-1<block_start>space_normal(m.weight.data)<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>space_normal(m.weight.data)<block_end><elif_stmt>classname.find('BatchNorm2d')<ne>-1<block_start>init.uniform(m.weight.data 1.0 0.02)<line_sep>init.constant(m.bias.data 0.0)<block_end><block_end><def_stmt>weights_init_rd_normal m<block_start>classname=m.__class__.__name__<line_sep># print(classname)
<if_stmt>classname.find('Conv')<ne>-1<block_start>init.normal(m.weight.data)<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>init.normal(m.weight.data)<block_end><elif_stmt>classname.find('BatchNorm2d')<ne>-1<block_start>init.uniform(m.weight.data 1.0 0.02)<line_sep>init.constant(m.bias.data 0.0)<block_end><block_end><def_stmt>weights_init_xavier m<block_start>classname=m.__class__.__name__<line_sep># print(classname)
<if_stmt>classname.find('Conv')<ne>-1<block_start>init.xavier_normal(m.weight.data gain=1)<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>init.xavier_normal(m.weight.data gain=1)<block_end><elif_stmt>classname.find('BatchNorm2d')<ne>-1<block_start>init.uniform(m.weight.data 1.0 0.02)<line_sep>init.constant(m.bias.data 0.0)<block_end><block_end><def_stmt>weights_init_kaiming m<block_start>classname=m.__class__.__name__<line_sep># print(classname)
<if_stmt>classname.find('Conv')<ne>-1<block_start>init.kaiming_normal(m.weight.data a=0 mode='fan_in')<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>init.kaiming_normal(m.weight.data a=0 mode='fan_in')<block_end><elif_stmt>classname.find('BatchNorm2d')<ne>-1<block_start>init.uniform(m.weight.data 1.0 0.02)<line_sep>init.constant(m.bias.data 0.0)<block_end><block_end><def_stmt>weights_init_orthogonal m<block_start>classname=m.__class__.__name__<line_sep>print(classname)<if_stmt>classname.find('Conv')<ne>-1<block_start>init.orthogonal(m.weight.data gain=1)<block_end><elif_stmt>classname.find('Linear')<ne>-1<block_start>init.orthogonal(m.weight.data gain=1)<block_end><elif_stmt>classname.find('BatchNorm2d')<ne>-1<block_start>init.uniform(m.weight.data 1.0 0.02)<line_sep>init.constant(m.bias.data 0.0)<block_end><block_end><def_stmt>init_weights net init_type='normal'<block_start>print('initialization method [%s]'%init_type)<if_stmt>init_type<eq>'rd_normal'<block_start>net.apply(weights_init_rd_normal)<block_end><elif_stmt>init_type<eq>'normal'<block_start>net.apply(weights_init_normal)<block_end><elif_stmt>init_type<eq>'uniform'<block_start>net.apply(weights_init_uniform)<block_end><elif_stmt>init_type<eq>'xavier'<block_start>net.apply(weights_init_xavier)<block_end><elif_stmt>init_type<eq>'kaiming'<block_start>net.apply(weights_init_kaiming)<block_end><elif_stmt>init_type<eq>'orthogonal'<block_start>net.apply(weights_init_orthogonal)<block_end><else_stmt><block_start><raise>NotImplementedError('initialization method [%s] is not implemented'%init_type)<block_end><block_end><def_stmt>organize_data moving target sched='depth_concat'<block_start><if_stmt>sched<eq>'depth_concat'<block_start>input=torch.cat([moving target] dim=1)<block_end><elif_stmt>sched<eq>'width_concat'<block_start>input=torch.cat((moving target) dim=3)<block_end><elif_stmt>sched<eq>'list_concat'<block_start>input=torch.cat((moving.unsqueeze(0) target.unsqueeze(0)) dim=0)<block_end><elif_stmt>sched<eq>'difference'<block_start>input=moving-target<block_end><return>input<block_end><def_stmt>bh m gi go<block_start>print("Grad Input")<line_sep>print((torch.sum(gi[0].data) torch.sum(gi[1].data)))<line_sep>print("Grad Output")<line_sep>print(torch.sum(go[0].data))<line_sep><return>gi[0] gi[1] gi[2]<block_end><class_stmt>ConvBnRel(nn.Module)# conv + bn (optional) + relu
<block_start><def_stmt>__init__ self in_channels out_channels kernel_size stride=1 active_unit='relu' same_padding=<false> bn=<false> reverse=<false> bias=<false><block_start>super(ConvBnRel self).__init__()<line_sep>padding=int((kernel_size-1)<floordiv>2)<if>same_padding<else>0<if_stmt><not>reverse<block_start>self.conv=nn.Conv2d(in_channels out_channels kernel_size stride padding=padding bias=bias)<block_end><else_stmt><block_start>self.conv=nn.ConvTranspose2d(in_channels out_channels kernel_size stride padding=padding bias=bias)<block_end>#y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
#When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants.
self.bn=nn.BatchNorm2d(out_channels eps=0.0001 momentum=0 affine=<true>)<if>bn<else><none><if_stmt>active_unit<eq>'relu'<block_start>self.active_unit=nn.ReLU(inplace=<true>)<block_end><elif_stmt>active_unit<eq>'elu'<block_start>self.active_unit=nn.ELU(inplace=<true>)<block_end><else_stmt><block_start>self.active_unit=<none><block_end><block_end><def_stmt>forward self x<block_start>x=self.conv(x)<if_stmt>self.bn<is><not><none><block_start>x=self.bn(x)<block_end><if_stmt>self.active_unit<is><not><none><block_start>x=self.active_unit(x)<block_end><return>x<block_end><block_end><class_stmt>FcRel(nn.Module)# fc+ relu(option)
<block_start><def_stmt>__init__ self in_features out_features active_unit='relu'<block_start>super(FcRel self).__init__()<line_sep>self.fc=nn.Linear(in_features out_features)<if_stmt>active_unit<eq>'relu'<block_start>self.active_unit=nn.ReLU(inplace=<true>)<block_end><elif_stmt>active_unit<eq>'elu'<block_start>self.active_unit=nn.ELU(inplace=<true>)<block_end><else_stmt><block_start>self.active_unit=<none><block_end><block_end><def_stmt>forward self x<block_start>x=self.fc(x)<if_stmt>self.active_unit<is><not><none><block_start>x=self.active_unit(x)<block_end><return>x<block_end><block_end><class_stmt>AdpSmoother(nn.Module)<block_start>"""
a simple convolutional implementation that generates a displacement field
"""<def_stmt>__init__ self inputs dim net_sched=<none># settings should include [using_bias, using bn, using elu]
# inputs should be a dictionary that may contain ['s'] and ['t']
<block_start>super(AdpSmoother self).__init__()<line_sep>self.dim=dim<line_sep>self.net_sched='m_only'<line_sep>self.s=inputs['s'].detach()<line_sep>self.t=inputs['t'].detach()<line_sep>self.mask=Parameter(torch.cat([torch.ones(inputs['s'].size())]<times>dim 1) requires_grad=<true>)<line_sep>self.get_net_sched()<line_sep>#self.net.register_backward_hook(bh)
<block_end><def_stmt>get_net_sched self debugging=<true> using_bn=<true> active_unit='relu' using_sigmoid=<false> kernel_size=5# return the self.net and self.net_input
<block_start>padding_size=(kernel_size-1)<floordiv>2<if_stmt>self.net_sched<eq>'m_only'<block_start><if_stmt>debugging<block_start>self.net=nn.Conv2d(2 2 kernel_size 1 padding=padding_size bias=<false> groups=2)<block_end><else_stmt><block_start>net=[ConvBnRel(self.dim 20 5 active_unit=active_unit same_padding=<true> bn=using_bn) ConvBnRel(20 self.dim 5 active_unit=active_unit same_padding=<true> bn=using_bn)]<if_stmt>using_sigmoid<block_start>net<augadd>[nn.Sigmoid()]<block_end>self.net=nn.Sequential(*net)<block_end><block_end><elif_stmt>self.net_sched<eq>'m_f_s'<block_start><if_stmt>debugging<block_start>self.net=nn.Conv2d(self.dim+1 self.dim kernel_size 1 padding=padding_size bias=<false>)<block_end><else_stmt><block_start>net=[ConvBnRel(self.dim+1 20 5 active_unit=active_unit same_padding=<true> bn=using_bn) ConvBnRel(20 self.dim 5 active_unit=active_unit same_padding=<true> bn=using_bn)]<if_stmt>using_sigmoid<block_start>net<augadd>[nn.Sigmoid()]<block_end>self.net=nn.Sequential(*net)<block_end><block_end><elif_stmt>self.net_sched<eq>'m_d_s'<block_start><if_stmt>debugging<block_start>self.net=nn.Conv2d(self.dim+1 self.dim kernel_size 1 padding=padding_size bias=<false>)<block_end><else_stmt><block_start>net=[ConvBnRel(self.dim+1 20 5 active_unit=active_unit same_padding=<true> bn=using_bn) ConvBnRel(20 self.dim 5 active_unit=active_unit same_padding=<true> bn=using_bn)]<if_stmt>using_sigmoid<block_start>net<augadd>[nn.Sigmoid()]<block_end>self.net=nn.Sequential(*net)<block_end><block_end><elif_stmt>self.net_sched<eq>'m_f_s_t'<block_start><if_stmt>debugging<block_start>self.net=nn.Conv2d(self.dim+2 self.dim kernel_size 1 padding=padding_size bias=<false>)<block_end><else_stmt><block_start>net=[ConvBnRel(self.dim+2 20 5 active_unit=active_unit same_padding=<true> bn=using_bn) ConvBnRel(20 self.dim 5 active_unit=active_unit same_padding=<true> bn=using_bn)]<if_stmt>using_sigmoid<block_start>net<augadd>[nn.Sigmoid()]<block_end>self.net=nn.Sequential(*net)<block_end><block_end><elif_stmt>self.net_sched<eq>'m_d_s_f_t'<block_start><if_stmt>debugging<block_start>self.net=nn.Conv2d(self.dim+2 self.dim kernel_size 1 padding=padding_size bias=<false>)<block_end><else_stmt><block_start>net=[ConvBnRel(self.dim+2 20 5 active_unit=active_unit same_padding=<true> bn=using_bn) ConvBnRel(20 self.dim 5 active_unit=active_unit same_padding=<true> bn=using_bn)]<if_stmt>using_sigmoid<block_start>net<augadd>[nn.Sigmoid()]<block_end>self.net=nn.Sequential(*net)<block_end><block_end><block_end><def_stmt>prepare_data self m new_s<block_start>input=<none><if_stmt>self.net_sched<eq>'m_only'<block_start>input=m<block_end><elif_stmt>self.net_sched<eq>'m_f_s'<block_start>input=organize_data(m self.s sched='depth_concat')<block_end><elif_stmt>self.net_sched<eq>'m_d_s'<block_start>input=organize_data(m new_s sched='depth_concat')<block_end><elif_stmt>self.net_sched<eq>'m_f_s_t'<block_start>input=organize_data(m self.s sched='depth_concat')<line_sep>input=organize_data(input self.t sched='depth_concat')<block_end><elif_stmt>self.net_sched<eq>'m_f_s_t'<block_start>input=organize_data(m self.s sched='depth_concat')<line_sep>input=organize_data(input self.t sched='depth_concat')<block_end><elif_stmt>self.net_sched<eq>'m_d_s_f_t'<block_start>input=organize_data(m new_s sched='depth_concat')<line_sep>input=organize_data(input self.t sched='depth_concat')<block_end><return>input<block_end><def_stmt>forward self m new_s=<none><block_start>m=m<times>self.mask<line_sep>input=self.prepare_data(m 
new_s)<line_sep>x=input<line_sep>x=self.net(x)<line_sep><return>x<block_end><block_end> |
<import_stmt>getopt<import_stmt>sys<line_sep>comment=('#'+sys.argv[1]).encode()<line_sep>opts,args=getopt.getopt(sys.argv[2:] 'cf:o:xy')<line_sep>optstring=''<line_sep>length=len(comment)<for_stmt>opt,arg opts<block_start><if_stmt>opt<eq>'-o'<block_start>out=arg<block_end><elif_stmt>opt<not><in>('-f' '-K')<block_start>optstring=optstring+' '+opt<block_end><block_end>infile=open(args[0] 'rb')<line_sep>outfile=open(out 'wb')<line_sep>outfile.write((optstring+"\n").encode())<for_stmt>l infile.readlines()<block_start><if_stmt>l[:length]<ne>comment<block_start>outfile.write(l)<block_end><block_end>sys.exit(0)<line_sep> |
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
<import_from_stmt>torch.nn functional<as>F<import_from_stmt>torch Tensor<import_stmt>argparse<import_from_stmt>. register_classification_loss_fn<import_from_stmt>.. BaseCriteria<line_sep>@register_classification_loss_fn(name="binary_cross_entropy")<class_stmt>ClsBinaryCrossEntropy(BaseCriteria)<block_start>"""Binary CE for classification tasks"""<def_stmt>__init__ self opts *args **kwargs<arrow><none><block_start>super().__init__()<block_end><def_stmt>forward self input_sample:Tensor prediction:Tensor target:Tensor *args **kwargs<arrow>Tensor<block_start><if_stmt>target.dim()<ne>prediction.dim()<block_start>target=F.one_hot(target num_classes=prediction.shape[-1])<block_end><return>F.binary_cross_entropy_with_logits(input=prediction target=target.to(prediction.dtype) weight=<none> reduction="sum" )<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>"{}()".format(self.__class__.__name__)<block_end><block_end> |
# Submit a function to be run either locally or in a computing cluster.
# Compared to the original StyleGAN implementation, we extend support for automatic training resumption
# and network recompilation.
<import_stmt>copy<import_stmt>inspect<import_stmt>os<import_stmt>pathlib<import_stmt>pickle<import_stmt>platform<import_stmt>pprint<import_stmt>re<import_stmt>shutil<import_stmt>sys<import_stmt>time<import_stmt>traceback<import_from_stmt>enum Enum<import_from_stmt>.. util<import_from_stmt>..util EasyDict<import_from_stmt>. internal<class_stmt>SubmitTarget(Enum)# The target where the function should be run
# LOCAL: Run it locally
<block_start>LOCAL=1<block_end><class_stmt>PathType(Enum)# Determines in which format should a path be formatted
# WINDOWS: Format with Windows style
# LINUX: Format with Linux/Posix style
# AUTO: Use current OS type to select either WINDOWS or LINUX
<block_start>WINDOWS=1<line_sep>LINUX=2<line_sep>AUTO=3<block_end><class_stmt>PlatformExtras# A mixed bag of values used by dnnlib heuristics
# Attributes:
# data_reader_buffer_size: Used by DataReader to size internal shared memory buffers
# data_reader_process_count: Number of worker processes to spawn (zero for single
# thread operation)
<block_start><def_stmt>__init__ self<block_start>self.data_reader_buffer_size=1<lshift>30# 1 GB
self.data_reader_process_count=0<block_end># single threaded default
<block_end>_user_name_override=<none><class_stmt>SubmitConfig(util.EasyDict)# Strongly typed config dict needed to submit runs
# Attributes:
# run_dir_root: Path to the run dir root. Can be optionally templated with tags
# Needs to always be run through get_path_from_template
# run_desc: Description of the run. Will be used in the run dir and task name
# run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir
# run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will
# be the src directory inside the run dir
# submit_target: Submit target enum value. Used to select where the run is actually launched
# num_gpus: Number of GPUs used/requested for the run
# print_info: Whether to print debug information when submitting
# local.do_not_copy_source_files: Do not copy source files from the working directory to the
# run dir.
# run_id: Automatically populated value during submit
# run_name: Automatically populated value during submit
# run_dir: Automatically populated value during submit
# run_func_name: Automatically populated value during submit
# run_func_kwargs: Automatically populated value during submit
# user_name: Automatically populated value during submit. Can be set by the user which will then
# override the automatic value
# task_name: Automatically populated value during submit
# host_name: Automatically populated value during submit
# platform_extras: Automatically populated values during submit. Used by various dnnlib libraries
# such as the DataReader class
<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep># run (set these)
self.run_dir_root=""# should always be passed through get_path_from_template
self.run_desc=""<line_sep>self.run_dir_ignore=["__pycache__" "*.pyproj" "*.sln" "*.suo" ".cache" ".idea" ".vs" ".vscode" "_cudacache"]<line_sep>self.run_dir_extra_files=[]<line_sep># submit (set these)
self.submit_target=SubmitTarget.LOCAL<line_sep>self.num_gpus=1<line_sep>self.print_info=<false><line_sep>self.nvprof=<false><line_sep>self.local=internal.local.TargetOptions()<line_sep>self.datasets=[]<line_sep># (automatically populated)
self.run_id=<none><line_sep>self.run_name=<none><line_sep>self.run_dir=<none><line_sep>self.run_func_name=<none><line_sep>self.run_func_kwargs=<none><line_sep>self.user_name=<none><line_sep>self.task_name=<none><line_sep>self.host_name="localhost"<line_sep>self.platform_extras=PlatformExtras()<block_end><block_end><def_stmt>get_path_from_template path_template:str path_type:PathType=PathType.AUTO<arrow>str# Replace tags in the given path template and return either Windows or Linux formatted path
# automatically select path type depending on running OS
<block_start><if_stmt>path_type<eq>PathType.AUTO<block_start><if_stmt>platform.system()<eq>"Windows"<block_start>path_type=PathType.WINDOWS<block_end><elif_stmt>platform.system()<eq>"Linux"<block_start>path_type=PathType.LINUX<block_end><else_stmt><block_start><raise>RuntimeError("Unknown platform")<block_end><block_end>path_template=path_template.replace("<USERNAME>" get_user_name())<line_sep># return correctly formatted path
<if_stmt>path_type<eq>PathType.WINDOWS<block_start><return>str(pathlib.PureWindowsPath(path_template))<block_end><elif_stmt>path_type<eq>PathType.LINUX<block_start><return>str(pathlib.PurePosixPath(path_template))<block_end><else_stmt><block_start><raise>RuntimeError("Unknown platform")<block_end><block_end><def_stmt>get_template_from_path path:str<arrow>str# Convert a normal path back to its template representation
<block_start>path=path.replace("\\" "/")<line_sep><return>path<block_end><def_stmt>convert_path path:str path_type:PathType=PathType.AUTO<arrow>str# Convert a normal path to template and the convert it back to a normal path with given path type
<block_start>path_template=get_template_from_path(path)<line_sep>path=get_path_from_template(path_template path_type)<line_sep><return>path<block_end><def_stmt>set_user_name_override name:str<arrow><none># Set the global username override value
<block_start><global>_user_name_override<line_sep>_user_name_override=name<block_end><def_stmt>get_user_name # Get the current user name
<block_start><if_stmt>_user_name_override<is><not><none><block_start><return>_user_name_override<block_end><elif_stmt>platform.system()<eq>"Windows"<block_start><return>os.getlogin()<block_end><elif_stmt>platform.system()<eq>"Linux"<block_start><try_stmt><block_start><import_stmt>pwd<line_sep><return>pwd.getpwuid(os.geteuid()).pw_name<block_end><except_stmt><block_start><return>"unknown"<block_end><block_end><else_stmt><block_start><raise>RuntimeError("Unknown platform")<block_end><block_end><def_stmt>make_run_dir_path *paths# Make a path/filename that resides under the current submit run_dir
# Args:
# *paths: Path components to be passed to os.path.join
# Returns:
# A file/dirname rooted at submit_config.run_dir. If there's no
# submit_config or run_dir, the base directory is the current
# working directory.
# E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt")`
<block_start><import_stmt>dnnlib<if_stmt>(dnnlib.submit_config<is><none>)<or>(dnnlib.submit_config.run_dir<is><none>)<block_start><return>os.path.join(os.getcwd() *paths)<block_end><return>os.path.join(dnnlib.submit_config.run_dir *paths)<block_end><def_stmt>_create_run_dir_local submit_config:SubmitConfig resume:bool create_new:str<arrow>str# Create a new run dir with increasing ID number at the start
<block_start>run_dir_root=get_path_from_template(submit_config.run_dir_root PathType.AUTO)<if_stmt><not>os.path.exists(run_dir_root)<block_start>os.makedirs(run_dir_root)<block_end>run_dir=os.path.join(run_dir_root submit_config.run_name)<if_stmt><not>resume<block_start><if_stmt>os.path.exists(run_dir)<and>create_new<block_start><raise>RuntimeError("The run dir already exists! ({0})".format(run_dir))<block_end><if_stmt><not>os.path.exists(run_dir)<block_start>os.makedirs(run_dir)<block_end><block_end><return>run_dir<block_end><def_stmt>_get_next_run_id_local run_dir_root:str<arrow>int# Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id
# Assumes IDs are numbers at the start of the directory names
<block_start>dir_names=[d<for>d os.listdir(run_dir_root)<if>os.path.isdir(os.path.join(run_dir_root d))]<line_sep>r=re.compile("^\\d+")# match one or more digits at the start of the string
run_id=0<for_stmt>dir_name dir_names<block_start>m=r.match(dir_name)<if_stmt>m<is><not><none><block_start>i=int(m.group())<line_sep>run_id=max(run_id i+1)<block_end><block_end><return>run_id<block_end><def_stmt>_populate_run_dir submit_config:SubmitConfig run_dir:str<arrow><none># Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable
<block_start>pickle.dump(submit_config open(os.path.join(run_dir "submit_config.pkl") "wb"))<with_stmt>open(os.path.join(run_dir "submit_config.txt") "w")<as>f<block_start>pprint.pprint(submit_config stream=f indent=4 width=200 compact=<false>)<block_end><if_stmt>(submit_config.submit_target<eq>SubmitTarget.LOCAL)<and>submit_config.local.do_not_copy_source_files<block_start><return><block_end>files=[]<line_sep>run_func_module_dir_path=util.get_module_dir_by_obj_name(submit_config.run_func_name)<assert_stmt>"."<in>submit_config.run_func_name<for_stmt>_idx range(submit_config.run_func_name.count(".")-1)<block_start>run_func_module_dir_path=os.path.dirname(run_func_module_dir_path)<block_end>files<augadd>util.list_dir_recursively_with_ignore(run_func_module_dir_path ignores=submit_config.run_dir_ignore add_base_to_relative=<false>)<line_sep>dnnlib_module_dir_path=util.get_module_dir_by_obj_name("dnnlib")<line_sep>files<augadd>util.list_dir_recursively_with_ignore(dnnlib_module_dir_path ignores=submit_config.run_dir_ignore add_base_to_relative=<true>)<line_sep>files<augadd>submit_config.run_dir_extra_files<line_sep>files=[(f[0] os.path.join(run_dir "src" f[1]))<for>f files]<line_sep>files<augadd>[(os.path.join(dnnlib_module_dir_path "submission" "internal" "run.py") os.path.join(run_dir "run.py"))]<line_sep>util.copy_files_and_create_dirs(files)<block_end><def_stmt>run_wrapper submit_config:SubmitConfig<arrow><none># Wrap the actual run function call for handling logging, exceptions, typing, etc
<block_start>is_local=submit_config.submit_target<eq>SubmitTarget.LOCAL<line_sep># when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
<if_stmt>is_local<block_start>logger=util.Logger(file_name=os.path.join(submit_config.run_dir "log.txt") file_mode="a" should_flush=<true>)<block_end><else_stmt># when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
<block_start>logger=util.Logger(file_name=<none> should_flush=<true>)<block_end><import_stmt>dnnlib<line_sep>dnnlib.submit_config=submit_config<line_sep>exit_with_errcode=<false><try_stmt><block_start>print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name submit_config.host_name))<line_sep>start_time=time.time()<line_sep>run_func_obj=util.get_obj_by_name(submit_config.run_func_name)<assert_stmt>callable(run_func_obj)<line_sep>sig=inspect.signature(run_func_obj)<if_stmt>"submit_config"<in>sig.parameters<block_start>run_func_obj(submit_config=submit_config **submit_config.run_func_kwargs)<block_end><else_stmt><block_start>run_func_obj(**submit_config.run_func_kwargs)<block_end>print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name util.format_time(time.time()-start_time)))<block_end><except_stmt><block_start><if_stmt>is_local<block_start><raise><block_end><else_stmt><block_start>traceback.print_exc()<line_sep>log_src=os.path.join(submit_config.run_dir "log.txt")<line_sep>log_dst=os.path.join(get_path_from_template(submit_config.run_dir_root) "{0}-error.txt".format(submit_config.run_name))<line_sep>shutil.copyfile(log_src log_dst)<line_sep># Defer sys.exit(1) to happen after we close the logs and create a _finished.txt
exit_with_errcode=<true><block_end><block_end><finally_stmt><block_start>open(os.path.join(submit_config.run_dir "_finished.txt") "w").close()<block_end>dnnlib.RunContext.get().close()<line_sep>dnnlib.submit_config=<none><line_sep>logger.close()<line_sep># If we hit an error, get out of the script now and signal the error
# to whatever process that started this script.
<if_stmt>exit_with_errcode<block_start>sys.exit(1)<block_end><return>submit_config<block_end><def_stmt>open_file_or_url file_or_url<block_start><if_stmt>util.is_url(file_or_url)<block_start><return>util.open_url(file_or_url cache_dir=".stylegan2-cache")<block_end><return>open(file_or_url "rb")<block_end><def_stmt>load_pkl file_or_url<block_start><with_stmt>open_file_or_url(file_or_url)<as>file<block_start><return>pickle.load(file encoding="latin1")<block_end><block_end><def_stmt>submit_run submit_config:SubmitConfig run_func_name:str create_newdir:bool=<false> resume:bool=<false> load_config:bool=<false> **run_func_kwargs<arrow><none># Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.
# create_newdir: enforces the creation of a new run directory
# resume: resumes a prior experiment using its existing run directory
# load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters
<block_start>submit_config=copy.deepcopy(submit_config)<line_sep>submit_target=submit_config.submit_target<line_sep>farm=<none><if_stmt>submit_target<eq>SubmitTarget.LOCAL<block_start>farm=internal.local.Target()<block_end><assert_stmt>farm<is><not><none># unknown target
# Disallow submitting jobs with zero num_gpus
<if_stmt>(submit_config.num_gpus<is><none>)<or>(submit_config.num_gpus<eq>0)<block_start><raise>RuntimeError("submit_config.num_gpus must be set to a non-zero value")<block_end><if_stmt>submit_config.user_name<is><none><block_start>submit_config.user_name=get_user_name()<block_end>submit_config.run_func_name=run_func_name<line_sep>submit_config.run_func_kwargs=run_func_kwargs<line_sep>#--------------------------------------------------------------------
# Prepare submission by populating the run dir
#--------------------------------------------------------------------
host_run_dir=_create_run_dir_local(submit_config resume create_new=create_newdir)<line_sep>submit_config.task_name="{}-{:05d}-{}".format(submit_config.user_name submit_config.run_id submit_config.run_desc)<line_sep>docker_valid_name_regex="^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"<if_stmt><not>re.match(docker_valid_name_regex submit_config.task_name)<block_start><raise>RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: "+docker_valid_name_regex+", got "+submit_config.task_name)<block_end># Farm specific preparations for a submit
farm.finalize_submit_config(submit_config host_run_dir)<line_sep># In case of resumption, load_config = True to load the prior submit_config file from the directory
# (so that the original configuration of the experiment is kept rather than the newly provided
# command-line arguments).
<if_stmt>load_config<block_start>config_file=os.path.join(host_run_dir "submit_config.pkl")<if_stmt>os.path.exists(config_file)<block_start>old_submit_config=submit_config<line_sep>submit_config=load_pkl(config_file)<line_sep>submit_config["run_id"]=old_submit_config["run_id"]<line_sep>submit_config["run_name"]=old_submit_config["run_name"]<if_stmt>"resume_pkl"<in>old_submit_config["run_func_kwargs"]<block_start>submit_config["run_func_kwargs"]["resume_pkl"]=old_submit_config["run_func_kwargs"]["resume_pkl"]<line_sep>submit_config["run_func_kwargs"]["resume_kimg"]=old_submit_config["run_func_kwargs"]["resume_kimg"]<block_end><block_end><block_end>_populate_run_dir(submit_config host_run_dir)<line_sep><return>farm.submit(submit_config host_run_dir)<block_end> |
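A minimal usage sketch (not part of the dump above) of how the SubmitConfig attributes documented in that file are typically filled in before calling submit_run. The import path, the run function name "training.loop", and its keyword arguments are assumptions used only to illustrate the call shape.

# Hedged sketch in plain Python; assumes this file lives at dnnlib/submission/submit.py
# as in StyleGAN-style repos (an assumption, not confirmed by the dump).
from dnnlib.submission.submit import SubmitConfig, SubmitTarget, submit_run

submit_config = SubmitConfig()
submit_config.run_dir_root = "results"           # later passed through get_path_from_template
submit_config.run_desc = "example-run"           # becomes part of the run dir / task name
submit_config.submit_target = SubmitTarget.LOCAL
submit_config.num_gpus = 1
# This variant formats task_name from run_id and builds the run dir from run_name,
# so the caller apparently chooses both; the values below are placeholders.
submit_config.run_id = 0
submit_config.run_name = "00000-example-run"

# run_func_name is a dotted path resolved via util.get_obj_by_name; extra kwargs are
# forwarded to that function by run_wrapper. "training.loop" is a hypothetical target.
submit_run(submit_config, "training.loop", create_newdir=True, total_kimg=25000)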
df8.cbind(df9)<line_sep># A B C D A0 B0 C0 D0
# ----- ------ ------ ------ ------ ----- ----- -----
# -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86
# -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27
# 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25
# 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63
# 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52
# 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09
# 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63
# 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42
# -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45
# 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05
#
# [100 rows x 8 columns]
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-IPxlatCfg
GUID : 3e5ac668-af52-4c15-b99b-a3e7a6616ebd
"""<import_from_stmt>construct Int8sl Int8ul Int16ul Int16sl Int32sl Int32ul Int64sl Int64ul Bytes Double Float32l Struct<import_from_stmt>etl.utils WString CString SystemTime Guid<import_from_stmt>etl.dtyp Sid<import_from_stmt>etl.parsers.etw.core Etw declare guid<line_sep>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1001 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1001_0(Etw)<block_start>pattern=Struct("ErrorString"/CString "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1002 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1002_0(Etw)<block_start>pattern=Struct("ErrorString"/CString "ErrorCode"/Int32ul "InterfaceLuid"/Int64ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1003 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1003_0(Etw)<block_start>pattern=Struct("InfoString"/CString)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1005 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1005_0(Etw)<block_start>pattern=Struct("IPv4Address"/Int32ul "IPv4Prefix"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1006 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1006_0(Etw)<block_start>pattern=Struct("InfoString"/CString "InterfaceLuid"/Int64ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1007 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1007_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul "PrefixLength"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1008 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1008_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul "IPv4Address"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1009 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1009_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1010 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1010_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1011 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1011_0(Etw)<block_start>pattern=Struct("InfoString"/CString "MTU"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1101 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1101_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul "Metric"/Int32ul "RemotePrefixLength"/Int32ul "LocalPrefixLength"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1102 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1102_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul "Metric"/Int32ul "RemotePrefixLength"/Int32ul "LocalPrefixLength"/Int32ul)<block_end>@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd") event_id=1103 version=0)<class_stmt>Microsoft_Windows_IPxlatCfg_1103_0(Etw)<block_start>pattern=Struct("InterfaceLuid"/Int64ul "PrefixLength"/Int32ul)<block_end> |
# CHECK-TREE: { const <- \x -> \y -> x; y <- const #true #true; z <- const #false #false; #record { const: const, y : y, z: z, }}
const=<lambda>x y:x<line_sep>y=const(<true> <true>)<line_sep>z=const(<false> <false>)<line_sep> |
<import_from_stmt>distutils.core setup Extension<line_sep>setup(name='qconf_py' version='1.2.2' ext_modules=[Extension('qconf_py' ['lib/python_qconf.cc'] include_dirs=['/usr/local/include/qconf'] extra_objects=['/usr/local/qconf/lib/libqconf.a'])])<line_sep> |
# log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM
"""Example to continuously read the battery (with no Wifi connection)"""<import_stmt>csv<import_stmt>time<import_stmt>logging<import_stmt>argparse<import_stmt>threading<import_from_stmt>pathlib Path<import_from_stmt>datetime datetime<import_from_stmt>dataclasses dataclass<import_from_stmt>typing Optional Tuple Literal List<import_from_stmt>rich.console Console<import_from_stmt>open_gopro GoPro<import_from_stmt>open_gopro.constants StatusId<import_from_stmt>open_gopro.util setup_logging set_logging_level<line_sep>logger=logging.getLogger(__name__)<line_sep>console=Console()# rich consoler printer
BarsType=Literal[0 1 2 3]<line_sep>@dataclass<class_stmt>Sample<block_start>"""Simple class to store battery samples"""<line_sep>index:int<line_sep>percentage:int<line_sep>bars:BarsType<def_stmt>__post_init__ self<arrow><none><block_start>self.time=datetime.now()<block_end><def_stmt>__str__ self<arrow>str# pylint: disable=missing-return-doc
<block_start><return>f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}"<block_end><block_end>SAMPLE_INDEX=0<line_sep>SAMPLES:List[Sample]=[]<def_stmt>dump_results_as_csv location:Path<arrow><none><block_start>"""Write all of the samples to a csv file
Args:
location (Path): File to write to
"""<line_sep>console.print(f"Dumping results as CSV to {location}")<with_stmt>open(location mode="w")<as>f<block_start>w=csv.writer(f delimiter="," quotechar='"' quoting=csv.QUOTE_MINIMAL)<line_sep>w.writerow(["index" "time" "percentage" "bars"])<line_sep>initial_time=SAMPLES[0].time<for_stmt>s SAMPLES<block_start>w.writerow([s.index (s.time-initial_time).seconds s.percentage s.bars])<block_end><block_end><block_end><def_stmt>process_battery_notifications gopro:GoPro initial_bars:BarsType initial_percentage:int<arrow><none><block_start>"""Separate thread to continuously check for and store battery notifications.
If the CLI parameter was set to poll, this isn't used.
Args:
gopro (GoPro): instance to get updates from
initial_bars (BarsType): Initial bars level when notifications were enabled
initial_percentage (int): Initial percentage when notifications were enabled
"""<line_sep>last_percentage=initial_percentage<line_sep>last_bars=initial_bars<while_stmt><true># Block until we receive an update
<block_start>notification=gopro.get_update()<line_sep># Update data points if they have changed
last_percentage=(notification.data[StatusId.INT_BATT_PER]<if>StatusId.INT_BATT_PER<in>notification.data<else>last_percentage)<line_sep>last_bars=(notification.data[StatusId.BATT_LEVEL]<if>StatusId.BATT_LEVEL<in>notification.data<else>last_bars)<line_sep># Append and print sample
<global>SAMPLE_INDEX<line_sep>SAMPLES.append(Sample(index=SAMPLE_INDEX percentage=last_percentage bars=last_bars))<line_sep>console.print(str(SAMPLES[-1]))<line_sep>SAMPLE_INDEX<augadd>1<block_end><block_end><def_stmt>main <arrow>int<block_start>"""Main program functionality
Returns:
int: program return code
"""<line_sep>identifier,log_location,poll=parse_arguments()<line_sep><global>logger<line_sep>logger=setup_logging(logger log_location)<line_sep><global>SAMPLE_INDEX<line_sep>gopro:Optional[GoPro]=<none><line_sep>return_code=0<try_stmt><block_start><with_stmt>GoPro(identifier enable_wifi=<false>)<as>gopro<block_start>set_logging_level(logger logging.ERROR)<line_sep># # Setup notifications if we are not polling
<if_stmt>poll<is><none><block_start>console.print("Configuring battery notifications...")<line_sep># Enable notifications of the relevant battery statuses. Also store initial values.
bars=gopro.ble_status.batt_level.register_value_update().flatten<line_sep>percentage=gopro.ble_status.int_batt_per.register_value_update().flatten<line_sep># Start a thread to handle asynchronous battery level notifications
threading.Thread(target=process_battery_notifications args=(gopro bars percentage) daemon=<true>).start()<with_stmt>console.status("[bold green]Receiving battery notifications until it dies...")# Sleep forever, allowing notification handler thread to deal with battery level notifications
<block_start><while_stmt><true><block_start>time.sleep(1)<block_end><block_end><block_end># Otherwise, poll
<else_stmt><block_start><with_stmt>console.status("[bold green]Polling the battery until it dies...")<block_start><while_stmt><true><block_start>SAMPLES.append(Sample(index=SAMPLE_INDEX percentage=gopro.ble_status.int_batt_per.get_value().flatten bars=gopro.ble_status.batt_level.get_value().flatten ))<line_sep>console.print(str(SAMPLES[-1]))<line_sep>SAMPLE_INDEX<augadd>1<line_sep>time.sleep(poll)<block_end><block_end><block_end><block_end><block_end><except_stmt>Exception<as>e# pylint: disable=broad-except
<block_start>logger.error(repr(e))<line_sep>return_code=1<block_end><except_stmt>KeyboardInterrupt<block_start>logger.warning("Received keyboard interrupt. Shutting down...")<block_end><finally_stmt><block_start><if_stmt>len(SAMPLES)<g>0<block_start>csv_location=Path(log_location.parent)/"battery_results.csv"<line_sep>dump_results_as_csv(csv_location)<block_end><if_stmt>gopro<is><not><none><block_start>gopro.close()<block_end>console.print("Exiting...")<line_sep><return>return_code<block_end><block_end># pylint: disable=lost-exception
<def_stmt>parse_arguments <arrow>Tuple[str Path Optional[int]]<block_start>"""Parse command line arguments
Returns:
Tuple[str, Path, Optional[int]]: (identifier, path to save log, optional polling interval in seconds)
"""<line_sep>parser=argparse.ArgumentParser(description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications).")<line_sep>parser.add_argument("-i" "--identifier" type=str help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
If not used, first discovered GoPro will be connected to" default=<none> )<line_sep>parser.add_argument("-l" "--log" type=Path help="Location to store detailed log" default="log_battery.log" )<line_sep>parser.add_argument("-p" "--poll" type=int help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications." default=<none> )<line_sep>args=parser.parse_args()<line_sep><return>args.identifier args.log args.poll<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
"""
neo.io has been split into a 2-level API:
* neo.io: this API gives neo objects
* neo.rawio: this API gives raw data as they are in files.
Developers are encouraged to use neo.rawio.
When this is done, the neo.io class is built automagically with
this kind of code, as follows.
Author: sgarcia
"""<import_from_stmt>neo.io.basefromrawio BaseFromRaw<import_from_stmt>neo.rawio.examplerawio ExampleRawIO<class_stmt>ExampleIO(ExampleRawIO BaseFromRaw)<block_start>name='example IO'<line_sep>description="Fake IO"<line_sep># This is an inportant choice when there are several channels.
# 'split-all' : one AnalogSignal per channel
# 'group-by-same-units' : one 2D AnalogSignal for each group of channels with the same units
_prefered_signal_group_mode='group-by-same-units'<def_stmt>__init__ self filename=''<block_start>ExampleRawIO.__init__(self filename=filename)<line_sep>BaseFromRaw.__init__(self filename)<block_end><block_end> |
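A brief usage sketch of the two-level API described in the docstring above: ExampleIO yields neo objects while the wrapped ExampleRawIO serves raw data. The filename is a placeholder (ExampleRawIO fabricates fake data), and the import path assumes the standard neo package layout.

# Hedged sketch, assuming neo is installed and exposes ExampleIO at this path.
from neo.io.exampleio import ExampleIO

reader = ExampleIO(filename="myfile.fake")   # placeholder name; data is generated, not read
block = reader.read_block(lazy=False)        # neo.io level: returns a neo Block
for seg in block.segments:
    # signals are grouped per '_prefered_signal_group_mode' ('group-by-same-units')
    print(seg.analogsignals)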
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('scrapyproject' '0002_auto_20170208_1738') ]<line_sep>operations=[migrations.AlterField(model_name='project' name='link_generator' field=models.TextField(blank=<true>) ) migrations.AlterField(model_name='project' name='scraper_function' field=models.TextField(blank=<true>) ) migrations.AlterField(model_name='project' name='settings' field=models.TextField(blank=<true>) ) ]<block_end> |
<import_stmt>re<import_stmt>discord<import_from_stmt>redbot.core commands<class_stmt>Covfefe(commands.Cog)<block_start>"""
Convert almost any word into covfefe
"""<def_stmt>__init__ self bot<block_start>self.bot=bot<block_end><async_keyword><def_stmt>covfefe self x k="aeiouy])"<block_start>"""
https://codegolf.stackexchange.com/a/123697
"""<try_stmt><block_start>b,c,v=re.findall(f"(.*?[{k}([^{k}.*?([{k}" x)[0]<line_sep><return>b+c+(("bcdfgkpstvz"+c)["pgtvkgbzdfs".find(c)]+v)<times>2<block_end><except_stmt>IndexError<block_start><return><none><block_end><block_end><async_keyword><def_stmt>red_delete_data_for_user self **kwargs<block_start>"""
Nothing to delete
"""<line_sep><return><block_end>@commands.command()<async_keyword><def_stmt>covefy self ctx msg<block_start>"""Convert almost any word into covfefe"""<line_sep>newword=<await>self.covfefe(msg)<if_stmt>newword<is><not><none><block_start><await>ctx.send(newword)<block_end><else_stmt><block_start><await>ctx.send("I cannot covfefeify that word")<block_end><block_end><block_end> |
<import_stmt>numpy<as>np<import_from_stmt>gym.spaces Box<import_from_stmt>metaworld.envs reward_utils<import_from_stmt>metaworld.envs.asset_path_utils full_v2_path_for<import_from_stmt>metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env SawyerXYZEnv _assert_task_is_set<class_stmt>SawyerDialTurnEnvV2(SawyerXYZEnv)<block_start>TARGET_RADIUS=0.07<def_stmt>__init__ self<block_start>hand_low=(-0.5 0.40 0.05)<line_sep>hand_high=(0.5 1 0.5)<line_sep>obj_low=(-0.1 0.7 0.0)<line_sep>obj_high=(0.1 0.8 0.0)<line_sep>goal_low=(-0.1 0.73 0.0299)<line_sep>goal_high=(0.1 0.83 0.0301)<line_sep>super().__init__(self.model_name hand_low=hand_low hand_high=hand_high )<line_sep>self.init_config={'obj_init_pos':np.array([0 0.7 0.0]) 'hand_init_pos':np.array([0 0.6 0.2] dtype=np.float32) }<line_sep>self.goal=np.array([0. 0.73 0.08])<line_sep>self.obj_init_pos=self.init_config['obj_init_pos']<line_sep>self.hand_init_pos=self.init_config['hand_init_pos']<line_sep>self._random_reset_space=Box(np.array(obj_low) np.array(obj_high) )<line_sep>self.goal_space=Box(np.array(goal_low) np.array(goal_high))<block_end>@property<def_stmt>model_name self<block_start><return>full_v2_path_for('sawyer_xyz/sawyer_dial.xml')<block_end>@_assert_task_is_set<def_stmt>evaluate_state self obs action<block_start>(reward tcp_to_obj _ target_to_obj object_grasped in_place)=self.compute_reward(action obs)<line_sep>info={'success':float(target_to_obj<le>self.TARGET_RADIUS) 'near_object':float(tcp_to_obj<le>0.01) 'grasp_success':1. 'grasp_reward':object_grasped 'in_place_reward':in_place 'obj_to_target':target_to_obj 'unscaled_reward':reward }<line_sep><return>reward info<block_end><def_stmt>_get_pos_objects self<block_start>dial_center=self.get_body_com('dial').copy()<line_sep>dial_angle_rad=self.data.get_joint_qpos('knob_Joint_1')<line_sep>offset=np.array([np.sin(dial_angle_rad) -np.cos(dial_angle_rad) 0])<line_sep>dial_radius=0.05<line_sep>offset<augmul>dial_radius<line_sep><return>dial_center+offset<block_end><def_stmt>_get_quat_objects self<block_start><return>self.sim.data.get_body_xquat('dial')<block_end><def_stmt>reset_model self<block_start>self._reset_hand()<line_sep>self._target_pos=self.goal.copy()<line_sep>self.obj_init_pos=self.init_config['obj_init_pos']<line_sep>self.prev_obs=self._get_curr_obs_combined_no_goal()<if_stmt>self.random_init<block_start>goal_pos=self._get_state_rand_vec()<line_sep>self.obj_init_pos=goal_pos[:3]<line_sep>final_pos=goal_pos.copy()+np.array([0 0.03 0.03])<line_sep>self._target_pos=final_pos<block_end>self.sim.model.body_pos[self.model.body_name2id('dial')]=self.obj_init_pos<line_sep>self.dial_push_position=self._get_pos_objects()+np.array([0.05 0.02 0.09])<line_sep><return>self._get_obs()<block_end><def_stmt>compute_reward self action obs<block_start>obj=self._get_pos_objects()<line_sep>dial_push_position=self._get_pos_objects()+np.array([0.05 0.02 0.09])<line_sep>tcp=self.tcp_center<line_sep>target=self._target_pos.copy()<line_sep>target_to_obj=(obj-target)<line_sep>target_to_obj=np.linalg.norm(target_to_obj)<line_sep>target_to_obj_init=(self.dial_push_position-target)<line_sep>target_to_obj_init=np.linalg.norm(target_to_obj_init)<line_sep>in_place=reward_utils.tolerance(target_to_obj bounds=(0 self.TARGET_RADIUS) margin=abs(target_to_obj_init-self.TARGET_RADIUS) sigmoid='long_tail' )<line_sep>dial_reach_radius=0.005<line_sep>tcp_to_obj=np.linalg.norm(dial_push_position-tcp)<line_sep>tcp_to_obj_init=np.linalg.norm(self.dial_push_position-self.init_tcp)<line_sep>reach=reward_utils.tolerance(tcp_to_obj 
bounds=(0 dial_reach_radius) margin=abs(tcp_to_obj_init-dial_reach_radius) sigmoid='gaussian' )<line_sep>gripper_closed=min(max(0 action[-1]) 1)<line_sep>reach=reward_utils.hamacher_product(reach gripper_closed)<line_sep>tcp_opened=0<line_sep>object_grasped=reach<line_sep>reward=10<times>reward_utils.hamacher_product(reach in_place)<line_sep><return>(reward tcp_to_obj tcp_opened target_to_obj object_grasped in_place)<block_end><block_end> |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
<import_stmt>numpy<as>np<import_from_stmt>openvino.tools.mo.front.common.partial_infer.utils dynamic_dimension_value shape_array set_input_shapes<import_from_stmt>openvino.tools.mo.ops.op Op<class_stmt>ExperimentalDetectronDetectionOutput(Op)<block_start>op='ExperimentalDetectronDetectionOutput'<line_sep>enabled=<true><def_stmt>__init__ self graph attrs<block_start>mandatory_props=dict(type=self.op op=self.op version='opset6' infer=self.infer reverse_infer=self.reverse_infer type_infer=self.type_infer in_ports_count=4 out_ports_count=3 )<line_sep>super().__init__(graph mandatory_props attrs)<block_end><def_stmt>backend_attrs self<block_start><return>[('class_agnostic_box_regression' <lambda>node:str(bool(node['class_agnostic_box_regression'])).lower()) 'max_detections_per_image' 'nms_threshold' 'num_classes' 'post_nms_count' 'score_threshold' 'max_delta_log_wh' ('deltas_weights' <lambda>node:','.join(map(str node['deltas_weights'])))]<block_end>@staticmethod<def_stmt>infer node<block_start>rois_num=node.max_detections_per_image<line_sep># boxes
node.out_port(0).data.set_shape([rois_num 4])<line_sep># classes, scores, batch indices
# We use range(1, 1 + max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly
# generated models where ExperimentalDetectronDetectionOutput has 4 outputs.
<for_stmt>port_ind range(1 1+max(node.out_ports().keys()))<block_start><if_stmt><not>node.out_port(port_ind).disconnected()<block_start>node.out_port(port_ind).data.set_shape([rois_num])<block_end><block_end><block_end>@staticmethod<def_stmt>type_infer node<block_start>in_data_type=node.in_port(0).get_data_type()<line_sep>node.out_port(0).set_data_type(in_data_type)<line_sep>node.out_port(1).set_data_type(np.int32)# the second output contains class indices
node.out_port(2).set_data_type(in_data_type)<if_stmt>node.is_out_port_connected(3)<block_start>node.out_port(3).set_data_type(np.int32)<block_end><block_end># the fourth output contains batch indices
@staticmethod<def_stmt>reverse_infer node<block_start>set_input_shapes(node shape_array([dynamic_dimension_value 4]) shape_array([dynamic_dimension_value node['num_classes']<times>4]) shape_array([dynamic_dimension_value node['num_classes']]) shape_array([1 3]))<block_end><block_end> |
<import_from_stmt>.torch2onnx torch2onnx<import_from_stmt>.onnx2trt onnx2trt<import_from_stmt>.torch2trt torch2trt<import_from_stmt>.base load save<line_sep> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("LIKELIHOODPDFDBREADER")<line_sep># process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source=cms.Source("EmptySource" numberEventsInRun=cms.untracked.uint32(1) firstRun=cms.untracked.uint32(1))<line_sep>process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep>process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")<line_sep>process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")<line_sep>process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")<line_sep>process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")<line_sep>process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")<line_sep># process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.poolDBESSource=cms.ESSource("PoolDBESSource" BlobStreamerName=cms.untracked.string('TBufferBlobStreamingService') DBParameters=cms.PSet(messageLevel=cms.untracked.int32(2) authenticationPath=cms.untracked.string('/afs/cern.ch/cms/DB/conddb')) timetype=cms.untracked.string('runnumber') connect=cms.string('sqlite_file:dummy2.db') toGet=cms.VPSet(cms.PSet(record=cms.string('MuScleFitLikelihoodPdfRcd') tag=cms.string('MuScleFitLikelihoodPdf_2_1_12'))))<line_sep>process.LikelihoodPdfDBReaderModule=cms.EDAnalyzer("LikelihoodPdfDBReader")<line_sep>process.p1=cms.Path(process.LikelihoodPdfDBReaderModule)<line_sep> |
<import_stmt>torch.utils.data<as>data<import_from_stmt>PIL Image<import_stmt>os<import_stmt>os.path<import_stmt>numpy<as>np<import_stmt>pdb<import_stmt>glob<line_sep>IMG_EXTENSIONS=['.jpg' '.JPG' '.jpeg' '.JPEG' '.png' '.PNG' '.ppm' '.PPM' '.bmp' '.BMP' ]<def_stmt>is_image_file filename<block_start><return>any(filename.endswith(extension)<for>extension IMG_EXTENSIONS)<block_end><def_stmt>dataloader filepath<block_start>left_fold='image_2/'<line_sep>train=glob.glob(filepath+left_fold+'/0*.jpg')<line_sep>train=sorted(train)<line_sep>l0_train=[]<line_sep>l1_train=[]<line_sep>flow_train=[]<for_stmt>img train<block_start>img1=('%s_%s.jpg'%(img.rsplit('_' 1)[0] '%05d'%(1+int(img.split('.')[0].split('_')[-1]))))<line_sep>flowp=img.replace('.jpg' '.png').replace('image_2' 'flow_occ')<if_stmt>(img1<in>train<and>len(glob.glob(flowp))<g>0<and>('01000'<not><in>img))<block_start>l0_train.append(img)<line_sep>l1_train.append(img1)<line_sep>flow_train.append(flowp)<block_end><block_end><return>l0_train l1_train flow_train<block_end> |
<import_stmt>re<import_from_stmt>curtsies.formatstring fmtstr FmtStr<import_from_stmt>curtsies.termformatconstants FG_COLORS BG_COLORS colors<as>CURTSIES_COLORS <import_from_stmt>functools partial<import_from_stmt>..lazyre LazyReCompile<line_sep>COLORS=CURTSIES_COLORS+("default" )<line_sep>CNAMES=dict(zip("krgybmcwd" COLORS))<line_sep># hack for finding the "inverse"
INVERSE_COLORS={CURTSIES_COLORS[idx]:CURTSIES_COLORS[(idx+(len(CURTSIES_COLORS)<floordiv>2))%len(CURTSIES_COLORS)]<for>idx range(len(CURTSIES_COLORS))}<line_sep>INVERSE_COLORS["default"]=INVERSE_COLORS[CURTSIES_COLORS[0]]<def_stmt>func_for_letter letter_color_code:str default:str="k"<block_start>"""Returns FmtStr constructor for a bpython-style color code"""<if_stmt>letter_color_code<eq>"d"<block_start>letter_color_code=default<block_end><elif_stmt>letter_color_code<eq>"D"<block_start>letter_color_code=default.upper()<block_end><return>partial(fmtstr fg=CNAMES[letter_color_code.lower()] bold=letter_color_code.isupper() )<block_end><def_stmt>color_for_letter letter_color_code:str default:str="k"<block_start><if_stmt>letter_color_code<eq>"d"<block_start>letter_color_code=default<block_end><return>CNAMES[letter_color_code.lower()]<block_end><def_stmt>parse s<block_start>"""Returns a FmtStr object from a bpython-formatted colored string"""<line_sep>rest=s<line_sep>stuff=[]<while_stmt><true><block_start><if_stmt><not>rest<block_start><break><block_end>start,rest=peel_off_string(rest)<line_sep>stuff.append(start)<block_end><return>(sum((fs_from_match(d)<for>d stuff[1:]) fs_from_match(stuff[0]))<if>len(stuff)<g>0<else>FmtStr())<block_end><def_stmt>fs_from_match d<block_start>atts={}<if_stmt>d["fg"]# this isn't according to spec as I understand it
<block_start><if_stmt>d["fg"].isupper()<block_start>d["bold"]=<true><block_end># TODO figure out why boldness isn't based on presence of \x02
color=CNAMES[d["fg"].lower()]<if_stmt>color<ne>"default"<block_start>atts["fg"]=FG_COLORS[color]<block_end><block_end><if_stmt>d["bg"]<block_start><if_stmt>d["bg"]<eq>"I"# hack for finding the "inverse"
<block_start>color=INVERSE_COLORS[color]<block_end><else_stmt><block_start>color=CNAMES[d["bg"].lower()]<block_end><if_stmt>color<ne>"default"<block_start>atts["bg"]=BG_COLORS[color]<block_end><block_end><if_stmt>d["bold"]<block_start>atts["bold"]=<true><block_end><return>fmtstr(d["string"] **atts)<block_end>peel_off_string_re=LazyReCompile(r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""" re.VERBOSE|re.DOTALL )<def_stmt>peel_off_string s<block_start>m=peel_off_string_re.match(s)<assert_stmt>m repr(s)<line_sep>d=m.groupdict()<line_sep>rest=d["rest"]<del_stmt>d["rest"]<line_sep><return>d rest<block_end> |
<import_from_stmt>..imports *<import_from_stmt>.. utils<as>U<import_from_stmt>..core GenLearner<class_stmt>NodeClassLearner(GenLearner)<block_start>"""
```
Main class used to tune and train Keras models for node classification
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
train_data (Iterator): a Iterator instance for training set
val_data (Iterator): A Iterator instance for validation set
```
"""<def_stmt>__init__ self model train_data=<none> val_data=<none> batch_size=U.DEFAULT_BS eval_batch_size=U.DEFAULT_BS workers=1 use_multiprocessing=<false><block_start>super().__init__(model train_data=train_data val_data=val_data batch_size=batch_size eval_batch_size=eval_batch_size workers=workers use_multiprocessing=use_multiprocessing)<line_sep><return><block_end><def_stmt>view_top_losses self n=4 preproc=<none> val_data=<none><block_start>"""
```
Views observations with top losses in validation set.
Typically overridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""<line_sep>val=self._check_val(val_data)<line_sep># get top losses and associated data
tups=self.top_losses(n=n val_data=val preproc=preproc)<line_sep># get multilabel status and class names
classes=preproc.get_classes()<if>preproc<is><not><none><else><none><line_sep># iterate through losses
<for_stmt>tup tups# get data
<block_start>idx=tup[0]<line_sep>loss=tup[1]<line_sep>truth=tup[2]<line_sep>pred=tup[3]<line_sep>print('----------')<line_sep>print("id:%s | loss:%s | true:%s | pred:%s)\n"%(idx round(loss 2) truth pred))<line_sep>#print(obs)
<block_end><return><block_end><def_stmt>layer_output self layer_id example_id=0 batch_id=0 use_val=<false><block_start>"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""<line_sep><raise>Exception('currently_unsupported: layer_output method is not yet supported for '+'graph neural networks in ktrain')<block_end><block_end><class_stmt>LinkPredLearner(GenLearner)<block_start>"""
```
Main class used to tune and train Keras models for link prediction
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
train_data (Iterator): a Iterator instance for training set
val_data (Iterator): A Iterator instance for validation set
```
"""<def_stmt>__init__ self model train_data=<none> val_data=<none> batch_size=U.DEFAULT_BS eval_batch_size=U.DEFAULT_BS workers=1 use_multiprocessing=<false><block_start>super().__init__(model train_data=train_data val_data=val_data batch_size=batch_size eval_batch_size=eval_batch_size workers=workers use_multiprocessing=use_multiprocessing)<line_sep><return><block_end><def_stmt>view_top_losses self n=4 preproc=<none> val_data=<none><block_start>"""
```
Views observations with top losses in validation set.
Typically overridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""<line_sep>val=self._check_val(val_data)<line_sep># get top losses and associated data
tups=self.top_losses(n=n val_data=val preproc=preproc)<line_sep># get multilabel status and class names
classes=preproc.get_classes()<if>preproc<is><not><none><else><none><line_sep># iterate through losses
<for_stmt>tup tups# get data
<block_start>idx=tup[0]<line_sep>loss=tup[1]<line_sep>truth=tup[2]<line_sep>pred=tup[3]<line_sep>print('----------')<line_sep>print("id:%s | loss:%s | true:%s | pred:%s)\n"%(idx round(loss 2) truth pred))<line_sep>#print(obs)
<block_end><return><block_end><def_stmt>layer_output self layer_id example_id=0 batch_id=0 use_val=<false><block_start>"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""<line_sep><raise>Exception('currently_unsupported: layer_output method is not yet supported for '+'graph neural networks in ktrain')<block_end><block_end> |
<import_from_stmt>collections defaultdict<import_stmt>json<import_stmt>re<import_stmt>time<import_from_stmt>urllib.parse urlparse<import_stmt>uuid<import_stmt>boto3<import_stmt>boto3.exceptions<import_stmt>botocore.exceptions<import_stmt>markus<import_stmt>redis.exceptions<import_stmt>requests<import_stmt>requests.exceptions<import_from_stmt>sqlalchemy select<import_stmt>sqlalchemy.exc<import_from_stmt>ichnaea.data _map_content_enabled<import_from_stmt>ichnaea.models ApiKey BlueObservation BlueReport BlueShard CellObservation CellReport CellShard DataMap ExportConfig Report WifiObservation WifiReport WifiShard <import_from_stmt>ichnaea.models.content encode_datamap_grid<import_from_stmt>ichnaea util<line_sep>WHITESPACE=re.compile(r"\s" flags=re.UNICODE)<line_sep>METRICS=markus.get_metrics()<class_stmt>IncomingQueue(object)<block_start>"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
It distributes the data into the configured export queues, checks those
queues, and if they contain enough data or old enough data, schedules an async
export task to process the data in each queue.
"""<def_stmt>__init__ self task<block_start>self.task=task<block_end><def_stmt>__call__ self export_task<block_start>redis_client=self.task.redis_client<line_sep>data_queue=self.task.app.data_queues["update_incoming"]<line_sep>data=data_queue.dequeue()<line_sep>grouped=defaultdict(list)<for_stmt>item data<block_start>grouped[(item["api_key"] item.get("source" "gnss"))].append({"api_key":item["api_key"] "report":item["report"]})<block_end><with_stmt>self.task.db_session(commit=<false>)<as>session<block_start>export_configs=ExportConfig.all(session)<block_end><with_stmt>self.task.redis_pipeline()<as>pipe<block_start><for_stmt>(api_key source),items grouped.items()<block_start><for_stmt>config export_configs<block_start><if_stmt>config.allowed(api_key source)<block_start>queue_key=config.queue_key(api_key source)<line_sep>queue=config.queue(queue_key redis_client)<line_sep>queue.enqueue(items pipe=pipe)<block_end><block_end><block_end><block_end><for_stmt>config export_configs# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
<block_start><for_stmt>queue_key config.partitions(redis_client)<block_start>queue=config.queue(queue_key redis_client)<if_stmt>queue.ready()<block_start>export_task.delay(config.name queue_key)<block_end><block_end><block_end><if_stmt>data_queue.ready()<block_start>self.task.apply_countdown()<block_end><block_end><block_end><class_stmt>ReportExporter(object)<block_start>_retriable=(IOError )<line_sep>_retries=3<line_sep>_retry_wait=1.0<def_stmt>__init__ self task config queue_key<block_start>self.task=task<line_sep>self.config=config<line_sep>self.queue_key=queue_key<line_sep>self.queue=config.queue(queue_key task.redis_client)<line_sep>self.stats_tags=["key:"+self.config.name]<block_end>@staticmethod<def_stmt>export task name queue_key<block_start><with_stmt>task.db_session(commit=<false>)<as>session<block_start>config=ExportConfig.get(session name)<block_end>exporter_types={"dummy":DummyExporter "geosubmit":GeosubmitExporter "internal":InternalExporter "s3":S3Exporter }<line_sep>exporter_type=exporter_types.get(config.schema)<if_stmt>exporter_type<is><not><none><block_start>exporter_type(task config queue_key)()<block_end><block_end><def_stmt>__call__ self<block_start>queue_items=self.queue.dequeue()<if_stmt><not>queue_items<block_start><return><block_end>success=<false><for_stmt>i range(self._retries)<block_start><try_stmt><block_start><with_stmt>METRICS.timer("data.export.upload.timing" tags=self.stats_tags)<block_start>self.send(queue_items)<block_end>success=<true><block_end><except_stmt>self._retriable<block_start>success=<false><line_sep>time.sleep(self._retry_wait<times>(i<power>2+1))<block_end><if_stmt>success<block_start>METRICS.incr("data.export.batch" tags=self.stats_tags)<line_sep><break><block_end><block_end><if_stmt>success<and>self.queue.ready()<block_start>self.task.apply_countdown(args=[self.config.name self.queue_key])<block_end><block_end><def_stmt>send self queue_items<block_start><raise>NotImplementedError()<block_end><block_end><class_stmt>DummyExporter(ReportExporter)<block_start><def_stmt>send self queue_items<block_start><pass><block_end><block_end><class_stmt>GeosubmitExporter(ReportExporter)<block_start>_retriable=(IOError requests.exceptions.RequestException)<def_stmt>send self queue_items# ignore metadata
<block_start>reports=[item["report"]<for>item queue_items]<line_sep>headers={"Content-Encoding":"gzip" "Content-Type":"application/json" "User-Agent":"ichnaea" }<line_sep>response=requests.post(self.config.url data=util.encode_gzip(json.dumps({"items":reports}).encode() compresslevel=5) headers=headers timeout=60.0 )<line_sep># log upload_status and trigger exception for bad responses
# this causes the task to be retried
METRICS.incr("data.export.upload" tags=self.stats_tags+["status:%s"%response.status_code] )<line_sep>response.raise_for_status()<block_end><block_end><class_stmt>S3Exporter(ReportExporter)<block_start>_retriable=(IOError boto3.exceptions.Boto3Error botocore.exceptions.BotoCoreError )<def_stmt>send self queue_items# ignore metadata
<block_start>reports=[item["report"]<for>item queue_items]<line_sep>_,bucketname,path=urlparse(self.config.url)[:3]<line_sep># s3 key names start without a leading slash
path=path.lstrip("/")<if_stmt><not>path.endswith("/")<block_start>path<augadd>"/"<block_end>year,month,day=util.utcnow().timetuple()[:3]<line_sep># strip away queue prefix again
parts=self.queue_key.split(":")<line_sep>source=parts[1]<line_sep>api_key=parts[2]<line_sep>obj_name=path.format(source=source api_key=api_key year=year month=month day=day)<line_sep>obj_name<augadd>uuid.uuid1().hex+".json.gz"<try_stmt><block_start>data=util.encode_gzip(json.dumps({"items":reports}).encode() compresslevel=7)<line_sep>s3=boto3.resource("s3")<line_sep>bucket=s3.Bucket(bucketname)<line_sep>obj=bucket.Object(obj_name)<line_sep>obj.put(Body=data ContentEncoding="gzip" ContentType="application/json")<line_sep>METRICS.incr("data.export.upload" tags=self.stats_tags+["status:success"])<block_end><except_stmt>Exception<block_start>METRICS.incr("data.export.upload" tags=self.stats_tags+["status:failure"])<line_sep><raise><block_end><block_end><block_end><class_stmt>InternalTransform(object)<block_start>"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
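For example (values illustrative; the exact field maps are defined on the
class below), an external item like
{"position": {"latitude": 51.0, "longitude": 13.3},
 "wifiAccessPoints": [{"macAddress": "ab:cd:ef:01:23:45", "signalStrength": -60}]}
is flattened to roughly
{"lat": 51.0, "lon": 13.3,
 "wifi": [{"mac": "ab:cd:ef:01:23:45", "signal": -60}]}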
"""<line_sep># *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
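# e.g. ("latitude", "lat") renames a field, while a bare "accuracy" keeps its name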
position_id=("position" <none>)<line_sep>position_map=[("latitude" "lat") ("longitude" "lon") "accuracy" "altitude" ("altitudeAccuracy" "altitude_accuracy") "heading" "pressure" "speed" "source" ]<line_sep>blue_id=("bluetoothBeacons" "blue")<line_sep>blue_map=[("macAddress" "mac") "age" ("signalStrength" "signal")]<line_sep>cell_id=("cellTowers" "cell")<line_sep>cell_map=[("radioType" "radio") ("mobileCountryCode" "mcc") ("mobileNetworkCode" "mnc") ("locationAreaCode" "lac") ("cellId" "cid") "age" "asu" ("primaryScramblingCode" "psc") "serving" ("signalStrength" "signal") ("timingAdvance" "ta") ]<line_sep>wifi_id=("wifiAccessPoints" "wifi")<line_sep>wifi_map=[("macAddress" "mac") "age" "channel" "frequency" ("radioType" "radio") ("signalToNoiseRatio" "snr") ("signalStrength" "signal") ]<def_stmt>_map_dict self item_source field_map<block_start>value={}<for_stmt>spec field_map<block_start><if_stmt>isinstance(spec tuple)<block_start>source,target=spec<block_end><else_stmt><block_start>source=spec<line_sep>target=spec<block_end>source_value=item_source.get(source)<if_stmt>source_value<is><not><none><block_start>value[target]=source_value<block_end><block_end><return>value<block_end><def_stmt>_parse_dict self item report key_map field_map<block_start>value={}<line_sep>item_source=item.get(key_map[0])<if_stmt>item_source<block_start>value=self._map_dict(item_source field_map)<block_end><if_stmt>value<block_start><if_stmt>key_map[1]<is><none><block_start>report.update(value)<block_end><else_stmt><block_start>report[key_map[1]]=value<block_end><block_end><return>value<block_end><def_stmt>_parse_list self item report key_map field_map<block_start>values=[]<for_stmt>value_item item.get(key_map[0] ())<block_start>value=self._map_dict(value_item field_map)<if_stmt>value<block_start>values.append(value)<block_end><block_end><if_stmt>values<block_start>report[key_map[1]]=values<block_end><return>values<block_end><def_stmt>__call__ self item<block_start>report={}<line_sep>self._parse_dict(item report self.position_id self.position_map)<line_sep>blues=self._parse_list(item report self.blue_id self.blue_map)<line_sep>cells=self._parse_list(item report self.cell_id self.cell_map)<line_sep>wifis=self._parse_list(item report self.wifi_id self.wifi_map)<line_sep>position=item.get("position")<or>{}<line_sep>gps_age=position.get("age" 0)<line_sep>timestamp=item.get("timestamp")<if_stmt>timestamp# turn timestamp into GPS timestamp
<block_start>report["timestamp"]=timestamp-gps_age<block_end><if_stmt>gps_age# Normalize age fields to be relative to GPS time
<block_start><for_stmt>type_ ("blue" "cell" "wifi")<block_start><for_stmt>record report.get(type_ ())<block_start>record["age"]=record.get("age" 0)-gps_age<block_end><block_end><block_end><if_stmt>blues<or>cells<or>wifis<block_start><return>report<block_end><return>{}<block_end><block_end><class_stmt>InternalExporter(ReportExporter)<block_start>_retriable=(IOError redis.exceptions.RedisError sqlalchemy.exc.InternalError)<line_sep>transform=InternalTransform()<def_stmt>send self queue_items<block_start>api_keys=set()<line_sep>api_keys_known=set()<line_sep>metrics={}<line_sep>items=[]<for_stmt>item queue_items# preprocess items and extract set of API keys
<block_start>item["report"]=self.transform(item["report"])<if_stmt>item["report"]<block_start>items.append(item)<line_sep>api_keys.add(item["api_key"])<block_end><block_end><for_stmt>api_key api_keys<block_start>metrics[api_key]={}<for_stmt>type_ ("report" "blue" "cell" "wifi")<block_start><for_stmt>action ("drop" "upload")<block_start>metrics[api_key]["%s_%s"%(type_ action)]=0<block_end><block_end><block_end><with_stmt>self.task.db_session(commit=<false>)<as>session# limit database session to get API keys
<block_start>keys=[key<for>key api_keys<if>key]<if_stmt>keys<block_start>columns=ApiKey.__table__.c<line_sep>rows=session.execute(select([columns.valid_key]).where(columns.valid_key.in_(keys))).fetchall()<for_stmt>row rows<block_start>api_keys_known.add(row.valid_key)<block_end><block_end><block_end>positions=[]<line_sep>observations={"blue":[] "cell":[] "wifi":[]}<for_stmt>item items<block_start>api_key=item["api_key"]<line_sep>report=item["report"]<line_sep>obs,malformed_obs=self.process_report(report)<line_sep>any_data=<false><for_stmt>name ("blue" "cell" "wifi")<block_start><if_stmt>obs.get(name)<block_start>observations[name].extend(obs[name])<line_sep>metrics[api_key][name+"_upload"]<augadd>len(obs[name])<line_sep>any_data=<true><block_end>metrics[api_key][name+"_drop"]<augadd>malformed_obs.get(name 0)<block_end>metrics[api_key]["report_upload"]<augadd>1<if_stmt>any_data<block_start>positions.append((report["lat"] report["lon"]))<block_end><else_stmt><block_start>metrics[api_key]["report_drop"]<augadd>1<block_end><block_end><with_stmt>self.task.redis_pipeline()<as>pipe<block_start>self.queue_observations(pipe observations)<if_stmt>_map_content_enabled<and>positions<block_start>self.process_datamap(pipe positions)<block_end><block_end>self.emit_metrics(api_keys_known metrics)<block_end><def_stmt>queue_observations self pipe observations<block_start><for_stmt>datatype,shard_model,shard_key,queue_prefix (("blue" BlueShard "mac" "update_blue_") ("cell" CellShard "cellid" "update_cell_") ("wifi" WifiShard "mac" "update_wifi_") )<block_start>queued_obs=defaultdict(list)<for_stmt>obs observations[datatype]# group by sharded queue
<block_start>shard_id=shard_model.shard_id(getattr(obs shard_key))<line_sep>queue_id=queue_prefix+shard_id<line_sep>queued_obs[queue_id].append(obs.to_json())<block_end><for_stmt>queue_id,values queued_obs.items()# enqueue values for each queue
<block_start>queue=self.task.app.data_queues[queue_id]<line_sep>queue.enqueue(values pipe=pipe)<block_end><block_end><block_end><def_stmt>emit_metrics self api_keys_known metrics<block_start><for_stmt>api_key,key_metrics metrics.items()<block_start>api_tag=[]<if_stmt>api_key<and>api_key<in>api_keys_known<block_start>api_tag=["key:%s"%api_key]<block_end><for_stmt>name,count key_metrics.items()<block_start><if_stmt><not>count<block_start><continue><block_end>type_,action=name.split("_")<if_stmt>type_<eq>"report"<block_start>suffix="report"<line_sep>tags=api_tag<block_end><else_stmt><block_start>suffix="observation"<line_sep>tags=["type:%s"%type_]+api_tag<block_end>METRICS.incr("data.%s.%s"%(suffix action) count tags=tags)<block_end><block_end><block_end><def_stmt>process_report self data<block_start>report=Report.create(**data)<if_stmt>report<is><none><block_start><return>({} {})<block_end>malformed={}<line_sep>observations={}<for_stmt>name,report_cls,obs_cls (("blue" BlueReport BlueObservation) ("cell" CellReport CellObservation) ("wifi" WifiReport WifiObservation) )<block_start>malformed[name]=0<line_sep>observations[name]={}<if_stmt>data.get(name)<block_start><for_stmt>item data[name]# validate the blue/cell/wifi specific fields
<block_start>item_report=report_cls.create(**item)<if_stmt>item_report<is><none><block_start>malformed[name]<augadd>1<line_sep><continue><block_end># combine general and specific report data into one
item_obs=obs_cls.combine(report item_report)<line_sep>item_key=item_obs.unique_key<line_sep># if we have better data for the same key, ignore
existing=observations[name].get(item_key)<if_stmt>existing<is><not><none><and>existing.better(item_obs)<block_start><continue><block_end>observations[name][item_key]=item_obs<block_end><block_end><block_end>obs={"blue":observations["blue"].values() "cell":observations["cell"].values() "wifi":observations["wifi"].values() }<line_sep><return>(obs malformed)<block_end><def_stmt>process_datamap self pipe positions<block_start>grids=set()<for_stmt>lat,lon positions<block_start><if_stmt>lat<is><not><none><and>lon<is><not><none><block_start>grids.add(DataMap.scale(lat lon))<block_end><block_end>shards=defaultdict(set)<for_stmt>lat,lon grids<block_start>shards[DataMap.shard_id(lat lon)].add(encode_datamap_grid(lat lon))<block_end><for_stmt>shard_id,values shards.items()<block_start>queue=self.task.app.data_queues["update_datamap_"+shard_id]<line_sep>queue.enqueue(list(values) pipe=pipe)<block_end><block_end><block_end> |
<import_stmt>hugectr<import_from_stmt>mpi4py MPI<line_sep>solver=hugectr.CreateSolver(model_name="dcn" max_eval_batches=1 batchsize_eval=16384 batchsize=16384 lr=0.001 vvgpu=[[0]] repeat_dataset=<true> use_mixed_precision=<false> scaler=1.0 use_cuda_graph=<true> metrics_spec={hugectr.MetricsType.AUC:1.0})<line_sep>reader=hugectr.DataReaderParams(data_reader_type=hugectr.DataReaderType_t.Norm source=["./dcn_data/file_list.txt"] eval_source="./dcn_data/file_list_test.txt" check_type=hugectr.Check_t.Sum num_workers=16)<line_sep>optimizer=hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam update_type=hugectr.Update_t.Global beta1=0.9 beta2=0.999 epsilon=0.0001)<line_sep>model=hugectr.Model(solver reader optimizer)<line_sep>model.add(hugectr.Input(label_dim=1 label_name="label" dense_dim=13 dense_name="dense" data_reader_sparse_param_array=[hugectr.DataReaderSparseParam("data1" 2 <false> 26)]))<line_sep>model.add(hugectr.SparseEmbedding(embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash workspace_size_per_gpu_in_mb=300 embedding_vec_size=16 combiner="sum" sparse_embedding_name="sparse_embedding1" bottom_name="data1" optimizer=optimizer))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.Reshape bottom_names=["sparse_embedding1"] top_names=["reshape1"] leading_dim=416))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.Concat bottom_names=["reshape1" "dense"] top_names=["concat1"]))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.Slice bottom_names=["concat1"] top_names=["slice11" "slice12"] ranges=[(0 429) (0 429)]))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.MultiCross bottom_names=["slice11"] top_names=["multicross1"] num_layers=1))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct bottom_names=["slice12"] top_names=["fc1"] num_output=1024))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU bottom_names=["fc1"] top_names=["relu1"]))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.Dropout bottom_names=["relu1"] top_names=["dropout1"] dropout_rate=0.5))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct bottom_names=["dropout1"] top_names=["fc2"] num_output=1024))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU bottom_names=["fc2"] top_names=["relu2"]))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.Dropout bottom_names=["relu2"] top_names=["dropout2"] dropout_rate=0.5))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.Concat bottom_names=["dropout2" "multicross1"] top_names=["concat2"]))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct bottom_names=["concat2"] top_names=["fc3"] num_output=1))<line_sep>model.add(hugectr.DenseLayer(layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss bottom_names=["fc3" "label"] top_names=["loss"]))<line_sep>model.compile()<line_sep>model.summary()<line_sep>model.graph_to_json(graph_config_file="/dump_infer/dcn.json")<line_sep>model.fit(max_iter=2300 display=200 eval_interval=2000 snapshot=2000 snapshot_prefix="/dump_infer/dcn")<line_sep>model.export_predictions("/dump_infer/dcn_pred_"+str(2000) "/dump_infer/dcn_label_"+str(2000))<import_from_stmt>hugectr.inference InferenceParams CreateInferenceSession<import_stmt>numpy<as>np<line_sep>batch_size=16384<line_sep>num_batches=1<line_sep>data_source="./dcn_data/file_list_test.txt"<line_sep>inference_params=InferenceParams(model_name="dcn" 
max_batchsize=batch_size hit_rate_threshold=1.0 dense_model_file="/dump_infer/dcn_dense_2000.model" sparse_model_files=["/dump_infer/dcn0_sparse_2000.model"] device_id=0 use_gpu_embedding_cache=<false> cache_size_percentage=1.0 i64_input_key=<false> use_mixed_precision=<false> use_cuda_graph=<true>)<line_sep>inference_session=CreateInferenceSession("/dump_infer/dcn.json" inference_params)<line_sep>predictions=inference_session.predict(num_batches=num_batches source=data_source data_reader_type=hugectr.DataReaderType_t.Norm check_type=hugectr.Check_t.Sum)<line_sep>grount_truth=np.loadtxt("/dump_infer/dcn_pred_2000")<line_sep>diff=predictions-grount_truth<line_sep>mse=np.mean(diff<times>diff)<if_stmt>mse<g>1e-3<block_start><raise>RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))<line_sep>sys.exit(1)<block_end><else_stmt><block_start>print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse))<block_end> |
'''
pymmh3 was written by <NAME> and enhanced by <NAME>, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is pure Python it is FAR from performant; if performance is needed,
a proper C module is suggested!
This module is written to have the same interface as the mmh3 Python package, found here, to allow simple conversion:
https://pypi.python.org/pypi/mmh3/2.3.1
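Example usage (an illustrative sketch of the functions defined below):
hash('foo') # 32 bit signed int
hash('foo', seed=42) # 32 bit signed int, custom seed
hash64('foo') # tuple of two 64 bit signed ints
hash128('foo') # 128 bit unsigned int
hash_bytes('foo') # 16 byte string, as documented below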
'''<import_stmt>sys<as>_sys<if_stmt>(_sys.version_info<g>(3 0))<block_start><def_stmt>xrange a b c<block_start><return>list(range(a b c))<block_end><def_stmt>xencode x<block_start><if_stmt>isinstance(x bytes)<or>isinstance(x bytearray)<block_start><return>x<block_end><else_stmt><block_start><return>x.encode()<block_end><block_end><block_end><else_stmt><block_start><def_stmt>xencode x<block_start><return>x<block_end><block_end><del_stmt>_sys<def_stmt>hash key seed=0x0<block_start>''' Implements 32bit murmur3 hash. '''<line_sep>key=bytearray(xencode(key))<def_stmt>fmix h<block_start>h<augxor>h<rshift>16<line_sep>h=(h<times>0x85ebca6b)&0xFFFFFFFF<line_sep>h<augxor>h<rshift>13<line_sep>h=(h<times>0xc2b2ae35)&0xFFFFFFFF<line_sep>h<augxor>h<rshift>16<line_sep><return>h<block_end>length=len(key)<line_sep>nblocks=int(length/4)<line_sep>h1=seed<line_sep>c1=0xcc9e2d51<line_sep>c2=0x1b873593<line_sep># body
<for_stmt>block_start range(0 nblocks<times>4 4)# each 4 byte block is read as a little-endian uint32
<block_start>k1=key[block_start+3]<lshift>24|key[block_start+2]<lshift>16|key[block_start+1]<lshift>8|key[block_start+0]<line_sep>k1=(c1<times>k1)&0xFFFFFFFF<line_sep>k1=(k1<lshift>15|k1<rshift>17)&0xFFFFFFFF# inlined ROTL32
k1=(c2<times>k1)&0xFFFFFFFF<line_sep>h1<augxor>k1<line_sep>h1=(h1<lshift>13|h1<rshift>19)&0xFFFFFFFF# inlined ROTL32
h1=(h1<times>5+0xe6546b64)&0xFFFFFFFF<block_end># tail
tail_index=nblocks<times>4<line_sep>k1=0<line_sep>tail_size=length&3<if_stmt>tail_size<ge>3<block_start>k1<augxor>key[tail_index+2]<lshift>16<block_end><if_stmt>tail_size<ge>2<block_start>k1<augxor>key[tail_index+1]<lshift>8<block_end><if_stmt>tail_size<ge>1<block_start>k1<augxor>key[tail_index+0]<block_end><if_stmt>tail_size<g>0<block_start>k1=(k1<times>c1)&0xFFFFFFFF<line_sep>k1=(k1<lshift>15|k1<rshift>17)&0xFFFFFFFF# inlined ROTL32
k1=(k1<times>c2)&0xFFFFFFFF<line_sep>h1<augxor>k1<block_end>#finalization
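# fmix gives the bits a final avalanche; the unsigned result is converted to a signed 32 bit int below to match mmh3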
unsigned_val=fmix(h1^length)<if_stmt>unsigned_val&0x80000000<eq>0<block_start><return>unsigned_val<block_end><else_stmt><block_start><return>-((unsigned_val^0xFFFFFFFF)+1)<block_end><block_end><def_stmt>hash128 key seed=0x0 x64arch=<true><block_start>''' Implements 128bit murmur3 hash. '''<def_stmt>hash128_x64 key seed<block_start>''' Implements 128bit murmur3 hash for x64. '''<def_stmt>fmix k<block_start>k<augxor>k<rshift>33<line_sep>k=(k<times>0xff51afd7ed558ccd)&0xFFFFFFFFFFFFFFFF<line_sep>k<augxor>k<rshift>33<line_sep>k=(k<times>0xc4ceb9fe1a85ec53)&0xFFFFFFFFFFFFFFFF<line_sep>k<augxor>k<rshift>33<line_sep><return>k<block_end>length=len(key)<line_sep>nblocks=int(length/16)<line_sep>h1=seed<line_sep>h2=seed<line_sep>c1=0x87c37b91114253d5<line_sep>c2=0x4cf5ad432745937f<line_sep>#body
<for_stmt>block_start range(0 nblocks<times>8 8)# each 16 byte block is read as two little-endian uint64 values
<block_start>k1=key[2<times>block_start+7]<lshift>56|key[2<times>block_start+6]<lshift>48|key[2<times>block_start+5]<lshift>40|key[2<times>block_start+4]<lshift>32|key[2<times>block_start+3]<lshift>24|key[2<times>block_start+2]<lshift>16|key[2<times>block_start+1]<lshift>8|key[2<times>block_start+0]<line_sep>k2=key[2<times>block_start+15]<lshift>56|key[2<times>block_start+14]<lshift>48|key[2<times>block_start+13]<lshift>40|key[2<times>block_start+12]<lshift>32|key[2<times>block_start+11]<lshift>24|key[2<times>block_start+10]<lshift>16|key[2<times>block_start+9]<lshift>8|key[2<times>block_start+8]<line_sep>k1=(c1<times>k1)&0xFFFFFFFFFFFFFFFF<line_sep>k1=(k1<lshift>31|k1<rshift>33)&0xFFFFFFFFFFFFFFFF# inlined ROTL64
k1=(c2<times>k1)&0xFFFFFFFFFFFFFFFF<line_sep>h1<augxor>k1<line_sep>h1=(h1<lshift>27|h1<rshift>37)&0xFFFFFFFFFFFFFFFF# inlined ROTL64
h1=(h1+h2)&0xFFFFFFFFFFFFFFFF<line_sep>h1=(h1<times>5+0x52dce729)&0xFFFFFFFFFFFFFFFF<line_sep>k2=(c2<times>k2)&0xFFFFFFFFFFFFFFFF<line_sep>k2=(k2<lshift>33|k2<rshift>31)&0xFFFFFFFFFFFFFFFF# inlined ROTL64
k2=(c1<times>k2)&0xFFFFFFFFFFFFFFFF<line_sep>h2<augxor>k2<line_sep>h2=(h2<lshift>31|h2<rshift>33)&0xFFFFFFFFFFFFFFFF# inlined ROTL64
h2=(h1+h2)&0xFFFFFFFFFFFFFFFF<line_sep>h2=(h2<times>5+0x38495ab5)&0xFFFFFFFFFFFFFFFF<block_end>#tail
tail_index=nblocks<times>16<line_sep>k1=0<line_sep>k2=0<line_sep>tail_size=length&15<if_stmt>tail_size<ge>15<block_start>k2<augxor>key[tail_index+14]<lshift>48<block_end><if_stmt>tail_size<ge>14<block_start>k2<augxor>key[tail_index+13]<lshift>40<block_end><if_stmt>tail_size<ge>13<block_start>k2<augxor>key[tail_index+12]<lshift>32<block_end><if_stmt>tail_size<ge>12<block_start>k2<augxor>key[tail_index+11]<lshift>24<block_end><if_stmt>tail_size<ge>11<block_start>k2<augxor>key[tail_index+10]<lshift>16<block_end><if_stmt>tail_size<ge>10<block_start>k2<augxor>key[tail_index+9]<lshift>8<block_end><if_stmt>tail_size<ge>9<block_start>k2<augxor>key[tail_index+8]<block_end><if_stmt>tail_size<g>8<block_start>k2=(k2<times>c2)&0xFFFFFFFFFFFFFFFF<line_sep>k2=(k2<lshift>33|k2<rshift>31)&0xFFFFFFFFFFFFFFFF# inlined ROTL64
k2=(k2<times>c1)&0xFFFFFFFFFFFFFFFF<line_sep>h2<augxor>k2<block_end><if_stmt>tail_size<ge>8<block_start>k1<augxor>key[tail_index+7]<lshift>56<block_end><if_stmt>tail_size<ge>7<block_start>k1<augxor>key[tail_index+6]<lshift>48<block_end><if_stmt>tail_size<ge>6<block_start>k1<augxor>key[tail_index+5]<lshift>40<block_end><if_stmt>tail_size<ge>5<block_start>k1<augxor>key[tail_index+4]<lshift>32<block_end><if_stmt>tail_size<ge>4<block_start>k1<augxor>key[tail_index+3]<lshift>24<block_end><if_stmt>tail_size<ge>3<block_start>k1<augxor>key[tail_index+2]<lshift>16<block_end><if_stmt>tail_size<ge>2<block_start>k1<augxor>key[tail_index+1]<lshift>8<block_end><if_stmt>tail_size<ge>1<block_start>k1<augxor>key[tail_index+0]<block_end><if_stmt>tail_size<g>0<block_start>k1=(k1<times>c1)&0xFFFFFFFFFFFFFFFF<line_sep>k1=(k1<lshift>31|k1<rshift>33)&0xFFFFFFFFFFFFFFFF# inlined ROTL64
k1=(k1<times>c2)&0xFFFFFFFFFFFFFFFF<line_sep>h1<augxor>k1<block_end>#finalization
h1<augxor>length<line_sep>h2<augxor>length<line_sep>h1=(h1+h2)&0xFFFFFFFFFFFFFFFF<line_sep>h2=(h1+h2)&0xFFFFFFFFFFFFFFFF<line_sep>h1=fmix(h1)<line_sep>h2=fmix(h2)<line_sep>h1=(h1+h2)&0xFFFFFFFFFFFFFFFF<line_sep>h2=(h1+h2)&0xFFFFFFFFFFFFFFFF<line_sep><return>(h2<lshift>64|h1)<block_end><def_stmt>hash128_x86 key seed<block_start>''' Implements 128bit murmur3 hash for x86. '''<def_stmt>fmix h<block_start>h<augxor>h<rshift>16<line_sep>h=(h<times>0x85ebca6b)&0xFFFFFFFF<line_sep>h<augxor>h<rshift>13<line_sep>h=(h<times>0xc2b2ae35)&0xFFFFFFFF<line_sep>h<augxor>h<rshift>16<line_sep><return>h<block_end>length=len(key)<line_sep>nblocks=int(length/16)<line_sep>h1=seed<line_sep>h2=seed<line_sep>h3=seed<line_sep>h4=seed<line_sep>c1=0x239b961b<line_sep>c2=0xab0e9789<line_sep>c3=0x38b34ae5<line_sep>c4=0xa1e38b93<line_sep>#body
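# each 16 byte block is read as four little-endian uint32 lanes (k1..k4) and mixed into h1..h4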
<for_stmt>block_start range(0 nblocks<times>16 16)<block_start>k1=key[block_start+3]<lshift>24|key[block_start+2]<lshift>16|key[block_start+1]<lshift>8|key[block_start+0]<line_sep>k2=key[block_start+7]<lshift>24|key[block_start+6]<lshift>16|key[block_start+5]<lshift>8|key[block_start+4]<line_sep>k3=key[block_start+11]<lshift>24|key[block_start+10]<lshift>16|key[block_start+9]<lshift>8|key[block_start+8]<line_sep>k4=key[block_start+15]<lshift>24|key[block_start+14]<lshift>16|key[block_start+13]<lshift>8|key[block_start+12]<line_sep>k1=(c1<times>k1)&0xFFFFFFFF<line_sep>k1=(k1<lshift>15|k1<rshift>17)&0xFFFFFFFF# inlined ROTL32
k1=(c2<times>k1)&0xFFFFFFFF<line_sep>h1<augxor>k1<line_sep>h1=(h1<lshift>19|h1<rshift>13)&0xFFFFFFFF# inlined ROTL32
h1=(h1+h2)&0xFFFFFFFF<line_sep>h1=(h1<times>5+0x561ccd1b)&0xFFFFFFFF<line_sep>k2=(c2<times>k2)&0xFFFFFFFF<line_sep>k2=(k2<lshift>16|k2<rshift>16)&0xFFFFFFFF# inlined ROTL32
k2=(c3<times>k2)&0xFFFFFFFF<line_sep>h2<augxor>k2<line_sep>h2=(h2<lshift>17|h2<rshift>15)&0xFFFFFFFF# inlined ROTL32
h2=(h2+h3)&0xFFFFFFFF<line_sep>h2=(h2<times>5+0x0bcaa747)&0xFFFFFFFF<line_sep>k3=(c3<times>k3)&0xFFFFFFFF<line_sep>k3=(k3<lshift>17|k3<rshift>15)&0xFFFFFFFF# inlined ROTL32
k3=(c4<times>k3)&0xFFFFFFFF<line_sep>h3<augxor>k3<line_sep>h3=(h3<lshift>15|h3<rshift>17)&0xFFFFFFFF# inlined ROTL32
h3=(h3+h4)&0xFFFFFFFF<line_sep>h3=(h3<times>5+0x96cd1c35)&0xFFFFFFFF<line_sep>k4=(c4<times>k4)&0xFFFFFFFF<line_sep>k4=(k4<lshift>18|k4<rshift>14)&0xFFFFFFFF# inlined ROTL32
k4=(c1<times>k4)&0xFFFFFFFF<line_sep>h4<augxor>k4<line_sep>h4=(h4<lshift>13|h4<rshift>19)&0xFFFFFFFF# inlined ROTL32
h4=(h1+h4)&0xFFFFFFFF<line_sep>h4=(h4<times>5+0x32ac3b17)&0xFFFFFFFF<block_end>#tail
tail_index=nblocks<times>16<line_sep>k1=0<line_sep>k2=0<line_sep>k3=0<line_sep>k4=0<line_sep>tail_size=length&15<if_stmt>tail_size<ge>15<block_start>k4<augxor>key[tail_index+14]<lshift>16<block_end><if_stmt>tail_size<ge>14<block_start>k4<augxor>key[tail_index+13]<lshift>8<block_end><if_stmt>tail_size<ge>13<block_start>k4<augxor>key[tail_index+12]<block_end><if_stmt>tail_size<g>12<block_start>k4=(k4<times>c4)&0xFFFFFFFF<line_sep>k4=(k4<lshift>18|k4<rshift>14)&0xFFFFFFFF# inlined ROTL32
k4=(k4<times>c1)&0xFFFFFFFF<line_sep>h4<augxor>k4<block_end><if_stmt>tail_size<ge>12<block_start>k3<augxor>key[tail_index+11]<lshift>24<block_end><if_stmt>tail_size<ge>11<block_start>k3<augxor>key[tail_index+10]<lshift>16<block_end><if_stmt>tail_size<ge>10<block_start>k3<augxor>key[tail_index+9]<lshift>8<block_end><if_stmt>tail_size<ge>9<block_start>k3<augxor>key[tail_index+8]<block_end><if_stmt>tail_size<g>8<block_start>k3=(k3<times>c3)&0xFFFFFFFF<line_sep>k3=(k3<lshift>17|k3<rshift>15)&0xFFFFFFFF# inlined ROTL32
k3=(k3<times>c4)&0xFFFFFFFF<line_sep>h3<augxor>k3<block_end><if_stmt>tail_size<ge>8<block_start>k2<augxor>key[tail_index+7]<lshift>24<block_end><if_stmt>tail_size<ge>7<block_start>k2<augxor>key[tail_index+6]<lshift>16<block_end><if_stmt>tail_size<ge>6<block_start>k2<augxor>key[tail_index+5]<lshift>8<block_end><if_stmt>tail_size<ge>5<block_start>k2<augxor>key[tail_index+4]<block_end><if_stmt>tail_size<g>4<block_start>k2=(k2<times>c2)&0xFFFFFFFF<line_sep>k2=(k2<lshift>16|k2<rshift>16)&0xFFFFFFFF# inlined ROTL32
k2=(k2<times>c3)&0xFFFFFFFF<line_sep>h2<augxor>k2<block_end><if_stmt>tail_size<ge>4<block_start>k1<augxor>key[tail_index+3]<lshift>24<block_end><if_stmt>tail_size<ge>3<block_start>k1<augxor>key[tail_index+2]<lshift>16<block_end><if_stmt>tail_size<ge>2<block_start>k1<augxor>key[tail_index+1]<lshift>8<block_end><if_stmt>tail_size<ge>1<block_start>k1<augxor>key[tail_index+0]<block_end><if_stmt>tail_size<g>0<block_start>k1=(k1<times>c1)&0xFFFFFFFF<line_sep>k1=(k1<lshift>15|k1<rshift>17)&0xFFFFFFFF# inlined ROTL32
k1=(k1<times>c2)&0xFFFFFFFF<line_sep>h1<augxor>k1<block_end>#finalization
h1<augxor>length<line_sep>h2<augxor>length<line_sep>h3<augxor>length<line_sep>h4<augxor>length<line_sep>h1=(h1+h2)&0xFFFFFFFF<line_sep>h1=(h1+h3)&0xFFFFFFFF<line_sep>h1=(h1+h4)&0xFFFFFFFF<line_sep>h2=(h1+h2)&0xFFFFFFFF<line_sep>h3=(h1+h3)&0xFFFFFFFF<line_sep>h4=(h1+h4)&0xFFFFFFFF<line_sep>h1=fmix(h1)<line_sep>h2=fmix(h2)<line_sep>h3=fmix(h3)<line_sep>h4=fmix(h4)<line_sep>h1=(h1+h2)&0xFFFFFFFF<line_sep>h1=(h1+h3)&0xFFFFFFFF<line_sep>h1=(h1+h4)&0xFFFFFFFF<line_sep>h2=(h1+h2)&0xFFFFFFFF<line_sep>h3=(h1+h3)&0xFFFFFFFF<line_sep>h4=(h1+h4)&0xFFFFFFFF<line_sep><return>(h4<lshift>96|h3<lshift>64|h2<lshift>32|h1)<block_end>key=bytearray(xencode(key))<if_stmt>x64arch<block_start><return>hash128_x64(key seed)<block_end><else_stmt><block_start><return>hash128_x86(key seed)<block_end><block_end><def_stmt>hash64 key seed=0x0 x64arch=<true><block_start>''' Implements 64bit murmur3 hash. Returns a tuple. '''<line_sep>hash_128=hash128(key seed x64arch)<line_sep>unsigned_val1=hash_128&0xFFFFFFFFFFFFFFFF<if_stmt>unsigned_val1&0x8000000000000000<eq>0<block_start>signed_val1=unsigned_val1<block_end><else_stmt><block_start>signed_val1=-((unsigned_val1^0xFFFFFFFFFFFFFFFF)+1)<block_end>unsigned_val2=(hash_128<rshift>64)&0xFFFFFFFFFFFFFFFF<if_stmt>unsigned_val2&0x8000000000000000<eq>0<block_start>signed_val2=unsigned_val2<block_end><else_stmt><block_start>signed_val2=-((unsigned_val2^0xFFFFFFFFFFFFFFFF)+1)<block_end><return>(int(signed_val1) int(signed_val2))<block_end><def_stmt>hash_bytes key seed=0x0 x64arch=<true><block_start>''' Implements 128bit murmur3 hash. Returns a byte string. '''<line_sep>hash_128=hash128(key seed x64arch)<line_sep>bytestring=''<for_stmt>i range(0 16 1)<block_start>lsbyte=hash_128&0xFF<line_sep>bytestring=bytestring+str(chr(lsbyte))<line_sep>hash_128=hash_128<rshift>8<block_end><return>bytestring<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser('pymurmur3' 'pymurmur [options] "string to hash"')<line_sep>parser.add_argument('--seed' type=int default=0)<line_sep>parser.add_argument('strings' default=[] nargs='+')<line_sep>opts=parser.parse_args()<for_stmt>str_to_hash opts.strings<block_start>sys.stdout.write('"%s" = 0x%08X\n'%(str_to_hash hash(str_to_hash)))<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>httmock urlmatch HTTMock<import_from_stmt>util.config URLSchemeAndHostname<import_from_stmt>util.config.validator ValidatorContext<import_from_stmt>util.config.validators ConfigValidationException<import_from_stmt>util.config.validators.validate_bitbucket_trigger BitbucketTriggerValidator<import_from_stmt>test.fixtures *<line_sep>@pytest.mark.parametrize("unvalidated_config" [(ValidatorContext({})) (ValidatorContext({"BITBUCKET_TRIGGER_CONFIG":{}})) (ValidatorContext({"BITBUCKET_TRIGGER_CONFIG":{"CONSUMER_KEY":"foo"}})) (ValidatorContext({"BITBUCKET_TRIGGER_CONFIG":{"CONSUMER_SECRET":"foo"}})) ] )<def_stmt>test_validate_invalid_bitbucket_trigger_config unvalidated_config app<block_start>validator=BitbucketTriggerValidator()<with_stmt>pytest.raises(ConfigValidationException)<block_start>validator.validate(unvalidated_config)<block_end><block_end><def_stmt>test_validate_bitbucket_trigger app<block_start>url_hit=[<false>]<line_sep>@urlmatch(netloc=r"bitbucket.org")<def_stmt>handler url request<block_start>url_hit[0]=<true><line_sep><return>{"status_code":200 "content":"oauth_token=foo&oauth_token_secret=bar" }<block_end><with_stmt>HTTMock(handler)<block_start>validator=BitbucketTriggerValidator()<line_sep>url_scheme_and_hostname=URLSchemeAndHostname("http" "localhost:5000")<line_sep>unvalidated_config=ValidatorContext({"BITBUCKET_TRIGGER_CONFIG":{"CONSUMER_KEY":"foo" "CONSUMER_SECRET":"bar" } } url_scheme_and_hostname=url_scheme_and_hostname )<line_sep>validator.validate(unvalidated_config)<assert_stmt>url_hit[0]<block_end><block_end> |
<import_from_stmt>typing List Tuple<import_from_stmt>omegaconf DictConfig<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>rlcycle.common.abstract.loss Loss<class_stmt>DQNLoss(Loss)<block_start>"""Compute double DQN loss"""<def_stmt>__init__ self hyper_params:DictConfig use_cuda:bool<block_start>Loss.__init__(self hyper_params use_cuda)<block_end><def_stmt>__call__ self networks:Tuple[nn.Module <ellipsis>] data:Tuple[torch.Tensor <ellipsis>]<arrow>Tuple[torch.Tensor <ellipsis>]<block_start>network,target_network=networks<line_sep>states,actions,rewards,next_states,dones=data<line_sep>q_value=network.forward(states).gather(1 actions)<with_stmt>torch.no_grad()<block_start>next_q=torch.max(target_network.forward(next_states) 1)[0].unsqueeze(1)<line_sep>n_step_gamma=self.hyper_params.gamma<power>self.hyper_params.n_step<line_sep>target_q=rewards+(1-dones)<times>n_step_gamma<times>next_q<block_end>element_wise_loss=F.smooth_l1_loss(q_value target_q.detach() reduction="none")<line_sep><return>element_wise_loss<block_end><block_end><class_stmt>QRLoss(Loss)<block_start>"""Compute quantile regression loss"""<def_stmt>__init__ self hyper_params:DictConfig use_cuda:bool<block_start>Loss.__init__(self hyper_params use_cuda)<block_end><def_stmt>__call__ self networks:Tuple[nn.Module <ellipsis>] data:Tuple[torch.Tensor <ellipsis>] <arrow>Tuple[torch.Tensor <ellipsis>]<block_start>network,target_network=networks<line_sep>states,actions,rewards,next_states,dones=data<line_sep>z_dists=network.forward(states)<line_sep>z_dists=z_dists[list(range(states.size(0))) actions.view(-1)]<with_stmt>torch.no_grad()<block_start>next_z=target_network.forward(next_states)<line_sep>next_actions=torch.max(next_z.mean(2) dim=1)[1]<line_sep>next_z=next_z[list(range(states.size(0))) next_actions]<line_sep>n_step_gamma=self.hyper_params.gamma<power>self.hyper_params.n_step<line_sep>target_z=rewards+(1-dones)<times>n_step_gamma<times>next_z<block_end>distance=target_z-z_dists<line_sep>quantile_huber_loss=(network.tau-(distance.detach()<l>0).float()).abs()<times>self.huber_loss(distance)<line_sep>element_wise_loss=torch.mean(quantile_huber_loss dim=1 keepdim=<true>)<line_sep><return>element_wise_loss<block_end>@staticmethod<def_stmt>huber_loss x:List[torch.Tensor] k:float=1.0<block_start><return>torch.where(x.abs()<le>k 0.5<times>x.pow(2) k<times>(x.abs()-0.5<times>k))<block_end><block_end><class_stmt>CategoricalLoss(Loss)<block_start>"""Compute C51 loss"""<def_stmt>__init__ self hyper_params:DictConfig use_cuda:bool<block_start>Loss.__init__(self hyper_params use_cuda)<block_end><def_stmt>__call__ self networks:Tuple[nn.Module <ellipsis>] data:Tuple[torch.Tensor <ellipsis>]<arrow>Tuple[torch.Tensor <ellipsis>]<block_start>network,target_network=networks<line_sep>states,actions,rewards,next_states,dones=data<line_sep>batch_size=states.size(0)<line_sep>offset=(torch.linspace(0 (batch_size-1)<times>network.num_atoms batch_size).long().unsqueeze(1).expand(batch_size network.num_atoms))<if_stmt>self.use_cuda<block_start>offset=offset.cuda()<block_end>z_dists=network.forward(states)<line_sep>z_dists=z_dists[list(range(states.size(0))) actions.view(-1)]<with_stmt>torch.no_grad()<block_start>next_z=target_network.forward(next_states)<line_sep>next_actions=torch.max(next_z.mean(2) dim=1)[1]<line_sep>next_z=next_z[list(range(states.size(0))) 
next_actions]<line_sep>n_step_gamma=self.hyper_params.gamma<power>self.hyper_params.n_step<line_sep>target_z=rewards+(1-dones)<times>n_step_gamma<times>network.support<line_sep>target_z=torch.clamp(target_z min=network.v_min max=network.v_max)<line_sep>target_proj=self.dist_projection(network next_z target_z offset)<block_end>log_dist=torch.log(z_dists)<line_sep>element_wise_loss=-(target_proj<times>log_dist).sum(1)<line_sep><return>element_wise_loss<block_end><def_stmt>dist_projection self network:nn.Module next_z:torch.Tensor target_z:torch.Tensor offset:torch.Tensor <arrow>torch.Tensor<block_start>b=(target_z-network.v_min)/network.delta_z<line_sep>lb=b.floor().long()<line_sep>ub=b.ceil().long()<line_sep>proj_dist=torch.zeros(next_z.size())<if_stmt>self.use_cuda<block_start>proj_dist=proj_dist.cuda()<block_end>proj_dist.view(-1).index_add_(0 (lb+offset).view(-1) (next_z<times>(ub.float()-b)).view(-1))<line_sep>proj_dist.view(-1).index_add_(0 (ub+offset).view(-1) (next_z<times>(b-lb.float())).view(-1))<line_sep><return>proj_dist<block_end><block_end> |
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
<import_stmt>os<import_stmt>platform<import_stmt>time<import_stmt>pytest<import_stmt>zmq<import_from_stmt>zmq.tests BaseZMQTestCase skip_pypy<class_stmt>TestDraftSockets(BaseZMQTestCase)<block_start><def_stmt>setUp self<block_start><if_stmt><not>zmq.DRAFT_API<block_start><raise>pytest.skip("draft api unavailable")<block_end>super(TestDraftSockets self).setUp()<block_end><def_stmt>test_client_server self<block_start>client,server=self.create_bound_pair(zmq.CLIENT zmq.SERVER)<line_sep>client.send(b'request')<line_sep>msg=self.recv(server copy=<false>)<assert_stmt>msg.routing_id<is><not><none><line_sep>server.send(b'reply' routing_id=msg.routing_id)<line_sep>reply=self.recv(client)<assert_stmt>reply<eq>b'reply'<block_end><def_stmt>test_radio_dish self<block_start>dish,radio=self.create_bound_pair(zmq.DISH zmq.RADIO)<line_sep>dish.rcvtimeo=250<line_sep>group='mygroup'<line_sep>dish.join(group)<line_sep>received_count=0<line_sep>received=set()<line_sep>sent=set()<for_stmt>i range(10)<block_start>msg=str(i).encode('ascii')<line_sep>sent.add(msg)<line_sep>radio.send(msg group=group)<try_stmt><block_start>recvd=dish.recv()<block_end><except_stmt>zmq.Again<block_start>time.sleep(0.1)<block_end><else_stmt><block_start>received.add(recvd)<line_sep>received_count<augadd>1<block_end><block_end># assert that we got *something*
<assert_stmt>len(received.intersection(sent))<ge>5<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>django.conf.urls.defaults patterns url<import_from_stmt>django_qbe.exports formats<line_sep>urlpatterns=patterns('django_qbe.views' url(r'^$' 'qbe_form' name="qbe_form") url(r'^js/$' 'qbe_js' name="qbe_js") url(r'^results/bookmark/$' 'qbe_bookmark' name="qbe_bookmark") url(r'^results/export/(?P<format>(%s))/$'%"|".join(formats.keys()) 'qbe_export' name="qbe_export") url(r'^results/proxy/$' 'qbe_proxy' name="qbe_proxy") url(r'^results/(?P<query_hash>(.*))/$' 'qbe_results' name="qbe_results") url(r'^auto/$' 'qbe_autocomplete' name="qbe_autocomplete") )<line_sep> |
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
<import_stmt>numpy<as>np<line_sep># from transform import get_perspective_transform, warp_perspective
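# Text-image augmentations: distort, stretch and perspective each jitter a set of control points at random and then warp the image with a moving-least-squares (WarpMLS) transform.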
<import_from_stmt>warp_mls WarpMLS<def_stmt>distort src segment<block_start>img_h,img_w=src.shape[:2]<line_sep>cut=img_w<floordiv>segment<line_sep>thresh=cut<floordiv>3<line_sep># thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts=list()<line_sep>dst_pts=list()<line_sep>src_pts.append([0 0])<line_sep>src_pts.append([img_w 0])<line_sep>src_pts.append([img_w img_h])<line_sep>src_pts.append([0 img_h])<line_sep>dst_pts.append([np.random.randint(thresh) np.random.randint(thresh)])<line_sep>dst_pts.append([img_w-np.random.randint(thresh) np.random.randint(thresh)])<line_sep>dst_pts.append([img_w-np.random.randint(thresh) img_h-np.random.randint(thresh)])<line_sep>dst_pts.append([np.random.randint(thresh) img_h-np.random.randint(thresh)])<line_sep>half_thresh=thresh<times>0.5<for_stmt>cut_idx np.arange(1 segment 1)<block_start>src_pts.append([cut<times>cut_idx 0])<line_sep>src_pts.append([cut<times>cut_idx img_h])<line_sep>dst_pts.append([cut<times>cut_idx+np.random.randint(thresh)-half_thresh np.random.randint(thresh)-half_thresh])<line_sep>dst_pts.append([cut<times>cut_idx+np.random.randint(thresh)-half_thresh img_h+np.random.randint(thresh)-half_thresh])<block_end>trans=WarpMLS(src src_pts dst_pts img_w img_h)<line_sep>dst=trans.generate()<line_sep><return>dst<block_end><def_stmt>stretch src segment<block_start>img_h,img_w=src.shape[:2]<line_sep>cut=img_w<floordiv>segment<line_sep>thresh=cut<times>4<floordiv>5<line_sep># thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts=list()<line_sep>dst_pts=list()<line_sep>src_pts.append([0 0])<line_sep>src_pts.append([img_w 0])<line_sep>src_pts.append([img_w img_h])<line_sep>src_pts.append([0 img_h])<line_sep>dst_pts.append([0 0])<line_sep>dst_pts.append([img_w 0])<line_sep>dst_pts.append([img_w img_h])<line_sep>dst_pts.append([0 img_h])<line_sep>half_thresh=thresh<times>0.5<for_stmt>cut_idx np.arange(1 segment 1)<block_start>move=np.random.randint(thresh)-half_thresh<line_sep>src_pts.append([cut<times>cut_idx 0])<line_sep>src_pts.append([cut<times>cut_idx img_h])<line_sep>dst_pts.append([cut<times>cut_idx+move 0])<line_sep>dst_pts.append([cut<times>cut_idx+move img_h])<block_end>trans=WarpMLS(src src_pts dst_pts img_w img_h)<line_sep>dst=trans.generate()<line_sep><return>dst<block_end><def_stmt>perspective src<block_start>img_h,img_w=src.shape[:2]<line_sep>thresh=img_h<floordiv>2<line_sep>src_pts=list()<line_sep>dst_pts=list()<line_sep>src_pts.append([0 0])<line_sep>src_pts.append([img_w 0])<line_sep>src_pts.append([img_w img_h])<line_sep>src_pts.append([0 img_h])<line_sep>dst_pts.append([0 np.random.randint(thresh)])<line_sep>dst_pts.append([img_w np.random.randint(thresh)])<line_sep>dst_pts.append([img_w img_h-np.random.randint(thresh)])<line_sep>dst_pts.append([0 img_h-np.random.randint(thresh)])<line_sep>trans=WarpMLS(src src_pts dst_pts img_w img_h)<line_sep>dst=trans.generate()<line_sep><return>dst<block_end># def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
|
<import_from_stmt>devito.ir Call<import_from_stmt>devito.passes.iet.definitions DataManager<import_from_stmt>devito.passes.iet.langbase LangBB<line_sep>__all__=['CBB' 'CDataManager']<class_stmt>CBB(LangBB)<block_start>mapper={'aligned':<lambda>i:'__attribute__((aligned(%d)))'%i 'host-alloc':<lambda>i j k:Call('posix_memalign' (i j k)) 'host-free':<lambda>i:Call('free' (i )) }<block_end><class_stmt>CDataManager(DataManager)<block_start>lang=CBB<block_end> |
# PyTorch
<import_stmt>torch<import_from_stmt>torch.utils.data IterableDataset DataLoader<import_from_stmt>donkeycar.utils train_test_split<import_from_stmt>donkeycar.parts.tub_v2 Tub<import_from_stmt>torchvision transforms<import_from_stmt>typing List Any<import_from_stmt>donkeycar.pipeline.types TubRecord TubDataset<import_from_stmt>donkeycar.pipeline.sequence TubSequence<import_stmt>pytorch_lightning<as>pl<def_stmt>get_default_transform for_video=<false> for_inference=<false> resize=<true><block_start>"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is the number of video frames
in a clip. The images have to be loaded into a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
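A minimal usage sketch (illustrative, not part of the original docstring):
transform = get_default_transform() # resize to 224x224, ImageNet mean/std
video_transform = get_default_transform(for_video=True) # 112x112, video mean/std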
"""<line_sep>mean=[0.485 0.456 0.406]<line_sep>std=[0.229 0.224 0.225]<line_sep>input_size=(224 224)<if_stmt>for_video<block_start>mean=[0.43216 0.394666 0.37645]<line_sep>std=[0.22803 0.22145 0.216989]<line_sep>input_size=(112 112)<block_end>transform_items=[transforms.ToTensor() transforms.Normalize(mean=mean std=std)]<if_stmt>resize<block_start>transform_items.insert(0 transforms.Resize(input_size))<block_end><return>transforms.Compose(transform_items)<block_end><class_stmt>TorchTubDataset(IterableDataset)<block_start>'''
Loads the dataset, and creates a train/test split.
'''<def_stmt>__init__ self config records:List[TubRecord] transform=<none><block_start>"""Create a PyTorch Tub Dataset
Args:
config (object): the configuration information
records (List[TubRecord]): a list of tub records
transform (function, optional): a transform to apply to the data
"""<line_sep>self.config=config<line_sep># Handle the transforms
<if_stmt>transform<block_start>self.transform=transform<block_end><else_stmt><block_start>self.transform=get_default_transform()<block_end>self.sequence=TubSequence(records)<line_sep>self.pipeline=self._create_pipeline()<line_sep>self.len=len(records)<block_end><def_stmt>_create_pipeline self<block_start>""" This can be overridden if more complicated pipelines are
required """<def_stmt>y_transform record:TubRecord<block_start>angle:float=record.underlying['user/angle']<line_sep>throttle:float=record.underlying['user/throttle']<line_sep>predictions=torch.tensor([angle throttle] dtype=torch.float)<line_sep># Normalize to be between [0, 1]
# angle and throttle are originally between [-1, 1]
predictions=(predictions+1)/2<line_sep><return>predictions<block_end><def_stmt>x_transform record:TubRecord# Loads the result of Image.open()
<block_start>img_arr=record.image(cached=<true> as_nparray=<false>)<line_sep><return>self.transform(img_arr)<block_end># Build pipeline using the transformations
pipeline=self.sequence.build_pipeline(x_transform=x_transform y_transform=y_transform)<line_sep><return>pipeline<block_end><def_stmt>__len__ self<block_start><return>len(self.sequence)<block_end><def_stmt>__iter__ self<block_start><return>iter(self.pipeline)<block_end><block_end><class_stmt>TorchTubDataModule(pl.LightningDataModule)<block_start><def_stmt>__init__ self config:Any tub_paths:List[str] transform=<none><block_start>"""Create a PyTorch Lightning Data Module to contain all data loading logic
Args:
config (object): the configuration information
tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
Each tub path corresponds to another training run.
transform (function, optional): a transform to apply to the data
"""<line_sep>super().__init__()<line_sep>self.config=config<line_sep>self.tub_paths=tub_paths<line_sep># Handle the transforms
<if_stmt>transform<block_start>self.transform=transform<block_end><else_stmt><block_start>self.transform=get_default_transform()<block_end>self.tubs:List[Tub]=[Tub(tub_path read_only=<true>)<for>tub_path self.tub_paths]<line_sep>self.records:List[TubRecord]=[]<block_end><def_stmt>setup self stage=<none><block_start>"""Load all the tub data and set up the datasets.
Args:
stage ([string], optional): setup expects a string arg stage.
It is used to separate setup logic for trainer.fit
and trainer.test. Defaults to None.
"""<line_sep># Loop through all the different tubs and load all the records for each of them
<for_stmt>tub self.tubs<block_start><for_stmt>underlying tub<block_start>record=TubRecord(self.config tub.base_path underlying=underlying)<line_sep>self.records.append(record)<block_end><block_end>train_records,val_records=train_test_split(self.records test_size=(1.-self.config.TRAIN_TEST_SPLIT))<assert_stmt>len(val_records)<g>0 "Not enough validation data. Add more data"<line_sep>self.train_dataset=TorchTubDataset(self.config train_records transform=self.transform)<line_sep>self.val_dataset=TorchTubDataset(self.config val_records transform=self.transform)<block_end><def_stmt>train_dataloader self# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
<block_start><return>DataLoader(self.train_dataset batch_size=self.config.BATCH_SIZE num_workers=0)<block_end><def_stmt>val_dataloader self# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
<block_start><return>DataLoader(self.val_dataset batch_size=self.config.BATCH_SIZE num_workers=0)<block_end><block_end> |
<import_from_stmt>robot __version__<as>ROBOT_VERSION<import_stmt>sys<import_stmt>tempfile<import_stmt>textwrap<import_stmt>unittest<import_stmt>shutil<import_stmt>subprocess<class_stmt>PabotOrderingGroupTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.tmpdir=tempfile.mkdtemp()<block_end><def_stmt>tearDown self<block_start>shutil.rmtree(self.tmpdir)<block_end><def_stmt>_run_tests_with self testfile orderfile<block_start>robot_file=open("{}/test.robot".format(self.tmpdir) "w")<line_sep>robot_file.write(textwrap.dedent(testfile))<line_sep>robot_file.close()<with_stmt>open("{}/order.dat".format(self.tmpdir) "w")<as>f<block_start>f.write(textwrap.dedent(orderfile))<block_end>process=subprocess.Popen([sys.executable "-m"<concat>"pabot.pabot" "--testlevelsplit" "--ordering" "{}/order.dat".format(self.tmpdir) "{}/test.robot".format(self.tmpdir) ] cwd=self.tmpdir stdout=subprocess.PIPE stderr=subprocess.PIPE )<line_sep><return>process.communicate()<block_end><def_stmt>test_orders self<block_start>stdout,stderr=self._run_tests_with("""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""" """
{
--test Test.First Test
--test Test.Second Test
}
--test Test.Third Test
""" )<if_stmt>sys.version_info<l>(3 0)<block_start>self.assertIn("PASSED" stdout stderr)<line_sep>self.assertNotIn("FAILED" stdout stderr)<line_sep>self.assertEqual(stdout.count("PASSED") 2)<block_end><else_stmt><block_start>self.assertIn(b"PASSED" stdout stderr)<line_sep>self.assertNotIn(b"FAILED" stdout stderr)<line_sep>self.assertEqual(stdout.count(b"PASSED") 2)<block_end><block_end><def_stmt>test_two_orders self<block_start>stdout,stderr=self._run_tests_with("""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Second And Quarter
Should Be Equal ${SCALAR} Hello, globe!
Second And Half
Should Be Equal ${SCALAR} Hello, globe!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""" """
{
--test Test.First Test
--test Test.Second Test
}
{
--test Test.Second And Quarter
--test Test.Second And Half
}
--test Test.Third Test
""" )<if_stmt>sys.version_info<l>(3 0)<block_start>self.assertIn("PASSED" stdout stderr)<line_sep>self.assertNotIn("FAILED" stdout stderr)<if_stmt>ROBOT_VERSION<l>"4.0"<block_start>expected_write="5 critical tests, 5 passed, 0 failed"<block_end><else_stmt><block_start>expected_write="5 tests, 5 passed, 0 failed, 0 skipped."<block_end>self.assertIn(expected_write stdout stderr)<line_sep>self.assertEqual(stdout.count("PASSED") 3)<block_end><else_stmt><block_start>self.assertIn(b"PASSED" stdout stderr)<line_sep>self.assertNotIn(b"FAILED" stdout stderr)<if_stmt>ROBOT_VERSION<l>"4.0"<block_start>expected_write=b"5 critical tests, 5 passed, 0 failed"<block_end><else_stmt><block_start>expected_write=b"5 tests, 5 passed, 0 failed, 0 skipped."<block_end>self.assertIn(expected_write stdout stderr)<line_sep>self.assertEqual(stdout.count(b"PASSED") 3)<block_end><block_end><def_stmt>test_too_big_testname self<block_start>stdout,stderr=self._run_tests_with("""
*** Test Cases ***
Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
Log Test
""" """
--test Invalid
""" )<if_stmt>sys.version_info<l>(3 0)<block_start>self.assertIn("PASSED" stdout stderr)<line_sep>self.assertNotIn("FAILED" stdout stderr)<line_sep>self.assertEqual(stdout.count("PASSED") 1)<block_end><else_stmt><block_start>self.assertIn(b"PASSED" stdout stderr)<line_sep>self.assertNotIn(b"FAILED" stdout stderr)<line_sep>self.assertEqual(stdout.count(b"PASSED") 1)<block_end><block_end><def_stmt>test_longnames_in_tests self<block_start>stdout,stderr=self._run_tests_with("""
*** Settings ***
Test Template Test1
*** Test Cases ***
The Somewhat Long Name Of The Test S1Test 01 1
The Somewhat Long Name Of The Test S1Test 02 1
The Somewhat Long Name Of The Test S1Test 03 1
The Somewhat Long Name Of The Test S1Test 04 1
The Somewhat Long Name Of The Test S1Test 05 1
The Somewhat Long Name Of The Test S1Test 06 1
The Somewhat Long Name Of The Test S1Test 07 1
The Somewhat Long Name Of The Test S1Test 08 1
The Somewhat Long Name Of The Test S1Test 09 1
The Somewhat Long Name Of The Test S1Test 10 1
The Somewhat Long Name Of The Test S1Test 11 1
The Somewhat Long Name Of The Test S1Test 12 1
*** Keywords ***
Test1
[Arguments] ${arg}
Log Test
""" """
{
--test Test.The Somewhat Long Name Of The Test S1Test 01
--test Test.The Somewhat Long Name Of The Test S1Test 02
--test Test.The Somewhat Long Name Of The Test S1Test 03
--test Test.The Somewhat Long Name Of The Test S1Test 04
--test Test.The Somewhat Long Name Of The Test S1Test 05
--test Test.The Somewhat Long Name Of The Test S1Test 06
}
{
--test Test.The Somewhat Long Name Of The Test S1Test 07
--test Test.The Somewhat Long Name Of The Test S1Test 08
--test Test.The Somewhat Long Name Of The Test S1Test 09
--test Test.The Somewhat Long Name Of The Test S1Test 10
--test Test.The Somewhat Long Name Of The Test S1Test 11
--test Test.The Somewhat Long Name Of The Test S1Test 12
}
""" )<if_stmt>sys.version_info<l>(3 0)<block_start>self.assertIn("PASSED" stdout stderr)<line_sep>self.assertNotIn("FAILED" stdout stderr)<line_sep>self.assertEqual(stdout.count("PASSED") 2)<block_end><else_stmt><block_start>self.assertIn(b"PASSED" stdout stderr)<line_sep>self.assertNotIn(b"FAILED" stdout stderr)<line_sep>self.assertEqual(stdout.count(b"PASSED") 2)<block_end><block_end><block_end> |
<import_stmt>torch<line_sep>ckp_path='./checkpoints/fashion_PATN/latest_net_netG.pth'<line_sep>save_path='./checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'<line_sep>states_dict=torch.load(ckp_path)<line_sep>states_dict_new=states_dict.copy()<for_stmt>key states_dict.keys()<block_start><if_stmt>"running_var"<in>key<or>"running_mean"<in>key<block_start><del_stmt>states_dict_new[key]<block_end><block_end>torch.save(states_dict_new save_path)<line_sep> |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>mistral exceptions<as>exc<import_from_stmt>mistral.tests.unit base<import_from_stmt>mistral.utils ssh_utils<import_from_stmt>mistral_lib utils<class_stmt>UtilsTest(base.BaseTest)<block_start><def_stmt>test_itersubclasses self<block_start><class_stmt>A(object)<block_start><pass><block_end><class_stmt>B(A)<block_start><pass><block_end><class_stmt>C(A)<block_start><pass><block_end><class_stmt>D(C)<block_start><pass><block_end>self.assertEqual([B C D] list(utils.iter_subclasses(A)))<block_end><def_stmt>test_paramiko_to_private_key self<block_start>self.assertRaises(exc.DataAccessException ssh_utils._to_paramiko_private_key "../dir")<line_sep>self.assertRaises(exc.DataAccessException ssh_utils._to_paramiko_private_key "..\\dir")<line_sep>self.assertIsNone(ssh_utils._to_paramiko_private_key(private_key_filename=<none> password='<PASSWORD>'))<block_end><block_end> |
<import_stmt>json<import_from_stmt>wptserve.utils isomorphic_decode<def_stmt>main request response<block_start>origin=request.GET.first(b"origin" request.headers.get(b'origin')<or>b'none')<if_stmt>b"check"<in>request.GET<block_start>token=request.GET.first(b"token")<line_sep>value=request.server.stash.take(token)<if_stmt>value<is><not><none><block_start><if_stmt>request.GET.first(b"check" <none>)<eq>b"keep"<block_start>request.server.stash.put(token value)<block_end>body=u"1"<block_end><else_stmt><block_start>body=u"0"<block_end><return>[(b"Content-Type" b"text/plain")] body<block_end><if_stmt>origin<ne>b'none'<block_start>response.headers.set(b"Access-Control-Allow-Origin" origin)<block_end><if_stmt>b'origin2'<in>request.GET<block_start>response.headers.append(b"Access-Control-Allow-Origin" request.GET.first(b'origin2'))<block_end>#Preflight
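#Reflect requested CORS preflight settings (headers, credentials, methods) from the query string into the response headers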
<if_stmt>b'headers'<in>request.GET<block_start>response.headers.set(b"Access-Control-Allow-Headers" request.GET.first(b'headers'))<block_end><if_stmt>b'credentials'<in>request.GET<block_start>response.headers.set(b"Access-Control-Allow-Credentials" request.GET.first(b'credentials'))<block_end><if_stmt>b'methods'<in>request.GET<block_start>response.headers.set(b"Access-Control-Allow-Methods" request.GET.first(b'methods'))<block_end>code_raw=request.GET.first(b'code' <none>)<if_stmt>code_raw<block_start>code=int(code_raw)<block_end><else_stmt><block_start>code=<none><block_end><if_stmt>request.method<eq>u'OPTIONS'#Override the response code if we're in a preflight and it's asked
<block_start><if_stmt>b'preflight'<in>request.GET<block_start>code=int(request.GET.first(b'preflight'))<block_end>#Log that the preflight actually happened if we have an ident
<if_stmt>b'token'<in>request.GET<block_start>request.server.stash.put(request.GET[b'token'] <true>)<block_end><block_end><if_stmt>b'location'<in>request.GET<block_start><if_stmt>code<is><none><block_start>code=302<block_end><if_stmt>code<ge>300<and>code<l>400<block_start>response.headers.set(b"Location" request.GET.first(b'location'))<block_end><block_end>headers={}<for_stmt>name,values request.headers.items()<block_start><if_stmt>len(values)<eq>1<block_start>headers[isomorphic_decode(name)]=isomorphic_decode(values[0])<block_end><else_stmt>#I have no idea, really
<block_start>headers[name]=values<block_end><block_end>headers[u'get_value']=isomorphic_decode(request.GET.first(b'get_value' b''))<line_sep>body=json.dumps(headers)<if_stmt>code<block_start><return>(code b"StatusText") [] body<block_end><else_stmt><block_start><return>body<block_end><block_end> |
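The handler above is driven entirely by query parameters: `origin`/`origin2`/`headers`/`credentials`/`methods` set the CORS response headers, `code`/`preflight` override the status, `location` turns the response into a redirect, and `check`/`token` let a test ask whether a preflight actually reached the server. A hedged sketch of the kind of URLs a test might build against it; the resource path is an assumption, not taken from the source:

import uuid

# Hypothetical resource path; substitute the real location of this handler in the test suite.
base = '/fetch/api/resources/cors-server.py'
token = str(uuid.uuid4())

allow_all = f'{base}?origin=*&headers=x-test&methods=GET,PUT'   # permissive CORS headers
redirect = f'{base}?location=/common/blank.html'                # status defaults to 302
preflight = f'{base}?token={token}&headers=x-test'              # the OPTIONS preflight is recorded under the token
check_hit = f'{base}?check=1&token={token}'                     # body is "1" once that preflight has run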
"""Implementation of Rule L024."""<import_from_stmt>sqlfluff.core.rules.doc_decorators document_fix_compatible<import_from_stmt>sqlfluff.rules.L023 Rule_L023<line_sep>@document_fix_compatible<class_stmt>Rule_L024(Rule_L023)<block_start>"""Single whitespace expected after USING in JOIN clause.
| **Anti-pattern**
.. code-block:: sql
SELECT b
FROM foo
LEFT JOIN zoo USING(a)
| **Best practice**
| The • character represents a space.
| Add a space after USING, to avoid mistaking it
| for a function.
.. code-block:: sql
:force:
SELECT b
FROM foo
LEFT JOIN zoo USING•(a)
"""<line_sep>expected_mother_segment_type="join_clause"<line_sep>pre_segment_identifier=("name" "using")<line_sep>post_segment_identifier=("type" "bracketed")<line_sep>expand_children=<none><line_sep>allow_newline=<true><block_end> |
<import_from_stmt>matplotlib.colors LinearSegmentedColormap<import_from_stmt>numpy nan inf<line_sep># Used to reconstruct the colormap in viscm
parameters={'xp':[-5.4895292543686764 14.790571669586654 82.5546687431056 29.15531114139253 -4.1316769886951761 -13.002076438907238] 'yp':[-35.948168839230306 -42.273376159885785 -28.845467523197698 52.03426124197 36.832712600868973 40.792291220556734] 'min_JK':16.8314150305 'max_JK':95}<line_sep>cm_data=[[5.03832136e-02 2.98028976e-02 5.27974883e-01] [6.35363639e-02 2.84259729e-02 5.33123681e-01] [7.53531234e-02 2.72063728e-02 5.38007001e-01] [8.62217979e-02 2.61253206e-02 5.42657691e-01] [9.63786097e-02 2.51650976e-02 5.47103487e-01] [1.05979704e-01 2.43092436e-02 5.51367851e-01] [1.15123641e-01 2.35562500e-02 5.55467728e-01] [1.23902903e-01 2.28781011e-02 5.59423480e-01] [1.32380720e-01 2.22583774e-02 5.63250116e-01] [1.40603076e-01 2.16866674e-02 5.66959485e-01] [1.48606527e-01 2.11535876e-02 5.70561711e-01] [1.56420649e-01 2.06507174e-02 5.74065446e-01] [1.64069722e-01 2.01705326e-02 5.77478074e-01] [1.71573925e-01 1.97063415e-02 5.80805890e-01] [1.78950212e-01 1.92522243e-02 5.84054243e-01] [1.86212958e-01 1.88029767e-02 5.87227661e-01] [1.93374449e-01 1.83540593e-02 5.90329954e-01] [2.00445260e-01 1.79015512e-02 5.93364304e-01] [2.07434551e-01 1.74421086e-02 5.96333341e-01] [2.14350298e-01 1.69729276e-02 5.99239207e-01] [2.21196750e-01 1.64970484e-02 6.02083323e-01] [2.27982971e-01 1.60071509e-02 6.04867403e-01] [2.34714537e-01 1.55015065e-02 6.07592438e-01] [2.41396253e-01 1.49791041e-02 6.10259089e-01] [2.48032377e-01 1.44393586e-02 6.12867743e-01] [2.54626690e-01 1.38820918e-02 6.15418537e-01] [2.61182562e-01 1.33075156e-02 6.17911385e-01] [2.67702993e-01 1.27162163e-02 6.20345997e-01] [2.74190665e-01 1.21091423e-02 6.22721903e-01] [2.80647969e-01 1.14875915e-02 6.25038468e-01] [2.87076059e-01 1.08554862e-02 6.27294975e-01] [2.93477695e-01 1.02128849e-02 6.29490490e-01] [2.99855122e-01 9.56079551e-03 6.31623923e-01] [3.06209825e-01 8.90185346e-03 6.33694102e-01] [3.12543124e-01 8.23900704e-03 6.35699759e-01] [3.18856183e-01 7.57551051e-03 6.37639537e-01] [3.25150025e-01 6.91491734e-03 6.39512001e-01] [3.31425547e-01 6.26107379e-03 6.41315649e-01] [3.37683446e-01 5.61830889e-03 6.43048936e-01] [3.43924591e-01 4.99053080e-03 6.44710195e-01] [3.50149699e-01 4.38202557e-03 6.46297711e-01] [3.56359209e-01 3.79781761e-03 6.47809772e-01] [3.62553473e-01 3.24319591e-03 6.49244641e-01] [3.68732762e-01 2.72370721e-03 6.50600561e-01] [3.74897270e-01 2.24514897e-03 6.51875762e-01] [3.81047116e-01 1.81356205e-03 6.53068467e-01] [3.87182639e-01 1.43446923e-03 6.54176761e-01] [3.93304010e-01 1.11388259e-03 6.55198755e-01] [3.99410821e-01 8.59420809e-04 6.56132835e-01] [4.05502914e-01 6.78091517e-04 6.56977276e-01] [4.11580082e-01 5.77101735e-04 6.57730380e-01] [4.17642063e-01 5.63847476e-04 6.58390492e-01] [4.23688549e-01 6.45902780e-04 6.58956004e-01] [4.29719186e-01 8.31008207e-04 6.59425363e-01] [4.35733575e-01 1.12705875e-03 6.59797077e-01] [4.41732123e-01 1.53984779e-03 6.60069009e-01] [4.47713600e-01 2.07954744e-03 6.60240367e-01] [4.53677394e-01 2.75470302e-03 6.60309966e-01] [4.59622938e-01 3.57374415e-03 6.60276655e-01] [4.65549631e-01 4.54518084e-03 6.60139383e-01] [4.71456847e-01 5.67758762e-03 6.59897210e-01] [4.77343929e-01 6.97958743e-03 6.59549311e-01] [4.83210198e-01 8.45983494e-03 6.59094989e-01] [4.89054951e-01 1.01269996e-02 6.58533677e-01] [4.94877466e-01 1.19897486e-02 6.57864946e-01] [5.00677687e-01 1.40550640e-02 6.57087561e-01] [5.06454143e-01 1.63333443e-02 6.56202294e-01] [5.12206035e-01 1.88332232e-02 6.55209222e-01] [5.17932580e-01 2.15631918e-02 6.54108545e-01] 
[5.23632990e-01 2.45316468e-02 6.52900629e-01] [5.29306474e-01 2.77468735e-02 6.51586010e-01] [5.34952244e-01 3.12170300e-02 6.50165396e-01] [5.40569510e-01 3.49501310e-02 6.48639668e-01] [5.46157494e-01 3.89540334e-02 6.47009884e-01] [5.51715423e-01 4.31364795e-02 6.45277275e-01] [5.57242538e-01 4.73307585e-02 6.43443250e-01] [5.62738096e-01 5.15448092e-02 6.41509389e-01] [5.68201372e-01 5.57776706e-02 6.39477440e-01] [5.73631859e-01 6.00281369e-02 6.37348841e-01] [5.79028682e-01 6.42955547e-02 6.35126108e-01] [5.84391137e-01 6.85790261e-02 6.32811608e-01] [5.89718606e-01 7.28775875e-02 6.30407727e-01] [5.95010505e-01 7.71902878e-02 6.27916992e-01] [6.00266283e-01 8.15161895e-02 6.25342058e-01] [6.05485428e-01 8.58543713e-02 6.22685703e-01] [6.10667469e-01 9.02039303e-02 6.19950811e-01] [6.15811974e-01 9.45639838e-02 6.17140367e-01] [6.20918555e-01 9.89336721e-02 6.14257440e-01] [6.25986869e-01 1.03312160e-01 6.11305174e-01] [6.31016615e-01 1.07698641e-01 6.08286774e-01] [6.36007543e-01 1.12092335e-01 6.05205491e-01] [6.40959444e-01 1.16492495e-01 6.02064611e-01] [6.45872158e-01 1.20898405e-01 5.98867442e-01] [6.50745571e-01 1.25309384e-01 5.95617300e-01] [6.55579615e-01 1.29724785e-01 5.92317494e-01] [6.60374266e-01 1.34143997e-01 5.88971318e-01] [6.65129493e-01 1.38566428e-01 5.85582301e-01] [6.69845385e-01 1.42991540e-01 5.82153572e-01] [6.74522060e-01 1.47418835e-01 5.78688247e-01] [6.79159664e-01 1.51847851e-01 5.75189431e-01] [6.83758384e-01 1.56278163e-01 5.71660158e-01] [6.88318440e-01 1.60709387e-01 5.68103380e-01] [6.92840088e-01 1.65141174e-01 5.64521958e-01] [6.97323615e-01 1.69573215e-01 5.60918659e-01] [7.01769334e-01 1.74005236e-01 5.57296144e-01] [7.06177590e-01 1.78437000e-01 5.53656970e-01] [7.10548747e-01 1.82868306e-01 5.50003579e-01] [7.14883195e-01 1.87298986e-01 5.46338299e-01] [7.19181339e-01 1.91728906e-01 5.42663338e-01] [7.23443604e-01 1.96157962e-01 5.38980786e-01] [7.27670428e-01 2.00586086e-01 5.35292612e-01] [7.31862231e-01 2.05013174e-01 5.31600995e-01] [7.36019424e-01 2.09439071e-01 5.27908434e-01] [7.40142557e-01 2.13863965e-01 5.24215533e-01] [7.44232102e-01 2.18287899e-01 5.20523766e-01] [7.48288533e-01 2.22710942e-01 5.16834495e-01] [7.52312321e-01 2.27133187e-01 5.13148963e-01] [7.56303937e-01 2.31554749e-01 5.09468305e-01] [7.60263849e-01 2.35975765e-01 5.05793543e-01] [7.64192516e-01 2.40396394e-01 5.02125599e-01] [7.68090391e-01 2.44816813e-01 4.98465290e-01] [7.71957916e-01 2.49237220e-01 4.94813338e-01] [7.75795522e-01 2.53657797e-01 4.91170517e-01] [7.79603614e-01 2.58078397e-01 4.87539124e-01] [7.83382636e-01 2.62499662e-01 4.83917732e-01] [7.87132978e-01 2.66921859e-01 4.80306702e-01] [7.90855015e-01 2.71345267e-01 4.76706319e-01] [7.94549101e-01 2.75770179e-01 4.73116798e-01] [7.98215577e-01 2.80196901e-01 4.69538286e-01] [8.01854758e-01 2.84625750e-01 4.65970871e-01] [8.05466945e-01 2.89057057e-01 4.62414580e-01] [8.09052419e-01 2.93491117e-01 4.58869577e-01] [8.12611506e-01 2.97927865e-01 4.55337565e-01] [8.16144382e-01 3.02368130e-01 4.51816385e-01] [8.19651255e-01 3.06812282e-01 4.48305861e-01] [8.23132309e-01 3.11260703e-01 4.44805781e-01] [8.26587706e-01 3.15713782e-01 4.41315901e-01] [8.30017584e-01 3.20171913e-01 4.37835947e-01] [8.33422053e-01 3.24635499e-01 4.34365616e-01] [8.36801237e-01 3.29104836e-01 4.30905052e-01] [8.40155276e-01 3.33580106e-01 4.27454836e-01] [8.43484103e-01 3.38062109e-01 4.24013059e-01] [8.46787726e-01 3.42551272e-01 4.20579333e-01] [8.50066132e-01 3.47048028e-01 4.17153264e-01] [8.53319279e-01 
3.51552815e-01 4.13734445e-01] [8.56547103e-01 3.56066072e-01 4.10322469e-01] [8.59749520e-01 3.60588229e-01 4.06916975e-01] [8.62926559e-01 3.65119408e-01 4.03518809e-01] [8.66077920e-01 3.69660446e-01 4.00126027e-01] [8.69203436e-01 3.74211795e-01 3.96738211e-01] [8.72302917e-01 3.78773910e-01 3.93354947e-01] [8.75376149e-01 3.83347243e-01 3.89975832e-01] [8.78422895e-01 3.87932249e-01 3.86600468e-01] [8.81442916e-01 3.92529339e-01 3.83228622e-01] [8.84435982e-01 3.97138877e-01 3.79860246e-01] [8.87401682e-01 4.01761511e-01 3.76494232e-01] [8.90339687e-01 4.06397694e-01 3.73130228e-01] [8.93249647e-01 4.11047871e-01 3.69767893e-01] [8.96131191e-01 4.15712489e-01 3.66406907e-01] [8.98983931e-01 4.20391986e-01 3.63046965e-01] [9.01807455e-01 4.25086807e-01 3.59687758e-01] [9.04601295e-01 4.29797442e-01 3.56328796e-01] [9.07364995e-01 4.34524335e-01 3.52969777e-01] [9.10098088e-01 4.39267908e-01 3.49610469e-01] [9.12800095e-01 4.44028574e-01 3.46250656e-01] [9.15470518e-01 4.48806744e-01 3.42890148e-01] [9.18108848e-01 4.53602818e-01 3.39528771e-01] [9.20714383e-01 4.58417420e-01 3.36165582e-01] [9.23286660e-01 4.63250828e-01 3.32800827e-01] [9.25825146e-01 4.68103387e-01 3.29434512e-01] [9.28329275e-01 4.72975465e-01 3.26066550e-01] [9.30798469e-01 4.77867420e-01 3.22696876e-01] [9.33232140e-01 4.82779603e-01 3.19325444e-01] [9.35629684e-01 4.87712357e-01 3.15952211e-01] [9.37990034e-01 4.92666544e-01 3.12575440e-01] [9.40312939e-01 4.97642038e-01 3.09196628e-01] [9.42597771e-01 5.02639147e-01 3.05815824e-01] [9.44843893e-01 5.07658169e-01 3.02433101e-01] [9.47050662e-01 5.12699390e-01 2.99048555e-01] [9.49217427e-01 5.17763087e-01 2.95662308e-01] [9.51343530e-01 5.22849522e-01 2.92274506e-01] [9.53427725e-01 5.27959550e-01 2.88883445e-01] [9.55469640e-01 5.33093083e-01 2.85490391e-01] [9.57468770e-01 5.38250172e-01 2.82096149e-01] [9.59424430e-01 5.43431038e-01 2.78700990e-01] [9.61335930e-01 5.48635890e-01 2.75305214e-01] [9.63202573e-01 5.53864931e-01 2.71909159e-01] [9.65023656e-01 5.59118349e-01 2.68513200e-01] [9.66798470e-01 5.64396327e-01 2.65117752e-01] [9.68525639e-01 5.69699633e-01 2.61721488e-01] [9.70204593e-01 5.75028270e-01 2.58325424e-01] [9.71835007e-01 5.80382015e-01 2.54931256e-01] [9.73416145e-01 5.85761012e-01 2.51539615e-01] [9.74947262e-01 5.91165394e-01 2.48151200e-01] [9.76427606e-01 5.96595287e-01 2.44766775e-01] [9.77856416e-01 6.02050811e-01 2.41387186e-01] [9.79232922e-01 6.07532077e-01 2.38013359e-01] [9.80556344e-01 6.13039190e-01 2.34646316e-01] [9.81825890e-01 6.18572250e-01 2.31287178e-01] [9.83040742e-01 6.24131362e-01 2.27937141e-01] [9.84198924e-01 6.29717516e-01 2.24595006e-01] [9.85300760e-01 6.35329876e-01 2.21264889e-01] [9.86345421e-01 6.40968508e-01 2.17948456e-01] [9.87332067e-01 6.46633475e-01 2.14647532e-01] [9.88259846e-01 6.52324832e-01 2.11364122e-01] [9.89127893e-01 6.58042630e-01 2.08100426e-01] [9.89935328e-01 6.63786914e-01 2.04858855e-01] [9.90681261e-01 6.69557720e-01 2.01642049e-01] [9.91364787e-01 6.75355082e-01 1.98452900e-01] [9.91984990e-01 6.81179025e-01 1.95294567e-01] [9.92540939e-01 6.87029567e-01 1.92170500e-01] [9.93031693e-01 6.92906719e-01 1.89084459e-01] [9.93456302e-01 6.98810484e-01 1.86040537e-01] [9.93813802e-01 7.04740854e-01 1.83043180e-01] [9.94103226e-01 7.10697814e-01 1.80097207e-01] [9.94323596e-01 7.16681336e-01 1.77207826e-01] [9.94473934e-01 7.22691379e-01 1.74380656e-01] [9.94553260e-01 7.28727890e-01 1.71621733e-01] [9.94560594e-01 7.34790799e-01 1.68937522e-01] [9.94494964e-01 7.40880020e-01 
1.66334918e-01] [9.94355411e-01 7.46995448e-01 1.63821243e-01] [9.94140989e-01 7.53136955e-01 1.61404226e-01] [9.93850778e-01 7.59304390e-01 1.59091984e-01] [9.93482190e-01 7.65498551e-01 1.56890625e-01] [9.93033251e-01 7.71719833e-01 1.54807583e-01] [9.92505214e-01 7.77966775e-01 1.52854862e-01] [9.91897270e-01 7.84239120e-01 1.51041581e-01] [9.91208680e-01 7.90536569e-01 1.49376885e-01] [9.90438793e-01 7.96858775e-01 1.47869810e-01] [9.89587065e-01 8.03205337e-01 1.46529128e-01] [9.88647741e-01 8.09578605e-01 1.45357284e-01] [9.87620557e-01 8.15977942e-01 1.44362644e-01] [9.86509366e-01 8.22400620e-01 1.43556679e-01] [9.85314198e-01 8.28845980e-01 1.42945116e-01] [9.84031139e-01 8.35315360e-01 1.42528388e-01] [9.82652820e-01 8.41811730e-01 1.42302653e-01] [9.81190389e-01 8.48328902e-01 1.42278607e-01] [9.79643637e-01 8.54866468e-01 1.42453425e-01] [9.77994918e-01 8.61432314e-01 1.42808191e-01] [9.76264977e-01 8.68015998e-01 1.43350944e-01] [9.74443038e-01 8.74622194e-01 1.44061156e-01] [9.72530009e-01 8.81250063e-01 1.44922913e-01] [9.70532932e-01 8.87896125e-01 1.45918663e-01] [9.68443477e-01 8.94563989e-01 1.47014438e-01] [9.66271225e-01 9.01249365e-01 1.48179639e-01] [9.64021057e-01 9.07950379e-01 1.49370428e-01] [9.61681481e-01 9.14672479e-01 1.50520343e-01] [9.59275646e-01 9.21406537e-01 1.51566019e-01] [9.56808068e-01 9.28152065e-01 1.52409489e-01] [9.54286813e-01 9.34907730e-01 1.52921158e-01] [9.51726083e-01 9.41670605e-01 1.52925363e-01] [9.49150533e-01 9.48434900e-01 1.52177604e-01] [9.46602270e-01 9.55189860e-01 1.50327944e-01] [9.44151742e-01 9.61916487e-01 1.46860789e-01] [9.41896120e-01 9.68589814e-01 1.40955606e-01] [9.40015097e-01 9.75158357e-01 1.31325517e-01]]<line_sep>test_cm=LinearSegmentedColormap.from_list(__file__ cm_data)<if_stmt>__name__<eq>"__main__"<block_start><import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<try_stmt><block_start><import_from_stmt>viscm viscm<line_sep>viscm(test_cm)<block_end><except_stmt>ImportError<block_start>print("viscm not found, falling back on simple display")<line_sep>plt.imshow(np.linspace(0 100 256)[<none> :] aspect='auto' cmap=test_cm)<block_end>plt.show()<block_end> |
"""Test the Airthings config flow."""<import_from_stmt>unittest.mock patch<import_stmt>airthings<import_from_stmt>homeassistant config_entries<import_from_stmt>homeassistant.components.airthings.const CONF_ID CONF_SECRET DOMAIN<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.data_entry_flow RESULT_TYPE_CREATE_ENTRY RESULT_TYPE_FORM<import_from_stmt>tests.common MockConfigEntry<line_sep>TEST_DATA={CONF_ID:"client_id" CONF_SECRET:"secret" }<async_keyword><def_stmt>test_form hass:HomeAssistant<arrow><none><block_start>"""Test we get the form."""<line_sep>result=<await>hass.config_entries.flow.async_init(DOMAIN context={"source":config_entries.SOURCE_USER})<assert_stmt>result["type"]<eq>RESULT_TYPE_FORM<assert_stmt>result["errors"]<is><none><with_stmt>patch("airthings.get_token" return_value="test_token" ) patch("homeassistant.components.airthings.async_setup_entry" return_value=<true> )<as>mock_setup_entry<block_start>result2=<await>hass.config_entries.flow.async_configure(result["flow_id"] TEST_DATA )<line_sep><await>hass.async_block_till_done()<block_end><assert_stmt>result2["type"]<eq>RESULT_TYPE_CREATE_ENTRY<assert_stmt>result2["title"]<eq>"Airthings"<assert_stmt>result2["data"]<eq>TEST_DATA<assert_stmt>len(mock_setup_entry.mock_calls)<eq>1<block_end><async_keyword><def_stmt>test_form_invalid_auth hass:HomeAssistant<arrow><none><block_start>"""Test we handle invalid auth."""<line_sep>result=<await>hass.config_entries.flow.async_init(DOMAIN context={"source":config_entries.SOURCE_USER})<with_stmt>patch("airthings.get_token" side_effect=airthings.AirthingsAuthError )<block_start>result2=<await>hass.config_entries.flow.async_configure(result["flow_id"] TEST_DATA )<block_end><assert_stmt>result2["type"]<eq>RESULT_TYPE_FORM<assert_stmt>result2["errors"]<eq>{"base":"invalid_auth"}<block_end><async_keyword><def_stmt>test_form_cannot_connect hass:HomeAssistant<arrow><none><block_start>"""Test we handle cannot connect error."""<line_sep>result=<await>hass.config_entries.flow.async_init(DOMAIN context={"source":config_entries.SOURCE_USER})<with_stmt>patch("airthings.get_token" side_effect=airthings.AirthingsConnectionError )<block_start>result2=<await>hass.config_entries.flow.async_configure(result["flow_id"] TEST_DATA )<block_end><assert_stmt>result2["type"]<eq>RESULT_TYPE_FORM<assert_stmt>result2["errors"]<eq>{"base":"cannot_connect"}<block_end><async_keyword><def_stmt>test_form_unknown_error hass:HomeAssistant<arrow><none><block_start>"""Test we handle unknown error."""<line_sep>result=<await>hass.config_entries.flow.async_init(DOMAIN context={"source":config_entries.SOURCE_USER})<with_stmt>patch("airthings.get_token" side_effect=Exception )<block_start>result2=<await>hass.config_entries.flow.async_configure(result["flow_id"] TEST_DATA )<block_end><assert_stmt>result2["type"]<eq>RESULT_TYPE_FORM<assert_stmt>result2["errors"]<eq>{"base":"unknown"}<block_end><async_keyword><def_stmt>test_flow_entry_already_exists hass:HomeAssistant<arrow><none><block_start>"""Test user input for config_entry that already exists."""<line_sep>first_entry=MockConfigEntry(domain="airthings" data=TEST_DATA unique_id=TEST_DATA[CONF_ID] )<line_sep>first_entry.add_to_hass(hass)<with_stmt>patch("airthings.get_token" return_value="token")<block_start>result=<await>hass.config_entries.flow.async_init(DOMAIN context={"source":config_entries.SOURCE_USER} data=TEST_DATA)<block_end><assert_stmt>result["type"]<eq>"abort"<assert_stmt>result["reason"]<eq>"already_configured"<block_end> |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401
<import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>QueryResultRowTypeSummary(object)<block_start>"""
A summary of the datatype, unit and related metadata of an individual row element of a query result row that is returned.
"""<def_stmt>__init__ self **kwargs<block_start>"""
Initializes a new QueryResultRowTypeSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param data_type:
The value to assign to the data_type property of this QueryResultRowTypeSummary.
:type data_type: str
:param unit:
The value to assign to the unit property of this QueryResultRowTypeSummary.
:type unit: str
:param display_name:
The value to assign to the display_name property of this QueryResultRowTypeSummary.
:type display_name: str
:param expression:
The value to assign to the expression property of this QueryResultRowTypeSummary.
:type expression: str
:param query_result_row_type_summaries:
The value to assign to the query_result_row_type_summaries property of this QueryResultRowTypeSummary.
:type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""<line_sep>self.swagger_types={'data_type':'str' 'unit':'str' 'display_name':'str' 'expression':'str' 'query_result_row_type_summaries':'list[QueryResultRowTypeSummary]'}<line_sep>self.attribute_map={'data_type':'dataType' 'unit':'unit' 'display_name':'displayName' 'expression':'expression' 'query_result_row_type_summaries':'queryResultRowTypeSummaries'}<line_sep>self._data_type=<none><line_sep>self._unit=<none><line_sep>self._display_name=<none><line_sep>self._expression=<none><line_sep>self._query_result_row_type_summaries=<none><block_end>@property<def_stmt>data_type self<block_start>"""
Gets the data_type of this QueryResultRowTypeSummary.
Datatype of the query result row element.
:return: The data_type of this QueryResultRowTypeSummary.
:rtype: str
"""<line_sep><return>self._data_type<block_end>@data_type.setter<def_stmt>data_type self data_type<block_start>"""
Sets the data_type of this QueryResultRowTypeSummary.
Datatype of the query result row element.
:param data_type: The data_type of this QueryResultRowTypeSummary.
:type: str
"""<line_sep>self._data_type=data_type<block_end>@property<def_stmt>unit self<block_start>"""
Gets the unit of this QueryResultRowTypeSummary.
Granular unit in which the query result row element's data is represented.
:return: The unit of this QueryResultRowTypeSummary.
:rtype: str
"""<line_sep><return>self._unit<block_end>@unit.setter<def_stmt>unit self unit<block_start>"""
Sets the unit of this QueryResultRowTypeSummary.
Granular unit in which the query result row element's data is represented.
:param unit: The unit of this QueryResultRowTypeSummary.
:type: str
"""<line_sep>self._unit=unit<block_end>@property<def_stmt>display_name self<block_start>"""
Gets the display_name of this QueryResultRowTypeSummary.
Alias name if an alias is used for the query result row element, or an assigned display name from the query language
in some default cases.
:return: The display_name of this QueryResultRowTypeSummary.
:rtype: str
"""<line_sep><return>self._display_name<block_end>@display_name.setter<def_stmt>display_name self display_name<block_start>"""
Sets the display_name of this QueryResultRowTypeSummary.
Alias name if an alias is used for the query result row element, or an assigned display name from the query language
in some default cases.
:param display_name: The display_name of this QueryResultRowTypeSummary.
:type: str
"""<line_sep>self._display_name=display_name<block_end>@property<def_stmt>expression self<block_start>"""
Gets the expression of this QueryResultRowTypeSummary.
Actual show expression in the user-typed query that produced this column.
:return: The expression of this QueryResultRowTypeSummary.
:rtype: str
"""<line_sep><return>self._expression<block_end>@expression.setter<def_stmt>expression self expression<block_start>"""
Sets the expression of this QueryResultRowTypeSummary.
Actual show expression in the user-typed query that produced this column.
:param expression: The expression of this QueryResultRowTypeSummary.
:type: str
"""<line_sep>self._expression=expression<block_end>@property<def_stmt>query_result_row_type_summaries self<block_start>"""
Gets the query_result_row_type_summaries of this QueryResultRowTypeSummary.
A query result row type summary object that represents a nested table structure.
:return: The query_result_row_type_summaries of this QueryResultRowTypeSummary.
:rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""<line_sep><return>self._query_result_row_type_summaries<block_end>@query_result_row_type_summaries.setter<def_stmt>query_result_row_type_summaries self query_result_row_type_summaries<block_start>"""
Sets the query_result_row_type_summaries of this QueryResultRowTypeSummary.
A query result row type summary object that represents a nested table structure.
:param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultRowTypeSummary.
:type: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""<line_sep>self._query_result_row_type_summaries=query_result_row_type_summaries<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end> |
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
<import_stmt>os<import_stmt>re<import_stmt>time<import_stmt>random<import_stmt>argparse<import_stmt>torch<import_from_stmt>transformers GPT2TokenizerFast<import_from_stmt>jaxformer.hf.codegen.modeling_codegen CodeGenForCausalLM<line_sep>########################################################################
# util
<class_stmt>print_time<block_start><def_stmt>__init__ self desc<block_start>self.desc=desc<block_end><def_stmt>__enter__ self<block_start>print(self.desc)<line_sep>self.t=time.time()<block_end><def_stmt>__exit__ self type value traceback<block_start>print(f'{self.desc} took {time.time()-self.t:.02f}s')<block_end><block_end><def_stmt>set_env <block_start>os.environ['TOKENIZERS_PARALLELISM']='false'<block_end><def_stmt>set_seed seed deterministic=<true><block_start>random.seed(seed)<line_sep>os.environ['PYTHONHASHSEED']=str(seed)<line_sep>torch.manual_seed(seed)<if_stmt>torch.cuda.is_available()<block_start>torch.cuda.manual_seed(seed)<line_sep>torch.backends.cudnn.deterministic=deterministic<line_sep>torch.backends.cudnn.benchmark=<not>deterministic<line_sep># torch.use_deterministic_algorithms(deterministic)
<block_end><block_end><def_stmt>cast model fp16=<true><block_start><if_stmt>fp16<block_start>model.half()<block_end><return>model<block_end>########################################################################
# model
<def_stmt>create_model ckpt fp16=<true><block_start><if_stmt>fp16<block_start><return>CodeGenForCausalLM.from_pretrained(ckpt revision='float16' torch_dtype=torch.float16 low_cpu_mem_usage=<true>)<block_end><else_stmt><block_start><return>CodeGenForCausalLM.from_pretrained(ckpt)<block_end><block_end><def_stmt>create_tokenizer <block_start>t=GPT2TokenizerFast.from_pretrained('gpt2')<line_sep>t.max_model_input_sizes['gpt2']=1e20<line_sep><return>t<block_end><def_stmt>include_whitespace t n_min=2 n_max=20 as_special_tokens=<false><block_start>t.add_tokens([' '<times>n<for>n reversed(range(n_min n_max))] special_tokens=as_special_tokens)<line_sep><return>t<block_end><def_stmt>include_tabs t n_min=2 n_max=20 as_special_tokens=<false><block_start>t.add_tokens(['\t'<times>n<for>n reversed(range(n_min n_max))] special_tokens=as_special_tokens)<line_sep><return>t<block_end><def_stmt>create_custom_gpt2_tokenizer <block_start>t=create_tokenizer()<line_sep>t=include_whitespace(t=t n_min=2 n_max=32 as_special_tokens=<false>)<line_sep>t=include_tabs(t=t n_min=2 n_max=10 as_special_tokens=<false>)<line_sep><return>t<block_end>########################################################################
# sample
<def_stmt>sample device model tokenizer context pad_token_id num_return_sequences=1 temp=0.2 top_p=0.95 max_length_sample=128 max_length=2048<block_start>input_ids=tokenizer(context truncation=<true> padding=<true> max_length=max_length return_tensors='pt' ).input_ids<line_sep>input_ids_len=input_ids.shape[1]<assert_stmt>input_ids_len<l>max_length<with_stmt>torch.no_grad()<block_start>input_ids=input_ids.to(device)<line_sep>tokens=model.generate(input_ids do_sample=<true> num_return_sequences=num_return_sequences temperature=temp max_length=input_ids_len+max_length_sample top_p=top_p pad_token_id=pad_token_id use_cache=<true> )<line_sep>text=tokenizer.batch_decode(tokens[: input_ids_len: <ellipsis>])<block_end><return>text<block_end><def_stmt>truncate completion<block_start><def_stmt>find_re string pattern start_pos<block_start>m=pattern.search(string start_pos)<line_sep><return>m.start()<if>m<else>-1<block_end>terminals=[re.compile(r re.MULTILINE)<for>r ['^#' re.escape('<|endoftext|>') "^'''" '^"""' '\n\n\n']]<line_sep>prints=list(re.finditer('^print' completion re.MULTILINE))<if_stmt>len(prints)<g>1<block_start>completion=completion[:prints[1].start()]<block_end>defs=list(re.finditer('^def' completion re.MULTILINE))<if_stmt>len(defs)<g>1<block_start>completion=completion[:defs[1].start()]<block_end>start_pos=0<line_sep>terminals_pos=[pos<for>pos [find_re(completion terminal start_pos)<for>terminal terminals]<if>pos<ne>-1]<if_stmt>len(terminals_pos)<g>0<block_start><return>completion[:min(terminals_pos)]<block_end><else_stmt><block_start><return>completion<block_end><block_end><def_stmt>test_truncate <block_start><assert_stmt>truncate('\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#')<eq>'\nif len_a > len_b:\n result = a\nelse:\n result = b'<block_end>########################################################################
# main
<def_stmt>main # (0) constants
<block_start>models_nl=['codegen-350M-nl' 'codegen-2B-nl' 'codegen-6B-nl' 'codegen-16B-nl']<line_sep>models_pl=['codegen-350M-multi' 'codegen-2B-multi' 'codegen-6B-multi' 'codegen-16B-multi' 'codegen-350M-mono' 'codegen-2B-mono' 'codegen-6B-mono' 'codegen-16B-mono']<line_sep>models=models_nl+models_pl<line_sep># (1) params
parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--model' type=str choices=models default='codegen-350M-mono')<line_sep>parser.add_argument('--device' type=str default='cuda:0')<line_sep>parser.add_argument('--rng-seed' type=int default=42)<line_sep>parser.add_argument('--rng-deterministic' type=bool default=<true>)<line_sep>parser.add_argument('--p' type=float default=0.95)<line_sep>parser.add_argument('--t' type=float default=0.2)<line_sep>parser.add_argument('--max-length' type=int default=128)<line_sep>parser.add_argument('--batch-size' type=int default=1)<line_sep>parser.add_argument('--no-fp16' action="store_false")<line_sep>parser.add_argument('--pad' type=int default=50256)<line_sep>parser.add_argument('--context' type=str default='def helloworld():')<line_sep>args=parser.parse_args()<line_sep># (2) preamble
set_env()<line_sep>set_seed(args.rng_seed deterministic=args.rng_deterministic)<line_sep>device=torch.device(args.device)<if_stmt>device.type<eq>"cpu"<block_start>args.no_fp16=<false><block_end><if_stmt>args.model.startswith("codegen-16B")<block_start>args.no_fp16=<true><block_end>ckpt=f'./checkpoints/{args.model}'<line_sep># (3) load
<with_stmt>print_time('loading parameters')<block_start>model=create_model(ckpt=ckpt fp16=args.no_fp16).to(device)<block_end><with_stmt>print_time('loading tokenizer')<block_start><if_stmt>args.model<in>models_pl<block_start>tokenizer=create_custom_gpt2_tokenizer()<block_end><else_stmt><block_start>tokenizer=create_tokenizer()<block_end>tokenizer.padding_side='left'<line_sep>tokenizer.pad_token=args.pad<block_end># (4) sample
<with_stmt>print_time('sampling')<block_start>completion=sample(device=device model=model tokenizer=tokenizer context=args.context pad_token_id=args.pad num_return_sequences=args.batch_size temp=args.t top_p=args.p max_length_sample=args.max_length)[0]<line_sep>truncation=truncate(completion)<line_sep>print('='<times>100)<line_sep>print(completion)<line_sep>print('='<times>100)<line_sep>print(args.context+truncation)<line_sep>print('='<times>100)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test_truncate()<line_sep>main()<line_sep>print('done.')<block_end> |
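The `main()` above wires everything together from the command line. A hedged example invocation, written as comments because the script's filename and checkpoint layout are assumptions (the imports suggest it sits alongside `jaxformer/hf/`, with checkpoints unpacked under `./checkpoints/`):

# python sample.py --model codegen-350M-mono \
#        --context 'def fibonacci(n):' \
#        --max-length 128 --t 0.2 --p 0.95 --batch-size 1 --device cuda:0
# Prints the raw completion, then the truncated completion appended to the context.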
<import_stmt>argparse<import_stmt>warnings<line_sep>warnings.simplefilter("ignore" UserWarning)<import_stmt>files<import_from_stmt>tensorboardX SummaryWriter<import_stmt>os<import_stmt>numpy<as>np<import_stmt>time<import_stmt>torch<import_stmt>torch.optim<import_stmt>torch.nn<as>nn<import_stmt>torch.utils.data<import_stmt>torchvision<import_stmt>torchvision.transforms<as>tfs<import_from_stmt>data DataSet return_model_loader<import_from_stmt>util weight_init write_conv setup_runtime AverageMeter MovingAverage<def_stmt>RotationDataLoader image_dir is_validation=<false> batch_size=256 crop_size=224 num_workers=4 shuffle=<true><block_start>normalize=tfs.Normalize(mean=[0.485 0.456 0.406] std=[0.229 0.224 0.225])<line_sep>transforms=tfs.Compose([tfs.RandomResizedCrop(crop_size) tfs.RandomGrayscale(p=0.2) tfs.ColorJitter(0.4 0.4 0.4 0.4) tfs.RandomHorizontalFlip() tfs.Lambda(<lambda>img:torch.stack([normalize(tfs.ToTensor()(tfs.functional.rotate(img angle)))<for>angle [0 90 180 270]]))])<if_stmt>is_validation<block_start>dataset=DataSet(torchvision.datasets.ImageFolder(image_dir+'/val' transforms))<block_end><else_stmt><block_start>dataset=DataSet(torchvision.datasets.ImageFolder(image_dir+'/train' transforms))<block_end>loader=torch.utils.data.DataLoader(dataset batch_size=batch_size shuffle=shuffle num_workers=num_workers pin_memory=<true> drop_last=<false>)<line_sep><return>loader<block_end><class_stmt>Optimizer<block_start><def_stmt>__init__ self<block_start>self.num_epochs=30<line_sep>self.lr=0.05<line_sep>self.lr_schedule=<lambda>epoch:(self.lr<times>(0.1<power>(epoch<floordiv>args.lrdrop)))<times>(epoch<l>80)+(epoch<ge>80)<times>self.lr<times>(0.1<power>3)<line_sep>self.momentum=0.9<line_sep>self.weight_decay=10<power>(-5)<line_sep>self.resume=<true><line_sep>self.checkpoint_dir=<none><line_sep>self.writer=<none><line_sep>self.K=args.ncl<line_sep>self.dev=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<line_sep>self.val_loader=RotationDataLoader(args.imagenet_path is_validation=<true> batch_size=args.batch_size num_workers=args.workers shuffle=<true>)<block_end><def_stmt>optimize_epoch self model optimizer loader epoch validation=<false><block_start>print(f"Starting epoch {epoch}, validation: {validation} "+"="<times>30)<line_sep>loss_value=AverageMeter()<line_sep>rotacc_value=AverageMeter()<line_sep># house keeping
<if_stmt><not>validation<block_start>model.train()<line_sep>lr=self.lr_schedule(epoch)<for_stmt>pg optimizer.param_groups<block_start>pg['lr']=lr<block_end><block_end><else_stmt><block_start>model.eval()<block_end>XE=torch.nn.CrossEntropyLoss().to(self.dev)<line_sep>l_dl=0# len(loader)
now=time.time()<line_sep>batch_time=MovingAverage(intertia=0.9)<for_stmt>iter,(data label selected) enumerate(loader)<block_start>now=time.time()<if_stmt><not>validation<block_start>niter=epoch<times>len(loader.dataset)+iter<times>args.batch_size<block_end>data=data.to(self.dev)<line_sep>mass=data.size(0)<line_sep>where=np.arange(mass dtype=int)<times>4<line_sep>data=data.view(mass<times>4 3 data.size(3) data.size(4))<line_sep>rotlabel=torch.tensor(range(4)).view(-1 1).repeat(mass 1).view(-1).to(self.dev)<line_sep>#################### train CNN ###########################################
<if_stmt><not>validation<block_start>final=model(data)<if_stmt>args.onlyrot<block_start>loss=torch.Tensor([0]).to(self.dev)<block_end><else_stmt><block_start><if_stmt>args.hc<eq>1<block_start>loss=XE(final[0][where] self.L[selected])<block_end><else_stmt><block_start>loss=torch.mean(torch.stack([XE(final[k][where] self.L[k selected])<for>k range(args.hc)]))<block_end><block_end>rotloss=XE(final[-1] rotlabel)<line_sep>pred=torch.argmax(final[-1] 1)<line_sep>total_loss=loss+rotloss<line_sep>optimizer.zero_grad()<line_sep>total_loss.backward()<line_sep>optimizer.step()<line_sep>correct=(pred<eq>rotlabel).to(torch.float)<line_sep>rotacc=correct.sum()/float(mass)<block_end><else_stmt><block_start>final=model(data)<line_sep>pred=torch.argmax(final[-1] 1)<line_sep>correct=(pred<eq>rotlabel.cuda()).to(torch.float)<line_sep>rotacc=correct.sum()/float(mass)<line_sep>total_loss=torch.Tensor([0])<line_sep>loss=torch.Tensor([0])<line_sep>rotloss=torch.Tensor([0])<block_end>rotacc_value.update(rotacc.item() mass)<line_sep>loss_value.update(total_loss.item() mass)<line_sep>batch_time.update(time.time()-now)<line_sep>now=time.time()<line_sep>print(f"Loss: {loss_value.avg:03.3f}, RotAcc: {rotacc_value.avg:03.3f} | {epoch:3}/{iter:05}/{l_dl:05} Freq: {mass/batch_time.avg:04.1f}Hz:" end='\r' flush=<true>)<line_sep># every few iter logging
<if_stmt>(iter%args.logiter<eq>0)<block_start><if_stmt><not>validation<block_start>print(niter " Loss: {0:.3f}".format(loss.item()) flush=<true>)<with_stmt>torch.no_grad()<block_start><if_stmt><not>args.onlyrot<block_start>pred=torch.argmax(final[0][where] dim=1)<line_sep>pseudoloss=XE(final[0][where] pred)<block_end><block_end><if_stmt><not>args.onlyrot<block_start>self.writer.add_scalar('Pseudoloss' pseudoloss.item() niter)<block_end>self.writer.add_scalar('lr' self.lr_schedule(epoch) niter)<line_sep>self.writer.add_scalar('Loss' loss.item() niter)<line_sep>self.writer.add_scalar('RotLoss' rotloss.item() niter)<line_sep>self.writer.add_scalar('RotAcc' rotacc.item() niter)<if_stmt>iter<g>0<block_start>self.writer.add_scalar('Freq(Hz)' mass/(time.time()-now) niter)<block_end><block_end><block_end><block_end># end of epoch logging
<if_stmt>self.writer<and>(epoch%self.log_interval<eq>0)<block_start>write_conv(self.writer model epoch)<if_stmt>validation<block_start>print('val Rot-Acc: ' rotacc_value.avg)<line_sep>self.writer.add_scalar('val Rot-Acc' rotacc_value.avg epoch)<block_end><block_end>files.save_checkpoint_all(self.checkpoint_dir model args.arch optimizer self.L epoch lowest=<false>)<line_sep><return>{'loss':loss_value.avg}<block_end><def_stmt>optimize self model train_loader<block_start>"""Perform full optimization."""<line_sep>first_epoch=0<line_sep>model=model.to(self.dev)<line_sep>self.optimize_times=[0]<line_sep>optimizer=torch.optim.SGD(filter(<lambda>p:p.requires_grad model.parameters()) weight_decay=self.weight_decay momentum=self.momentum lr=self.lr)<if_stmt>self.checkpoint_dir<is><not><none><and>self.resume<block_start>self.L,first_epoch=files.load_checkpoint_all(self.checkpoint_dir model=<none> opt=<none>)<line_sep>print('loaded from: ' self.checkpoint_dir flush=<true>)<line_sep>print('first five entries of L: ' self.L[:5] flush=<true>)<line_sep>print('found first epoch to be' first_epoch flush=<true>)<line_sep>first_epoch=0<line_sep>self.optimize_times=[0]<line_sep>self.L=self.L.cuda()<line_sep>print("model.headcount " model.headcount flush=<true>)<block_end>#####################################################################################
# Perform optmization ###############################################################
lowest_loss=1e9<line_sep>epoch=first_epoch<while_stmt>epoch<l>(self.num_epochs+1)<block_start><if_stmt><not>args.val_only<block_start>m=self.optimize_epoch(model optimizer train_loader epoch validation=<false>)<if_stmt>m['loss']<l>lowest_loss<block_start>lowest_loss=m['loss']<line_sep>files.save_checkpoint_all(self.checkpoint_dir model args.arch optimizer self.L epoch lowest=<true>)<block_end><block_end><else_stmt><block_start>print('='<times>30+' doing only validation '+"="<times>30)<line_sep>epoch=self.num_epochs<block_end>m=self.optimize_epoch(model optimizer self.val_loader epoch validation=<true>)<line_sep>epoch<augadd>1<block_end>print(f"Model optimization completed. Saving final model to {os.path.join(self.checkpoint_dir 'model_final.pth.tar')}")<line_sep>torch.save(model os.path.join(self.checkpoint_dir 'model_final.pth.tar'))<line_sep><return>model<block_end><block_end><def_stmt>get_parser <block_start>parser=argparse.ArgumentParser(description='Retrain with given labels combined with RotNet loss')<line_sep># optimizer
parser.add_argument('--epochs' default=90 type=int metavar='N' help='number of epochs')<line_sep>parser.add_argument('--batch-size' default=64 type=int metavar='BS' help='batch size')<line_sep>parser.add_argument('--lr' default=0.05 type=float metavar='FLOAT' help='initial learning rate')<line_sep>parser.add_argument('--lrdrop' default=30 type=int metavar='INT' help='multiply LR by 0.1 every')<line_sep># architecture
parser.add_argument('--arch' default='alexnet' type=str help='alexnet or resnet')<line_sep>parser.add_argument('--archspec' default='big' type=str help='big or small for alexnet ')<line_sep>parser.add_argument('--ncl' default=1000 type=int metavar='INT' help='number of clusters')<line_sep>parser.add_argument('--hc' default=1 type=int metavar='INT' help='number of heads')<line_sep>parser.add_argument('--init' default=<false> action='store_true' help='initialization of network to PyTorch 0.4')<line_sep># what we do in this code
parser.add_argument('--val-only' default=<false> action='store_true' help='if we run only validation set')<line_sep>parser.add_argument('--onlyrot' default=<false> action='store_true' help='if train only RotNet')<line_sep># housekeeping
parser.add_argument('--data' default="Imagenet" type=str)<line_sep>parser.add_argument('--device' default="0" type=str metavar='N' help='GPU device')<line_sep>parser.add_argument('--exp' default='./rot-retrain' metavar='DIR' help='path to result dirs')<line_sep>parser.add_argument('--workers' default=6 type=int metavar='N' help='number workers (default: 6)')<line_sep>parser.add_argument('--imagenet-path' default='/home/ubuntu/data/imagenet' type=str help='')<line_sep>parser.add_argument('--comment' default='rot-retrain' type=str help='comment for tensorboardX')<line_sep>parser.add_argument('--log-interval' default=1 type=int metavar='INT' help='save stuff every x epochs')<line_sep>parser.add_argument('--logiter' default=200 type=int metavar='INT' help='log every x-th batch')<line_sep><return>parser<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=get_parser().parse_args()<line_sep>name="%s"%args.comment.replace('/' '_')<try_stmt><block_start>args.device=[int(item)<for>item args.device.split(',')]<block_end><except_stmt>AttributeError<block_start>args.device=[int(args.device)]<block_end>setup_runtime(seed=42 cuda_dev_id=args.device)<line_sep>print(args flush=<true>)<line_sep>print()<line_sep>print(name flush=<true>)<line_sep>writer=SummaryWriter('./runs/%s/%s'%(args.data name))<line_sep>writer.add_text('args' " \n".join(['%s %s'%(arg getattr(args arg))<for>arg vars(args)]))<line_sep># Setup model and train_loader
print('Commencing!' flush=<true>)<line_sep>model,train_loader=return_model_loader(args)<line_sep>train_loader=RotationDataLoader(args.imagenet_path is_validation=<false> crop_size=224 batch_size=args.batch_size num_workers=args.workers shuffle=<true>)<line_sep># add additional head to the network for RotNet loss.
<if_stmt>args.arch<eq>'alexnet'<block_start><if_stmt>args.hc<eq>1<block_start>model.__setattr__("top_layer0" nn.Linear(4096 args.ncl))<line_sep>model.top_layer=<none><block_end>model.headcount=args.hc+1<line_sep>model.__setattr__("top_layer%s"%args.hc nn.Linear(4096 4))<block_end><else_stmt><block_start><if_stmt>args.hc<eq>1<block_start>model.__setattr__("top_layer0" nn.Linear(2048<times>int(args.archspec) args.ncl))<line_sep>model.top_layer=<none><block_end>model.headcount=args.hc+1<line_sep>model.__setattr__("top_layer%s"%args.hc nn.Linear(2048<times>int(args.archspec) 4))<block_end><if_stmt>args.init<block_start><for_stmt>mod model.modules()<block_start>mod.apply(weight_init)<block_end><block_end># Setup optimizer
o=Optimizer()<line_sep>o.writer=writer<line_sep>o.lr=args.lr<line_sep>o.num_epochs=args.epochs<line_sep>o.resume=<true><line_sep>o.log_interval=args.log_interval<line_sep>o.checkpoint_dir=os.path.join(args.exp 'checkpoints')<line_sep># Optimize
o.optimize(model train_loader)<block_end> |
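The argparse block in the trainer above defines every knob it reads. A hedged example invocation; the script name, dataset path, and experiment directory are placeholders, not taken from the source:

# python retrain_rotnet.py --arch alexnet --hc 1 --ncl 1000 \
#        --imagenet-path /data/imagenet --batch-size 64 --epochs 90 \
#        --device 0 --exp ./rot-retrain --comment rot-retrain-alexnet
# Add --onlyrot to train only the RotNet head, or --val-only to just evaluate rotation accuracy.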
<import_stmt>pathlib<import_stmt>yaml<line_sep>documentations={"Our Platform":"QuantConnect-Platform-2.0.0.yaml" "Alpha Streams":"QuantConnect-Alpha-0.8.yaml"}<def_stmt>RequestTable api_call params<block_start>writeUp='<table class="table qc-table">\n<thead>\n<tr>\n'<line_sep>writeUp<augadd>f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>'<line_sep>example='<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n'<for_stmt>item params<block_start>example_="/"<line_sep>description_="Optional. "<if>"required"<not><in>item<or><not>item["required"]<else>""<line_sep>description_<augadd>item["description"]<if_stmt>description_[-1]<ne>"."<block_start>description_<augadd>"."<block_end><if_stmt>"type"<in>item["schema"]<block_start>type_=item["schema"]["type"]<block_end><else_stmt><block_start>type_=item["schema"]["$ref"].split("/")[-1]<block_end><if_stmt>"minimum"<in>item["schema"]<block_start>description_<augadd>f' Minimum: {item["schema"]["minimum"]}'<line_sep>example_=item["schema"]["minimum"]<block_end><elif_stmt>"maximum"<in>item["schema"]<block_start>description_<augadd>f' Maximum: {item["schema"]["maximum"]}'<line_sep>example_=item["schema"]["maximum"]<block_end><elif_stmt>"default"<in>item["schema"]<block_start>description_<augadd>f' Default: {item["schema"]["default"]}'<line_sep>example_=item["schema"]["default"]<block_end><if_stmt>type_<eq>"array"<block_start>array_obj=item["schema"]["items"]<if_stmt>"$ref"<in>array_obj<block_start>type_=array_obj["$ref"].split("/")[-1]+" Array"<line_sep>ref=array_obj["$ref"].split("/")[1:]<line_sep>type_=ref[-1]+" Array"<line_sep>request_object_=doc<for_stmt>path ref<block_start>request_object_=request_object_[path]<block_end><if_stmt>"properties"<in>request_object_<block_start>request_object_properties_=request_object_["properties"]<line_sep>example_,__,__=ExampleWriting(request_object_properties_ [] 1)<block_end><block_end><if_stmt>"type"<in>array_obj<block_start>type_=array_obj["type"]+" Array"<block_end><if_stmt>"enum"<in>array_obj<block_start>type_=type_+" Enum"<line_sep>description_<augadd>f' Options: {str(array_obj["enum"])}'<line_sep>example_=f'"{array_obj["enum"][0]}"'<block_end><block_end><if_stmt>"Enum"<not><in>type_<block_start><if_stmt>"string"<in>type_<block_start>example_='"string"'<block_end><elif_stmt>"number"<in>type_<or>"integer"<in>type_<block_start>example_='0'<block_end><elif_stmt>"boolean"<in>type_<block_start>example_='true'<block_end><block_end>writeUp<augadd>f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>'<line_sep>example<augadd>f' "{item["name"]}": {example_},\n'<block_end><return>writeUp+example+"\b}</pre>\n</div>\n</td>\n</tr>\n</table>"<block_end><def_stmt>ResponseTable requestBody<block_start>writeUp=""<line_sep>array=<false><line_sep>order=0<if_stmt>"content"<in>requestBody<block_start>component=requestBody["content"]["application/json"]["schema"]<if_stmt>"$ref"<in>component<block_start>component=component["$ref"].split("/")[1:]<block_end><elif_stmt>"items"<in>component<and>"$ref"<in>component["items"]<block_start>component=component["items"]["$ref"].split("/")[1:]<line_sep>array=<true><line_sep>order<augadd>1<block_end><else_stmt><block_start>writeUp<augadd>'<table class="table qc-table">\n<thead>\n<tr>\n'<line_sep>writeUp<augadd>f'<th colspan="2">{requestBody["description"]}</th>\n'<line_sep>writeUp<augadd>'</tr>\n</thead>\n'<line_sep>writeUp<augadd>f'<tr>\n<td width="20%">value</td> <td> 
<code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n'<line_sep>writeUp<augadd>'<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'<line_sep>writeUp<augadd>f'[\n "{component["items"]["example"]}"\n]'<line_sep>writeUp<augadd>'</pre>\n</div>\n</td>\n</tr>\n</table>'<line_sep><return>writeUp<block_end><block_end><else_stmt><block_start>component=requestBody["$ref"].split("/")[1:]<block_end>item_list=[component]<line_sep>i=0<while_stmt>i<l>len(item_list)<block_start>request_object=doc<for_stmt>item item_list[i]<block_start>request_object=request_object[item]<block_end><if_stmt>"items"<in>request_object<and>"oneOf"<in>request_object["items"]<block_start>prop=request_object["items"]["oneOf"]<line_sep>example='<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n ['<line_sep>writeUp<augadd>'<table class="table qc-table">\n<thead>\n<tr>\n'<line_sep>writeUp<augadd>f'<th colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n'<line_sep>writeUp<augadd>'</tr>\n</thead>'<for_stmt>y prop<block_start>path=y["$ref"].split("/")[1:]<line_sep>name=path[-1]<line_sep>enum=""<line_sep>item_list.append(path)<line_sep>request_object=doc<for_stmt>item path<block_start>request_object=request_object[item]<block_end><if_stmt>"enum"<in>request_object<block_start>enum=" Options: "+str(request_object["enum"])<block_end>description_=request_object["description"]<if_stmt>description_[-1]<ne>"."<block_start>description_<augadd>"."<block_end>writeUp<augadd>f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_+enum}</td>\n</tr>\n'<if_stmt>"example"<in>request_object<block_start>text=request_object["example"]<block_end><elif_stmt>"enum"<in>request_object<block_start>text='"'+request_object["enum"][0]+'"'<block_end>example<augadd>f'\n {text},'<block_end>example<augadd>'\b\n ]\n]'<line_sep>writeUp<augadd>example<line_sep>writeUp<augadd>'</pre>\n</div>\n</td>\n</tr>\n</table>'<line_sep>i<augadd>1<line_sep><continue><block_end><elif_stmt>"oneOf"<in>request_object<block_start><for_stmt>y request_object["oneOf"]<block_start>item_list.append(y["$ref"].split("/")[1:])<block_end>i<augadd>1<line_sep><continue><block_end><elif_stmt>"properties"<in>request_object<block_start>request_object_properties=request_object["properties"]<block_end><elif_stmt>"content"<in>request_object<block_start>item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:])<line_sep>i<augadd>1<line_sep><continue><block_end><elif_stmt>"type"<in>request_object<and>"properties"<not><in>request_object<block_start>request_object_properties={item:request_object}<block_end>writeUp<augadd>'<table class="table qc-table">\n<thead>\n<tr>\n'<if_stmt>"description"<in>request_object<block_start>writeUp<augadd>f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n'<block_end><else_stmt><block_start>writeUp<augadd>f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n'<block_end>writeUp<augadd>'</tr>\n</thead>\n'<line_sep>example,html_property,item_list=ExampleWriting(request_object_properties item_list array order)<if_stmt>array<block_start>array=<false><line_sep>order<augsub>1<block_end><for_stmt>line html_property<block_start>writeUp<augadd>line<block_end>writeUp<augadd>'<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli 
section-example-container"><pre>\n'<line_sep>writeUp<augadd>example<line_sep>writeUp<augadd>'</pre>\n</div>\n</td>\n</tr>\n</table>'<line_sep>i<augadd>1<block_end><return>writeUp<block_end><def_stmt>ExampleWriting request_object_properties item_list array=<false> order=0<block_start>tab=" "<times>order<if_stmt>array<block_start>example="[\n {\n"<block_end><else_stmt><block_start>example="{\n"<block_end>line=[]<for_stmt>name,properties request_object_properties.items()<block_start>type_=properties["type"]<if>"type"<in>properties<else>"object"<line_sep>description_=properties["description"]<if>"description"<in>properties<else>"/"<if_stmt>(example<ne>"{\n"<and><not>array)<or>(example<ne>"[\n {\n"<and>array)<block_start>example<augadd>",\n"<block_end>example_=tab+f' "{name}": '<if_stmt>type_<eq>"array"<block_start>example_<augadd>'[\n'<if_stmt>"type"<in>properties["items"]<block_start>type_=properties["items"]["type"]+" Array"<line_sep>example_<augadd>tab+f' "{properties["items"]["type"]}"'<block_end><elif_stmt>"$ref"<in>properties["items"]<block_start>ref=properties["items"]["$ref"].split("/")[1:]<line_sep>type_=ref[-1]+" Array"<if_stmt>ref<not><in>item_list<block_start>item_list.append(ref)<block_end>request_object_=doc<for_stmt>item ref<block_start>request_object_=request_object_[item]<block_end><if_stmt>"properties"<in>request_object_<block_start>request_object_properties_=request_object_["properties"]<line_sep>write_up,__,item_list=ExampleWriting(request_object_properties_ item_list order=order+2)<line_sep>example_<augadd>tab+" "<times>2+write_up<block_end><block_end><block_end><elif_stmt>type_<eq>"object"<block_start><if_stmt>"additionalProperties"<in>properties<block_start>add_prop=properties["additionalProperties"]<if_stmt>"type"<in>add_prop<block_start>prop_type=add_prop["type"]<if_stmt>"format"<in>prop_type<block_start>type_=prop_type+f'$({prop_type["format"]})'+" object"<if_stmt>prop_type["format"]<eq>"date-time"<block_start>example_<augadd>"2021-11-26T15:18:27.693Z"<block_end><else_stmt><block_start>example_<augadd>"0"<block_end><block_end><else_stmt><block_start>type_=prop_type+" object"<line_sep>example_<augadd>f'"{prop_type}"'<block_end><block_end><elif_stmt>"$ref"<in>add_prop<block_start>ref=add_prop["$ref"].split("/")[1:]<line_sep>type_=ref[-1]+" object"<if_stmt>ref<not><in>item_list<block_start>item_list.append(ref)<block_end>request_object_=doc<for_stmt>item ref<block_start>request_object_=request_object_[item]<block_end><if_stmt>"properties"<in>request_object_<block_start>request_object_properties_=request_object_["properties"]<line_sep>write_up,__,item_list=ExampleWriting(request_object_properties_ item_list order=order+1)<line_sep>example_<augadd>write_up<block_end><block_end><block_end><elif_stmt>"$ref"<in>properties<block_start>ref=properties["$ref"].split("/")[1:]<line_sep>type_=ref[-1]+" object"<if_stmt>ref<not><in>item_list<block_start>item_list.append(ref)<block_end>request_object_=doc<for_stmt>item ref<block_start>request_object_=request_object_[item]<block_end><if_stmt>"properties"<in>request_object_<block_start>request_object_properties_=request_object_["properties"]<line_sep>description_=request_object_["description"]<if>"description"<in>request_object_<else>"/"<line_sep>write_up,__,item_list=ExampleWriting(request_object_properties_ item_list 
order=order+1)<line_sep>example_<augadd>write_up<block_end><elif_stmt>"type"<in>request_object_<block_start>properties=request_object_properties_=request_object_<line_sep>type_=request_object_["type"]<line_sep>description_=request_object_["description"]<if>"description"<in>request_object_<else>"/"<block_end><block_end><block_end><elif_stmt>type_<eq>"integer"<or>type_<eq>"number"<block_start>example_<augadd>"0"<block_end><elif_stmt>type_<eq>"boolean"<block_start>example_<augadd>"true"<block_end><elif_stmt>type_<eq>"string"<block_start><if_stmt>"format"<in>properties<block_start>type_<augadd>f'(${properties["format"]})'<line_sep>example_<augadd>"2021-11-26T15:18:27.693Z"<block_end><else_stmt><block_start>example_<augadd>'"string"'<block_end><block_end><if_stmt>description_[-1]<ne>"."<block_start>description_<augadd>"."<block_end><if_stmt>"enum"<in>properties<block_start>type_<augadd>" Enum"<line_sep>description_<augadd>f' Options : {properties["enum"]}'<if_stmt>"string"<in>type_<block_start>example_=tab+f' "{name}": "{properties["enum"][0]}"'<block_end><else_stmt><block_start>example_=tab+f' "{name}": {properties["enum"][0]}'<block_end><block_end><if_stmt>"example"<in>properties<block_start>eg=properties["example"]<line_sep>type_<augadd>f'<br/><i><sub>example: {eg}</sub></i>'<if_stmt>isinstance(eg str)<block_start>eg='"'+eg+'"'<block_end>example_=tab+f' "{name}": {eg}'<block_end><if_stmt>"Array"<in>type_<block_start>example_<augadd>"\n"+tab+" ]"<block_end><if_stmt>order<eq>0<or>array<block_start>line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n')<block_end>example<augadd>example_<block_end><if_stmt><not>array<block_start><return>example+"\n"+tab+"}" line item_list<block_end><return>example+"\n"+tab+"}\n"+" "<times>(order-1)+"]" line item_list<block_end><for_stmt>section,source documentations.items()<block_start>yaml_file=open(source)<line_sep>doc=yaml.load(yaml_file Loader=yaml.Loader)<line_sep>paths=doc["paths"]<for_stmt>api_call,result paths.items()<block_start>j=1<line_sep>content=result["post"]<if>"post"<in>result<else>result["get"]<line_sep># Create path if not exist
destination_folder=pathlib.Path("/".join(content["tags"]))<line_sep>destination_folder.mkdir(parents=<true> exist_ok=<true>)<line_sep># Create Introduction part
<with_stmt>open(destination_folder/f'{j:02} Introduction.html' "w")<as>html_file<block_start>html_file.write("<p>\n")<line_sep>html_file.write(f"{content['summary']}\n")<line_sep>html_file.write("</p>\n")<line_sep>j<augadd>1<block_end># Create Description part if having one
<if_stmt>"description"<in>content<block_start><with_stmt>open(destination_folder/f'{j:02} Description.html' "w")<as>html_file<block_start>html_file.write('<p>\n')<line_sep>html_file.write(f'{content["description"]}\n')<line_sep>html_file.write('</p>\n')<line_sep>j<augadd>1<block_end><block_end># Create Request part
<with_stmt>open(destination_folder/f'{j:02} Request.html' "w")<as>html_file<block_start>description_=""<if_stmt>"parameters"<in>content<block_start>writeUp=RequestTable(api_call content["parameters"])<block_end><elif_stmt>"requestBody"<in>content<block_start><if_stmt>"description"<in>content["requestBody"]<block_start>description_=str(content["requestBody"]["description"])<if_stmt>description_[-1]<ne>"."<block_start>description_<augadd>"."<block_end>description_<augadd>" "<block_end>writeUp=ResponseTable(content["requestBody"])<block_end><else_stmt><block_start>writeUp='<table class="table qc-table">\n<thead>\n<tr>\n'<line_sep>writeUp<augadd>f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n'<line_sep>writeUp<augadd>f'</tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>'<block_end>description_<augadd>f'The <code>{api_call}</code> API accepts requests in the following format:\n'<line_sep>html_file.write("<p>\n"+description_+"</p>\n")<line_sep>html_file.write(writeUp)<line_sep>j<augadd>1<block_end># Create Response part
<with_stmt>open(destination_folder/f'{j:02} Responses.html' "w")<as>html_file<block_start>html_file.write('<p>\n')<line_sep>html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n')<line_sep>html_file.write('</p>\n')<line_sep>request_body=content["responses"]<for_stmt>code,properties request_body.items()<block_start><if_stmt>code<eq>"200"<block_start>html_file.write('<h4>200 Success</h4>\n')<block_end><elif_stmt>code<eq>"401"<block_start>html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n')<line_sep>html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n')<line_sep>html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n')<line_sep><continue><block_end><elif_stmt>code<eq>"404"<block_start>html_file.write('<h4>404 Not Found Error</h4>\n')<line_sep>html_file.write('<p>The requested item, index, page was not found.</p>\n')<line_sep><continue><block_end><elif_stmt>code<eq>"default"<block_start>html_file.write('<h4>Default Generic Error</h4>\n')<block_end>writeUp=ResponseTable(properties)<line_sep>html_file.write(writeUp)<block_end><block_end><block_end>print(f"Documentation of {section} is generated and inplace!")<block_end> |
<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>uuid<import_from_stmt>pathlib Path<import_from_stmt>ludwig.constants CHECKSUM META TEST TRAINING VALIDATION<import_from_stmt>ludwig.data.cache.util calculate_checksum<import_from_stmt>ludwig.utils data_utils<import_from_stmt>ludwig.utils.fs_utils delete path_exists<line_sep>logger=logging.getLogger(__name__)<def_stmt>alphanum v<block_start>"""Filters a string to only its alphanumeric characters."""<line_sep><return>re.sub(r"\W+" "" v)<block_end><class_stmt>DatasetCache<block_start><def_stmt>__init__ self config checksum cache_map dataset_manager<block_start>self.config=config<line_sep>self.checksum=checksum<line_sep>self.cache_map=cache_map<line_sep>self.dataset_manager=dataset_manager<block_end><def_stmt>get self<block_start>training_set_metadata_fp=self.cache_map[META]<if_stmt><not>path_exists(training_set_metadata_fp)<block_start><return><none><block_end>cache_training_set_metadata=data_utils.load_json(training_set_metadata_fp)<line_sep>cached_training_set=self.cache_map[TRAINING]<if>path_exists(self.cache_map[TRAINING])<else><none><line_sep>cached_test_set=self.cache_map[TEST]<if>path_exists(self.cache_map[TEST])<else><none><line_sep>cached_validation_set=self.cache_map[VALIDATION]<if>path_exists(self.cache_map[VALIDATION])<else><none><line_sep>valid=self.checksum<eq>cache_training_set_metadata.get(CHECKSUM)<and>cached_training_set<is><not><none><line_sep><return>valid cache_training_set_metadata cached_training_set cached_test_set cached_validation_set<block_end><def_stmt>put self training_set test_set validation_set training_set_metadata<block_start>logger.info("Writing preprocessed training set cache")<line_sep>training_set=self.dataset_manager.save(self.cache_map[TRAINING] training_set self.config training_set_metadata TRAINING )<if_stmt>test_set<is><not><none><block_start>logger.info("Writing preprocessed test set cache")<line_sep>test_set=self.dataset_manager.save(self.cache_map[TEST] test_set self.config training_set_metadata TEST )<block_end><if_stmt>validation_set<is><not><none><block_start>logger.info("Writing preprocessed validation set cache")<line_sep>validation_set=self.dataset_manager.save(self.cache_map[VALIDATION] validation_set self.config training_set_metadata VALIDATION )<block_end>logger.info("Writing train set metadata")<line_sep>data_utils.save_json(self.cache_map[META] training_set_metadata)<line_sep><return>training_set test_set validation_set training_set_metadata<block_end><def_stmt>delete self<block_start><for_stmt>fname self.cache_map.values()<block_start><if_stmt>path_exists(fname)<block_start>delete(fname)<block_end><block_end><block_end><block_end><class_stmt>CacheManager<block_start><def_stmt>__init__ self dataset_manager cache_dir=<none><block_start>self._dataset_manager=dataset_manager<line_sep>self._cache_dir=cache_dir<block_end><def_stmt>get_dataset_cache self config dataset=<none> training_set=<none> test_set=<none> validation_set=<none><block_start><if_stmt>dataset<is><not><none><block_start>key=self.get_cache_key(dataset config)<line_sep>cache_map={META:self.get_cache_path(dataset key META "json") TRAINING:self.get_cache_path(dataset key TRAINING) TEST:self.get_cache_path(dataset key TEST) VALIDATION:self.get_cache_path(dataset key VALIDATION) }<line_sep><return>DatasetCache(config key cache_map self._dataset_manager)<block_end><else_stmt><block_start>key=self.get_cache_key(training_set config)<line_sep>cache_map={META:self.get_cache_path(training_set key META "json") 
TRAINING:self.get_cache_path(training_set key TRAINING) TEST:self.get_cache_path(test_set key TEST) VALIDATION:self.get_cache_path(validation_set key VALIDATION) }<line_sep><return>DatasetCache(config key cache_map self._dataset_manager)<block_end><block_end><def_stmt>get_cache_key self dataset config<block_start><if_stmt><not>isinstance(dataset str)# TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
<block_start><return>str(uuid.uuid1())<block_end><return>calculate_checksum(dataset config)<block_end><def_stmt>get_cache_path self dataset key tag ext=<none><block_start><if_stmt><not>isinstance(dataset str)<block_start>dataset=<none><block_end><if_stmt>self._cache_dir<is><none><and>dataset<is><not><none># Use the input dataset filename (minus the extension) as the cache path
<block_start>stem=Path(dataset).stem<block_end><else_stmt># To avoid collisions across different directories, we use the unique checksum
# as the cache path
<block_start>stem=alphanum(key)<block_end>ext=ext<or>self.data_format<line_sep>cache_fname=f"{stem}.{tag}.{ext}"<line_sep><return>os.path.join(self.get_cache_directory(dataset) cache_fname)<block_end><def_stmt>get_cache_directory self input_fname<block_start><if_stmt>self._cache_dir<is><none><block_start><if_stmt>input_fname<is><not><none><block_start><return>os.path.dirname(input_fname)<block_end><return>"."<block_end><return>self._cache_dir<block_end><def_stmt>can_cache self skip_save_processed_input<block_start><return>self._dataset_manager.can_cache(skip_save_processed_input)<block_end>@property<def_stmt>data_format self<block_start><return>self._dataset_manager.data_format<block_end><block_end> |
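The following is an illustrative sketch, not part of the dataset entry above: it mirrors the path logic of get_cache_path() for the common case where no cache_dir is set and a named dataset file is used. The file name, the literal "training" tag value, and the "hdf5" extension are assumptions for illustration only.

import os
from pathlib import Path

dataset, tag, ext = "data/train.csv", "training", "hdf5"   # hypothetical values
stem = Path(dataset).stem                     # "train": cache file is named after the input dataset
cache_fname = f"{stem}.{tag}.{ext}"           # "train.training.hdf5", as built in get_cache_path()
print(os.path.join(os.path.dirname(dataset), cache_fname))  # data/train.training.hdf5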
<import_from_stmt>guillotina.contrib.workflows.interfaces IWorkflowChangedEvent<import_from_stmt>guillotina.events ObjectEvent<import_from_stmt>zope.interface implementer<line_sep>@implementer(IWorkflowChangedEvent)<class_stmt>WorkflowChangedEvent(ObjectEvent)<block_start>"""An object's workflow state has changed"""<def_stmt>__init__ self object workflow action comments<block_start>ObjectEvent.__init__(self object)<line_sep>self.object=object<line_sep>self.workflow=workflow<line_sep>self.action=action<line_sep>self.comments=comments<block_end><block_end> |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_from_stmt>common.chrome_proxy_shared_page_state ChromeProxySharedPageState<import_from_stmt>telemetry.page page<as>page_module<import_from_stmt>telemetry story<class_stmt>HTML5TestPage(page_module.Page)<block_start><def_stmt>__init__ self url page_set<block_start>super(HTML5TestPage self).__init__(url=url page_set=page_set shared_page_state_class=ChromeProxySharedPageState)<block_end><block_end><class_stmt>HTML5TestStorySet(story.StorySet)<block_start>""" Chrome proxy test page for traffic over https. """<def_stmt>__init__ self<block_start>super(HTML5TestStorySet self).__init__()<line_sep>urls_list=['http://html5test.com/' ]<for_stmt>url urls_list<block_start>self.AddStory(HTML5TestPage(url self))<block_end><block_end><block_end> |
<import_from_stmt>django.db.models.fields.files FieldFile ImageField ImageFileDescriptor <import_from_stmt>django.utils.translation ugettext<as>_<import_from_stmt>.backends get_backend_class<import_from_stmt>.files VideoFile<class_stmt>VideoFileDescriptor(ImageFileDescriptor)<block_start><pass><block_end><class_stmt>VideoFieldFile(VideoFile FieldFile)<block_start><def_stmt>delete self save=<true># Clear the video info cache
<block_start><if_stmt>hasattr(self '_info_cache')<block_start><del_stmt>self._info_cache<block_end>super(VideoFieldFile self).delete(save=save)<block_end><block_end><class_stmt>VideoField(ImageField)<block_start>attr_class=VideoFieldFile<line_sep>descriptor_class=VideoFileDescriptor<line_sep>description=_("Video")<def_stmt>__init__ self verbose_name=<none> name=<none> duration_field=<none> **kwargs<block_start>self.duration_field=duration_field<line_sep>super(VideoField self).__init__(verbose_name name **kwargs)<block_end><def_stmt>check self **kwargs<block_start>errors=super(ImageField self).check(**kwargs)<line_sep>errors.extend(self._check_backend())<line_sep><return>errors<block_end><def_stmt>_check_backend self<block_start>backend=get_backend_class()<line_sep><return>backend.check()<block_end><def_stmt>to_python self data# use FileField method
<block_start><return>super(ImageField self).to_python(data)<block_end><def_stmt>update_dimension_fields self instance force=<false> *args **kwargs<block_start>_file=getattr(instance self.attname)<line_sep># we need a real file
<if_stmt><not>_file._committed<block_start><return><block_end># write `width` and `height`
super(VideoField self).update_dimension_fields(instance force *args **kwargs)<if_stmt><not>self.duration_field<block_start><return><block_end># Nothing to update if we have no file and not being forced to update.
<if_stmt><not>_file<and><not>force<block_start><return><block_end><if_stmt>getattr(instance self.duration_field)<and><not>force<block_start><return><block_end># get duration if file is defined
duration=_file.duration<if>_file<else><none><line_sep># update duration
setattr(instance self.duration_field duration)<block_end><def_stmt>formfield self **kwargs# use normal FileFieldWidget for now
<block_start><return>super(ImageField self).formfield(**kwargs)<block_end><block_end> |
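A minimal usage sketch for the VideoField defined above (not part of the original file); the model name, field names, and import path are hypothetical, and it assumes a normal Django project where the field class is importable.

from django.db import models
from myapp.fields import VideoField   # hypothetical module path for the field defined above

class Clip(models.Model):
    # duration_field works like ImageField's width_field/height_field:
    # the named model field is filled in from the uploaded file's metadata.
    video = VideoField(upload_to="clips/", duration_field="duration",
                       width_field="width", height_field="height")
    duration = models.FloatField(null=True)
    width = models.IntegerField(null=True)
    height = models.IntegerField(null=True)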
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""<import_stmt>unittest<import_from_stmt>programy.config.file.yaml_file YamlConfigurationFile<import_from_stmt>programy.config.brain.oob BrainOOBConfiguration<import_from_stmt>programy.clients.events.console.config ConsoleConfiguration<class_stmt>BrainOOBConfigurationTests(unittest.TestCase)<block_start><def_stmt>test_oob_with_data self<block_start>yaml=YamlConfigurationFile()<line_sep>self.assertIsNotNone(yaml)<line_sep>yaml.load_from_text("""
brain:
oobs:
default:
classname: programy.oob.defaults.default.DefaultOutOfBandProcessor
""" ConsoleConfiguration() ".")<line_sep>brain_config=yaml.get_section("brain")<line_sep>self.assertIsNotNone(brain_config)<line_sep>oobs_config=yaml.get_section("oobs" brain_config)<line_sep>self.assertIsNotNone(oobs_config)<line_sep>oob_config=BrainOOBConfiguration("default")<line_sep>oob_config.load_config_section(yaml oobs_config ".")<line_sep>self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor" oob_config.classname)<block_end><def_stmt>test_default_without_data self<block_start>yaml=YamlConfigurationFile()<line_sep>self.assertIsNotNone(yaml)<line_sep>yaml.load_from_text("""
brain:
oobs:
default:
""" ConsoleConfiguration() ".")<line_sep>brain_config=yaml.get_section("brain")<line_sep>self.assertIsNotNone(brain_config)<line_sep>oobs_config=yaml.get_section("oobs" brain_config)<line_sep>self.assertIsNotNone(oobs_config)<line_sep>oob_config=BrainOOBConfiguration("default")<line_sep>oob_config.load_config_section(yaml oobs_config ".")<line_sep>self.assertIsNone(oob_config.classname)<block_end><block_end> |
"""\
Code generator functions for wxDatePickerCtrl objects
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@copyright: 2016-2021 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""<import_stmt>common compat<import_stmt>wcodegen<class_stmt>PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter)<block_start>tmpl='%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n'<line_sep># XXX the following needs to depend on the code generator when Phoenix is about to be supported fully:
<if_stmt>compat.IS_PHOENIX<block_start>import_modules=['import wx.adv\n']<block_end><if_stmt>compat.IS_PHOENIX<block_start><def_stmt>cn self name# don't process already formatted items again
<block_start><if_stmt>name.startswith('wx.')<block_start><return>name<block_end><if_stmt>name.startswith('wx')<block_start><return>'wx.adv.'+name[2:]<block_end><elif_stmt>name.startswith('EVT_')<block_start><return>'wx.adv.'+name<block_end><return>name<block_end><block_end><def_stmt>_prepare_tmpl_content self obj<block_start>wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self obj)<line_sep>self.has_setdefault=int(obj.properties.get('default' 0))<line_sep><return><block_end><block_end><class_stmt>CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter)<block_start>import_modules=['<wx/datectrl.h>']<line_sep>tmpl='%(name)s = new %(klass)s(%(parent)s, %(id)s, '<concat>'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, '<concat>'%(style)s);\n'<line_sep>prefix_style=<false><line_sep>set_default_style=<true><def_stmt>_prepare_tmpl_content self obj<block_start>wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self obj)<line_sep>self.has_setdefault=int(obj.properties.get('default' 0))<line_sep><return><block_end><block_end><def_stmt>xrc_code_generator obj<block_start>xrcgen=common.code_writers['XRC']<class_stmt>DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject)<block_start><def_stmt>write_property self name val output tabs<block_start><if_stmt>name<eq>'label'# translate & into _ as accelerator marker
<block_start>val2=val.replace('&' '_')<if_stmt>val.count('&&')<g>0<block_start><while_stmt><true><block_start>index=val.find('&&')<if_stmt>index<l>0<block_start><break><block_end>val=val2[:index]+'&&'+val2[index+2:]<block_end><block_end><else_stmt><block_start>val=val2<block_end><block_end>xrcgen.DefaultXrcObject.write_property(self name val output tabs)<block_end><block_end><return>DatePickerCtrlXrcObject(obj)<block_end><def_stmt>initialize <block_start>klass='wxDatePickerCtrl'<line_sep>common.class_names['EditDatePickerCtrl']=klass<line_sep>common.register('python' klass PythonDatePickerCtrlGenerator(klass))<line_sep>common.register('C++' klass CppDatePickerCtrlGenerator(klass))<line_sep>common.register('XRC' klass xrc_code_generator)<block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>unittest<import_from_stmt>tests.tests_bin_class.test_performance *<if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_stmt>unittest<import_stmt>pytest<import_from_stmt>small_text.integrations.pytorch.exceptions PytorchNotFoundError<try_stmt><block_start><import_from_stmt>small_text.integrations.pytorch.query_strategies BADGE ExpectedGradientLength ExpectedGradientLengthMaxWord <block_end><except_stmt>PytorchNotFoundError<block_start><pass><block_end>@pytest.mark.pytorch<class_stmt>BADGETest(unittest.TestCase)<block_start><def_stmt>test_init_default self<block_start>strategy=BADGE(2)<line_sep>self.assertEqual(2 strategy.num_classes)<block_end><def_stmt>test_init self<block_start>strategy=BADGE(4)<line_sep>self.assertEqual(4 strategy.num_classes)<block_end><def_stmt>test_badge_str self<block_start>strategy=BADGE(2)<line_sep>expected_str='BADGE(num_classes=2)'<line_sep>self.assertEqual(expected_str str(strategy))<block_end><block_end>@pytest.mark.pytorch<class_stmt>ExpectedGradientLengthTest(unittest.TestCase)<block_start><def_stmt>test_init_default self<block_start>strategy=ExpectedGradientLength(2)<line_sep>self.assertEqual(2 strategy.num_classes)<line_sep>self.assertEqual(50 strategy.batch_size)<line_sep>self.assertEqual('cuda' strategy.device)<block_end><def_stmt>test_init self<block_start>strategy=ExpectedGradientLength(4 batch_size=100 device='cpu')<line_sep>self.assertEqual(4 strategy.num_classes)<line_sep>self.assertEqual(100 strategy.batch_size)<line_sep>self.assertEqual('cpu' strategy.device)<block_end><def_stmt>test_expected_gradient_length_str self<block_start>strategy=ExpectedGradientLength(2)<line_sep>expected_str='ExpectedGradientLength()'<line_sep>self.assertEqual(expected_str str(strategy))<block_end><block_end>@pytest.mark.pytorch<class_stmt>ExpectedGradientLengthMaxWordTest(unittest.TestCase)<block_start><def_stmt>test_init_default self<block_start>strategy=ExpectedGradientLengthMaxWord(2 'embedding')<line_sep>self.assertEqual(2 strategy.num_classes)<line_sep>self.assertEqual(50 strategy.batch_size)<line_sep>self.assertEqual('cuda' strategy.device)<line_sep>self.assertEqual('embedding' strategy.layer_name)<block_end><def_stmt>test_init self<block_start>strategy=ExpectedGradientLengthMaxWord(4 'embedding' batch_size=100 device='cpu')<line_sep>self.assertEqual(4 strategy.num_classes)<line_sep>self.assertEqual(100 strategy.batch_size)<line_sep>self.assertEqual('cpu' strategy.device)<line_sep>self.assertEqual('embedding' strategy.layer_name)<block_end><block_end> |
TANGO_PALLETE=['2e2e34343636' 'cccc00000000' '4e4e9a9a0606' 'c4c4a0a00000' '34346565a4a4' '757550507b7b' '060698989a9a' 'd3d3d7d7cfcf' '555557575353' 'efef29292929' '8a8ae2e23434' 'fcfce9e94f4f' '72729f9fcfcf' 'adad7f7fa8a8' '3434e2e2e2e2' 'eeeeeeeeecec' ]<def_stmt>parse_tango_color c<block_start>r=int(c[:4][:2] 16)<line_sep>g=int(c[4:8][:2] 16)<line_sep>b=int(c[8:][:2] 16)<line_sep><return>[r g b 0xFF]<block_end><def_stmt>apply_color cfg color_table<block_start>cfg.default_foreground_color=parse_tango_color('eeeeeeeeecec')<line_sep>cfg.default_background_color=parse_tango_color('323232323232')<line_sep>cfg.default_cursor_color=cfg.default_foreground_color<for_stmt>i range(len(TANGO_PALLETE))<block_start><if_stmt>i<l>len(color_table)<block_start>color_table[i]=parse_tango_color(TANGO_PALLETE[i])<block_end><block_end><block_end> |
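A small worked example (not part of the entry above) showing what parse_tango_color() returns for one palette entry; the arithmetic follows directly from the slicing in the function.

sample = 'cccc00000000'            # second TANGO_PALLETE entry (red)
r = int(sample[:4][:2], 16)        # 'cc' -> 204
g = int(sample[4:8][:2], 16)       # '00' -> 0
b = int(sample[8:][:2], 16)        # '00' -> 0
assert [r, g, b, 0xFF] == [204, 0, 0, 255]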
<import_from_stmt>mongoengine Document<import_from_stmt>mongoengine.fields FloatField StringField ListField URLField ObjectIdField <class_stmt>Shop(Document)<block_start>meta={"collection":"shop"}<line_sep>ID=ObjectIdField()<line_sep>name=StringField()<line_sep>address=StringField()<line_sep>website=URLField()<block_end><class_stmt>Bike(Document)<block_start>meta={"collection":"bike"}<line_sep>ID=ObjectIdField()<line_sep>name=StringField()<line_sep>brand=StringField()<line_sep>year=StringField()<line_sep>size=ListField(StringField())<line_sep>wheel_size=FloatField()<line_sep>type=StringField()<block_end> |
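A brief usage sketch for the documents above (not part of the original file); the database name, field values, and import path are made up for illustration.

from mongoengine import connect
from myapp.models import Bike        # hypothetical import path for the document defined above

connect("bikeshop")                   # hypothetical database name

Bike(name="Roadster", brand="Acme", year="2021",
     size=["S", "M", "L"], wheel_size=28.0, type="road").save()
road_bikes = Bike.objects(type="road")  # query by any declared field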
<import_from_stmt>..charts Chart<import_from_stmt>flask jsonify request<line_sep>_BASE_CONFIG={"showLink":<false> "displaylogo":<false> "modeBarButtonsToRemove":["sendDataToCloud"]}<class_stmt>PlotlyAPI(Chart)<block_start>""" Base class for Plotly.js API
This class is used to create charts using the plotly.js API.
To keep this general, this chart does not have a default
method of transmitting data. Instead, the user must supply
a route_func method.
"""<def_stmt>__init__ self chart_id url route_func init_params={}<block_start>options={"chartid":chart_id "url":url "params":init_params}<line_sep>super(PlotlyAPI self).__init__("PlotlyAPI" options route_func)<block_end>@staticmethod<def_stmt>line_plot df xypairs mode layout={} config=_BASE_CONFIG<block_start>""" basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters
"""<if_stmt>df.empty<block_start><return>{"x":[] "y":[] "mode":mode}<block_end>_data=[]<for_stmt>x,y xypairs<block_start><if_stmt>(x<in>df.columns)<and>(y<in>df.columns)<block_start>_data.append({"x":df[x].values.tolist() "y":df[y].values.tolist() "mode":mode})<block_end><block_end><return>{"data":_data "layout":layout "config":config}<block_end><block_end> |
<import_from_stmt>.functions.deform_conv deform_conv modulated_deform_conv<import_from_stmt>.functions.deform_pool deform_roi_pooling<import_from_stmt>.modules.deform_conv DeformConv ModulatedDeformConv DeformConvPack ModulatedDeformConvPack <import_from_stmt>.modules.deform_pool DeformRoIPooling DeformRoIPoolingPack ModulatedDeformRoIPoolingPack <line_sep>__all__=['DeformConv' 'DeformConvPack' 'ModulatedDeformConv' 'ModulatedDeformConvPack' 'DeformRoIPooling' 'DeformRoIPoolingPack' 'ModulatedDeformRoIPoolingPack' 'deform_conv' 'modulated_deform_conv' 'deform_roi_pooling']<line_sep> |
<def_stmt>gen <block_start>i=0<while_stmt>1<block_start><yield>i<line_sep>i<augadd>1<block_end><block_end>g=gen()<try_stmt><block_start>g.pend_throw<block_end><except_stmt>AttributeError<block_start>print("SKIP")<line_sep><raise>SystemExit<block_end>print(next(g))<line_sep>print(next(g))<line_sep>g.pend_throw(ValueError())<line_sep>v=<none><try_stmt><block_start>v=next(g)<block_end><except_stmt>Exception<as>e<block_start>print("raised" repr(e))<block_end>print("ret was:" v)<line_sep># It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g=gen()<line_sep>g.pend_throw(ValueError())<try_stmt><block_start>next(g)<block_end><except_stmt>ValueError<block_start>print("ValueError from just-started gen")<block_end> |
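For contrast with the MicroPython-specific pend_throw() exercised above, here is a plain CPython sketch (not part of the original test): throw() raises at the point of the call, while pend_throw() defers the exception until the generator is next resumed.

def gen():
    i = 0
    while 1:
        yield i
        i += 1

g = gen()
next(g)
try:
    g.throw(ValueError())   # raises immediately, inside the throw() call
except ValueError:
    print("ValueError from throw")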
<import_stmt>math<import_stmt>numpy<as>np<import_stmt>numpy.random<as>npr<import_stmt>torch<import_stmt>torch.utils.data<as>data<import_stmt>torch.utils.data.sampler<as>torch_sampler<import_from_stmt>torch.utils.data.dataloader default_collate<import_from_stmt>torch._six int_classes<as>_int_classes<import_from_stmt>core.config cfg<import_from_stmt>roi_data.minibatch get_minibatch<import_stmt>utils.blob<as>blob_utils<line_sep># from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
<class_stmt>RoiDataLoader(data.Dataset)<block_start><def_stmt>__init__ self roidb num_classes training=<true><block_start>self._roidb=roidb<line_sep>self._num_classes=num_classes<line_sep>self.training=training<line_sep>self.DATA_SIZE=len(self._roidb)<block_end><def_stmt>__getitem__ self index_tuple<block_start>index,ratio=index_tuple<line_sep>single_db=[self._roidb[index]]<line_sep>blobs,valid=get_minibatch(single_db self._num_classes)<line_sep>#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
# for key in blobs:
# if key != 'roidb':
# blobs[key] = blobs[key].squeeze(axis=0)
blobs['data']=blobs['data'].squeeze(axis=0)<line_sep><return>blobs<block_end><def_stmt>__len__ self<block_start><return>self.DATA_SIZE<block_end><block_end><def_stmt>cal_minibatch_ratio ratio_list<block_start>"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only works when 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES contains a SINGLE scale.
Since all prepared images will have the same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images based on that.
"""<line_sep>DATA_SIZE=len(ratio_list)<line_sep>ratio_list_minibatch=np.empty((DATA_SIZE ))<line_sep>num_minibatch=int(np.ceil(DATA_SIZE/cfg.TRAIN.IMS_PER_BATCH))# Include leftovers
<for_stmt>i range(num_minibatch)<block_start>left_idx=i<times>cfg.TRAIN.IMS_PER_BATCH<line_sep>right_idx=min((i+1)<times>cfg.TRAIN.IMS_PER_BATCH-1 DATA_SIZE-1)<if_stmt>ratio_list[right_idx]<l>1# for ratio < 1, we preserve the leftmost in each batch.
<block_start>target_ratio=ratio_list[left_idx]<block_end><elif_stmt>ratio_list[left_idx]<g>1# for ratio > 1, we preserve the rightmost in each batch.
<block_start>target_ratio=ratio_list[right_idx]<block_end><else_stmt># for a batch whose ratios cross 1, we make the target ratio 1.
<block_start>target_ratio=1<block_end>ratio_list_minibatch[left_idx:(right_idx+1)]=target_ratio<block_end><return>ratio_list_minibatch<block_end><class_stmt>MinibatchSampler(torch_sampler.Sampler)<block_start><def_stmt>__init__ self ratio_list ratio_index<block_start>self.ratio_list=ratio_list<line_sep>self.ratio_index=ratio_index<line_sep>self.num_data=len(ratio_list)<block_end><def_stmt>__iter__ self<block_start>rand_perm=npr.permutation(self.num_data)<line_sep>ratio_list=self.ratio_list[rand_perm]<line_sep>ratio_index=self.ratio_index[rand_perm]<line_sep># re-calculate minibatch ratio list
ratio_list_minibatch=cal_minibatch_ratio(ratio_list)<line_sep><return>iter(zip(ratio_index.tolist() ratio_list_minibatch.tolist()))<block_end><def_stmt>__len__ self<block_start><return>self.num_data<block_end><block_end><class_stmt>BatchSampler(torch_sampler.BatchSampler)<block_start>r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""<def_stmt>__init__ self sampler batch_size drop_last<block_start><if_stmt><not>isinstance(sampler torch_sampler.Sampler)<block_start><raise>ValueError("sampler should be an instance of "<concat>"torch.utils.data.Sampler, but got sampler={}".format(sampler))<block_end><if_stmt><not>isinstance(batch_size _int_classes)<or>isinstance(batch_size bool)<or>batch_size<le>0<block_start><raise>ValueError("batch_size should be a positive integeral value, "<concat>"but got batch_size={}".format(batch_size))<block_end><if_stmt><not>isinstance(drop_last bool)<block_start><raise>ValueError("drop_last should be a boolean value, but got "<concat>"drop_last={}".format(drop_last))<block_end>self.sampler=sampler<line_sep>self.batch_size=batch_size<line_sep>self.drop_last=drop_last<block_end><def_stmt>__iter__ self<block_start>batch=[]<for_stmt>idx self.sampler<block_start>batch.append(idx)# Difference: batch.append(int(idx))
<if_stmt>len(batch)<eq>self.batch_size<block_start><yield>batch<line_sep>batch=[]<block_end><block_end><if_stmt>len(batch)<g>0<and><not>self.drop_last<block_start><yield>batch<block_end><block_end><def_stmt>__len__ self<block_start><if_stmt>self.drop_last<block_start><return>len(self.sampler)<floordiv>self.batch_size<block_end><else_stmt><block_start><return>(len(self.sampler)+self.batch_size-1)<floordiv>self.batch_size<block_end><block_end><block_end><def_stmt>collate_minibatch list_of_blobs<block_start>"""Stack samples separately and return a list of minibatches
A batch contains NUM_GPUS minibatches, and image sizes may differ between minibatches.
Hence, we need to stack samples from each minibatch separately.
"""<line_sep>Batch={key:[]<for>key list_of_blobs[0]}<line_sep># Because roidb consists of entries of variable length, it can't be batch into a tensor.
# So we keep roidb as a list of ndarrays.
lists=[]<for_stmt>blobs list_of_blobs<block_start>lists.append({'data':blobs.pop('data') 'rois':blobs.pop('rois') 'labels':blobs.pop('labels')})<block_end><for_stmt>i range(0 len(list_of_blobs) cfg.TRAIN.IMS_PER_BATCH)<block_start>mini_list=lists[i:(i+cfg.TRAIN.IMS_PER_BATCH)]<line_sep>minibatch=default_collate(mini_list)<for_stmt>key minibatch<block_start>Batch[key].append(minibatch[key])<block_end><block_end><return>Batch<block_end> |
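A small worked example (not part of the entry above) of how cal_minibatch_ratio() assigns one target ratio per minibatch; it assumes cfg.TRAIN.IMS_PER_BATCH == 2, which is not stated in the original code.

import numpy as np

ratio_list = np.array([0.5, 0.8, 1.2, 2.0])  # sorted aspect ratios, 2 images per batch assumed
# batch [0.5, 0.8]: both ratios < 1 -> keep the leftmost  -> 0.5
# batch [1.2, 2.0]: both ratios > 1 -> keep the rightmost -> 2.0
# cal_minibatch_ratio(ratio_list) would therefore return [0.5, 0.5, 2.0, 2.0]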
# -------------------------------------------------------------------------- #
# OpenSim Moco: examplePredictAndTrack.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2018 Stanford University and the Authors #
# #
# Author(s): <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
<import_stmt>os<import_stmt>math<import_stmt>opensim<as>osim<line_sep>"""
This file performs the following problems using a
double pendulum model:
1. predict an optimal trajectory (and controls),
2. track the states from the optimal trajectory, and
3. track the marker trajectories from the optimal trajectory.
"""<line_sep>visualize=<true><line_sep># The following environment variable is set during automated testing.
<if_stmt>os.getenv('OPENSIM_USE_VISUALIZER')<eq>'0'<block_start>visualize=<false><block_end># Create a model of a double pendulum.
# ------------------------------------
<def_stmt>createDoublePendulumModel <block_start>model=osim.Model()<line_sep>model.setName("double_pendulum")<line_sep># Create two links, each with a mass of 1 kg, center of mass at the body's
# origin, and moments and products of inertia of zero.
b0=osim.Body("b0" 1 osim.Vec3(0) osim.Inertia(1))<line_sep>model.addBody(b0)<line_sep>b1=osim.Body("b1" 1 osim.Vec3(0) osim.Inertia(1))<line_sep>model.addBody(b1)<line_sep># Add markers to body origin locations.
m0=osim.Marker("m0" b0 osim.Vec3(0))<line_sep>m1=osim.Marker("m1" b1 osim.Vec3(0))<line_sep>model.addMarker(m0)<line_sep>model.addMarker(m1)<line_sep># Connect the bodies with pin joints. Assume each body is 1 m long.
j0=osim.PinJoint("j0" model.getGround() osim.Vec3(0) osim.Vec3(0) b0 osim.Vec3(-1 0 0) osim.Vec3(0))<line_sep>q0=j0.updCoordinate()<line_sep>q0.setName("q0")<line_sep>j1=osim.PinJoint("j1" b0 osim.Vec3(0) osim.Vec3(0) b1 osim.Vec3(-1 0 0) osim.Vec3(0))<line_sep>q1=j1.updCoordinate()<line_sep>q1.setName("q1")<line_sep>model.addJoint(j0)<line_sep>model.addJoint(j1)<line_sep>tau0=osim.CoordinateActuator()<line_sep>tau0.setCoordinate(j0.updCoordinate())<line_sep>tau0.setName("tau0")<line_sep>tau0.setOptimalForce(1)<line_sep>model.addComponent(tau0)<line_sep>tau1=osim.CoordinateActuator()<line_sep>tau1.setCoordinate(j1.updCoordinate())<line_sep>tau1.setName("tau1")<line_sep>tau1.setOptimalForce(1)<line_sep>model.addComponent(tau1)<line_sep># Add display geometry.
bodyGeometry=osim.Ellipsoid(0.5 0.1 0.1)<line_sep>transform=osim.Transform(osim.Vec3(-0.5 0 0))<line_sep>b0Center=osim.PhysicalOffsetFrame("b0_center" b0 transform)<line_sep>b0.addComponent(b0Center)<line_sep>b0Center.attachGeometry(bodyGeometry.clone())<line_sep>b1Center=osim.PhysicalOffsetFrame("b1_center" b1 transform)<line_sep>b1.addComponent(b1Center)<line_sep>b1Center.attachGeometry(bodyGeometry.clone())<line_sep>model.finalizeConnections()<line_sep>model.printToXML("double_pendulum.osim")<line_sep><return>model<block_end><def_stmt>solvePrediction # Predict the optimal trajectory for a minimum time swing-up.
# In the diagram below, + represents the origin, and ---o represents a link
# in the double pendulum.
#
#                              o
#                              |
#                              o
#                              |
#      +---o---o               +
#
#     initial pose          final pose
#
<block_start>study=osim.MocoStudy()<line_sep>study.setName("double_pendulum_predict")<line_sep>problem=study.updProblem()<line_sep># Model (dynamics).
problem.setModel(createDoublePendulumModel())<line_sep># Bounds.
problem.setTimeBounds(0 [0 5])<line_sep># Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
problem.setStateInfo("/jointset/j0/q0/value" [-10 10] 0)<line_sep>problem.setStateInfo("/jointset/j0/q0/speed" [-50 50] 0 0)<line_sep>problem.setStateInfo("/jointset/j1/q1/value" [-10 10] 0)<line_sep>problem.setStateInfo("/jointset/j1/q1/speed" [-50 50] 0 0)<line_sep>problem.setControlInfo("/tau0" [-100 100])<line_sep>problem.setControlInfo("/tau1" [-100 100])<line_sep># Cost: minimize final time and error from desired
# end effector position.
ftCost=osim.MocoFinalTimeGoal()<line_sep>ftCost.setWeight(0.001)<line_sep>problem.addGoal(ftCost)<line_sep>finalCost=osim.MocoMarkerFinalGoal()<line_sep>finalCost.setName("final")<line_sep>finalCost.setWeight(1000.0)<line_sep>finalCost.setPointName("/markerset/m1")<line_sep>finalCost.setReferenceLocation(osim.Vec3(0 2 0))<line_sep>problem.addGoal(finalCost)<line_sep># Configure the solver.
solver=study.initTropterSolver()<line_sep>solver.set_num_mesh_intervals(100)<line_sep>solver.set_verbosity(2)<line_sep>solver.set_optim_solver("ipopt")<line_sep>guess=solver.createGuess()<line_sep>guess.setNumTimes(2)<line_sep>guess.setTime([0 1])<line_sep>guess.setState("/jointset/j0/q0/value" [0 -math.pi])<line_sep>guess.setState("/jointset/j1/q1/value" [0 2<times>math.pi])<line_sep>guess.setState("/jointset/j0/q0/speed" [0 0])<line_sep>guess.setState("/jointset/j1/q1/speed" [0 0])<line_sep>guess.setControl("/tau0" [0 0])<line_sep>guess.setControl("/tau1" [0 0])<line_sep>guess.resampleWithNumTimes(10)<line_sep>solver.setGuess(guess)<line_sep># Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_predict.omoco")<line_sep># Solve the problem.
solution=study.solve()<line_sep>solution.write("examplePredictAndTrack_predict_solution.sto")<if_stmt>visualize<block_start>study.visualize(solution)<block_end><return>solution<block_end><def_stmt>computeMarkersReference predictedSolution<block_start>model=createDoublePendulumModel()<line_sep>model.initSystem()<line_sep>states=predictedSolution.exportToStatesTable()<line_sep>statesTraj=osim.StatesTrajectory.createFromStatesTable(model states)<line_sep>markerTrajectories=osim.TimeSeriesTableVec3()<line_sep>markerTrajectories.setColumnLabels(["/markerset/m0" "/markerset/m1"])<for_stmt>state statesTraj<block_start>model.realizePosition(state)<line_sep>m0=model.getComponent("markerset/m0")<line_sep>m1=model.getComponent("markerset/m1")<line_sep>markerTrajectories.appendRow(state.getTime() osim.RowVectorVec3([m0.getLocationInGround(state) m1.getLocationInGround(state)]))<block_end># Assign a weight to each marker.
markerWeights=osim.SetMarkerWeights()<line_sep>markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m0" 1))<line_sep>markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m1" 5))<line_sep><return>osim.MarkersReference(markerTrajectories markerWeights)<block_end><def_stmt>solveStateTracking stateRef# Track the states from the optimal (predicted) trajectory.
<block_start>study=osim.MocoStudy()<line_sep>study.setName("double_pendulum_track")<line_sep>problem=study.updProblem()<line_sep># Model (dynamics).
problem.setModel(createDoublePendulumModel())<line_sep># Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime=stateRef.getIndependentColumn()[-1]<line_sep>problem.setTimeBounds(0 finalTime)<line_sep>problem.setStateInfo("/jointset/j0/q0/value" [-10 10] 0)<line_sep>problem.setStateInfo("/jointset/j0/q0/speed" [-50 50] 0)<line_sep>problem.setStateInfo("/jointset/j1/q1/value" [-10 10] 0)<line_sep>problem.setStateInfo("/jointset/j1/q1/speed" [-50 50] 0)<line_sep>problem.setControlInfo("/tau0" [-150 150])<line_sep>problem.setControlInfo("/tau1" [-150 150])<line_sep># Cost: track provided state data.
stateTracking=osim.MocoStateTrackingGoal()<line_sep>stateTracking.setReference(osim.TableProcessor(stateRef))<line_sep>problem.addGoal(stateTracking)<line_sep>effort=osim.MocoControlGoal()<line_sep>effort.setName("effort")<line_sep>effort.setWeight(0.001)<line_sep># TODO problem.addGoal(effort)
# Configure the solver.
solver=study.initTropterSolver()<line_sep>solver.set_num_mesh_intervals(50)<line_sep>solver.set_verbosity(2)<line_sep>solver.set_optim_solver("ipopt")<line_sep>solver.set_optim_jacobian_approximation("exact")<line_sep>solver.set_optim_hessian_approximation("exact")<line_sep>solver.set_exact_hessian_block_sparsity_mode("dense")<line_sep># Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_states.omoco")<line_sep># Solve the problem.
solution=study.solve()<line_sep>solution.write("examplePredictAndTrack_track_states_solution.sto")<if_stmt>visualize<block_start>study.visualize(solution)<block_end><return>solution<block_end><def_stmt>solveMarkerTracking markersRef guess# Track the marker trajectories from the optimal (predicted) trajectory.
<block_start>study=osim.MocoStudy()<line_sep>study.setName("double_pendulum_track")<line_sep>problem=study.updProblem()<line_sep># Model (dynamics).
problem.setModel(createDoublePendulumModel())<line_sep># Bounds.
# Arguments are name, [lower bound, upper bound],
# initial [lower bound, upper bound],
# final [lower bound, upper bound].
finalTime=markersRef.getMarkerTable().getIndependentColumn()[-1]<line_sep>problem.setTimeBounds(0 finalTime)<line_sep>problem.setStateInfo("/jointset/j0/q0/value" [-10 10] 0)<line_sep>problem.setStateInfo("/jointset/j0/q0/speed" [-50 50] 0)<line_sep>problem.setStateInfo("/jointset/j1/q1/value" [-10 10] 0)<line_sep>problem.setStateInfo("/jointset/j1/q1/speed" [-50 50] 0)<line_sep>problem.setControlInfo("/tau0" [-100 100])<line_sep>problem.setControlInfo("/tau1" [-100 100])<line_sep># Cost: track provided marker data.
markerTracking=osim.MocoMarkerTrackingGoal()<line_sep>markerTracking.setMarkersReference(markersRef)<line_sep>problem.addGoal(markerTracking)<line_sep>effort=osim.MocoControlGoal()<line_sep>effort.setName("effort")<line_sep>effort.setWeight(0.0001)<line_sep># problem.addGoal(effort)
# Configure the solver.
solver=study.initTropterSolver()<line_sep>solver.set_num_mesh_intervals(50)<line_sep>solver.set_verbosity(2)<line_sep>solver.set_optim_solver("ipopt")<line_sep>solver.set_optim_jacobian_approximation("exact")<line_sep>solver.set_optim_hessian_approximation("exact")<line_sep>solver.set_exact_hessian_block_sparsity_mode("dense")<line_sep>solver.setGuess(guess)<line_sep># Save the problem to a setup file for reference.
study.printToXML("examplePredictAndTrack_track_markers.omoco")<line_sep># Solve the problem.
solution=study.solve()<line_sep>solution.write("examplePredictAndTrack_track_markers_solution.sto")<if_stmt>visualize<block_start>study.visualize(solution)<block_end><return>solution<block_end>optimalTrajectory=solvePrediction()<line_sep>markersRef=computeMarkersReference(optimalTrajectory)<line_sep>trackedSolution=solveStateTracking(optimalTrajectory.exportToStatesTable())<line_sep>trackedSolution2=solveMarkerTracking(markersRef trackedSolution)<line_sep> |