Columns: rem (removed code, string, 0 to 322k chars), add (replacement code, string, 0 to 2.05M chars), context (surrounding code, string, 8 to 228k chars).
print "di"
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=np.nan): """ Initialize a 2D interpolator.
print self.x, self.y, self.z
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=np.nan): """ Initialize a 2D interpolator.
expr = numexpr("2.0*a+3.0*c",[('a',float),('c', float)]) assert_array_equal(expr(a,c), 2.0*a+3.0*c) def check_all_scalar(self): a = 3. b = 4. assert_equal(evaluate("a+b"), a+b) expr = numexpr("2*a+3*b",[('a',float),('b', float)]) assert_equal(expr(a,b), 2*a+3*b) def check_run(self): a = arange(100).reshape(10,10)[::2] b = arange(10) expr = numexpr("2*a+3*b",[('a',float),('b', float)]) assert_array_equal(expr(a,b), expr.run(a,b))
def check_broadcasting(self): a = arange(100).reshape(10,10)[::2] c = arange(10) d = arange(5).reshape(5,1) assert_array_equal(evaluate("a+c"), a+c) assert_array_equal(evaluate("a+d"), a+d)
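The two rows above exercise numexpr's evaluate() interface and its broadcasting over strided inputs. A minimal, self-contained sketch of the evaluate() usage being tested (assuming the numexpr package is installed; the array names a and b mirror the test):

import numpy as np
import numexpr as ne

a = np.arange(100, dtype=float).reshape(10, 10)[::2]   # strided input, as in the test
b = np.arange(10, dtype=float)

# evaluate() parses the expression string and runs it over the passed arrays,
# broadcasting b across the rows of a just as plain NumPy would.
result = ne.evaluate("2*a + 3*b")
assert np.allclose(result, 2*a + 3*b)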
M = amax(new.rowind) + 1
M = int(amax(new.rowind)) + 1
def Construct(s, ij=None, M=None ,N=None, nzmax=100, dtype='d', copy=False): """ Allows constructing a csc_matrix by passing: - data, ij, {M,N,nzmax} a[ij[k,0],ij[k,1]] = data[k] - data, (row, ptr) """ # Moved out of the __init__ function for now for simplicity. # I think this should eventually be moved to be a module-level # function. Otherwise we overload the __init__ method too much, # given Python's weak type checking. This should also remove # some code duplication.
new.data = new.data * other
new.data *= other
def __mul__(self, other): # implement matrix multiplication and matrix-vector multiplication if isspmatrix(other): return self.matmat(other) elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: return self.matvec(other)
if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other):
if isscalar(other):
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csc_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] nnz1, nnz2 = self.nnz, ocs.nnz data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2], dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,ocs.rowind[:nnz2],ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csc_matrix.Construct(c,(rowc,ptrc),M=M,N=N)
new.data = new.data * other
new.data = new.data ** other
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csc_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] nnz1, nnz2 = self.nnz, ocs.nnz data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2], dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,ocs.rowind[:nnz2],ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csc_matrix.Construct(c,(rowc,ptrc),M=M,N=N)
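The fix above is in the scalar branch of __pow__: the stored nonzeros must be raised to the power, not multiplied by it. A tiny NumPy-only illustration of the difference (the array stands in for the matrix's data attribute):

import numpy as np

data = np.array([1.0, 2.0, 3.0])   # stored nonzero entries of a sparse matrix
other = 3

scaled = data * other     # what the removed line computed: [3., 6., 9.]
powered = data ** other   # what __pow__ should compute:    [1., 8., 27.]
print(scaled, powered)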
new = csr_matrix(N,M,nzmax=0,dtype=self._dtypechar)
new = csr_matrix((N,M), nzmax=0, dtype=self._dtypechar)
def transpose(self, copy=False): M,N = self.shape new = csr_matrix(N,M,nzmax=0,dtype=self._dtypechar) if copy: new.data = self.data.copy() new.colind = self.rowind.copy() new.indptr = self.indptr.copy() else: new.data = self.data new.colind = self.rowind new.indptr = self.indptr new._check() return new
elif isinstance(key,type(3)):
elif type(key) == int:
def __getitem__(self, key): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscgetel') M, N = self.shape if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds." ind, val = func(self.data, self.rowind, self.indptr, row, col) return val elif isinstance(key,type(3)): return self.data[key] else: raise NotImplementedError
M, N = self.shape
def copy(self): M, N = self.shape dtype = self._dtypechar new = csc_matrix.Construct(M, N, nzmax=0, dtype=dtype) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
new = csc_matrix.Construct(M, N, nzmax=0, dtype=dtype)
new = csc_matrix(self.shape, nzmax=0, dtype=dtype)
def copy(self): M, N = self.shape dtype = self._dtypechar new = csc_matrix.Construct(M, N, nzmax=0, dtype=dtype) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
N = amax(new.colind) + 1
N = int(amax(new.colind)) + 1
def Construct(s, ij=None, M=None ,N=None, nzmax=100, dtype='d', copy=False): """ Allows constructing a csr_matrix by passing: - data, ij, {M,N,nzmax} a[ij[k,0],ij[k,1]] = data[k] - data, (row, ptr) """ # Moved out of the __init__ function for now for simplicity. # I think this should eventually be moved to be a module-level # function. Otherwise we overload the __init__ method too much, # given Python's weak type checking. This should also remove # some code duplication. if (isinstance(s, ArrayType) or \ isinstance(s, type([]))): s = asarray(s) if (rank(s) == 2): # converting from a full array ocsc = csc_matrix(transpose(s)) dims = (ocsc.shape[1], ocsc.shape[0]) new = csr_matrix(dims) new.shape = dims new.colind = ocsc.rowind new.indptr = ocsc.indptr new.data = ocsc.data elif isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s),2)): ijnew = ij.copy() ijnew[:,0] = ij[:,1] ijnew[:,1] = ij[:,0] temp = coo_matrix(s,ijnew,M=M,N=N,nzmax=nzmax, dtype=dtype) temp = temp.tocsc() dims = temp.shape new = csr_matrix(dims) new.data = temp.data new.colind = temp.colind new.indptr = temp.indptr # new.shape = temp.shape elif isinstance(ij, types.TupleType) and (len(ij)==2): # What are the new dimensions? Do we need to know them now? dims = (0,0) new = csr_matrix(dims) new.data = asarray(s) new.colind = ij[0] new.indptr = ij[1] if N is None: try: N = amax(new.colind) + 1 except ValueError: N = 0 if M is None: M = len(new.indptr) - 1 if M == -1: M = 0 new.shape = (M,N) else: raise ValueError, "Unrecognized form for csr_matrix constructor." else: raise ValueError, "Unrecognized form for csr_matrix constructor."
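Several rows in this section wrap amax(...) in int() before using the result in a shape tuple. A short sketch of why: NumPy's max returns a NumPy integer scalar rather than a plain Python int, and the int() wrapper presumably keeps downstream code that expects genuine ints happy (the variable names here are illustrative):

import numpy as np

colind = np.array([0, 4, 2], dtype=np.intc)

n_raw = np.max(colind) + 1        # a NumPy integer scalar, e.g. numpy.int32
n     = int(np.max(colind)) + 1   # a plain Python int, safe for a shape tuple

print(type(n_raw).__name__, type(n).__name__)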
if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other):
if isscalar(other):
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csr_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] data1, data2 = _convert_data(self.data, ocs.data, dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,colc,ptrc,ierr = func(data1,self.colind,self.indptr,data2,ocs.colind,ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csr_matrix.Construct(c,(colc,ptrc),M=M,N=N)
new.data = new.data * other
new.data = new.data ** other
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csr_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] data1, data2 = _convert_data(self.data, ocs.data, dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,colc,ptrc,ierr = func(data1,self.colind,self.indptr,data2,ocs.colind,ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csr_matrix.Construct(c,(colc,ptrc),M=M,N=N)
elif isinstance(key,type(3)):
elif type(key) == int:
def __getitem__(self, key): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscgetel') M, N = self.shape if (row < 0): row = M + row if (col < 0): col = N + col if (row >= M ) or (col >= N) or (row < 0) or (col < 0): raise IndexError, "Index out of bounds." ind, val = func(self.data, self.colind, self.indptr, col, row) return val elif isinstance(key,type(3)): return self.data[key] else: raise NotImplementedError
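The change from isinstance(key, type(3)) to type(key) == int is subtle: isinstance also accepts subclasses of int, most notably bool, while the exact type check does not. A two-line demonstration (purely illustrative):

key = True   # bool is a subclass of int

print(isinstance(key, type(3)))   # True  -- the removed check would accept it
print(type(key) == int)           # False -- the added check rejects the subclass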
M, N = self.shape new = csr_matrix(M, N, nzmax=0, dtype=self._dtypechar)
new = csr_matrix(self.shape, nzmax=0, dtype=self._dtypechar)
def copy(self): M, N = self.shape new = csr_matrix(M, N, nzmax=0, dtype=self._dtypechar) new.data = self.data.copy() new.colind = self.colind.copy() new.indptr = self.indptr.copy() new._check() return new
keys = self.keys()
def matvec(self, other): other = asarray(other) if other.shape[0] != self.shape[1]: raise ValueError, "Dimensions do not match." keys = self.keys() res = [0]*self.shape[0] for key in keys: res[int(key[0])] += self[key] * other[int(key[1]),...] return array(res)
for key in keys:
for key in self.keys():
def matvec(self, other): other = asarray(other) if other.shape[0] != self.shape[1]: raise ValueError, "Dimensions do not match." keys = self.keys() res = [0]*self.shape[0] for key in keys: res[int(key[0])] += self[key] * other[int(key[1]),...] return array(res)
keys = self.keys()
def rmatvec(self, other): other = asarray(other)
for key in keys:
for key in self.keys():
def rmatvec(self, other): other = asarray(other)
M = amax(ij[0])
M = int(amax(ij[0]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
N = amax(ij[1])
N = int(amax(ij[1]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
M = amax(aij[:,0])
M = int(amax(aij[:,0]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
N = amax(aij[:,1])
N = int(amax(aij[:,1]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
edges.update(zip(self.triangle_nodes[border[:,0]][:,1], self.triangle_nodes[border[:,0]][:,2])) edges.update(zip(self.triangle_nodes[border[:,1]][:,2], self.triangle_nodes[border[:,1]][:,0])) edges.update(zip(self.triangle_nodes[border[:,2]][:,0], self.triangle_nodes[border[:,2]][:,1]))
edges.update(dict(zip(self.triangle_nodes[border[:,0]][:,1], self.triangle_nodes[border[:,0]][:,2]))) edges.update(dict(zip(self.triangle_nodes[border[:,1]][:,2], self.triangle_nodes[border[:,1]][:,0]))) edges.update(dict(zip(self.triangle_nodes[border[:,2]][:,0], self.triangle_nodes[border[:,2]][:,1])))
def _compute_convex_hull(self): """Extract the convex hull from the triangulation information.
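The change wraps each zip(...) in dict(...) before calling edges.update(). A plausible reason (an assumption, not stated in the diff) is compatibility with older Python releases in which dict.update() required a mapping rather than an iterable of key/value pairs; the resulting dictionary is the same either way:

edges = {}
pairs = zip([1, 2, 3], [4, 5, 6])

# Wrapping in dict() first mirrors the patched call; on modern Pythons
# update() also accepts the iterable of pairs directly.
edges.update(dict(pairs))
print(edges)   # {1: 4, 2: 5, 3: 6}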
def configuration(parent_package='',parent_path=None): from scipy.distutils.system_info import get_info package = 'cluster' local_path = get_path(__name__,parent_path) config = Configuration(package,parent_package)
def configuration(parent_package='',top_path=None): from scipy.distutils.misc_util import Configuration config = Configuration('cluster',parent_package,top_path) config.add_data_dir('tests')
def configuration(parent_package='',parent_path=None): from scipy.distutils.system_info import get_info package = 'cluster' local_path = get_path(__name__,parent_path) config = Configuration(package,parent_package) config.add_extension('_vq', sources=[join('src', 'vq_wrap.cpp')]) return config
**configuration()
**configuration(top_path='').todict()
def configuration(parent_package='',parent_path=None): from scipy.distutils.system_info import get_info package = 'cluster' local_path = get_path(__name__,parent_path) config = Configuration(package,parent_package) config.add_extension('_vq', sources=[join('src', 'vq_wrap.cpp')]) return config
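The replacement configuration() above follows the Configuration-object pattern, and the **configuration(top_path='').todict() call shows how it is handed to setup(). A hedged sketch of the full setup.py shape, written against numpy.distutils, which exposes the same Configuration interface as the scipy.distutils used here (an assumption based on their shared lineage):

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('cluster', parent_package, top_path)
    config.add_data_dir('tests')
    config.add_extension('_vq', sources=['src/vq_wrap.cpp'])
    return config

if __name__ == '__main__':
    from numpy.distutils.core import setup
    # todict() turns the Configuration object into plain setup() keywords.
    setup(**configuration(top_path='').todict())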
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'):
def __init__(self, freq, year=None, month=None, day=None, seconds=None,quarter=None, mxDate=None, val=None): if hasattr(freq, 'freq'):
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'): self.freq = corelib.fmtFreq(freq.freq) else: self.freq = corelib.fmtFreq(freq) self.type = corelib.freqToType(self.freq) if val is not None: if self.freq == 'D': self.__date = val+originDate elif self.freq == 'B': self.__date = originDate + val + (val//5)*7 - (val//5)*5 elif self.freq == 'S': self.__date = secondlyOriginDate + mx.DateTime.DateTimeDeltaFromSeconds(val) elif self.freq == 'M': self.__date = originDate + mx.DateTime.RelativeDateTime(months=val, day=-1) elif self.freq == 'A': self.__date = originDate + mx.DateTime.RelativeDateTime(years=val, month=-1, day=-1) elif self.freq == 'Q': self.__date = originDate + 1 + mx.DateTime.RelativeDateTime(years=int(val/4), month=int(12 * (float(val)/4 - val/4)), day=-1) elif date is not None: self.__date = date else: error = ValueError("Insufficient parameters given to create a date at the given frequency")
elif date is not None: self.__date = date
elif mxDate is not None: self.__date = mxDate
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'): self.freq = corelib.fmtFreq(freq.freq) else: self.freq = corelib.fmtFreq(freq) self.type = corelib.freqToType(self.freq) if val is not None: if self.freq == 'D': self.__date = val+originDate elif self.freq == 'B': self.__date = originDate + val + (val//5)*7 - (val//5)*5 elif self.freq == 'S': self.__date = secondlyOriginDate + mx.DateTime.DateTimeDeltaFromSeconds(val) elif self.freq == 'M': self.__date = originDate + mx.DateTime.RelativeDateTime(months=val, day=-1) elif self.freq == 'A': self.__date = originDate + mx.DateTime.RelativeDateTime(years=val, month=-1, day=-1) elif self.freq == 'Q': self.__date = originDate + 1 + mx.DateTime.RelativeDateTime(years=int(val/4), month=int(12 * (float(val)/4 - val/4)), day=-1) elif date is not None: self.__date = date else: error = ValueError("Insufficient parameters given to create a date at the given frequency")
if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y")
if self.freq in ("B", "D"): return self.strfmt("%d-%b-%y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
return self.__date.strftime("%d-%b-%Y %H:%M:%S")
return self.strfmt("%d-%b-%Y %H:%M:%S")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
return self.__date.strftime("%b-%Y")
return self.strfmt("%b-%Y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
return str(self.year())+"q"+str(self.quarter())
return self.strfmt("%Yq%q")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
return str(self.year())
return self.strfmt("%Y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
return self.__date.strftime("%d-%b-%y")
return self.strfmt("%d-%b-%y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
if self.freq <> other.freq: raise ValueError("Cannont subtract dates of different frequency (" + str(self.freq) + " <> " + str(other.freq) + ")")
if self.freq != other.freq: raise ValueError("Cannont subtract dates of different frequency (" + str(self.freq) + " != " + str(other.freq) + ")")
def __sub__(self, other): try: return self + (-1) * other except: pass try: if self.freq <> other.freq: raise ValueError("Cannont subtract dates of different frequency (" + str(self.freq) + " <> " + str(other.freq) + ")") return int(self) - int(other) except TypeError: raise TypeError("Could not subtract types " + str(type(self)) + " and " + str(type(other)))
if self.freq <> other.freq:
if self.freq != other.freq:
def __eq__(self, other): if self.freq <> other.freq: raise TypeError("frequencies are not equal!") return int(self) == int(other)
if self.freq <> other.freq:
if self.freq != other.freq:
def __cmp__(self, other): if self.freq <> other.freq: raise TypeError("frequencies are not equal!") return int(self)-int(other)
return Date(freq, date=tempDate)
return Date(freq, mxDate=tempDate)
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
return Date(freq,tempDate.year,tempDate.month)
return Date(freq, year=tempDate.year, month=tempDate.month)
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month))
return Date(freq, year=tempDate.year, quarter=monthToQuarter(tempDate.month))
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
return Date(freq,tempDate.year) def prevbusday(day_end_hour=18,day_end_min=0):
return Date(freq, year=tempDate.year) def prevbusday(day_end_hour=18, day_end_min=0):
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
def dateOf(_date,_destFreq,_relation="BEFORE"): _destFreq = corelib.fmtFreq(_destFreq) _rel = _relation.upper()[0] if _date.freq == _destFreq: return _date elif _date.freq == 'D': if _destFreq == 'B': tempDate = _date.mxDate() if _rel == "B":
def dateOf(date, toFreq, relation="BEFORE"): toFreq = corelib.fmtFreq(toFreq) _rel = relation.upper()[0] if date.freq == toFreq: return date elif date.freq == 'D': if toFreq == 'B': tempDate = date.mxDate() if _rel == 'B':
def prevbusday(day_end_hour=18,day_end_min=0): tempDate = mx.DateTime.localtime() dateNum = tempDate.hour + float(tempDate.minute)/60 checkNum = day_end_hour + float(day_end_min)/60 if dateNum < checkNum: return thisday('B') - 1 else: return thisday('B')
'blas_src',blas_src_info['sources'],
'blas_src',blas_src_info['sources'] + \ [os.path.join(local_path,'src','fblaswrap.f')],
def configuration(parent_package=''): if sys.platform == 'win32': import scipy_distutils.mingw32_support from scipy_distutils.core import Extension from scipy_distutils.misc_util import get_path, default_config_dict from scipy_distutils.misc_util import fortran_library_item, dot_join from scipy_distutils.system_info import get_info,dict_append,\ AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\ LapackSrcNotFoundError,BlasSrcNotFoundError package = 'linalg' from interface_gen import generate_interface config = default_config_dict(package,parent_package) local_path = get_path(__name__) m = re.compile(r'(build|install|bdist|run_f2py)') if not filter(m.match,sys.argv): sources = [] sources += glob(os.path.join(local_path,'src','*.f')) sources += glob(os.path.join(local_path,'src','*.c')) sources += glob(os.path.join(local_path,'generic_*.pyf')) sources += [os.path.join(local_path,f) for f in [\ 'flapack_user_routines.pyf','atlas_version.c']] config['ext_modules'].append(Extension(\ name='fake_linalg_ext_module', sources = sources)) return config atlas_info = get_info('atlas') #atlas_info = {} # uncomment if ATLAS is available but want to use # Fortran LAPACK/BLAS; useful for testing f_libs = [] atlas_version = None if atlas_info: # Try to determine ATLAS version cur_dir = os.getcwd() os.chdir(local_path) cmd = '%s %s build_ext --inplace --force'%\ (sys.executable, os.path.join(local_path,'setup_atlas_version.py')) print cmd s,o=run_command(cmd) if not s: cmd = sys.executable+' -c "import atlas_version"' print cmd s,o=run_command(cmd) if not s: m = re.match(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o) if m: atlas_version = m.group('version') print 'ATLAS version',atlas_version if atlas_version is None: if re.search(r'undefined symbol: ATL_buildinfo',o,re.M): atlas_version = '3.2.1' # or pre 3.3.6 print 'ATLAS version',atlas_version,'(or pre 3.3.6)' else: print o else: print o if atlas_version is None: print 'Failed to determine ATLAS version' os.chdir(cur_dir) if ('ATLAS_WITHOUT_LAPACK',None) in atlas_info.get('define_macros',[]): lapack_info = get_info('lapack') if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) dict_append(lapack_info,**atlas_info) atlas_info = lapack_info blas_info,lapack_info = {},{} if not atlas_info: warnings.warn(AtlasNotFoundError.__doc__) blas_info = get_info('blas') #blas_info = {} # test building BLAS from sources. if not blas_info: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__ dict_append(blas_info,libraries=['blas_src']) f_libs.append(fortran_library_item(\ 'blas_src',blas_src_info['sources'], )) lapack_info = get_info('lapack') #lapack_info = {} # test building LAPACK from sources. 
if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) mod_sources = {} if atlas_info or blas_info: mod_sources['fblas'] = ['generic_fblas.pyf', 'generic_fblas1.pyf', 'generic_fblas2.pyf', 'generic_fblas3.pyf', os.path.join('src','fblaswrap.f'), ] if atlas_info or lapack_info: mod_sources['flapack'] = ['generic_flapack.pyf'] if atlas_info: mod_sources['cblas'] = ['generic_cblas.pyf', 'generic_cblas1.pyf'] mod_sources['clapack'] = ['generic_clapack.pyf'] else: dict_append(atlas_info,**lapack_info) dict_append(atlas_info,**blas_info) skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]} if skip_single_routines: skip_names['clapack'].extend(\ 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ ' slauum clauum strtri ctrtri'.split()) skip_names['flapack'].extend(skip_names['clapack']) skip_names['flapack'].extend(\ 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' ' sggev cggev'.split()) skip_names['cblas'].extend('saxpy caxpy'.split()) skip_names['fblas'].extend(skip_names['cblas']) skip_names['fblas'].extend(\ 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\ ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\ ' sgemm cgemm'.split()) if using_lapack_blas: skip_names['fblas'].extend(\ 'drotmg srotmg drotm srotm'.split()) if atlas_version=='3.2.1': skip_names['clapack'].extend(\ 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) for mod_name,sources in mod_sources.items(): sources = [os.path.join(local_path,s) for s in sources] pyf_sources = filter(lambda s:s[-4:]=='.pyf',sources) mod_file = os.path.join(local_path,mod_name+'.pyf') if dep_util.newer_group(pyf_sources,mod_file): generate_interface(mod_name,sources[0],mod_file, skip_names.get(mod_name,[])) sources = filter(lambda s:s[-4:]!='.pyf',sources) ext_args = {'name':dot_join(parent_package,package,mod_name), 'sources':[mod_file]+sources} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) flinalg = [] for f in ['det.f','lu.f', #'wrappers.c','inv.f', ]: flinalg.append(os.path.join(local_path,'src',f)) ext_args = {'name':dot_join(parent_package,package,'_flinalg'), 'sources':flinalg} dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) ext_args = {'name':dot_join(parent_package,package,'calc_lwork'), 'sources':[os.path.join(local_path,'src','calc_lwork.f')], } dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) config['fortran_libraries'].extend(f_libs) return config
sys.args.insert(0,'scipy_core')
sys.argv.insert(0,'scipy_core')
def get_package_config(name): sys.path.insert(0,os.path.join('scipy_core',name)) try: mod = __import__('setup_'+name) config = mod.configuration() finally: del sys.path[0] return config
from scipy.special import binomcdf, binomcdfc, binomcdfinv, betacdf, betaq, fcdf, \ fcdfc, fp, gammacdf, gammacdfc, gammaq, negbinomcdf, negbinomcdfinv, \ possioncdf, poissioncdfc, possioncdfinv, studentcdf, studentq, \ chi2cdf, chi2cdfc, chi2p, normalcdf, normalq, smirnovcdfc, smirnovp, \ kolmogorovcdfc, kolmogorovp
def friedmanchisquare(*args): """
fcdfc, fp, gammacdf, gammacdfc, gammaq, negbinomcdf, negbinomcdfinv, \ possioncdf, poissioncdfc, possioncdfinv, studentcdf, studentq, \ chi2cdf, chi2cdfc, chi2p, normalcdf, normalq, smirnovcdfc, smirnovp, \ kolmogorovcdfc, kolmogorovp
fcdfc, fp, gammacdf, gammacdfc, gammaq, negbinomcdf, negbinomcdfinv from scipy.special import poissoncdf, poissoncdfc, poissoncdfinv, studentcdf, \ studentq, chi2cdf, chi2cdfc, chi2p, normalcdf, normalq, smirnovcdfc from scipy.special import smirnovp, kolmogorovcdfc, kolmogorovp
def friedmanchisquare(*args): """
lin = 1. + b * X
lin = 1 + b*X
def information(self, b, ties='breslow'):
maxfun=None, full_output=0, disp=1, retall=0, callback=None):
maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None):
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration See also: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer fmin_l_bfgs_b, fmin_tnc, fmin_cobyla -- constrained multivariate optimizers anneal, brute -- global optimizers fminbound, brent, golden, bracket -- local scalar minimizers fsolve -- n-dimenstional root-finding brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder """ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,dtype=float) fval = squeeze(func(x)) x1 = x.copy() iter = 0; ilist = range(N) while True: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if callback is not None: callback(x) if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if fcalls[0] >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % fcalls[0] x = squeeze(x) if full_output: retlist = x, fval, direc, iter, fcalls[0], warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
direc = eye(N,dtype=float)
if direc is None: direc = eye(N, dtype=float) else: direc = asarray(direc, dtype=float)
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration See also: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer fmin_l_bfgs_b, fmin_tnc, fmin_cobyla -- constrained multivariate optimizers anneal, brute -- global optimizers fminbound, brent, golden, bracket -- local scalar minimizers fsolve -- n-dimenstional root-finding brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder """ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,dtype=float) fval = squeeze(func(x)) x1 = x.copy() iter = 0; ilist = range(N) while True: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if callback is not None: callback(x) if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if fcalls[0] >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % fcalls[0] x = squeeze(x) if full_output: retlist = x, fval, direc, iter, fcalls[0], warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
print "Gegenbauer, a = ", a
def check_gegenbauer(self): a = 5*rand()-0.5 if any(a==0): a = -0.2 print "Gegenbauer, a = ", a Ca0 = gegenbauer(0,a) Ca1 = gegenbauer(1,a) Ca2 = gegenbauer(2,a) Ca3 = gegenbauer(3,a) Ca4 = gegenbauer(4,a) Ca5 = gegenbauer(5,a)
jc = jv(0,.1) assert_almost_equal(jc,0.99750156206604002,8)
values = [[0, 0.1, 0.99750156206604002], [2./3, 1e-8, 0.3239028506761532e-5], [2./3, 1e-10, 0.1503423854873779e-6], [3.1, 1e-10, 0.1711956265409013e-32], [2./3, 4.0, -0.2325440850267039], ] for i, (v, x, y) in enumerate(values): yc = jv(v, x) assert_almost_equal(yc, y, 8, err_msg='test
def check_jv(self): jc = jv(0,.1) assert_almost_equal(jc,0.99750156206604002,8)
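The widened test replaces a single spot check of jv with a table of (order, argument, expected value) triples. The original single check, runnable on its own (expected value taken from the row above):

from scipy.special import jv
from numpy.testing import assert_almost_equal

assert_almost_equal(jv(0, 0.1), 0.99750156206604002, 8)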
maxnfeval : max. number of function evaluation
maxfun : max. number of function evaluation
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=False, bounds=None, epsilon=1e-8, scale=None, messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, stepmx=0, accuracy=0, fmin=0, ftol=0, rescale=-1): """Minimize a function with variables subject to bounds, using gradient information. returns (rc, nfeval, x). Inputs: func -- function to minimize. Called as func(x, *args) x0 -- initial guess to minimum fprime -- gradient of func. If None, then func returns the function value and the gradient ( f, g = func(x, *args) ). Called as fprime(x, *args) args -- arguments to pass to function approx_grad -- if true, approximate the gradient numerically bounds -- a list of (min, max) pairs for each element in x, defining the bounds on that parameter. Use None for one of min or max when there is no bound in that direction scale : scaling factors to apply to each variable (a list of floats) if None, the factors are up-low for interval bounded variables and 1+|x] fo the others. defaults to None messages : bit mask used to select messages display during minimization values defined in the optimize.tnc.MSGS dict. defaults to optimize.tnc.MGS_ALL maxCGit : max. number of hessian*vector evaluation per main iteration if maxCGit == 0, the direction chosen is -gradient if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)) defaults to -1 maxnfeval : max. number of function evaluation if None, maxnfeval is set to max(1000, 100*len(x0)) defaults to None eta : severity of the line search. if < 0 or > 1, set to 0.25 defaults to -1 stepmx : maximum step for the line search. may be increased during call if too small, will be set to 10.0 defaults to 0 accuracy : relative precision for finite difference calculations if <= machine_precision, set to sqrt(machine_precision) defaults to 0 fmin : minimum function value estimate defaults to 0 ftol : precision goal for the value of f in the stoping criterion relative to the machine precision and the value of f. if ftol < 0.0, ftol is set to 0.0 defaults to 0 rescale : Scaling factor (in log10) used to trigger rescaling if 0, rescale at each iteration if a large value, never rescale if < 0, rescale is set to 1.3 Outputs: x : the solution (a list of floats) nfeval : the number of function evaluations rc : return code (corresponding message in optimize.tnc.RCSTRINGS) """ n = len(x0) if bounds is None: bounds = [(None,None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') if approx_grad: def func_and_grad(x): x = asarray(x) f = func(x, *args) g = approx_fprime(x, func, epsilon, *args) return f, list(g) elif fprime is None: def func_and_grad(x): x = asarray(x) f, g = func(x, *args) return f, list(g) else: def func_and_grad(x): x = asarray(x) f = func(x, *args) g = fprime(x, *args) return f, list(g) low = [0]*n up = [0]*n for i in range(n): l,u = bounds[i] if l is None: low[i] = -HUGE_VAL else: low[i] = l if u is None: up[i] = HUGE_VAL else: up[i] = l if scale == None: scale = [] if maxfun == None: maxfun = max(1000, 100*len(x0)) return moduleTNC.minimize(func_and_grad, x0, low, up, scale, messages, maxCGit, maxfun, eta, stepmx, accuracy, fmin, ftol, rescale)
up[i] = l
up[i] = u
def func_and_grad(x): x = asarray(x) f = func(x, *args) g = fprime(x, *args) return f, list(g)
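The one-character fix above matters: when unpacking each (min, max) bound, the upper bound must come from u, not l, otherwise every finite upper bound silently collapses onto the lower one. A standalone illustration of the corrected unpacking (HUGE_VAL stands in for the module constant):

HUGE_VAL = float('inf')
bounds = [(None, None), (-10.0, 10.0), (1.0, None)]

low, up = [], []
for l, u in bounds:
    low.append(-HUGE_VAL if l is None else l)
    up.append(HUGE_VAL if u is None else u)   # the removed line used `l` here

print(low)   # [-inf, -10.0, 1.0]
print(up)    # [inf, 10.0, inf]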
rc, nf, x = minimize(function, [-7, 3], bounds=([-10, 10], [1, 10]))
rc, nf, x = fmin_tnc(function, [-7, 3], bounds=([-10, 10], [1, 10]))
def function(x): f = pow(x[0],2.0)+pow(abs(x[1]),3.0) g = [0,0] g[0] = 2.0*x[0] g[1] = 3.0*pow(abs(x[1]),2.0) if x[1]<0: g[1] = -g[1] return f, g
rc, nf, x = minimize(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200)
rc, nf, x = fmin_tnc(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200)
def test(fg, x, bounds, xopt): print "** Test", fg.__name__ rc, nf, x = minimize(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200) print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc] print "x =", x print "exact value =", xopt enorm = 0.0 norm = 1.0 for y,yo in zip(x, xopt): enorm += (y-yo)*(y-yo) norm += yo*yo e = pow(enorm/norm, 0.5) print "Error =", e if e > 1e-8: raise "Test "+fg.__name__+" failed"
assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2))
assert isinstance(ij, ArrayType) and (rank(ij) == 2) \ and (shape(ij) == (2, len(s)))
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSC format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: #s = asarray(arg1) s = arg1 if s.dtype.char not in 'fdFD': # Use a double array as the source (but leave it alone) s = s*1.0 if (rank(s) == 2): M, N = s.shape dtype = s.dtype func = getattr(sparsetools, _transtabl[dtype.char]+'fulltocsc') ierr = irow = jcol = 0 nnz = (s != 0.0).sum() a = zeros((nnz,), self.dtype) rowa = zeros((nnz,), intc) ptra = zeros((N+1,), intc) while 1: a, rowa, ptra, irow, jcol, ierr = \ func(s, a, rowa, ptra, irow, jcol, ierr) if (ierr == 0): break nnz = nnz + ALLOCSIZE a = resize1d(a, nnz) rowa = resize1d(rowa, nnz) self.data = a self.rowind = rowa self.indptr = ptra self.shape = (M, N) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csc_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.rowind = s.rowind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.rowind = s.rowind self.indptr = s.indptr elif isinstance(s, csr_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.rowind, self.indptr = \ func(s.shape[1], s.data, s.colind, s.indptr) else: temp = s.tocsc() self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): self.dtype = getdtype(dtype, default=float) # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.data = zeros((nzmax,), self.dtype) self.rowind = zeros((nzmax,), intc) self.indptr = zeros((N+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError): try: # Try interpreting it as (data, rowind, indptr) (s, rowind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s) self.rowind = array(rowind) self.indptr = array(indptr) else: self.data = asarray(s) self.rowind = asarray(rowind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csc_matrix constructor" else: # (data, ij) format self.dtype = getdtype(dtype, s) temp = coo_matrix((s, ij), dims=dims, dtype=dtype).tocsc() self.shape = temp.shape self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr else: raise ValueError, "unrecognized form for csc_matrix constructor"
temp = coo_matrix((s, ij), dims=dims, dtype=dtype).tocsc()
ijnew = array(ij, copy=copy) temp = coo_matrix((s, ijnew), dims=dims, \ dtype=self.dtype).tocsc()
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSC format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: #s = asarray(arg1) s = arg1 if s.dtype.char not in 'fdFD': # Use a double array as the source (but leave it alone) s = s*1.0 if (rank(s) == 2): M, N = s.shape dtype = s.dtype func = getattr(sparsetools, _transtabl[dtype.char]+'fulltocsc') ierr = irow = jcol = 0 nnz = (s != 0.0).sum() a = zeros((nnz,), self.dtype) rowa = zeros((nnz,), intc) ptra = zeros((N+1,), intc) while 1: a, rowa, ptra, irow, jcol, ierr = \ func(s, a, rowa, ptra, irow, jcol, ierr) if (ierr == 0): break nnz = nnz + ALLOCSIZE a = resize1d(a, nnz) rowa = resize1d(rowa, nnz) self.data = a self.rowind = rowa self.indptr = ptra self.shape = (M, N) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csc_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.rowind = s.rowind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.rowind = s.rowind self.indptr = s.indptr elif isinstance(s, csr_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.rowind, self.indptr = \ func(s.shape[1], s.data, s.colind, s.indptr) else: temp = s.tocsc() self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): self.dtype = getdtype(dtype, default=float) # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.data = zeros((nzmax,), self.dtype) self.rowind = zeros((nzmax,), intc) self.indptr = zeros((N+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError): try: # Try interpreting it as (data, rowind, indptr) (s, rowind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s) self.rowind = array(rowind) self.indptr = array(indptr) else: self.data = asarray(s) self.rowind = asarray(rowind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csc_matrix constructor" else: # (data, ij) format self.dtype = getdtype(dtype, s) temp = coo_matrix((s, ij), dims=dims, dtype=dtype).tocsc() self.shape = temp.shape self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr else: raise ValueError, "unrecognized form for csc_matrix constructor"
a[ij[k, 0], ij[k, 1]] = data[k]
a[ij[0, k], ij[1, k]] = data[k]
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2))
assert isinstance(ij, ArrayType) and (rank(ij) == 2) \ and (shape(ij) == (2, len(s)))
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
except: raise ValueError, "unrecognized form for csr_matrix constructor"
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr()
self.dtype = getdtype(dtype, s) ijnew = array([ij[1], ij[0]], copy=copy) temp = coo_matrix((s, ijnew), dims=dims, \ dtype=self.dtype).tocsr()
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
self.dtype = temp.dtype
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
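# Illustrative sketch (not part of the recorded patches): the index swap in the
# (data, ij) branch above relies on the fact that exchanging the row and column
# index arrays of a triplet representation describes the transposed matrix.
# scipy.sparse.coo_matrix is used here only to demonstrate that identity.
import numpy as np
from scipy.sparse import coo_matrix
data = np.array([1.0, 2.0])
ij = np.array([[0, 1], [1, 2]])          # one (row, col) pair per entry
A  = coo_matrix((data, (ij[:, 0], ij[:, 1])), shape=(2, 3))
At = coo_matrix((data, (ij[:, 1], ij[:, 0])), shape=(3, 2))
assert (A.toarray().T == At.toarray()).all()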
A = coo_matrix(obj, ij, [dims])
A = coo_matrix((obj, ij), [dims])
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
ij[:][0] and ij[:][1]
ij[0][:] and ij[1][:]
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
1. obj[:]: the entries of the matrix, in any order 2. ij[:][0]: the row indices of the matrix entries 3. ij[:][1]: the column indices of the matrix entries
1. obj[:] the entries of the matrix, in any order 2. ij[0][:] the row indices of the matrix entries 3. ij[1][:] the column indices of the matrix entries
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
A[ij[k][0], ij[k][1]] = obj[k]
A[ij[0][k], ij[1][k]] = obj[k]
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
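# Sketch of the calling convention documented by the fixed lines above: ij is a
# pair of index arrays (rows, cols) so that A[ij[0][k], ij[1][k]] = obj[k].
# Modern scipy.sparse keeps the same convention; this is an illustration only,
# not part of the original patch.
import numpy as np
from scipy.sparse import coo_matrix
obj = np.array([4.0, 5.0, 7.0])
ij = (np.array([0, 1, 2]), np.array([2, 0, 1]))     # (row indices, column indices)
A = coo_matrix((obj, ij), shape=(3, 3))
assert A.toarray()[0, 2] == 4.0                     # A[ij[0][0], ij[1][0]] == obj[0]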
obj, ij_in = arg1
obj, ij = arg1
def __init__(self, arg1, dims=None, dtype=None): spmatrix.__init__(self) if isinstance(arg1, tuple): try: obj, ij_in = arg1 except: raise TypeError, "invalid input format" elif arg1 is None: # clumsy! We should make ALL arguments # keyword arguments instead! # Initialize an empty matrix. if not isinstance(dims, tuple) or not isinstance(dims[0], int): raise TypeError, "dimensions not understood" self.shape = dims self.dtype = getdtype(dtype, default=float) self.data = array([]) self.row = array([]) self.col = array([]) self._check() return self.dtype = getdtype(dtype, obj, default=float) try: # Assume the first calling convention # assert len(ij) == 2 if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 self.shape = (M, N) else: # Use 2 steps to ensure dims has length 2. M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=self.dtype) self._check() except Exception: raise TypeError, "invalid input format"
if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 self.shape = (M, N) else: M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=self.dtype) self._check() except Exception:
if len(ij) != 2: raise TypeError except TypeError:
def __init__(self, arg1, dims=None, dtype=None): spmatrix.__init__(self) if isinstance(arg1, tuple): try: obj, ij_in = arg1 except: raise TypeError, "invalid input format" elif arg1 is None: # clumsy! We should make ALL arguments # keyword arguments instead! # Initialize an empty matrix. if not isinstance(dims, tuple) or not isinstance(dims[0], int): raise TypeError, "dimensions not understood" self.shape = dims self.dtype = getdtype(dtype, default=float) self.data = array([]) self.row = array([]) self.col = array([]) self._check() return self.dtype = getdtype(dtype, obj, default=float) try: # Assume the first calling convention # assert len(ij) == 2 if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 self.shape = (M, N) else: # Use 2 steps to ensure dims has length 2. M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=self.dtype) self._check() except Exception: raise TypeError, "invalid input format"
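# Sketch of two behaviours of the constructor above: a dense (nnz, 2) ij array
# is split into (rows, cols), and when dims is None the shape is inferred as
# the largest index plus one.  Plain numpy, for illustration only.
import numpy as np
ij_in = np.array([[0, 2], [3, 0], [1, 4]])          # (nnz, 2) layout
ij = (ij_in[:, 0], ij_in[:, 1])                     # -> (rows, cols)
M, N = int(np.amax(ij[0])) + 1, int(np.amax(ij[1])) + 1
assert (M, N) == (4, 5)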
def complex(a, b, complex=__builtins__.complex): c = zeros(a.shape, dtype=complex)
def complex(a, b): c = zeros(a.shape, dtype=complex_)
def complex(a, b, complex=__builtins__.complex): c = zeros(a.shape, dtype=complex) c.real = a c.imag = b return c
tests.append(('OPERATIONS', optests))
def complex(a, b, complex=__builtins__.complex): c = zeros(a.shape, dtype=complex) c.real = a c.imag = b return c
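# Quick check of what the complex() helper above constructs: an array whose
# real part is a and imaginary part is b, i.e. a + 1j*b.  Sketch only.
import numpy as np
a = np.array([1.0, 2.0]); b = np.array([3.0, 4.0])
c = np.zeros(a.shape, dtype=complex)
c.real = a
c.imag = b
assert np.all(c == a + 1j * b)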
x = random.randint(1,2**31-1)
x = random.randint(1,2**31-2)
def seed(x=0,y=0): """seed(x, y), set the seed using the integers x, y; Set a random one from clock if y == 0 """ if type (x) != types.IntType or type (y) != types.IntType : raise ArgumentError, "seed requires integer arguments." if y == 0: import random y = int(rv.initial_seed()) x = random.randint(1,2**31-1) rand.set_seeds(x,y)
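# Note on the bound change above: random.randint(a, b) includes both endpoints,
# so the previous call could return 2**31 - 1 itself; lowering the upper bound
# keeps the generated seed strictly below that value.  Minimal check:
import random
assert random.randint(3, 3) == 3        # both endpoints are reachable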
self.isCSR = 1
self.isCSR = 0
def _getIndx( self, mtx ):
self.isCSR = 0
self.isCSR = 1
def _getIndx( self, mtx ):
assert rt == T, 'Expected %s, got %s type' % (T, rt)
assert N.dtype(rt) == N.dtype(T), \ 'Expected %s, got %s type' % (T, rt)
def test_smallest_int_sctype(self): # Smallest int sctype with testing recaster params = sctype_attributes() mmax = params[N.int32]['max'] mmin = params[N.int32]['min'] for kind in ('int', 'uint'): for T in N.sctypes[kind]: mx = params[T]['max'] mn = params[T]['min'] rt = self.recaster.smallest_int_sctype(mx, mn) if mx <= mmax and mn >= mmin: assert rt == N.int32, 'Expected int32 type' else: assert rt is None, 'Expected None, got %s for %s' % (T, rt) # Smallest int sctype with full recaster RF = Recaster() test_triples = [(N.uint8, 0, 255), (N.int8, -128, 0), (N.uint16, 0, params[N.uint16]['max']), (N.int16, params[N.int16]['min'], 0), (N.uint32, 0, params[N.uint32]['max']), (N.int32, params[N.int32]['min'], 0), (N.uint64, 0, params[N.uint64]['max']), (N.int64, params[N.int64]['min'], 0)] for T, mn, mx in test_triples: rt = RF.smallest_int_sctype(mx, mn) assert rt == T, 'Expected %s, got %s type' % (T, rt)
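# Sketch of why the assertion above compares through N.dtype(): two scalar type
# objects that describe the same precision may not be identical, but their
# dtypes compare equal.  Illustration only, with definitely-equivalent names:
import numpy as N
assert N.dtype(N.int32) == N.dtype('i4')
assert N.dtype(N.int32).itemsize == 4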
panel = TestPanel(self)
self.panel = TestPanel(self)
def __init__(self, parent):
def is_alive(obj): if obj() is None: return 0 else: return 1
def is_alive(obj): if obj() is None: return 0 else: return 1
time.sleep(0.25)
yield()
def check_wx_class(self): "Checking a wxFrame proxied class" for i in range(5): f = gui_thread.register(TestFrame) a = f(None) p = weakref.ref(a) a.Close(1) del a time.sleep(0.25) # sync threads # this checks for memory leaks self.assertEqual(is_alive(p), 0)
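# Sketch of the is_alive/weakref pattern used by the test above: a weak
# reference starts returning None once its target has been collected.
import gc
import weakref
class _Dummy(object):
    pass
obj = _Dummy()
ref = weakref.ref(obj)
del obj
gc.collect()                  # make collection deterministic for the check
assert ref() is None          # object gone -> is_alive(ref) would return 0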
class NoThreadTestFrame(wxFrame):
class TesterApp (wxApp): def OnInit (self): f = TesterFrame(None) return true class TesterFrame(wxFrame):
def test(): all_tests = test_suite() runner = unittest.TextTestRunner(verbosity=2) runner.run(all_tests)
wxFrame.__init__(self, parent, -1, "Hello Test")
wxFrame.__init__(self, parent, -1, "Tester") self.CreateStatusBar() sizer = wxBoxSizer(wxHORIZONTAL) ID = NewId() btn = wxButton(self, ID, "Start Test") EVT_BUTTON(self, ID, self.OnStart) msg = "Click to start running tests. "\ "Tester Output will be shown on the shell." btn.SetToolTip(wxToolTip(msg)) sizer.Add(btn, 1, wxEXPAND) ID = NewId() btn = wxButton(self, ID, "Close") EVT_BUTTON(self, ID, self.OnClose) btn.SetToolTip(wxToolTip("Click to close the tester.")) sizer.Add(btn, 1, wxEXPAND) sizer.Fit(self) self.SetAutoLayout(true) self.SetSizer(sizer) self.Show(1) def OnStart(self, evt): self.SetStatusText("Running Tests")
def __init__(self, parent): wxFrame.__init__(self, parent, -1, "Hello Test") test() self.Close(1)
app = wxPySimpleApp() frame = NoThreadTestFrame(None)
app = TesterApp()
def __init__(self, parent): wxFrame.__init__(self, parent, -1, "Hello Test") test() self.Close(1)
'libraries' : ['specfun']
'libraries' : ['specfun'], 'depends':specfun
def configuration(parent_package='',parent_path=None): from scipy_distutils.core import Extension from scipy_distutils.misc_util import get_path,\ default_config_dict, dot_join from scipy_distutils.system_info import dict_append, get_info package = 'special' config = default_config_dict(package,parent_package) local_path = get_path(__name__,parent_path) numpy_info = get_info('numpy',notfound_action=2) define_macros = [] if sys.byteorder == "little": define_macros.append(('USE_MCONF_LE',None)) else: define_macros.append(('USE_MCONF_BE',None)) if sys.platform=='win32': define_macros.append(('NOINFINITIES',None)) define_macros.append(('NONANS',None)) c_misc = glob(os.path.join(local_path,'c_misc','*.c')) cephes = glob(os.path.join(local_path,'cephes','*.c')) if sys.platform=='win32': cephes = [f for f in cephes if os.path.basename(f)!='fabs.c'] mach = glob(os.path.join(local_path,'mach','*.f')) amos = glob(os.path.join(local_path,'amos','*.f')) toms = glob(os.path.join(local_path,'toms','*.f')) cdf = glob(os.path.join(local_path,'cdflib','*.f')) specfun = glob(os.path.join(local_path, 'specfun','*.f')) # C libraries config['libraries'].append(('c_misc',{'sources':c_misc})) config['libraries'].append(('cephes',{'sources':cephes, 'macros':define_macros})) # Fortran libraries config['libraries'].append(('mach',{'sources':mach})) config['libraries'].append(('amos',{'sources':amos})) config['libraries'].append(('toms',{'sources':toms})) config['libraries'].append(('cdf',{'sources':cdf})) config['libraries'].append(('specfun',{'sources':specfun})) # Extension sources = ['cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', 'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c'] sources = [os.path.join(local_path,x) for x in sources] ext_args = {} dict_append(ext_args, name=dot_join(parent_package,package,'cephes'), sources = sources, libraries = ['amos','toms','c_misc','cephes','mach', 'cdf', 'specfun'], define_macros = define_macros ) dict_append(ext_args,**numpy_info) ext = Extension(**ext_args) config['ext_modules'].append(ext) ext_args = {'name':dot_join(parent_package,package,'specfun'), 'sources':[os.path.join(local_path,'specfun.pyf')], 'f2py_options':['--no-wrap-functions'], #'define_macros':[('F2PY_REPORT_ATEXIT_DISABLE',None)], 'libraries' : ['specfun'] } dict_append(ext_args,**numpy_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) return config
self.lower[self.lower == numpy.NINF] = -_double_max
self.lower = where(self.lower == numpy.NINF, -_double_max, self.lower)
def init(self, **options): self.__dict__.update(options) self.lower = asarray(self.lower) self.lower[self.lower == numpy.NINF] = -_double_max self.upper = asarray(self.upper) self.upper[self.upper == numpy.PINF] = _double_max self.k = 0 self.accepted = 0 self.feval = 0 self.tests = 0
self.upper[self.upper == numpy.PINF] = _double_max
self.upper = where(self.upper == numpy.PINF, _double_max, self.upper)
def init(self, **options): self.__dict__.update(options) self.lower = asarray(self.lower) self.lower[self.lower == numpy.NINF] = -_double_max self.upper = asarray(self.upper) self.upper[self.upper == numpy.PINF] = _double_max self.k = 0 self.accepted = 0 self.feval = 0 self.tests = 0
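# Sketch of the numpy.where() form introduced above: infinite bounds are
# clamped to a large finite value element-wise.  _double_max is defined here
# from finfo purely for the illustration; the original defines it elsewhere.
import numpy
_double_max = numpy.finfo(float).max
lower = numpy.asarray([-numpy.inf, -1.0, 0.0])
lower = numpy.where(lower == -numpy.inf, -_double_max, lower)
assert lower[0] == -_double_max and lower[1] == -1.0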
iter = 0
iters = 0
def anneal(func, x0, args=(), schedule='fast', full_output=0, T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400, boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0, lower=-100, upper=100, dwell=50): """Minimize a function using simulated annealing. Schedule is a schedule class implementing the annealing schedule. Available ones are 'fast', 'cauchy', 'boltzmann' Inputs: func -- Function to be optimized x0 -- Parameters to be optimized over args -- Extra parameters to function schedule -- Annealing schedule to use (a class) full_output -- Return optional outputs T0 -- Initial Temperature (estimated as 1.2 times the largest cost-function deviation over random points in the range) Tf -- Final goal temperature maxeval -- Maximum function evaluations maxaccept -- Maximum changes to accept maxiter -- Maximum cooling iterations learn_rate -- scale constant for adjusting guesses boltzmann -- Boltzmann constant in acceptance test (increase for less stringent test at each temperature). feps -- Stopping relative error tolerance for the function value in last four coolings. quench, m, n -- Parameters to alter fast_sa schedule lower, upper -- lower and upper bounds on x0 (scalar or array). dwell -- The number of times to search the space at each temperature. Outputs: (xmin, {Jmin, T, feval, iters, accept,} retval) xmin -- Point giving smallest value found retval -- Flag indicating stopping condition: 0 : Cooled to global optimum 1 : Cooled to final temperature 2 : Maximum function evaluations 3 : Maximum cooling iterations reached 4 : Maximum accepted query locations reached Jmin -- Minimum value of function found T -- final temperature feval -- Number of function evaluations iters -- Number of cooling iterations accept -- Number of tests accepted. """ x0 = asarray(x0) lower = asarray(lower) upper = asarray(upper) schedule = eval(schedule+'_sa()') # initialize the schedule schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0, learn_rate=learn_rate, lower=lower, upper=upper, m=m, n=n, quench=quench, dwell=dwell) current_state, last_state, best_state = _state(), _state(), _state() if T0 is None: x0 = schedule.getstart_temp(best_state) else: best_state.x = None best_state.cost = 300e8 last_state.x = asarray(x0).copy() fval = func(x0,*args) schedule.feval += 1 last_state.cost = fval if last_state.cost < best_state.cost: best_state.cost = fval best_state.x = asarray(x0).copy() schedule.T = schedule.T0 fqueue = [100, 300, 500, 700] iter = 0 while 1: for n in range(dwell): current_state.x = schedule.update_guess(last_state.x) current_state.cost = func(current_state.x,*args) schedule.feval += 1 dE = current_state.cost - last_state.cost if schedule.accept_test(dE): last_state.x = current_state.x.copy() last_state.cost = current_state.cost if last_state.cost < best_state.cost: best_state.x = last_state.x.copy() best_state.cost = last_state.cost schedule.update_temp() iter += 1 # Stopping conditions # 0) last saved values of f from each cooling step # are all very similar (effectively cooled) # 1) Tf is set and we are below it # 2) maxeval is set and we are past it # 3) maxiter is set and we are past it # 4) maxaccept is set and we are past it fqueue.append(squeeze(last_state.cost)) fqueue.pop(0) af = asarray(fqueue)*1.0 if all(abs((af-af[0])/af[0]) < feps): retval = 0 if abs(af[-1]-best_state.cost) > feps*10: retval = 5 print "Warning: Cooled to %f at %s but this is not" \ % (squeeze(last_state.cost), str(squeeze(last_state.x))) \ + " the smallest point found." 
break if (Tf is not None) and (schedule.T < Tf): retval = 1 break if (maxeval is not None) and (schedule.feval > maxeval): retval = 2 break if (iter > maxiter): print "Warning: Maximum number of iterations exceeded." retval = 3 break if (maxaccept is not None) and (schedule.accepted > maxaccept): retval = 4 break if full_output: return best_state.x, best_state.cost, schedule.T, \ schedule.feval, iter, schedule.accepted, retval else: return best_state.x, retval
iter += 1
iters += 1
def anneal(func, x0, args=(), schedule='fast', full_output=0, T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400, boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0, lower=-100, upper=100, dwell=50): """Minimize a function using simulated annealing. Schedule is a schedule class implementing the annealing schedule. Available ones are 'fast', 'cauchy', 'boltzmann' Inputs: func -- Function to be optimized x0 -- Parameters to be optimized over args -- Extra parameters to function schedule -- Annealing schedule to use (a class) full_output -- Return optional outputs T0 -- Initial Temperature (estimated as 1.2 times the largest cost-function deviation over random points in the range) Tf -- Final goal temperature maxeval -- Maximum function evaluations maxaccept -- Maximum changes to accept maxiter -- Maximum cooling iterations learn_rate -- scale constant for adjusting guesses boltzmann -- Boltzmann constant in acceptance test (increase for less stringent test at each temperature). feps -- Stopping relative error tolerance for the function value in last four coolings. quench, m, n -- Parameters to alter fast_sa schedule lower, upper -- lower and upper bounds on x0 (scalar or array). dwell -- The number of times to search the space at each temperature. Outputs: (xmin, {Jmin, T, feval, iters, accept,} retval) xmin -- Point giving smallest value found retval -- Flag indicating stopping condition: 0 : Cooled to global optimum 1 : Cooled to final temperature 2 : Maximum function evaluations 3 : Maximum cooling iterations reached 4 : Maximum accepted query locations reached Jmin -- Minimum value of function found T -- final temperature feval -- Number of function evaluations iters -- Number of cooling iterations accept -- Number of tests accepted. """ x0 = asarray(x0) lower = asarray(lower) upper = asarray(upper) schedule = eval(schedule+'_sa()') # initialize the schedule schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0, learn_rate=learn_rate, lower=lower, upper=upper, m=m, n=n, quench=quench, dwell=dwell) current_state, last_state, best_state = _state(), _state(), _state() if T0 is None: x0 = schedule.getstart_temp(best_state) else: best_state.x = None best_state.cost = 300e8 last_state.x = asarray(x0).copy() fval = func(x0,*args) schedule.feval += 1 last_state.cost = fval if last_state.cost < best_state.cost: best_state.cost = fval best_state.x = asarray(x0).copy() schedule.T = schedule.T0 fqueue = [100, 300, 500, 700] iter = 0 while 1: for n in range(dwell): current_state.x = schedule.update_guess(last_state.x) current_state.cost = func(current_state.x,*args) schedule.feval += 1 dE = current_state.cost - last_state.cost if schedule.accept_test(dE): last_state.x = current_state.x.copy() last_state.cost = current_state.cost if last_state.cost < best_state.cost: best_state.x = last_state.x.copy() best_state.cost = last_state.cost schedule.update_temp() iter += 1 # Stopping conditions # 0) last saved values of f from each cooling step # are all very similar (effectively cooled) # 1) Tf is set and we are below it # 2) maxeval is set and we are past it # 3) maxiter is set and we are past it # 4) maxaccept is set and we are past it fqueue.append(squeeze(last_state.cost)) fqueue.pop(0) af = asarray(fqueue)*1.0 if all(abs((af-af[0])/af[0]) < feps): retval = 0 if abs(af[-1]-best_state.cost) > feps*10: retval = 5 print "Warning: Cooled to %f at %s but this is not" \ % (squeeze(last_state.cost), str(squeeze(last_state.x))) \ + " the smallest point found." 
break if (Tf is not None) and (schedule.T < Tf): retval = 1 break if (maxeval is not None) and (schedule.feval > maxeval): retval = 2 break if (iter > maxiter): print "Warning: Maximum number of iterations exceeded." retval = 3 break if (maxaccept is not None) and (schedule.accepted > maxaccept): retval = 4 break if full_output: return best_state.x, best_state.cost, schedule.T, \ schedule.feval, iter, schedule.accepted, retval else: return best_state.x, retval
if (iter > maxiter):
if (iters > maxiter):
def anneal(func, x0, args=(), schedule='fast', full_output=0, T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400, boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0, lower=-100, upper=100, dwell=50): """Minimize a function using simulated annealing. Schedule is a schedule class implementing the annealing schedule. Available ones are 'fast', 'cauchy', 'boltzmann' Inputs: func -- Function to be optimized x0 -- Parameters to be optimized over args -- Extra parameters to function schedule -- Annealing schedule to use (a class) full_output -- Return optional outputs T0 -- Initial Temperature (estimated as 1.2 times the largest cost-function deviation over random points in the range) Tf -- Final goal temperature maxeval -- Maximum function evaluations maxaccept -- Maximum changes to accept maxiter -- Maximum cooling iterations learn_rate -- scale constant for adjusting guesses boltzmann -- Boltzmann constant in acceptance test (increase for less stringent test at each temperature). feps -- Stopping relative error tolerance for the function value in last four coolings. quench, m, n -- Parameters to alter fast_sa schedule lower, upper -- lower and upper bounds on x0 (scalar or array). dwell -- The number of times to search the space at each temperature. Outputs: (xmin, {Jmin, T, feval, iters, accept,} retval) xmin -- Point giving smallest value found retval -- Flag indicating stopping condition: 0 : Cooled to global optimum 1 : Cooled to final temperature 2 : Maximum function evaluations 3 : Maximum cooling iterations reached 4 : Maximum accepted query locations reached Jmin -- Minimum value of function found T -- final temperature feval -- Number of function evaluations iters -- Number of cooling iterations accept -- Number of tests accepted. """ x0 = asarray(x0) lower = asarray(lower) upper = asarray(upper) schedule = eval(schedule+'_sa()') # initialize the schedule schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0, learn_rate=learn_rate, lower=lower, upper=upper, m=m, n=n, quench=quench, dwell=dwell) current_state, last_state, best_state = _state(), _state(), _state() if T0 is None: x0 = schedule.getstart_temp(best_state) else: best_state.x = None best_state.cost = 300e8 last_state.x = asarray(x0).copy() fval = func(x0,*args) schedule.feval += 1 last_state.cost = fval if last_state.cost < best_state.cost: best_state.cost = fval best_state.x = asarray(x0).copy() schedule.T = schedule.T0 fqueue = [100, 300, 500, 700] iter = 0 while 1: for n in range(dwell): current_state.x = schedule.update_guess(last_state.x) current_state.cost = func(current_state.x,*args) schedule.feval += 1 dE = current_state.cost - last_state.cost if schedule.accept_test(dE): last_state.x = current_state.x.copy() last_state.cost = current_state.cost if last_state.cost < best_state.cost: best_state.x = last_state.x.copy() best_state.cost = last_state.cost schedule.update_temp() iter += 1 # Stopping conditions # 0) last saved values of f from each cooling step # are all very similar (effectively cooled) # 1) Tf is set and we are below it # 2) maxeval is set and we are past it # 3) maxiter is set and we are past it # 4) maxaccept is set and we are past it fqueue.append(squeeze(last_state.cost)) fqueue.pop(0) af = asarray(fqueue)*1.0 if all(abs((af-af[0])/af[0]) < feps): retval = 0 if abs(af[-1]-best_state.cost) > feps*10: retval = 5 print "Warning: Cooled to %f at %s but this is not" \ % (squeeze(last_state.cost), str(squeeze(last_state.x))) \ + " the smallest point found." 
break if (Tf is not None) and (schedule.T < Tf): retval = 1 break if (maxeval is not None) and (schedule.feval > maxeval): retval = 2 break if (iter > maxiter): print "Warning: Maximum number of iterations exceeded." retval = 3 break if (maxaccept is not None) and (schedule.accepted > maxaccept): retval = 4 break if full_output: return best_state.x, best_state.cost, schedule.T, \ schedule.feval, iter, schedule.accepted, retval else: return best_state.x, retval
schedule.feval, iter, schedule.accepted, retval
schedule.feval, iters, schedule.accepted, retval
def anneal(func, x0, args=(), schedule='fast', full_output=0, T0=None, Tf=1e-12, maxeval=None, maxaccept=None, maxiter=400, boltzmann=1.0, learn_rate=0.5, feps=1e-6, quench=1.0, m=1.0, n=1.0, lower=-100, upper=100, dwell=50): """Minimize a function using simulated annealing. Schedule is a schedule class implementing the annealing schedule. Available ones are 'fast', 'cauchy', 'boltzmann' Inputs: func -- Function to be optimized x0 -- Parameters to be optimized over args -- Extra parameters to function schedule -- Annealing schedule to use (a class) full_output -- Return optional outputs T0 -- Initial Temperature (estimated as 1.2 times the largest cost-function deviation over random points in the range) Tf -- Final goal temperature maxeval -- Maximum function evaluations maxaccept -- Maximum changes to accept maxiter -- Maximum cooling iterations learn_rate -- scale constant for adjusting guesses boltzmann -- Boltzmann constant in acceptance test (increase for less stringent test at each temperature). feps -- Stopping relative error tolerance for the function value in last four coolings. quench, m, n -- Parameters to alter fast_sa schedule lower, upper -- lower and upper bounds on x0 (scalar or array). dwell -- The number of times to search the space at each temperature. Outputs: (xmin, {Jmin, T, feval, iters, accept,} retval) xmin -- Point giving smallest value found retval -- Flag indicating stopping condition: 0 : Cooled to global optimum 1 : Cooled to final temperature 2 : Maximum function evaluations 3 : Maximum cooling iterations reached 4 : Maximum accepted query locations reached Jmin -- Minimum value of function found T -- final temperature feval -- Number of function evaluations iters -- Number of cooling iterations accept -- Number of tests accepted. """ x0 = asarray(x0) lower = asarray(lower) upper = asarray(upper) schedule = eval(schedule+'_sa()') # initialize the schedule schedule.init(dims=shape(x0),func=func,args=args,boltzmann=boltzmann,T0=T0, learn_rate=learn_rate, lower=lower, upper=upper, m=m, n=n, quench=quench, dwell=dwell) current_state, last_state, best_state = _state(), _state(), _state() if T0 is None: x0 = schedule.getstart_temp(best_state) else: best_state.x = None best_state.cost = 300e8 last_state.x = asarray(x0).copy() fval = func(x0,*args) schedule.feval += 1 last_state.cost = fval if last_state.cost < best_state.cost: best_state.cost = fval best_state.x = asarray(x0).copy() schedule.T = schedule.T0 fqueue = [100, 300, 500, 700] iter = 0 while 1: for n in range(dwell): current_state.x = schedule.update_guess(last_state.x) current_state.cost = func(current_state.x,*args) schedule.feval += 1 dE = current_state.cost - last_state.cost if schedule.accept_test(dE): last_state.x = current_state.x.copy() last_state.cost = current_state.cost if last_state.cost < best_state.cost: best_state.x = last_state.x.copy() best_state.cost = last_state.cost schedule.update_temp() iter += 1 # Stopping conditions # 0) last saved values of f from each cooling step # are all very similar (effectively cooled) # 1) Tf is set and we are below it # 2) maxeval is set and we are past it # 3) maxiter is set and we are past it # 4) maxaccept is set and we are past it fqueue.append(squeeze(last_state.cost)) fqueue.pop(0) af = asarray(fqueue)*1.0 if all(abs((af-af[0])/af[0]) < feps): retval = 0 if abs(af[-1]-best_state.cost) > feps*10: retval = 5 print "Warning: Cooled to %f at %s but this is not" \ % (squeeze(last_state.cost), str(squeeze(last_state.x))) \ + " the smallest point found." 
break if (Tf is not None) and (schedule.T < Tf): retval = 1 break if (maxeval is not None) and (schedule.feval > maxeval): retval = 2 break if (iter > maxiter): print "Warning: Maximum number of iterations exceeded." retval = 3 break if (maxaccept is not None) and (schedule.accepted > maxaccept): retval = 4 break if full_output: return best_state.x, best_state.cost, schedule.T, \ schedule.feval, iter, schedule.accepted, retval else: return best_state.x, retval
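# Sketch of the "effectively cooled" stopping test used in the loop above: the
# last four recorded costs must agree with the oldest one to within feps in
# relative terms.  Standalone arithmetic only, not a call into anneal().
import numpy as np
feps = 1e-6
af = np.asarray([1.2345, 1.2345 + 1e-9, 1.2345 - 1e-9, 1.2345]) * 1.0
assert np.all(np.abs((af - af[0]) / af[0]) < feps)    # -> retval = 0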
indx = numpy.argsort( perm ) return numpy.take( flag, indx[:len( ar1 )] )
ii = numpy.where( flag * aux2 ) aux = perm[ii+1] perm[ii+1] = perm[ii] perm[ii] = aux indx = numpy.argsort( perm )[:len( ar1 )] return numpy.take( flag, indx )
def setmember1d( ar1, ar2 ): """Return an array of shape of ar1 containing 1 where the elements of ar1 are in ar2 and 0 otherwise.""" ar = numpy.concatenate( (ar1, ar2 ) ) perm = numpy.argsort( ar ) aux = numpy.take( ar, perm ) flag = ediff1d( aux, 1 ) == 0 indx = numpy.argsort( perm ) return numpy.take( flag, indx[:len( ar1 )] )
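# Illustrative check of what setmember1d computes for arrays of distinct
# elements; numpy.isin is the modern equivalent and is used here only to show
# the expected result, not as part of the recorded patch.
import numpy
ar1 = numpy.array([1, 5, 7, 2])
ar2 = numpy.array([2, 5, 9])
mask = numpy.isin(ar1, ar2)
assert mask.tolist() == [False, True, False, True]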
if hasattr(object, '_ppimport_attr'):
if hasattr(object,'_ppimport_importer') or \ hasattr(object, '_ppimport_module'): object = object._ppimport_module elif hasattr(object, '_ppimport_attr'):
def info(object=None,maxwidth=76,output=sys.stdout,): """Get help information for a function, class, or module. Example: >>> from scipy import * >>> info(polyval) polyval(p, x) Evaluate the polymnomial p at x. Description: If p is of length N, this function returns the value: p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1] """ global _namedict, _dictlist if hasattr(object, '_ppimport_attr'): object = object._ppimport_attr elif hasattr(object,'_ppimport_importer') or \ hasattr(object, '_ppimport_module'): object = object._ppimport_module if object is None: info(info) elif isinstance(object, types.StringType): if _namedict is None: _namedict, _dictlist = makenamedict() numfound = 0 objlist = [] for namestr in _dictlist: try: obj = _namedict[namestr][object] if id(obj) in objlist: print >> output, "\n *** Repeat reference found in %s *** " % namestr else: objlist.append(id(obj)) print >> output, " *** Found in %s ***" % namestr info(obj) print >> output, "-"*maxwidth numfound += 1 except KeyError: pass if numfound == 0: print >> output, "Help for %s not found." % object else: print >> output, "\n *** Total of %d references found. ***" % numfound elif inspect.isfunction(object): name = object.func_name arguments = apply(inspect.formatargspec, inspect.getargspec(object)) if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" print >> output, inspect.getdoc(object) elif inspect.isclass(object): name = object.__name__ if hasattr(object, '__init__'): arguments = apply(inspect.formatargspec, inspect.getargspec(object.__init__.im_func)) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" else: arguments = "()" if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" doc1 = inspect.getdoc(object) if doc1 is None: if hasattr(object,'__init__'): print >> output, inspect.getdoc(object.__init__) else: print >> output, inspect.getdoc(object) elif type(object) is types.InstanceType: ## check for __call__ method print >> output, "Instance of class: ", object.__class__.__name__ print >> output if hasattr(object, '__call__'): arguments = apply(inspect.formatargspec, inspect.getargspec(object.__call__.im_func)) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" name = "<name>" if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" doc = inspect.getdoc(object.__call__) if doc is not None: print >> output, inspect.getdoc(object.__call__) print >> output, inspect.getdoc(object) else: print >> output, inspect.getdoc(object) elif inspect.ismethod(object): name = object.__name__ arguments = apply(inspect.formatargspec, inspect.getargspec(object.im_func)) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" print >> output, inspect.getdoc(object) elif hasattr(object, '__doc__'): print >> output, inspect.getdoc(object)
elif hasattr(object,'_ppimport_importer') or \ hasattr(object, '_ppimport_module'): object = object._ppimport_module
def info(object=None,maxwidth=76,output=sys.stdout,): """Get help information for a function, class, or module. Example: >>> from scipy import * >>> info(polyval) polyval(p, x) Evaluate the polymnomial p at x. Description: If p is of length N, this function returns the value: p[0]*(x**N-1) + p[1]*(x**N-2) + ... + p[N-2]*x + p[N-1] """ global _namedict, _dictlist if hasattr(object, '_ppimport_attr'): object = object._ppimport_attr elif hasattr(object,'_ppimport_importer') or \ hasattr(object, '_ppimport_module'): object = object._ppimport_module if object is None: info(info) elif isinstance(object, types.StringType): if _namedict is None: _namedict, _dictlist = makenamedict() numfound = 0 objlist = [] for namestr in _dictlist: try: obj = _namedict[namestr][object] if id(obj) in objlist: print >> output, "\n *** Repeat reference found in %s *** " % namestr else: objlist.append(id(obj)) print >> output, " *** Found in %s ***" % namestr info(obj) print >> output, "-"*maxwidth numfound += 1 except KeyError: pass if numfound == 0: print >> output, "Help for %s not found." % object else: print >> output, "\n *** Total of %d references found. ***" % numfound elif inspect.isfunction(object): name = object.func_name arguments = apply(inspect.formatargspec, inspect.getargspec(object)) if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" print >> output, inspect.getdoc(object) elif inspect.isclass(object): name = object.__name__ if hasattr(object, '__init__'): arguments = apply(inspect.formatargspec, inspect.getargspec(object.__init__.im_func)) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" else: arguments = "()" if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" doc1 = inspect.getdoc(object) if doc1 is None: if hasattr(object,'__init__'): print >> output, inspect.getdoc(object.__init__) else: print >> output, inspect.getdoc(object) elif type(object) is types.InstanceType: ## check for __call__ method print >> output, "Instance of class: ", object.__class__.__name__ print >> output if hasattr(object, '__call__'): arguments = apply(inspect.formatargspec, inspect.getargspec(object.__call__.im_func)) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" name = "<name>" if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" doc = inspect.getdoc(object.__call__) if doc is not None: print >> output, inspect.getdoc(object.__call__) print >> output, inspect.getdoc(object) else: print >> output, inspect.getdoc(object) elif inspect.ismethod(object): name = object.__name__ arguments = apply(inspect.formatargspec, inspect.getargspec(object.im_func)) arglist = arguments.split(', ') if len(arglist) > 1: arglist[1] = "("+arglist[1] arguments = ", ".join(arglist[1:]) else: arguments = "()" if len(name+arguments) > maxwidth: argstr = split_line(name, arguments, maxwidth) else: argstr = name + arguments print >> output, " " + argstr + "\n" print >> output, inspect.getdoc(object) elif hasattr(object, '__doc__'): print >> output, inspect.getdoc(object)
if hasattr(a,'_ppimport_module') or \ hasattr(a,'_ppimport_importer'):
if hasattr(a,'_ppimport_importer') or \ hasattr(a,'_ppimport_module'):
def __call__ (self, *args, **kwds): new_args = [] for a in args: if hasattr(a,'_ppimport_module') or \
def nnlf(self, *args):
def nnlf(self, theta, x):
def nnlf(self, *args): # - sum (log pdf(x, theta)) # where theta are the parameters (including loc and scale) # try: x = args[-1] loc = args[-2] scale = args[-3] args = args[:-3] except IndexError: raise ValueError, "Not enough input arguments." if not self._argcheck(*args) or scale <= 0: return inf x = arr((x-loc) / scale) cond0 = (x <= self.a) | (x >= self.b) if (any(cond0)): return inf else: N = len(x) return self._nnlf(self, x, *args) + N*log(scale)
x = args[-1] loc = args[-2] scale = args[-3] args = args[:-3]
loc = theta[-2] scale = theta[-1] args = tuple(theta[:-2])
def nnlf(self, *args): # - sum (log pdf(x, theta)) # where theta are the parameters (including loc and scale) # try: x = args[-1] loc = args[-2] scale = args[-3] args = args[:-3] except IndexError: raise ValueError, "Not enough input arguments." if not self._argcheck(*args) or scale <= 0: return inf x = arr((x-loc) / scale) cond0 = (x <= self.a) | (x >= self.b) if (any(cond0)): return inf else: N = len(x) return self._nnlf(self, x, *args) + N*log(scale)
return self._nnlf(self, x, *args) + N*log(scale)
return self._nnlf(x, *args) + N*log(scale)
def nnlf(self, *args): # - sum (log pdf(x, theta)) # where theta are the parameters (including loc and scale) # try: x = args[-1] loc = args[-2] scale = args[-3] args = args[:-3] except IndexError: raise ValueError, "Not enough input arguments." if not self._argcheck(*args) or scale <= 0: return inf x = arr((x-loc) / scale) cond0 = (x <= self.a) | (x >= self.b) if (any(cond0)): return inf else: N = len(x) return self._nnlf(self, x, *args) + N*log(scale)
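# Sketch of the nnlf(theta, x) convention introduced above, where theta ends
# with (loc, scale): the value is -sum(log pdf((x-loc)/scale)) + N*log(scale).
# scipy.stats.norm is used here only as an example distribution.
import numpy as np
from scipy import stats
x = np.array([0.1, -0.3, 0.7])
loc, scale = 0.0, 2.0
manual = -np.sum(np.log(stats.norm.pdf((x - loc) / scale))) + len(x) * np.log(scale)
assert np.allclose(manual, stats.norm.nnlf((loc, scale), x))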
nzmax = 0
try: nzmax = self.nnz except AttributeError: nzmax = 0
def getnzmax(self): try: nzmax = self.nzmax except AttributeError: nzmax = 0 return nzmax
self.vecfunc = sgf(self._single_call)
self.vecfunc = sgf(self._single_call,otypes='d')
def __init__(self, dist, xa=-10.0, xb=10.0, xtol=1e-14): self.dist = dist self.cdf = eval('%scdf'%dist) self.xa = xa self.xb = xb self.xtol = xtol self.vecfunc = sgf(self._single_call)
self.vecfunc = sgf(self._ppf_single_call) self.vecentropy = sgf(self._entropy)
self.vecfunc = sgf(self._ppf_single_call,otypes='d') self.vecentropy = sgf(self._entropy,otypes='d') self.veccdf = sgf(self._cdf_single_call,otypes='d')
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0, xtol=1e-14, badvalue=None, name=None, longname=None, shapes=None, extradoc=None): if badvalue is None: badvalue = nan self.badvalue = badvalue self.name = name self.a = a self.b = b if a is None: self.a = -scipy.inf if b is None: self.b = scipy.inf self.xa = xa self.xb = xb self.xtol = xtol self._size = 1 self.m = 0.0 self.moment_type = momtype self.vecfunc = sgf(self._ppf_single_call) self.vecentropy = sgf(self._entropy) self.expandarr = 1 if momtype == 0: self.generic_moment = sgf(self._mom0_sc) else: self.generic_moment = sgf(self._mom1_sc) cdf_signature = inspect.getargspec(self._cdf.im_func) numargs1 = len(cdf_signature[0]) - 2 pdf_signature = inspect.getargspec(self._pdf.im_func) numargs2 = len(pdf_signature[0]) - 2 self.numargs = max(numargs1, numargs2)
self.generic_moment = sgf(self._mom0_sc)
self.generic_moment = sgf(self._mom0_sc,otypes='d')
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0, xtol=1e-14, badvalue=None, name=None, longname=None, shapes=None, extradoc=None): if badvalue is None: badvalue = nan self.badvalue = badvalue self.name = name self.a = a self.b = b if a is None: self.a = -scipy.inf if b is None: self.b = scipy.inf self.xa = xa self.xb = xb self.xtol = xtol self._size = 1 self.m = 0.0 self.moment_type = momtype self.vecfunc = sgf(self._ppf_single_call) self.vecentropy = sgf(self._entropy) self.expandarr = 1 if momtype == 0: self.generic_moment = sgf(self._mom0_sc) else: self.generic_moment = sgf(self._mom1_sc) cdf_signature = inspect.getargspec(self._cdf.im_func) numargs1 = len(cdf_signature[0]) - 2 pdf_signature = inspect.getargspec(self._pdf.im_func) numargs2 = len(pdf_signature[0]) - 2 self.numargs = max(numargs1, numargs2)
self.generic_moment = sgf(self._mom1_sc)
self.generic_moment = sgf(self._mom1_sc,otypes='d')
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0, xtol=1e-14, badvalue=None, name=None, longname=None, shapes=None, extradoc=None): if badvalue is None: badvalue = nan self.badvalue = badvalue self.name = name self.a = a self.b = b if a is None: self.a = -scipy.inf if b is None: self.b = scipy.inf self.xa = xa self.xb = xb self.xtol = xtol self._size = 1 self.m = 0.0 self.moment_type = momtype self.vecfunc = sgf(self._ppf_single_call) self.vecentropy = sgf(self._entropy) self.expandarr = 1 if momtype == 0: self.generic_moment = sgf(self._mom0_sc) else: self.generic_moment = sgf(self._mom1_sc) cdf_signature = inspect.getargspec(self._cdf.im_func) numargs1 = len(cdf_signature[0]) - 2 pdf_signature = inspect.getargspec(self._pdf.im_func) numargs2 = len(pdf_signature[0]) - 2 self.numargs = max(numargs1, numargs2)
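# Sketch of why otypes='d' is passed above (assuming sgf is numpy.vectorize or
# a compatible wrapper): without otypes, vectorize infers the output dtype from
# the first call, which can produce an integer array; 'd' forces float64.
import numpy as np
f = np.vectorize(lambda t: t // 1, otypes='d')
assert f(np.array([1, 2, 3])).dtype == np.float64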